author     Karsten Graul <kgraul@linux.ibm.com>    2020-07-08 17:05:12 +0200
committer  David S. Miller <davem@davemloft.net>   2020-07-08 21:35:15 +0200
commit     b7eede757883a9892dcb7bf0280f4890fc74bcf6 (patch)
tree       1a7fc7218b0640580dd0432418807571ab2da52f /net/smc
parent     net/smc: separate LLC wait queues for flow and messages (diff)
net/smc: fix work request handling
Wait for pending sends only when smc_switch_conns() found a link to move
the connections to. Do not wait during link freeing, because this can lead
to permanent hang situations. And refuse to provide a new tx slot on an
unusable link.

Fixes: c6f02ebeea3a ("net/smc: switch connections to alternate link")
Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
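For illustration, here is a minimal, userspace-compilable C sketch of the two
guards this patch adds; the hunks below show the real code in context. The
helper names smc_switch_conns(), smc_wr_tx_wait_no_pending_sends() and
smc_link_usable() match net/smc, but their bodies, the struct layouts and the
wrapper functions delete_link() and get_free_slot_index() are simplified
stand-ins, not the kernel implementations.

#include <errno.h>
#include <stdbool.h>

struct smc_link       { int state; unsigned int wr_tx_cnt; };
struct smc_link_group { int type; };

/* Stubs standing in for the real net/smc helpers. */
static struct smc_link *smc_switch_conns(struct smc_link_group *lgr,
                                         struct smc_link *from, bool dev_err)
{
        (void)lgr; (void)from; (void)dev_err;
        return NULL;            /* pretend no alternate link was found */
}

static int smc_wr_tx_wait_no_pending_sends(struct smc_link *lnk)
{
        (void)lnk;
        return 0;
}

static bool smc_link_usable(struct smc_link *lnk)
{
        (void)lnk;
        return false;           /* pretend the link is no longer usable */
}

/* Before the fix the wait ran unconditionally; if smc_switch_conns()
 * found no link to move the connections to, nothing could ever complete
 * the pending sends and the caller hung. After the fix the wait only
 * runs when connections were actually switched:
 */
void delete_link(struct smc_link_group *lgr, struct smc_link *lnk_del)
{
        if (smc_switch_conns(lgr, lnk_del, false))
                smc_wr_tx_wait_no_pending_sends(lnk_del);
        /* ... then clear the link, as smcr_link_clear() does ... */
}

/* Similarly, a tx slot is refused outright on an unusable link instead
 * of queueing work that can never complete:
 */
int get_free_slot_index(struct smc_link *link, unsigned int *idx)
{
        *idx = link->wr_tx_cnt;
        if (!smc_link_usable(link))
                return -ENOLINK;
        /* ... otherwise scan wr_tx_mask for a free slot ... */
        return -EBUSY;
}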
Diffstat (limited to 'net/smc')
-rw-r--r--   net/smc/smc_llc.c   8
-rw-r--r--   net/smc/smc_wr.c    10
2 files changed, 10 insertions, 8 deletions
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index df164232574b..c1a038689c63 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -1241,8 +1241,8 @@ static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
         smc_llc_send_message(lnk, &qentry->msg); /* response */
 
         if (smc_link_downing(&lnk_del->state)) {
-                smc_switch_conns(lgr, lnk_del, false);
-                smc_wr_tx_wait_no_pending_sends(lnk_del);
+                if (smc_switch_conns(lgr, lnk_del, false))
+                        smc_wr_tx_wait_no_pending_sends(lnk_del);
         }
         smcr_link_clear(lnk_del, true);
 
@@ -1316,8 +1316,8 @@ static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
                 goto out; /* asymmetric link already deleted */
 
         if (smc_link_downing(&lnk_del->state)) {
-                smc_switch_conns(lgr, lnk_del, false);
-                smc_wr_tx_wait_no_pending_sends(lnk_del);
+                if (smc_switch_conns(lgr, lnk_del, false))
+                        smc_wr_tx_wait_no_pending_sends(lnk_del);
         }
         if (!list_empty(&lgr->list)) {
                 /* qentry is either a request from peer (send it back to
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 7239ba9b99dc..1e23cdd41eb1 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -169,6 +169,8 @@ void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
 {
         *idx = link->wr_tx_cnt;
+        if (!smc_link_usable(link))
+                return -ENOLINK;
         for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
                 if (!test_and_set_bit(*idx, link->wr_tx_mask))
                         return 0;
@@ -560,15 +562,15 @@ void smc_wr_free_link(struct smc_link *lnk)
 {
         struct ib_device *ibdev;
 
+        if (!lnk->smcibdev)
+                return;
+        ibdev = lnk->smcibdev->ibdev;
+
         if (smc_wr_tx_wait_no_pending_sends(lnk))
                 memset(lnk->wr_tx_mask, 0,
                        BITS_TO_LONGS(SMC_WR_BUF_CNT) *
                        sizeof(*lnk->wr_tx_mask));
 
-        if (!lnk->smcibdev)
-                return;
-        ibdev = lnk->smcibdev->ibdev;
-
         if (lnk->wr_rx_dma_addr) {
                 ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
                                     SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,