author     Julian Wiedmann <jwi@linux.ibm.com>     2021-03-09 17:52:20 +0100
committer  David S. Miller <davem@davemloft.net>   2021-03-10 01:14:54 +0100
commit     3e83d467a08e25b27c44c885f511624a71c84f7c (patch)
tree       12693495613ac83e5a9cf06d8bf725f64267210a /drivers/s390
parent     s390/qeth: improve completion of pending TX buffers (diff)
s390/qeth: schedule TX NAPI on QAOB completion
When a QAOB notifies us that a pending TX buffer has been delivered, the
actual TX completion processing by qeth_tx_complete_pending_bufs() is done
within the context of a TX NAPI instance. We shouldn't rely on this
instance being scheduled by some other TX event, but just do it ourselves.

qeth_qdio_handle_aob() is called from qeth_poll(), i.e. our main NAPI
instance. To avoid touching the TX queue's NAPI instance before/after it
is (un-)registered, reorder the code in qeth_open() and qeth_stop()
accordingly.

Fixes: 0da9581ddb0f ("qeth: exploit asynchronous delivery of storage blocks")
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
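The pattern at the heart of the fix is a completion handler that directly
schedules the NAPI instance of the queue owning the completed buffer. A
minimal sketch of that pattern, with hypothetical struct and function names
(napi_schedule() is the only real kernel API used), not the driver's actual
code:

#include <linux/netdevice.h>

/* Hypothetical per-queue state; the real driver's counterpart is
 * struct qeth_qdio_out_q, which embeds a struct napi_struct.
 */
struct tx_queue {
        struct napi_struct napi;
};

/* Hypothetical completion hook: when the hardware reports asynchronous
 * delivery of a TX buffer, schedule the owning queue's NAPI instance
 * directly rather than waiting for some other TX event to do it.
 */
static void tx_buffer_delivered(struct tx_queue *queue)
{
        napi_schedule(&queue->napi);
}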
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/net/qeth_core_main.c	18	++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 3763cd6d14f8..d0a56afec028 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -470,6 +470,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
         struct qaob *aob;
         struct qeth_qdio_out_buffer *buffer;
         enum iucv_tx_notify notification;
+        struct qeth_qdio_out_q *queue;
         unsigned int i;
 
         aob = (struct qaob *) phys_to_virt(phys_aob_addr);
@@ -512,7 +513,9 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
                         buffer->is_header[i] = 0;
                 }
 
+                queue = buffer->q;
                 atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY);
+                napi_schedule(&queue->napi);
                 break;
         default:
                 WARN_ON_ONCE(1);
@@ -7235,9 +7238,7 @@ int qeth_open(struct net_device *dev)
         card->data.state = CH_STATE_UP;
         netif_tx_start_all_queues(dev);
 
-        napi_enable(&card->napi);
         local_bh_disable();
-        napi_schedule(&card->napi);
         if (IS_IQD(card)) {
                 struct qeth_qdio_out_q *queue;
                 unsigned int i;
@@ -7249,8 +7250,12 @@ int qeth_open(struct net_device *dev)
                         napi_schedule(&queue->napi);
                 }
         }
+
+        napi_enable(&card->napi);
+        napi_schedule(&card->napi);
         /* kick-start the NAPI softirq: */
         local_bh_enable();
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_open);
@@ -7260,6 +7265,11 @@ int qeth_stop(struct net_device *dev)
         struct qeth_card *card = dev->ml_priv;
 
         QETH_CARD_TEXT(card, 4, "qethstop");
+
+        napi_disable(&card->napi);
+        cancel_delayed_work_sync(&card->buffer_reclaim_work);
+        qdio_stop_irq(CARD_DDEV(card));
+
         if (IS_IQD(card)) {
                 struct qeth_qdio_out_q *queue;
                 unsigned int i;
@@ -7280,10 +7290,6 @@ int qeth_stop(struct net_device *dev)
                 netif_tx_disable(dev);
         }
 
-        napi_disable(&card->napi);
-        cancel_delayed_work_sync(&card->buffer_reclaim_work);
-        qdio_stop_irq(CARD_DDEV(card));
-
         return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_stop);
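
The reordering in qeth_open() and qeth_stop() follows the general NAPI
lifetime rule: an instance may only be scheduled between napi_enable() and
napi_disable(), so enable/schedule must come after all setup, and disable
must come before any teardown step that could still trigger a schedule. A
generic, hypothetical sketch of that ordering (the sketch_* names and
struct sketch_priv are made up; the NAPI and local_bh_*() calls are real
kernel APIs):

#include <linux/netdevice.h>

struct sketch_priv {
        struct napi_struct napi;        /* hypothetical per-device NAPI context */
};

static int sketch_open(struct net_device *dev)
{
        struct sketch_priv *priv = netdev_priv(dev);

        local_bh_disable();
        /* Enable strictly before the first schedule ... */
        napi_enable(&priv->napi);
        napi_schedule(&priv->napi);
        /* ... and let the pending NAPI softirq run right away: */
        local_bh_enable();
        return 0;
}

static int sketch_stop(struct net_device *dev)
{
        struct sketch_priv *priv = netdev_priv(dev);

        /* Disable first, so a late completion (e.g. a QAOB notification)
         * cannot schedule the instance while the device is torn down.
         */
        napi_disable(&priv->napi);
        return 0;
}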