author | Julian Wiedmann <jwi@linux.ibm.com> | 2021-01-28 12:41:06 +0100
committer | Jakub Kicinski <kuba@kernel.org> | 2021-01-29 05:36:21 +0100
commit | ef6af7bdb9e6c14eae8dc5fe852aefe1e089c85c
tree | 1e9d200306516e7009f8050e5d373a85e8eaf93e /net/iucv
parent | net/af_iucv: don't lookup the socket on TX notification
net/af_iucv: count packets in the xmit path
The TX code keeps track of all skbs that are in-flight but haven't
actually been sent out yet. For native IUCV sockets that's not a huge
deal, but with TRANS_HIPER sockets it would be much better if we
didn't need to maintain a list of skb clones.
Note that we actually only care about the _count_ of skbs in this stage
of the TX pipeline. So as prep work for removing the skb tracking on
TRANS_HIPER sockets, keep track of the skb count in a separate variable
and pair any list {enqueue, unlink} with a count {increment, decrement}.
Then replace all occurrences where we currently look at the skb list's
fill level.
Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Acked-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
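The pattern is mechanical: every site that links an skb into send_skb_q also increments the new skbs_in_xmit counter, every unlink decrements it, and the flow-control and close checks read the counter instead of the queue length. A minimal, kernel-style sketch of that pairing (struct iucv_sk_demo and the demo_* helpers are hypothetical stand-ins for illustration, not the actual af_iucv code) could look like this:

```c
#include <linux/atomic.h>
#include <linux/skbuff.h>

/*
 * Illustration only: iucv_sk_demo and the demo_* helpers are hypothetical
 * stand-ins, not the real struct iucv_sock / af_iucv functions.
 */
struct iucv_sk_demo {
	struct sk_buff_head send_skb_q;	/* skbs awaiting TX completion */
	atomic_t skbs_in_xmit;		/* count of skbs on send_skb_q */
	int msglim;			/* peer's message limit */
};

/* Enqueue: bump the counter at the same spot as the list insert. */
static void demo_tx_enqueue(struct iucv_sk_demo *iucv, struct sk_buff *skb)
{
	skb_queue_tail(&iucv->send_skb_q, skb);
	atomic_inc(&iucv->skbs_in_xmit);
}

/* Completion (or TX error): every unlink is paired with a decrement. */
static void demo_tx_complete(struct iucv_sk_demo *iucv, struct sk_buff *skb)
{
	atomic_dec(&iucv->skbs_in_xmit);
	skb_unlink(skb, &iucv->send_skb_q);
	kfree_skb(skb);
}

/* Flow control reads the counter instead of skb_queue_len(). */
static bool demo_below_msglim(struct iucv_sk_demo *iucv)
{
	return atomic_read(&iucv->skbs_in_xmit) < iucv->msglim;
}
```

Because the increment or decrement always sits next to the corresponding queue operation, the counter tracks the queue's fill level exactly; that is the prep work the patch description refers to for later dropping the skb clone list on TRANS_HIPER sockets.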
Diffstat (limited to 'net/iucv')
-rw-r--r-- | net/iucv/af_iucv.c | 30
1 file changed, 24 insertions, 6 deletions
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 8683b6939f45..e487f472027a 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -182,7 +182,7 @@ static inline int iucv_below_msglim(struct sock *sk)
 	if (sk->sk_state != IUCV_CONNECTED)
 		return 1;
 	if (iucv->transport == AF_IUCV_TRANS_IUCV)
-		return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
+		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
 	else
 		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
 			(atomic_read(&iucv->pendings) <= 0));
@@ -269,8 +269,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
 	}
 
 	skb_queue_tail(&iucv->send_skb_q, nskb);
+	atomic_inc(&iucv->skbs_in_xmit);
 	err = dev_queue_xmit(skb);
 	if (net_xmit_eval(err)) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		skb_unlink(nskb, &iucv->send_skb_q);
 		kfree_skb(nskb);
 	} else {
@@ -424,7 +426,7 @@ static void iucv_sock_close(struct sock *sk)
 		sk->sk_state = IUCV_CLOSING;
 		sk->sk_state_change(sk);
 
-		if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
+		if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
 			if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 				timeo = sk->sk_lingertime;
 			else
@@ -491,6 +493,7 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio,
 	atomic_set(&iucv->pendings, 0);
 	iucv->flags = 0;
 	iucv->msglimit = 0;
+	atomic_set(&iucv->skbs_in_xmit, 0);
 	atomic_set(&iucv->msg_sent, 0);
 	atomic_set(&iucv->msg_recv, 0);
 	iucv->path = NULL;
@@ -1055,6 +1058,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		}
 	} else { /* Classic VM IUCV transport */
 		skb_queue_tail(&iucv->send_skb_q, skb);
+		atomic_inc(&iucv->skbs_in_xmit);
 
 		if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
 		    skb->len <= 7) {
@@ -1063,6 +1067,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* on success: there is no message_complete callback */
 			/* for an IPRMDATA msg; remove skb from send queue   */
 			if (err == 0) {
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				kfree_skb(skb);
 			}
@@ -1071,6 +1076,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 			/* IUCV_IPRMDATA path flag is set... sever path */
 			if (err == 0x15) {
 				pr_iucv->path_sever(iucv->path, NULL);
+				atomic_dec(&iucv->skbs_in_xmit);
 				skb_unlink(skb, &iucv->send_skb_q);
 				err = -EPIPE;
 				goto fail;
@@ -1109,6 +1115,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
 		} else {
 			err = -EPIPE;
 		}
+
+		atomic_dec(&iucv->skbs_in_xmit);
 		skb_unlink(skb, &iucv->send_skb_q);
 		goto fail;
 	}
@@ -1748,10 +1756,14 @@ static void iucv_callback_txdone(struct iucv_path *path,
 				 struct iucv_message *msg)
 {
 	struct sock *sk = path->private;
 	struct sk_buff *this = NULL;
-	struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+	struct sk_buff_head *list;
 	struct sk_buff *list_skb;
+	struct iucv_sock *iucv;
 	unsigned long flags;
 
+	iucv = iucv_sk(sk);
+	list = &iucv->send_skb_q;
+
 	bh_lock_sock(sk);
 	spin_lock_irqsave(&list->lock, flags);
@@ -1761,8 +1773,11 @@ static void iucv_callback_txdone(struct iucv_path *path,
 			break;
 		}
 	}
-	if (this)
+	if (this) {
+		atomic_dec(&iucv->skbs_in_xmit);
 		__skb_unlink(this, list);
+	}
+
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (this) {
@@ -1772,7 +1787,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
 	}
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}
@@ -2150,6 +2165,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 		if (skb_shinfo(list_skb) == skb_shinfo(skb)) {
 			switch (n) {
 			case TX_NOTIFY_OK:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				kfree_skb(list_skb);
 				iucv_sock_wake_msglim(sk);
@@ -2158,6 +2174,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 				atomic_inc(&iucv->pendings);
 				break;
 			case TX_NOTIFY_DELAYED_OK:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				atomic_dec(&iucv->pendings);
 				if (atomic_read(&iucv->pendings) <= 0)
@@ -2169,6 +2186,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 			case TX_NOTIFY_TPQFULL: /* not yet used */
 			case TX_NOTIFY_GENERALERROR:
 			case TX_NOTIFY_DELAYED_GENERALERROR:
+				atomic_dec(&iucv->skbs_in_xmit);
 				__skb_unlink(list_skb, list);
 				kfree_skb(list_skb);
 				if (sk->sk_state == IUCV_CONNECTED) {
@@ -2183,7 +2201,7 @@ static void afiucv_hs_callback_txnotify(struct sk_buff *skb,
 	spin_unlock_irqrestore(&list->lock, flags);
 
 	if (sk->sk_state == IUCV_CLOSING) {
-		if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
+		if (atomic_read(&iucv->skbs_in_xmit) == 0) {
 			sk->sk_state = IUCV_CLOSED;
 			sk->sk_state_change(sk);
 		}