path: root/net/core/netpoll.c
author    Eric W. Biederman <ebiederm@xmission.com>  2014-03-27 23:42:20 +0100
committer David S. Miller <davem@davemloft.net>      2014-03-29 22:58:37 +0100
commit    5efeac44cfca62f66a1b2919fc8ec7f7c726d15b
tree      b4d7d338f3ace293b905014afc70afc3189a89ff /net/core/netpoll.c
parent    netpoll: Remove strong unnecessary assumptions about skbs
netpoll: Respect NETIF_F_LLTX
Stop taking the transmit lock when a network device has specified NETIF_F_LLTX.

If no locks are needed to transmit a packet this is the ideal scenario for netpoll, as all packets can be transmitted immediately. Even if some locks are needed in ndo_start_xmit, skipping any unnecessary serialization is desirable for netpoll, as it makes it more likely a debugging packet may be transmitted immediately instead of being deferred until later.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
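For context on the diff below: the HARD_TX_LOCK/HARD_TX_UNLOCK macros skip the per-queue transmit lock entirely when the driver advertises NETIF_F_LLTX, which is what lets netpoll avoid the serialization described above. A simplified sketch of that behavior, modeled on the include/linux/netdevice.h definitions of this era (the exact in-tree macros are not part of this filtered diff):

/* Sketch: only take the per-queue tx lock for devices that do NOT
 * declare NETIF_F_LLTX (lock-less transmit); LLTX drivers do their
 * own locking, or need none, inside ndo_start_xmit. */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}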
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d44af2306f23..ed7740f7a94d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -119,17 +119,17 @@ static void queue_process(struct work_struct *work)
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -345,11 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
+			if (HARD_TX_TRYLOCK(dev, txq)) {
 				if (!netif_xmit_stopped(txq))
 					status = netpoll_start_xmit(skb, dev, txq);
 
-				__netif_tx_unlock(txq);
+				HARD_TX_UNLOCK(dev, txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
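The try-lock path in the second hunk follows the same pattern: for a NETIF_F_LLTX device, HARD_TX_TRYLOCK reports success without touching the queue lock at all, so netpoll can attempt the transmit immediately rather than deferring it. A sketch of the macro, again modeled on the include/linux/netdevice.h definition of this era (assumed here, since the diffstat view above is filtered to net/core/netpoll.c and does not show the header change):

/* Sketch: trylock succeeds unconditionally for NETIF_F_LLTX devices;
 * only non-LLTX devices actually contend for the per-queue lock. */
#define HARD_TX_TRYLOCK(dev, txq)			\
	(((dev->features & NETIF_F_LLTX) == 0) ?	\
		__netif_tx_trylock(txq) :		\
		true)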