author	Eric W. Biederman <ebiederm@xmission.com>	2014-03-27 23:38:17 +0100
committer	David S. Miller <davem@davemloft.net>	2014-03-29 22:58:37 +0100
commit	3f4df2066b4e02cb609fa33b2eae8403b5821f4f (patch)
tree	e960a9af040e91c50c9bb56c48a76a4c873006e6 /net/core
parent	netpoll: Only call ndo_start_xmit from a single place (diff)
netpoll: Move rx enable/disable into __dev_close_many
Today netpoll_rx_enable and netpoll_rx_disable are called from dev_close and __dev_close, but not from dev_close_many. Move the calls into __dev_close_many so that there is a single call site to maintain, and so that dev_close_many gains this protection as well, which importantly makes batched network device deletes safe.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	13
1 file changed, 4 insertions, 9 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index 98ba581b89f0..8d55fe780e3f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_rx_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_rx_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
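Taken together, the two __dev_close_many() hunks bracket every device on the close list with netpoll_rx_disable()/netpoll_rx_enable(), so __dev_close, dev_close, and any other dev_close_many() caller (including the batched unregister path the commit message refers to) now inherit the protection from a single place. The sketch below is illustrative only: it shows the rough shape of __dev_close_many() after this patch, with the steps not touched by the diff summarized in comments rather than reproduced verbatim.

/* Illustrative sketch only -- not the verbatim kernel source. */
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_rx_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
		clear_bit(__LINK_STATE_START, &dev->state);
		/* ... synchronize with any scheduled NAPI poll ... */
	}

	/* ... deactivate queues and invoke each device's ndo_stop() ... */

	list_for_each_entry(dev, head, close_list) {
		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
		netpoll_rx_enable(dev);
	}

	return 0;
}

Each device's netpoll rx path thus stays blocked from the NETDEV_GOING_DOWN notifier in the first loop until after IFF_UP is cleared in the second loop, matching the per-device protection that dev_close and __dev_close previously provided around their whole close sequence.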