author		Eric Dumazet <edumazet@google.com>	2020-05-07 18:32:22 +0200
committer	David S. Miller <davem@davemloft.net>	2020-05-08 03:11:07 +0200
commit		ae46f184bc1fb15bf2de47114c29236e61ca4bbc (patch)
tree		5d1540325ac32853f85625cdb0262934f8244dbc /drivers/net/bonding/bond_main.c
parent		netpoll: accept NULL np argument in netpoll_send_skb() (diff)
bonding: propagate transmit status
Currently, bonding always returns NETDEV_TX_OK to its caller.
It is worth being more accurate: TCP, for instance, can choose a
different recovery strategy if it gets a more precise status, e.g.
when a packet was dropped by the slave's qdisc.
This is especially important when the host is under stress.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Jay Vosburgh <j.vosburgh@gmail.com>
Cc: Veaceslav Falico <vfalico@gmail.com>
Cc: Andy Gospodarek <andy@greyhouse.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
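Note that the propagated netdev_tx_t only carries real information if bond_tx_drop() stops being void as well. That helper lives in include/net/bonding.h and is therefore outside the diffstat below (limited to bond_main.c); a minimal sketch of the assumed companion change, reporting the drop to the caller instead of swallowing it:

	/* Sketch of the assumed companion change in include/net/bonding.h:
	 * count and free the skb as before, but report the drop upwards.
	 */
	static inline netdev_tx_t bond_tx_drop(struct net_device *dev, struct sk_buff *skb)
	{
		atomic_long_inc(&dev->tx_dropped);
		dev_kfree_skb_any(skb);
		return NET_XMIT_DROP;
	}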
Diffstat (limited to 'drivers/net/bonding/bond_main.c')
-rw-r--r--	drivers/net/bonding/bond_main.c | 60
1 file changed, 22 insertions(+), 38 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index baa93191dfdd..4f9e7c421f57 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -287,7 +287,7 @@ const char *bond_mode_name(int mode)
  * @skb: hw accel VLAN tagged skb to transmit
  * @slave_dev: slave that is supposed to xmit this skbuff
  */
-void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
+netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 			struct net_device *slave_dev)
 {
 	skb->dev = slave_dev;
@@ -297,9 +297,9 @@ void bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
 
 	if (unlikely(netpoll_tx_running(bond->dev)))
-		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
-	else
-		dev_queue_xmit(skb);
+		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+
+	return dev_queue_xmit(skb);
 }
 
 /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
@@ -3932,7 +3932,7 @@ unwind:
  * it fails, it tries to find the first available slave for transmission.
  * The skb is consumed in all cases, thus the function is void.
  */
-static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+static netdev_tx_t bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
 {
 	struct list_head *iter;
 	struct slave *slave;
@@ -3941,10 +3941,8 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
 	/* Here we start from the slave with slave_id */
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (--i < 0) {
-			if (bond_slave_can_tx(slave)) {
-				bond_dev_queue_xmit(bond, skb, slave->dev);
-				return;
-			}
+			if (bond_slave_can_tx(slave))
+				return bond_dev_queue_xmit(bond, skb, slave->dev);
 		}
 	}
 
@@ -3953,13 +3951,11 @@ static void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int sl
 	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (--i < 0)
 			break;
-		if (bond_slave_can_tx(slave)) {
-			bond_dev_queue_xmit(bond, skb, slave->dev);
-			return;
-		}
+		if (bond_slave_can_tx(slave))
+			return bond_dev_queue_xmit(bond, skb, slave->dev);
 	}
 	/* no slave that can tx has been found */
-	bond_tx_drop(bond->dev, skb);
+	return bond_tx_drop(bond->dev, skb);
 }
 
 /**
@@ -4020,10 +4016,8 @@ static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
 		if (iph->protocol == IPPROTO_IGMP) {
 			slave = rcu_dereference(bond->curr_active_slave);
 			if (slave)
-				bond_dev_queue_xmit(bond, skb, slave->dev);
-			else
-				bond_xmit_slave_id(bond, skb, 0);
-			return NETDEV_TX_OK;
+				return bond_dev_queue_xmit(bond, skb, slave->dev);
+			return bond_xmit_slave_id(bond, skb, 0);
 		}
 	}
 
@@ -4031,11 +4025,9 @@ non_igmp:
 	slave_cnt = READ_ONCE(bond->slave_cnt);
 	if (likely(slave_cnt)) {
 		slave_id = bond_rr_gen_slave_id(bond);
-		bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
-	} else {
-		bond_tx_drop(bond_dev, skb);
+		return bond_xmit_slave_id(bond, skb, slave_id % slave_cnt);
 	}
-	return NETDEV_TX_OK;
+	return bond_tx_drop(bond_dev, skb);
 }
 
 /* In active-backup mode, we know that bond->curr_active_slave is always valid if
@@ -4049,11 +4041,9 @@ static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
 
 	slave = rcu_dereference(bond->curr_active_slave);
 	if (slave)
-		bond_dev_queue_xmit(bond, skb, slave->dev);
-	else
-		bond_tx_drop(bond_dev, skb);
+		return bond_dev_queue_xmit(bond, skb, slave->dev);
 
-	return NETDEV_TX_OK;
+	return bond_tx_drop(bond_dev, skb);
 }
 
 /* Use this to update slave_array when (a) it's not appropriate to update
@@ -4196,12 +4186,9 @@ static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
 	count = slaves ? READ_ONCE(slaves->count) : 0;
 	if (likely(count)) {
 		slave = slaves->arr[bond_xmit_hash(bond, skb) % count];
-		bond_dev_queue_xmit(bond, skb, slave->dev);
-	} else {
-		bond_tx_drop(dev, skb);
+		return bond_dev_queue_xmit(bond, skb, slave->dev);
 	}
-
-	return NETDEV_TX_OK;
+	return bond_tx_drop(dev, skb);
 }
 
 /* in broadcast mode, we send everything to all usable interfaces. */
@@ -4227,11 +4214,9 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
 		}
 	}
 	if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
-		bond_dev_queue_xmit(bond, skb, slave->dev);
-	else
-		bond_tx_drop(bond_dev, skb);
+		return bond_dev_queue_xmit(bond, skb, slave->dev);
 
-	return NETDEV_TX_OK;
+	return bond_tx_drop(bond_dev, skb);
 }
 
 /*------------------------- Device initialization ---------------------------*/
@@ -4310,8 +4295,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
 		/* Should never happen, mode already checked */
 		netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
 		WARN_ON_ONCE(1);
-		bond_tx_drop(dev, skb);
-		return NETDEV_TX_OK;
+		return bond_tx_drop(dev, skb);
 	}
 }
 
@@ -4330,7 +4314,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (bond_has_slaves(bond))
 		ret = __bond_start_xmit(skb, dev);
 	else
-		bond_tx_drop(dev, skb);
+		ret = bond_tx_drop(dev, skb);
 	rcu_read_unlock();
 
 	return ret;
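For a sense of what the more precise status buys a caller (purely illustrative, not part of this patch; example_xmit_on_bond() is a hypothetical helper): a transmit path sitting above the bond can now tell a queued packet from one dropped by the slave's qdisc, instead of always seeing success.

	/* Hypothetical caller sketch: with this change, a drop in the slave's
	 * qdisc surfaces from dev_queue_xmit() on the bond device as
	 * NET_XMIT_DROP (or NET_XMIT_CN under congestion) rather than being
	 * hidden behind NETDEV_TX_OK.
	 */
	static int example_xmit_on_bond(struct sk_buff *skb, struct net_device *bond_dev)
	{
		int err;

		skb->dev = bond_dev;
		err = dev_queue_xmit(skb);	/* ends up in bond_start_xmit() */

		/* net_xmit_eval() keeps real drops, treats NET_XMIT_CN as success */
		return net_xmit_eval(err);
	}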