author     dingtianhong <dingtianhong@huawei.com>   2014-03-25 10:00:09 +0100
committer  David S. Miller <davem@davemloft.net>    2014-03-26 21:40:24 +0100
commit     054bb8801038c93c42cb6cde75141aa396afd065 (patch)
tree       825cf25616cca0b1959c2a81e58abdfdb20b8cb1 /drivers/net/bonding
parent     xen-netback: Functional follow-up patch for grant mapping series (diff)
bonding: slight optimization for bond xmit path
Add the unlikely() macro to the rarely taken conditions in the bond
xmit path for a slight optimization.
Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
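
For context, unlikely() is a branch-prediction hint: in the kernel it wraps GCC's __builtin_expect() so the compiler can place the rarely taken branch off the hot path. The minimal userspace sketch below mirrors the pattern applied in this patch; tx_blocked() and xmit_one() are hypothetical stand-ins for is_netpoll_tx_blocked() and the bond xmit path, not kernel code.

#include <stdio.h>
#include <stdbool.h>

/* Simplified versions of the kernel's branch-prediction hints
 * (include/linux/compiler.h); they tell the compiler which outcome
 * of the condition is expected. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Hypothetical stand-in for is_netpoll_tx_blocked(): almost always
 * false on a normal transmit path. */
static bool tx_blocked(void)
{
	return false;
}

/* Hypothetical stand-in for a transmit routine. */
static int xmit_one(int frame)
{
	/* The rare failure path is marked unlikely(), mirroring the
	 * patch, so the common case falls straight through. */
	if (unlikely(tx_blocked()))
		return -1;	/* would be NETDEV_TX_BUSY in the kernel */

	printf("transmitted frame %d\n", frame);
	return 0;
}

int main(void)
{
	return xmit_one(1);
}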
Diffstat (limited to 'drivers/net/bonding')
-rw-r--r--   drivers/net/bonding/bond_main.c | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e717db301d46..ee17c246326e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2957,7 +2957,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
 	fk->ports = 0;
 	noff = skb_network_offset(skb);
 	if (skb->protocol == htons(ETH_P_IP)) {
-		if (!pskb_may_pull(skb, noff + sizeof(*iph)))
+		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
 			return false;
 		iph = ip_hdr(skb);
 		fk->src = iph->saddr;
@@ -2966,7 +2966,7 @@ static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
 		if (!ip_is_fragment(iph))
 			proto = iph->protocol;
 	} else if (skb->protocol == htons(ETH_P_IPV6)) {
-		if (!pskb_may_pull(skb, noff + sizeof(*iph6)))
+		if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph6))))
 			return false;
 		iph6 = ipv6_hdr(skb);
 		fk->src = (__force __be32)ipv6_addr_hash(&iph6->saddr);
@@ -3768,7 +3768,7 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * If we risk deadlock from transmitting this in the
	 * netpoll path, tell netpoll to queue the frame for later tx
	 */
-	if (is_netpoll_tx_blocked(dev))
+	if (unlikely(is_netpoll_tx_blocked(dev)))
		return NETDEV_TX_BUSY;

	rcu_read_lock();