Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c     | 7
-rw-r--r--  net/core/skbuff.c  | 5
2 files changed, 7 insertions, 5 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index d93456c75b55..a5e663e1a75a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4582,9 +4582,12 @@ static void rps_trigger_softirq(void *data)
 #endif /* CONFIG_RPS */
 
 /* Called from hardirq (IPI) context */
-static void trigger_rx_softirq(void *data __always_unused)
+static void trigger_rx_softirq(void *data)
 {
+	struct softnet_data *sd = data;
+
 	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
+	smp_store_release(&sd->defer_ipi_scheduled, 0);
 }
 
 /*
@@ -11382,7 +11385,7 @@ static int __init net_dev_init(void)
 		INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
 		sd->cpu = i;
 #endif
-		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, NULL);
+		INIT_CSD(&sd->defer_csd, trigger_rx_softirq, sd);
 		spin_lock_init(&sd->defer_lock);
 
 		init_gro_hash(&sd->backlog);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2fea964f09d8..b40c8cdf4785 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -6516,8 +6516,7 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 	sd->defer_count++;
 
 	/* kick every time queue length reaches 128.
-	 * This should avoid blocking in smp_call_function_single_async().
-	 * This condition should hardly be bit under normal conditions,
+	 * This condition should hardly be hit under normal conditions,
 	 * unless cpu suddenly stopped to receive NIC interrupts.
 	 */
 	kick = sd->defer_count == 128;
@@ -6527,6 +6526,6 @@ void skb_attempt_defer_free(struct sk_buff *skb)
 	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
 	 * if we are unlucky enough (this seems very unlikely).
 	 */
-	if (unlikely(kick))
+	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
 		smp_call_function_single_async(cpu, &sd->defer_csd);
 }
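Note on the pattern used above (commentary, not part of the diff): the cmpxchg() added in skb_attempt_defer_free() lets only the caller that flips defer_ipi_scheduled from 0 to 1 issue smp_call_function_single_async(), and the smp_store_release() added in trigger_rx_softirq() clears the flag once the IPI handler has run, so at most one kick is in flight per softnet_data at any time. The following is a minimal userspace sketch of that flag protocol using C11 atomics; the maybe_kick() and fake_ipi_handler() helpers and the kicks_sent counter are illustrative assumptions, not kernel code.

/*
 * Illustrative userspace sketch of the "only one kick in flight" guard,
 * modeled with C11 atomics. Names mirror the kernel field and functions
 * for readability only.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int defer_ipi_scheduled;	/* 0 = no kick pending, 1 = pending */
static int kicks_sent;

/* Caller side (analogous to skb_attempt_defer_free()): only the caller
 * that flips the flag 0 -> 1 sends the kick; later callers see the flag
 * already set and do nothing.
 */
static void maybe_kick(void)
{
	int expected = 0;

	if (atomic_compare_exchange_strong(&defer_ipi_scheduled, &expected, 1))
		kicks_sent++;	/* stands in for smp_call_function_single_async() */
}

/* Target side (analogous to trigger_rx_softirq()): do the work, then
 * release the flag so the next kick can be armed.
 */
static void fake_ipi_handler(void)
{
	/* ... equivalent of __raise_softirq_irqoff(NET_RX_SOFTIRQ) ... */
	atomic_store_explicit(&defer_ipi_scheduled, 0, memory_order_release);
}

int main(void)
{
	maybe_kick();		/* first caller wins and sends the kick */
	maybe_kick();		/* duplicate is suppressed: flag already 1 */
	fake_ipi_handler();	/* handler re-arms by clearing the flag */
	maybe_kick();		/* a new kick can now be sent again */

	printf("kicks sent: %d (expected 2)\n", kicks_sent);
	return 0;
}

Built with a C11 compiler, the demo prints "kicks sent: 2": the second maybe_kick() is suppressed while the flag is still set, which is the same behaviour the new cmpxchg() condition gives the kernel path.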