author	Paolo Abeni <pabeni@redhat.com>	2020-09-10 23:33:18 +0200
committer	David S. Miller <davem@davemloft.net>	2020-09-14 23:39:00 +0200
commit	2de79ee27fdb52626ac4ac48ec6d8d52ba6f9047 (patch)
tree	e5ab4d5630bb0de1ad741d616d17ef97882bfb9e /net/core
parent	Merge branch 'mlxsw-Derive-SBIB-from-maximum-port-speed-and-MTU' (diff)
net: try to avoid unneeded backlog flush
flush_all_backlogs() may cause a deadlock on systems running processes with
the FIFO scheduling policy.

This is critical in -RT scenarios, where user-space specifically ensures that
no network activity is scheduled on the CPU running the mentioned FIFO
process, yet that process still gets stuck.

This commit tries to address the problem by checking the backlog status on
the remote CPUs before scheduling the flush operation. If the backlog is
empty, we can skip it.

v1 -> v2:
 - explicitly clear flushed cpu mask - Eric

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
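[Editor illustration, not part of the patch] A minimal user-space sketch of the -RT setup the commit message describes: a SCHED_FIFO task pinned to a CPU that the administrator keeps free of network processing. The CPU number and priority below are arbitrary placeholders. As long as the FIFO busy loop owns the CPU, a SCHED_NORMAL kworker queued there by the old flush_all_backlogs() cannot run, which is the stall being avoided.

/* hypothetical example: pin a SCHED_FIFO task to an isolated CPU */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 50 };	/* arbitrary RT priority */
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(2, &set);				/* CPU 2: assumed isolated from net work */
	if (sched_setaffinity(0, sizeof(set), &set))
		perror("sched_setaffinity");

	if (sched_setscheduler(0, SCHED_FIFO, &sp))	/* needs CAP_SYS_NICE */
		perror("sched_setscheduler");

	for (;;)
		;	/* RT busy loop: non-RT kworkers on this CPU starve */
}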
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dev.c	| 51
1 file changed, 47 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 03624192862a..6089ac9d17ba 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5621,17 +5621,60 @@ static void flush_backlog(struct work_struct *work)
 	local_bh_enable();
 }
 
+static bool flush_required(int cpu)
+{
+#if IS_ENABLED(CONFIG_RPS)
+	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+	bool do_flush;
+
+	local_irq_disable();
+	rps_lock(sd);
+
+	/* as insertion into process_queue happens with the rps lock held,
+	 * process_queue access may race only with dequeue
+	 */
+	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+		   !skb_queue_empty_lockless(&sd->process_queue);
+	rps_unlock(sd);
+	local_irq_enable();
+
+	return do_flush;
+#endif
+	/* without RPS we can't safely check input_pkt_queue: during a
+	 * concurrent remote skb_queue_splice() we can detect as empty both
+	 * input_pkt_queue and process_queue even if the latter could end-up
+	 * containing a lot of packets.
+	 */
+	return true;
+}
+
 static void flush_all_backlogs(void)
 {
+	static cpumask_t flush_cpus;
 	unsigned int cpu;
 
+	/* since we are under rtnl lock protection we can use static data
+	 * for the cpumask and avoid allocating on stack the possibly
+	 * large mask
+	 */
+	ASSERT_RTNL();
+
 	get_online_cpus();
 
-	for_each_online_cpu(cpu)
-		queue_work_on(cpu, system_highpri_wq,
-			      per_cpu_ptr(&flush_works, cpu));
+	cpumask_clear(&flush_cpus);
+	for_each_online_cpu(cpu) {
+		if (flush_required(cpu)) {
+			queue_work_on(cpu, system_highpri_wq,
+				      per_cpu_ptr(&flush_works, cpu));
+			cpumask_set_cpu(cpu, &flush_cpus);
+		}
+	}
 
-	for_each_online_cpu(cpu)
+	/* we can have in flight packet[s] on the cpus we are not flushing,
+	 * synchronize_net() in rollback_registered_many() will take care of
+	 * them
+	 */
+	for_each_cpu(cpu, &flush_cpus)
 		flush_work(per_cpu_ptr(&flush_works, cpu));
 
 	put_online_cpus();
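[Editor illustration, not kernel code] The core idea of the patch, reduced to a self-contained single-threaded sketch: record in a mask which queues actually need flushing, then visit only those. All names, the queue count, and the pending-packet numbers are invented for this example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_QUEUES 8	/* stand-in for the number of online CPUs */

static int queue_len[NR_QUEUES] = { 0, 3, 0, 0, 7, 0, 1, 0 };

/* analogue of flush_required(): anything pending on this queue? */
static bool demo_flush_required(int q)
{
	return queue_len[q] != 0;
}

/* analogue of the per-CPU flush work item */
static void demo_flush_queue(int q)
{
	printf("flushing queue %d (%d packets)\n", q, queue_len[q]);
	queue_len[q] = 0;
}

int main(void)
{
	uint64_t flush_mask = 0;	/* analogue of the static flush_cpus cpumask */
	int q;

	for (q = 0; q < NR_QUEUES; q++)
		if (demo_flush_required(q))
			flush_mask |= 1ULL << q;

	/* only queues recorded in the mask are visited; idle ones are
	 * skipped entirely, mirroring for_each_cpu(cpu, &flush_cpus) */
	for (q = 0; q < NR_QUEUES; q++)
		if (flush_mask & (1ULL << q))
			demo_flush_queue(q);

	return 0;
}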