author     David S. Miller <davem@davemloft.net>  2017-11-10 02:00:18 +0100
committer  David S. Miller <davem@davemloft.net>  2017-11-10 02:00:18 +0100
commit     4dc6758d7824a6d25717ccceefc488cafdb07210 (patch)
tree       992e5d5996910af35a5c12fe94da14d0bb167452 /kernel
parent     Merge branch 'dsa-setup-stage' (diff)
parent     Merge tag 'pm-final-4.14' of git://git.kernel.org/pub/scm/linux/kernel/git/ra... (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Simple cases of overlapping changes in the packet scheduler.

Much easier to resolve this time.

Which probably means that I screwed it up somehow.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c              |  6
-rw-r--r--  kernel/futex.c                    | 23
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  |  6
-rw-r--r--  kernel/watchdog_hld.c             | 15
-rw-r--r--  kernel/workqueue_internal.h       |  3
5 files changed, 37 insertions(+), 16 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9660ee65fbef..42d24bd64ea4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -901,9 +901,11 @@ list_update_cgroup_event(struct perf_event *event,
cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
/* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU. */
if (add) {
+ struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
+
list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
- if (perf_cgroup_from_task(current, ctx) == event->cgrp)
- cpuctx->cgrp = event->cgrp;
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+ cpuctx->cgrp = cgrp;
} else {
list_del(cpuctx_entry);
cpuctx->cgrp = NULL;
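The hunk above widens the match from strict equality to ancestry: with cgroup_is_descendant(), a perf event attached to a parent cgroup now also activates when the current task runs in any nested child cgroup. A minimal userspace sketch of that ancestry walk, assuming a made-up struct cg with a parent pointer (not the kernel's css/cgroup types):

#include <stdbool.h>
#include <stddef.h>

struct cg {
        struct cg *parent;              /* NULL at the hierarchy root */
};

/* True if anc is c itself or any ancestor of c. */
static bool cg_is_descendant(const struct cg *c, const struct cg *anc)
{
        for (; c; c = c->parent)
                if (c == anc)
                        return true;
        return false;
}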
diff --git a/kernel/futex.c b/kernel/futex.c
index 0d638f008bb1..76ed5921117a 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -903,11 +903,27 @@ void exit_pi_state_list(struct task_struct *curr)
*/
raw_spin_lock_irq(&curr->pi_lock);
while (!list_empty(head)) {
-
next = head->next;
pi_state = list_entry(next, struct futex_pi_state, list);
key = pi_state->key;
hb = hash_futex(&key);
+
+ /*
+ * We can race against put_pi_state() removing itself from the
+ * list (a waiter going away). put_pi_state() will first
+ * decrement the reference count and then modify the list, so
+ * it's possible to see the list entry but fail this reference
+ * acquire.
+ *
+ * In that case, drop the locks to let put_pi_state() make
+ * progress and retry the loop.
+ */
+ if (!atomic_inc_not_zero(&pi_state->refcount)) {
+ raw_spin_unlock_irq(&curr->pi_lock);
+ cpu_relax();
+ raw_spin_lock_irq(&curr->pi_lock);
+ continue;
+ }
raw_spin_unlock_irq(&curr->pi_lock);
spin_lock(&hb->lock);
@@ -918,8 +934,10 @@ void exit_pi_state_list(struct task_struct *curr)
* task still owns the PI-state:
*/
if (head->next != next) {
+ /* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
+ put_pi_state(pi_state);
continue;
}
@@ -927,9 +945,8 @@ void exit_pi_state_list(struct task_struct *curr)
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
pi_state->owner = NULL;
- raw_spin_unlock(&curr->pi_lock);
- get_pi_state(pi_state);
+ raw_spin_unlock(&curr->pi_lock);
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
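The key addition in this hunk is atomic_inc_not_zero(): a pi_state whose refcount has already reached zero is being freed by put_pi_state(), so the list walker must not take a new reference to it; instead it drops the lock, lets the free complete, and retries. A minimal C11 sketch of the same acquire-only-if-live idiom, with get_ref_not_zero() as a hypothetical stand-in for the kernel helper:

#include <stdatomic.h>
#include <stdbool.h>

static bool get_ref_not_zero(atomic_int *refcount)
{
        int old = atomic_load(refcount);

        /* Never move the count from 0 to 1: zero means the object is
         * already being torn down and must not be resurrected. */
        while (old != 0) {
                if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                        return true;    /* reference acquired */
        }
        return false;                   /* dying object; caller retries */
}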
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 9209d83ecdcf..ba0da243fdd8 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -649,6 +649,7 @@ static int sugov_start(struct cpufreq_policy *policy)
struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu);
memset(sg_cpu, 0, sizeof(*sg_cpu));
+ sg_cpu->cpu = cpu;
sg_cpu->sg_policy = sg_policy;
sg_cpu->flags = SCHED_CPUFREQ_RT;
sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq;
@@ -714,11 +715,6 @@ struct cpufreq_governor *cpufreq_default_governor(void)
static int __init sugov_register(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu)
- per_cpu(sugov_cpu, cpu).cpu = cpu;
-
return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
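These two hunks fix an initialization-ordering bug: sugov_register() set each per-CPU sg_cpu->cpu exactly once at boot, but sugov_start() clears the whole structure with memset() on every governor start, erasing that field. Moving the assignment after the memset() makes it survive restarts. A tiny illustration of the hazard, using invented names (struct pcpu, start()):

#include <string.h>

struct pcpu {
        int cpu;
        void *policy;
};

static void start(struct pcpu *p, int cpu)
{
        memset(p, 0, sizeof(*p));       /* wipes any value set at boot */
        p->cpu = cpu;                   /* so assign (again) afterwards */
}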
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 4583feb66393..e449a23e9d59 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -13,6 +13,7 @@
#define pr_fmt(fmt) "NMI watchdog: " fmt
#include <linux/nmi.h>
+#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/sched/debug.h>
@@ -22,10 +23,11 @@
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
-static unsigned int watchdog_cpus;
+static atomic_t watchdog_cpus = ATOMIC_INIT(0);
void arch_touch_nmi_watchdog(void)
{
@@ -189,7 +191,8 @@ void hardlockup_detector_perf_enable(void)
if (hardlockup_detector_event_create())
return;
- if (!watchdog_cpus++)
+ /* use original value for check */
+ if (!atomic_fetch_inc(&watchdog_cpus))
pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
perf_event_enable(this_cpu_read(watchdog_ev));
@@ -204,8 +207,10 @@ void hardlockup_detector_perf_disable(void)
if (event) {
perf_event_disable(event);
+ this_cpu_write(watchdog_ev, NULL);
+ this_cpu_write(dead_event, event);
cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
- watchdog_cpus--;
+ atomic_dec(&watchdog_cpus);
}
}
@@ -219,7 +224,7 @@ void hardlockup_detector_perf_cleanup(void)
int cpu;
for_each_cpu(cpu, &dead_events_mask) {
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
+ struct perf_event *event = per_cpu(dead_event, cpu);
/*
* Required because for_each_cpu() reports unconditionally
@@ -227,7 +232,7 @@ void hardlockup_detector_perf_cleanup(void)
*/
if (event)
perf_event_release_kernel(event);
- per_cpu(watchdog_ev, cpu) = NULL;
+ per_cpu(dead_event, cpu) = NULL;
}
cpumask_clear(&dead_events_mask);
}
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index efdd72e15794..d390d1be3748 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -60,7 +61,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
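The in_task() guard matters because in interrupt context `current` is simply whichever task happened to be interrupted; its PF_WQ_WORKER flag then says nothing about the code actually executing, and kthread_data() would read state from an unrelated task. A runnable userspace model of the guard, with the flag value mirroring the kernel's PF_WQ_WORKER and is_wq_worker() invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define PF_WQ_WORKER 0x00000020         /* same bit the kernel uses */

/* Only trust the task flags when we know we run in task context. */
static bool is_wq_worker(bool in_task_ctx, unsigned int task_flags)
{
        return in_task_ctx && (task_flags & PF_WQ_WORKER);
}

int main(void)
{
        printf("%d\n", is_wq_worker(true, PF_WQ_WORKER));   /* 1 */
        printf("%d\n", is_wq_worker(false, PF_WQ_WORKER));  /* 0: IRQ */
        return 0;
}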