Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c             |  5
-rw-r--r--  kernel/context_tracking.c  | 12
-rw-r--r--  kernel/events/core.c       | 21
-rw-r--r--  kernel/kmod.c              |  4
-rw-r--r--  kernel/params.c            |  6
-rw-r--r--  kernel/pid.c               |  5
-rw-r--r--  kernel/reboot.c            |  9
-rw-r--r--  kernel/sched/fair.c        |  9
-rw-r--r--  kernel/watchdog.c          | 60
9 files changed, 116 insertions(+), 15 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 91e53d04b6a9..7b0e23a740ce 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
sleep_time = timeout_start + audit_backlog_wait_time -
jiffies;
- if ((long)sleep_time > 0)
+ if ((long)sleep_time > 0) {
wait_for_auditd(sleep_time);
- continue;
+ continue;
+ }
}
if (audit_rate_check() && printk_ratelimit())
printk(KERN_WARNING
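
The audit.c change is purely a bracing fix: the continue looked conditional but bound to the loop unconditionally, so the timed-out path below it never ran. A standalone sketch of the same shape (stub names are mine, not from audit.c):

    #include <stdio.h>

    static void wait_stub(int t) { (void)t; }  /* stands in for wait_for_auditd() */

    int main(void)
    {
        int warnings = 0;

        for (int attempt = 0; attempt < 5; attempt++) {
            int sleep_time = 2 - attempt;      /* goes 2, 1, 0, -1, -2 */

            if (sleep_time > 0)
                wait_stub(sleep_time);
                continue;  /* BUG: executes unconditionally despite the indentation */

            warnings++;    /* mirrors the ratelimited warning path */
        }
        printf("warning path hit %d times, expected 3\n", warnings);
        return 0;
    }

With the braces added as in the hunk above, the continue runs only after actually waiting, and the warning path becomes reachable again.
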
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 247091bf0587..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -51,6 +51,15 @@ void context_tracking_user_enter(void)
unsigned long flags;
/*
+ * Repeat the user_enter() check here because some archs may be calling
+ * this from asm, and if no CPU needs context tracking they shouldn't
+ * go further. Keep repeating the check here until those archs support
+ * the static key check in asm.
+ */
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
+ /*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -151,6 +160,9 @@ void context_tracking_user_exit(void)
{
unsigned long flags;
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
if (in_interrupt())
return;
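
Both early returns rely on the static-key fast path: while context_tracking_enabled is false, static_key_false() compiles down to a patched no-op branch, so the check costs almost nothing on the hot path. A minimal sketch of the pattern with a hypothetical key (the real key here is context_tracking_enabled):

    #include <linux/static_key.h>

    /* Hypothetical key; starts false, so the branch is patched out */
    static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

    void my_hot_path(void)
    {
        /* Near-zero cost until static_key_slow_inc() flips the key */
        if (!static_key_false(&my_feature_key))
            return;

        /* ... slow path, only reached while the feature is enabled ... */
    }
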
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dd236b66ca3a..cb4238e85b38 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3660,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
*running = ctx_time - event->tstamp_running;
}
+static void perf_event_init_userpage(struct perf_event *event)
+{
+ struct perf_event_mmap_page *userpg;
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ userpg = rb->user_page;
+
+ /* Allow new userspace to detect that bit 0 is deprecated */
+ userpg->cap_bit0_is_deprecated = 1;
+ userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+ rcu_read_unlock();
+}
+
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
@@ -4044,6 +4064,7 @@ again:
ring_buffer_attach(event, rb);
rcu_assign_pointer(event->rb, rb);
+ perf_event_init_userpage(event);
perf_event_update_userpage(event);
unlock:
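
perf_event_init_userpage() gives userspace a way to detect the capabilities-layout change: the size field tells new tooling how much of the mmap page is valid, and cap_bit0_is_deprecated distinguishes the old layout (where bit 0 meant several things at once) from the new one. A sketch of the consumer side, assuming pc points at the mmap'ed perf control page (field names from the uapi perf_event.h of this era):

    #include <linux/perf_event.h>

    /* Nonzero if user-space rdpmc is usable, on either layout */
    static int rdpmc_allowed(const volatile struct perf_event_mmap_page *pc)
    {
        if (pc->cap_bit0_is_deprecated)
            return pc->cap_user_rdpmc;  /* new layout: one bit per capability */

        /* old layout: bit 0 was shared, so this is only a best guess */
        return pc->cap_bit0;
    }
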
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
DECLARE_COMPLETION_ONSTACK(done);
int retval = 0;
+ if (!sub_info->path) {
+ call_usermodehelper_freeinfo(sub_info);
+ return -EINVAL;
+ }
helper_lock();
if (!khelper_wq || usermodehelper_disabled) {
retval = -EBUSY;
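
With this guard, a caller that ends up passing a NULL path gets a clean -EINVAL instead of a crash in the exec path, and the subprocess_info is still freed. A hedged usage sketch (the wrapper is hypothetical; call_usermodehelper() builds the subprocess_info and lands in call_usermodehelper_exec()):

    #include <linux/kmod.h>

    /* Hypothetical wrapper: path might come from a sysctl and be unset */
    static int run_helper(char *path, char *arg)
    {
        char *argv[] = { path, arg, NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        /* Now returns -EINVAL for a NULL path instead of oopsing */
        return call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);
    }
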
diff --git a/kernel/params.c b/kernel/params.c
index 81c4e78c8f4c..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -254,11 +254,11 @@ int parse_args(const char *doing,
STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
int param_set_charp(const char *val, const struct kernel_param *kp)
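
The kstrtoul-to-kstrtol swap matters because kstrtoul() rejects a leading minus sign, so negative values for signed module parameters (for example foo.bar=-1) failed to parse. A small kernel-style sketch of the difference:

    #include <linux/kernel.h>

    static int demo_parse(void)
    {
        long sval;
        unsigned long uval;
        int err;

        err = kstrtol("-1", 0, &sval);   /* returns 0, sval == -1 */
        if (err)
            return err;

        err = kstrtoul("-1", 0, &uval);  /* returns -EINVAL: no sign allowed */
        return err;
    }
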
diff --git a/kernel/pid.c b/kernel/pid.c
index ebe5e80b10f8..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -273,6 +273,11 @@ void free_pid(struct pid *pid)
*/
wake_up_process(ns->child_reaper);
break;
+ case PIDNS_HASH_ADDING:
+ /* Handle a fork failure of the first process */
+ WARN_ON(ns->child_reaper);
+ ns->nr_hashed = 0;
+ /* fall through */
case 0:
schedule_work(&ns->proc_work);
break;
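
The new case works because nr_hashed doubles as a state word: PIDNS_HASH_ADDING occupies the top bit while the low bits count hashed pids, so when the very first fork in a namespace fails, the decrement in free_pid() lands exactly on PIDNS_HASH_ADDING. A small arithmetic sketch (the flag value matches kernel/pid.c):

    #include <assert.h>

    #define PIDNS_HASH_ADDING (1U << 31)  /* same value as kernel/pid.c */

    int main(void)
    {
        /* Fresh namespace: flag set, zero pids hashed */
        unsigned int nr_hashed = PIDNS_HASH_ADDING;

        nr_hashed += 1;  /* alloc_pid() hashes the would-be init */
        nr_hashed -= 1;  /* first fork fails; free_pid() decrements */

        /* Lands exactly on the new switch case above */
        assert(nr_hashed == PIDNS_HASH_ADDING);
        return 0;
    }
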
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
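
Initializing reboot_default to 1 lets reboot_setup() clear it as soon as reboot= is parsed, so arch code can skip its quirk table when the user has chosen explicitly. A sketch of the consumer side, modeled on the x86 quirk path (treat the table name as illustrative):

    static int __init reboot_quirks_init(void)
    {
        /*
         * Apply DMI reboot quirks only when the user did not pass
         * reboot= on the command line (reboot_default still 1).
         */
        if (reboot_default)
            dmi_check_system(reboot_dmi_table);
        return 0;
    }
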
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11cd13667359..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,7 +4242,7 @@ static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
}
if (!se) {
- cfs_rq->h_load = rq->avg.load_avg_contrib;
+ cfs_rq->h_load = cfs_rq->runnable_load_avg;
cfs_rq->last_h_load_update = now;
}
@@ -4823,8 +4823,8 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
(busiest->load_per_task * SCHED_POWER_SCALE) /
busiest->group_power;
- if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
- (scaled_busy_load_per_task * imbn)) {
+ if (busiest->avg_load + scaled_busy_load_per_task >=
+ local->avg_load + (scaled_busy_load_per_task * imbn)) {
env->imbalance = busiest->load_per_task;
return;
}
@@ -4896,7 +4896,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* max load less than avg load (as we skip the groups at or below
* its cpu_power, while calculating max_load...)
*/
- if (busiest->avg_load < sds->avg_load) {
+ if (busiest->avg_load <= sds->avg_load ||
+ local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
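
Both fair.c condition rewrites guard against the same hazard: the load fields are unsigned, so busiest->avg_load - local->avg_load wraps to a huge value whenever the local group is the more loaded one, making the old comparison spuriously true. Moving the subtraction across the inequality preserves the algebra while avoiding the wraparound, as this standalone demonstration shows:

    #include <stdio.h>

    int main(void)
    {
        unsigned long busiest = 100, local = 300;  /* local more loaded */
        unsigned long scaled = 50, imbn = 2;

        /* Old form: 100 - 300 wraps to a huge unsigned value */
        if (busiest - local + scaled >= scaled * imbn)
            printf("old check fires spuriously (lhs = %lu)\n",
                   busiest - local + scaled);

        /* New form: same inequality rearranged, no subtraction */
        if (busiest + scaled >= local + scaled * imbn)
            printf("new check fires\n");
        else
            printf("new check correctly stays quiet\n");
        return 0;
    }
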
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 51c4f34d258e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
.unpark = watchdog_enable,
};
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ int ret;
+
+ /*
+ * No need to cancel and restart hrtimer if it is currently executing
+ * because it will reprogram itself with the new period now.
+ * We should never see it unqueued here because we are running per-cpu
+ * with interrupts disabled.
+ */
+ ret = hrtimer_try_to_cancel(hrtimer);
+ if (ret == 1)
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+ struct call_single_data data = {.func = restart_watchdog_hrtimer};
+ /*
+ * Make sure that the perf event counter will adapt to a new
+ * sampling period. Updating the sampling period directly would
+ * be much nicer, but we do not have an API for that now, so
+ * let's use a big hammer.
+ * The hrtimer will pick up the new period on the next tick, but
+ * that might already be too late, so we have to restart the timer
+ * as well.
+ */
+ watchdog_nmi_disable(cpu);
+ __smp_call_function_single(cpu, &data, 1);
+ watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ update_timers(cpu);
+ preempt_enable();
+ put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
{
int err = 0;
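
The ret == 1 test in restart_watchdog_hrtimer() above leans on the hrtimer_try_to_cancel() contract. An expanded, equivalent form with the return values annotated (semantics as documented in kernel/hrtimer.c):

    int ret = hrtimer_try_to_cancel(hrtimer);

    switch (ret) {
    case -1:  /* callback is running: it re-arms with the new period */
    case 0:   /* not queued: nothing to restart (shouldn't happen here,
                 since we run on the target CPU with interrupts off) */
        break;
    case 1:   /* dequeued: restart so the new period applies immediately */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);
        break;
    }
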
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
pr_err("Failed to create watchdog threads, disabled\n");
else
watchdog_running = 1;
+ } else if (sample_period_changed) {
+ update_timers_all_cpus();
}
return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int err, old_thresh, old_enabled;
+ static DEFINE_MUTEX(watchdog_proc_mutex);
+ mutex_lock(&watchdog_proc_mutex);
old_thresh = ACCESS_ONCE(watchdog_thresh);
old_enabled = ACCESS_ONCE(watchdog_user_enabled);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (err || !write)
- return err;
+ goto out;
set_sample_period();
/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
* watchdog_*_all_cpus() function takes care of this.
*/
if (watchdog_user_enabled && watchdog_thresh)
- err = watchdog_enable_all_cpus();
+ err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
else
watchdog_disable_all_cpus();
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
watchdog_thresh = old_thresh;
watchdog_user_enabled = old_enabled;
}
-
+out:
+ mutex_unlock(&watchdog_proc_mutex);
return err;
}
#endif /* CONFIG_SYSCTL */
@@ -554,5 +604,5 @@ void __init lockup_detector_init(void)
set_sample_period();
if (watchdog_user_enabled)
- watchdog_enable_all_cpus();
+ watchdog_enable_all_cpus(false);
}
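
The static mutex added to proc_dowatchdog() is the usual pattern for serializing a read-modify-write sysctl handler: without it, two concurrent writers could interleave between snapshotting old_thresh and restoring it on failure. A generic sketch of the pattern (handler, lock, and tunable names are hypothetical):

    #include <linux/mutex.h>
    #include <linux/sysctl.h>

    static int my_setting;                           /* hypothetical tunable */
    static int apply_my_setting(void) { return 0; }  /* hypothetical side effect */

    static int my_proc_handler(struct ctl_table *table, int write,
                               void __user *buffer, size_t *lenp, loff_t *ppos)
    {
        static DEFINE_MUTEX(my_proc_mutex);
        int err, old_val;

        mutex_lock(&my_proc_mutex);
        old_val = ACCESS_ONCE(my_setting);  /* snapshot for rollback */
        err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (!err && write && apply_my_setting() < 0)
            my_setting = old_val;           /* roll back under the lock */
        mutex_unlock(&my_proc_mutex);
        return err;
    }
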