path: root/kernel
author      Eric Paris <eparis@redhat.com>  2013-11-23 00:57:08 +0100
committer   Eric Paris <eparis@redhat.com>  2013-11-23 00:57:54 +0100
commit      fc582aef7dcc27a7120cf232c1e76c569c7b6eab (patch)
tree        7d275dd4ceab6067b91e9a25a5f6338b425fbccd /kernel
parent      audit: fix type of sessionid in audit_set_loginuid() (diff)
parent      Linux 3.12 (diff)
Merge tag 'v3.12'
Linux 3.12

Conflicts:
	fs/exec.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile | 2
-rw-r--r--  kernel/audit.c | 5
-rw-r--r--  kernel/capability.c | 13
-rw-r--r--  kernel/cgroup.c | 1676
-rw-r--r--  kernel/cgroup_freezer.c | 155
-rw-r--r--  kernel/context_tracking.c | 137
-rw-r--r--  kernel/cpu.c | 9
-rw-r--r--  kernel/cpuset.c | 317
-rw-r--r--  kernel/events/callchain.c | 3
-rw-r--r--  kernel/events/core.c | 442
-rw-r--r--  kernel/events/ring_buffer.c | 31
-rw-r--r--  kernel/events/uprobes.c | 4
-rw-r--r--  kernel/extable.c | 2
-rw-r--r--  kernel/fork.c | 40
-rw-r--r--  kernel/gcov/fs.c | 2
-rw-r--r--  kernel/groups.c | 2
-rw-r--r--  kernel/hung_task.c | 13
-rw-r--r--  kernel/irq/Kconfig | 12
-rw-r--r--  kernel/jump_label.c | 1
-rw-r--r--  kernel/kexec.c | 5
-rw-r--r--  kernel/kmod.c | 4
-rw-r--r--  kernel/kprobes.c | 95
-rw-r--r--  kernel/ksysfs.c | 2
-rw-r--r--  kernel/lglock.c | 12
-rw-r--r--  kernel/modsign_pubkey.c | 6
-rw-r--r--  kernel/module.c | 30
-rw-r--r--  kernel/mutex.c | 75
-rw-r--r--  kernel/nsproxy.c | 36
-rw-r--r--  kernel/padata.c | 32
-rw-r--r--  kernel/panic.c | 8
-rw-r--r--  kernel/params.c | 34
-rw-r--r--  kernel/pid.c | 6
-rw-r--r--  kernel/pid_namespace.c | 2
-rw-r--r--  kernel/power/hibernate.c | 49
-rw-r--r--  kernel/power/snapshot.c | 17
-rw-r--r--  kernel/power/suspend.c | 4
-rw-r--r--  kernel/power/user.c | 32
-rw-r--r--  kernel/printk/printk.c | 7
-rw-r--r--  kernel/ptrace.c | 2
-rw-r--r--  kernel/rcu.h | 12
-rw-r--r--  kernel/rcupdate.c | 104
-rw-r--r--  kernel/rcutiny.c | 2
-rw-r--r--  kernel/rcutiny_plugin.h | 2
-rw-r--r--  kernel/rcutorture.c | 396
-rw-r--r--  kernel/rcutree.c | 255
-rw-r--r--  kernel/rcutree.h | 19
-rw-r--r--  kernel/rcutree_plugin.h | 460
-rw-r--r--  kernel/reboot.c | 9
-rw-r--r--  kernel/res_counter.c | 25
-rw-r--r--  kernel/sched/core.c | 156
-rw-r--r--  kernel/sched/cpuacct.c | 51
-rw-r--r--  kernel/sched/cputime.c | 74
-rw-r--r--  kernel/sched/debug.c | 6
-rw-r--r--  kernel/sched/fair.c | 631
-rw-r--r--  kernel/sched/sched.h | 14
-rw-r--r--  kernel/sched/stats.h | 5
-rw-r--r--  kernel/signal.c | 4
-rw-r--r--  kernel/smp.c | 32
-rw-r--r--  kernel/softirq.c | 17
-rw-r--r--  kernel/spinlock.c | 14
-rw-r--r--  kernel/sys.c | 20
-rw-r--r--  kernel/sysctl.c | 8
-rw-r--r--  kernel/task_work.c | 40
-rw-r--r--  kernel/time/Kconfig | 51
-rw-r--r--  kernel/time/clockevents.c | 65
-rw-r--r--  kernel/time/ntp.c | 6
-rw-r--r--  kernel/time/tick-sched.c | 61
-rw-r--r--  kernel/time/timekeeping.c | 2
-rw-r--r--  kernel/trace/ftrace.c | 17
-rw-r--r--  kernel/trace/trace.c | 37
-rw-r--r--  kernel/trace/trace.h | 4
-rw-r--r--  kernel/trace/trace_events.c | 207
-rw-r--r--  kernel/trace/trace_printk.c | 19
-rw-r--r--  kernel/trace/trace_syscalls.c | 10
-rw-r--r--  kernel/uid16.c | 2
-rw-r--r--  kernel/up.c | 58
-rw-r--r--  kernel/user.c | 2
-rw-r--r--  kernel/user_namespace.c | 2
-rw-r--r--  kernel/utsname.c | 2
-rw-r--r--  kernel/watchdog.c | 68
-rw-r--r--  kernel/workqueue.c | 148
81 files changed, 3684 insertions(+), 2757 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 35ef1185e359..1ce47553fb02 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -26,6 +26,7 @@ obj-y += sched/
obj-y += power/
obj-y += printk/
obj-y += cpu/
+obj-y += irq/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
@@ -79,7 +80,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_KGDB) += debug/
obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
-obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
obj-$(CONFIG_TREE_RCU) += rcutree.o
diff --git a/kernel/audit.c b/kernel/audit.c
index b8831ac25b70..906ae5a0233a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1222,9 +1222,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
sleep_time = timeout_start + audit_backlog_wait_time -
jiffies;
- if ((long)sleep_time > 0)
+ if ((long)sleep_time > 0) {
wait_for_auditd(sleep_time);
- continue;
+ continue;
+ }
}
if (audit_rate_check() && printk_ratelimit())
printk(KERN_WARNING
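
For illustration only, a minimal userspace sketch (invented values, not kernel code) of the control flow the audit.c hunk above establishes: with the braces added, "continue" retries the wait loop only while sleep budget remains, so the warning path after the "if" becomes reachable once the budget is exhausted; before the change the "continue" executed unconditionally.

#include <stdio.h>

int main(void)
{
	long sleep_time = 2;	/* stand-in for the remaining backlog wait budget */
	int attempt;

	for (attempt = 0; attempt < 4; attempt++) {
		if (sleep_time > 0) {
			printf("attempt %d: waiting (budget %ld)\n", attempt, sleep_time);
			sleep_time--;
			continue;	/* retry only when we actually waited */
		}
		printf("attempt %d: budget exhausted, emitting warning\n", attempt);
		break;
	}
	return 0;
}
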
diff --git a/kernel/capability.c b/kernel/capability.c
index f6c2ce5701e1..4e66bf9275b0 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -433,18 +433,6 @@ bool capable(int cap)
EXPORT_SYMBOL(capable);
/**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-bool nsown_capable(int cap)
-{
- return ns_capable(current_user_ns(), cap);
-}
-
-/**
* inode_capable - Check superior capability over inode
* @inode: The inode in question
* @cap: The capability in question
@@ -464,3 +452,4 @@ bool inode_capable(const struct inode *inode, int cap)
return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
}
+EXPORT_SYMBOL(inode_capable);
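
The deleted nsown_capable() was a thin wrapper around ns_capable(); below is a hypothetical caller sketch (not part of this commit) showing the open-coded replacement.

#include <linux/capability.h>
#include <linux/cred.h>
#include <linux/types.h>

/* Hypothetical helper: the open-coded equivalent of the removed wrapper. */
static bool may_adjust_own_ns(void)
{
	/* was: return nsown_capable(CAP_SYS_ADMIN); */
	return ns_capable(current_user_ns(), CAP_SYS_ADMIN);
}
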
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e91963302c0d..8bd9cfdc70d7 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -60,6 +60,7 @@
#include <linux/poll.h>
#include <linux/flex_array.h> /* used in cgroup_attach_task */
#include <linux/kthread.h>
+#include <linux/file.h>
#include <linux/atomic.h>
@@ -81,7 +82,7 @@
*/
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
-EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for task_subsys_state_check() */
+EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif
@@ -117,6 +118,7 @@ struct cfent {
struct list_head node;
struct dentry *dentry;
struct cftype *type;
+ struct cgroup_subsys_state *css;
/* file xattrs */
struct simple_xattrs xattrs;
@@ -159,9 +161,9 @@ struct css_id {
*/
struct cgroup_event {
/*
- * Cgroup which the event belongs to.
+ * css which the event belongs to.
*/
- struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
/*
* Control file which the event associated.
*/
@@ -215,10 +217,33 @@ static u64 cgroup_serial_nr_next = 1;
*/
static int need_forkexit_callback __read_mostly;
-static void cgroup_offline_fn(struct work_struct *work);
+static struct cftype cgroup_base_files[];
+
+static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype cfts[], bool is_add);
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add);
+
+/**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest (%NULL returns the dummy_css)
+ *
+ * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
+ * function must be called either under cgroup_mutex or rcu_read_lock() and
+ * the caller is responsible for pinning the returned css if it wants to
+ * keep accessing it outside the said locks. This function may return
+ * %NULL if @cgrp doesn't have @subsys_id enabled.
+ */
+static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ if (ss)
+ return rcu_dereference_check(cgrp->subsys[ss->subsys_id],
+ lockdep_is_held(&cgroup_mutex));
+ else
+ return &cgrp->dummy_css;
+}
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
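
A minimal sketch (hypothetical in-file helper, not part of this commit) of the access pattern the cgroup_css() comment above prescribes, mirroring the css_tryget()-based pinning that cgroup_file_open() performs later in this diff:

/* Look up a css under RCU and pin it before using it outside the lock. */
static struct cgroup_subsys_state *pin_css(struct cgroup *cgrp,
					   struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (css && !css_tryget(css))
		css = NULL;	/* css is on its way out; treat as absent */
	rcu_read_unlock();

	return css;		/* caller must css_put() when done */
}
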
@@ -365,9 +390,11 @@ static struct cgrp_cset_link init_cgrp_cset_link;
static int cgroup_init_idr(struct cgroup_subsys *ss,
struct cgroup_subsys_state *css);
-/* css_set_lock protects the list of css_set objects, and the
- * chain of tasks off each css_set. Nests outside task->alloc_lock
- * due to cgroup_iter_start() */
+/*
+ * css_set_lock protects the list of css_set objects, and the chain of
+ * tasks off each css_set. Nests outside task->alloc_lock due to
+ * css_task_iter_start().
+ */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;
@@ -392,10 +419,12 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
return key;
}
-/* We don't maintain the lists running through each css_set to its
- * task until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
+/*
+ * We don't maintain the lists running through each css_set to its task
+ * until after the first call to css_task_iter_start(). This reduces the
+ * fork()/exit() overhead for people who have cgroups compiled into their
+ * kernel but not actually in use.
+ */
static int use_task_css_set_links __read_mostly;
static void __put_css_set(struct css_set *cset, int taskexit)
@@ -464,7 +493,7 @@ static inline void put_css_set_taskexit(struct css_set *cset)
* @new_cgrp: cgroup that's being entered by the task
* @template: desired set of css pointers in css_set (pre-calculated)
*
- * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * Returns true if "cset" matches "old_cset" except for the hierarchy
* which "new_cgrp" belongs to, for which it should match "new_cgrp".
*/
static bool compare_css_sets(struct css_set *cset,
@@ -555,7 +584,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
/* Subsystem is in this hierarchy. So we want
* the subsystem state from the new
* cgroup */
- template[i] = cgrp->subsys[i];
+ template[i] = cgroup_css(cgrp, ss);
} else {
/* Subsystem is not in this hierarchy, so we
* don't want to change the subsystem state */
@@ -803,8 +832,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask);
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;
@@ -813,8 +841,7 @@ static struct backing_dev_info cgroup_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
-static int alloc_css_id(struct cgroup_subsys *ss,
- struct cgroup *parent, struct cgroup *child);
+static int alloc_css_id(struct cgroup_subsys_state *child_css);
static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
@@ -845,15 +872,8 @@ static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
static void cgroup_free_fn(struct work_struct *work)
{
struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
- struct cgroup_subsys *ss;
mutex_lock(&cgroup_mutex);
- /*
- * Release the subsystem state objects.
- */
- for_each_root_subsys(cgrp->root, ss)
- ss->css_free(cgrp);
-
cgrp->root->number_of_cgroups--;
mutex_unlock(&cgroup_mutex);
@@ -864,8 +884,6 @@ static void cgroup_free_fn(struct work_struct *work)
*/
dput(cgrp->parent->dentry);
- ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
-
/*
* Drop the active superblock reference that we took when we
* created the cgroup. This will free cgrp->root, if we are
@@ -956,27 +974,22 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
}
/**
- * cgroup_clear_directory - selective removal of base and subsystem files
- * @dir: directory containing the files
- * @base_files: true if the base files should be removed
+ * cgroup_clear_dir - remove subsys files in a cgroup directory
+ * @cgrp: target cgroup
* @subsys_mask: mask of the subsystem ids whose files should be removed
*/
-static void cgroup_clear_directory(struct dentry *dir, bool base_files,
- unsigned long subsys_mask)
+static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
- struct cgroup *cgrp = __d_cgrp(dir);
struct cgroup_subsys *ss;
+ int i;
- for_each_root_subsys(cgrp->root, ss) {
+ for_each_subsys(ss, i) {
struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
+
+ if (!test_bit(i, &subsys_mask))
continue;
list_for_each_entry(set, &ss->cftsets, node)
- cgroup_addrm_files(cgrp, NULL, set->cfts, false);
- }
- if (base_files) {
- while (!list_empty(&cgrp->files))
- cgroup_rm_file(cgrp, NULL);
+ cgroup_addrm_files(cgrp, set->cfts, false);
}
}
@@ -986,9 +999,6 @@ static void cgroup_clear_directory(struct dentry *dir, bool base_files,
static void cgroup_d_remove_dir(struct dentry *dentry)
{
struct dentry *parent;
- struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
-
- cgroup_clear_directory(dentry, true, root->subsys_mask);
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
@@ -1009,79 +1019,84 @@ static int rebind_subsystems(struct cgroupfs_root *root,
{
struct cgroup *cgrp = &root->top_cgroup;
struct cgroup_subsys *ss;
- int i;
+ unsigned long pinned = 0;
+ int i, ret;
BUG_ON(!mutex_is_locked(&cgroup_mutex));
BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
/* Check that any added subsystems are currently free */
for_each_subsys(ss, i) {
- unsigned long bit = 1UL << i;
-
- if (!(bit & added_mask))
+ if (!(added_mask & (1 << i)))
continue;
+ /* is the subsystem mounted elsewhere? */
if (ss->root != &cgroup_dummy_root) {
- /* Subsystem isn't free */
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_put;
+ }
+
+ /* pin the module */
+ if (!try_module_get(ss->module)) {
+ ret = -ENOENT;
+ goto out_put;
}
+ pinned |= 1 << i;
}
- /* Currently we don't handle adding/removing subsystems when
- * any child cgroups exist. This is theoretically supportable
- * but involves complex error handling, so it's being left until
- * later */
- if (root->number_of_cgroups > 1)
- return -EBUSY;
+ /* subsys could be missing if unloaded between parsing and here */
+ if (added_mask != pinned) {
+ ret = -ENOENT;
+ goto out_put;
+ }
+
+ ret = cgroup_populate_dir(cgrp, added_mask);
+ if (ret)
+ goto out_put;
+
+ /*
+ * Nothing can fail from this point on. Remove files for the
+ * removed subsystems and rebind each subsystem.
+ */
+ cgroup_clear_dir(cgrp, removed_mask);
- /* Process each subsystem */
for_each_subsys(ss, i) {
unsigned long bit = 1UL << i;
if (bit & added_mask) {
/* We're binding this subsystem to this hierarchy */
- BUG_ON(cgrp->subsys[i]);
- BUG_ON(!cgroup_dummy_top->subsys[i]);
- BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top);
+ BUG_ON(cgroup_css(cgrp, ss));
+ BUG_ON(!cgroup_css(cgroup_dummy_top, ss));
+ BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top);
+
+ rcu_assign_pointer(cgrp->subsys[i],
+ cgroup_css(cgroup_dummy_top, ss));
+ cgroup_css(cgrp, ss)->cgroup = cgrp;
- cgrp->subsys[i] = cgroup_dummy_top->subsys[i];
- cgrp->subsys[i]->cgroup = cgrp;
list_move(&ss->sibling, &root->subsys_list);
ss->root = root;
if (ss->bind)
- ss->bind(cgrp);
+ ss->bind(cgroup_css(cgrp, ss));
/* refcount was already taken, and we're keeping it */
root->subsys_mask |= bit;
} else if (bit & removed_mask) {
/* We're removing this subsystem */
- BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]);
- BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
+ BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss));
+ BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp);
if (ss->bind)
- ss->bind(cgroup_dummy_top);
- cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top;
- cgrp->subsys[i] = NULL;
+ ss->bind(cgroup_css(cgroup_dummy_top, ss));
+
+ cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top;
+ RCU_INIT_POINTER(cgrp->subsys[i], NULL);
+
cgroup_subsys[i]->root = &cgroup_dummy_root;
list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);
/* subsystem is now free - drop reference on module */
module_put(ss->module);
root->subsys_mask &= ~bit;
- } else if (bit & root->subsys_mask) {
- /* Subsystem state should already exist */
- BUG_ON(!cgrp->subsys[i]);
- /*
- * a refcount was taken, but we already had one, so
- * drop the extra reference.
- */
- module_put(ss->module);
-#ifdef CONFIG_MODULE_UNLOAD
- BUG_ON(ss->module && !module_refcount(ss->module));
-#endif
- } else {
- /* Subsystem state shouldn't exist */
- BUG_ON(cgrp->subsys[i]);
}
}
@@ -1092,6 +1107,12 @@ static int rebind_subsystems(struct cgroupfs_root *root,
root->flags |= CGRP_ROOT_SUBSYS_BOUND;
return 0;
+
+out_put:
+ for_each_subsys(ss, i)
+ if (pinned & (1 << i))
+ module_put(ss->module);
+ return ret;
}
static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1142,7 +1163,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
char *token, *o = data;
bool all_ss = false, one_ss = false;
unsigned long mask = (unsigned long)-1;
- bool module_pin_failed = false;
struct cgroup_subsys *ss;
int i;
@@ -1285,52 +1305,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
if (!opts->subsys_mask && !opts->name)
return -EINVAL;
- /*
- * Grab references on all the modules we'll need, so the subsystems
- * don't dance around before rebind_subsystems attaches them. This may
- * take duplicate reference counts on a subsystem that's already used,
- * but rebind_subsystems handles this case.
- */
- for_each_subsys(ss, i) {
- if (!(opts->subsys_mask & (1UL << i)))
- continue;
- if (!try_module_get(cgroup_subsys[i]->module)) {
- module_pin_failed = true;
- break;
- }
- }
- if (module_pin_failed) {
- /*
- * oops, one of the modules was going away. this means that we
- * raced with a module_delete call, and to the user this is
- * essentially a "subsystem doesn't exist" case.
- */
- for (i--; i >= 0; i--) {
- /* drop refcounts only on the ones we took */
- unsigned long bit = 1UL << i;
-
- if (!(bit & opts->subsys_mask))
- continue;
- module_put(cgroup_subsys[i]->module);
- }
- return -ENOENT;
- }
-
return 0;
}
-static void drop_parsed_module_refcounts(unsigned long subsys_mask)
-{
- struct cgroup_subsys *ss;
- int i;
-
- mutex_lock(&cgroup_mutex);
- for_each_subsys(ss, i)
- if (subsys_mask & (1UL << i))
- module_put(cgroup_subsys[i]->module);
- mutex_unlock(&cgroup_mutex);
-}
-
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
int ret = 0;
@@ -1370,22 +1347,15 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
goto out_unlock;
}
- /*
- * Clear out the files of subsystems that should be removed, do
- * this before rebind_subsystems, since rebind_subsystems may
- * change this hierarchy's subsys_list.
- */
- cgroup_clear_directory(cgrp->dentry, false, removed_mask);
-
- ret = rebind_subsystems(root, added_mask, removed_mask);
- if (ret) {
- /* rebind_subsystems failed, re-populate the removed files */
- cgroup_populate_dir(cgrp, false, removed_mask);
+ /* remounting is not allowed for populated hierarchies */
+ if (root->number_of_cgroups > 1) {
+ ret = -EBUSY;
goto out_unlock;
}
- /* re-populate subsystem files */
- cgroup_populate_dir(cgrp, false, added_mask);
+ ret = rebind_subsystems(root, added_mask, removed_mask);
+ if (ret)
+ goto out_unlock;
if (opts.release_agent)
strcpy(root->release_agent_path, opts.release_agent);
@@ -1395,8 +1365,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
- if (ret)
- drop_parsed_module_refcounts(opts.subsys_mask);
return ret;
}
@@ -1416,6 +1384,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->release_list);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
+ cgrp->dummy_css.cgroup = cgrp;
INIT_LIST_HEAD(&cgrp->event_list);
spin_lock_init(&cgrp->event_list_lock);
simple_xattrs_init(&cgrp->xattrs);
@@ -1431,6 +1400,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
cgrp->root = root;
RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
init_cgroup_housekeeping(cgrp);
+ idr_init(&root->cgroup_idr);
}
static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
@@ -1503,7 +1473,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
*/
root->subsys_mask = opts->subsys_mask;
root->flags = opts->flags;
- ida_init(&root->cgroup_ida);
if (opts->release_agent)
strcpy(root->release_agent_path, opts->release_agent);
if (opts->name)
@@ -1519,7 +1488,7 @@ static void cgroup_free_root(struct cgroupfs_root *root)
/* hierarhcy ID shoulid already have been released */
WARN_ON_ONCE(root->hierarchy_id);
- ida_destroy(&root->cgroup_ida);
+ idr_destroy(&root->cgroup_idr);
kfree(root);
}
}
@@ -1584,7 +1553,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
int ret = 0;
struct super_block *sb;
struct cgroupfs_root *new_root;
+ struct list_head tmp_links;
struct inode *inode;
+ const struct cred *cred;
/* First find the desired set of subsystems */
mutex_lock(&cgroup_mutex);
@@ -1600,7 +1571,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
new_root = cgroup_root_from_opts(&opts);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
- goto drop_modules;
+ goto out_err;
}
opts.new_root = new_root;
@@ -1609,17 +1580,15 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
cgroup_free_root(opts.new_root);
- goto drop_modules;
+ goto out_err;
}
root = sb->s_fs_info;
BUG_ON(!root);
if (root == opts.new_root) {
/* We used the new root structure, so this is a new hierarchy */
- struct list_head tmp_links;
struct cgroup *root_cgrp = &root->top_cgroup;
struct cgroupfs_root *existing_root;
- const struct cred *cred;
int i;
struct css_set *cset;
@@ -1634,6 +1603,11 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
+ root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
+ 0, 1, GFP_KERNEL);
+ if (root_cgrp->id < 0)
+ goto unlock_drop;
+
/* Check for name clashes with existing mounts */
ret = -EBUSY;
if (strlen(root->name))
@@ -1657,26 +1631,37 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
if (ret)
goto unlock_drop;
+ sb->s_root->d_fsdata = root_cgrp;
+ root_cgrp->dentry = sb->s_root;
+
+ /*
+ * We're inside get_sb() and will call lookup_one_len() to
+ * create the root files, which doesn't work if SELinux is
+ * in use. The following cred dancing somehow works around
+ * it. See 2ce9738ba ("cgroupfs: use init_cred when
+ * populating new cgroupfs mount") for more details.
+ */
+ cred = override_creds(&init_cred);
+
+ ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+ if (ret)
+ goto rm_base_files;
+
ret = rebind_subsystems(root, root->subsys_mask, 0);
- if (ret == -EBUSY) {
- free_cgrp_cset_links(&tmp_links);
- goto unlock_drop;
- }
+ if (ret)
+ goto rm_base_files;
+
+ revert_creds(cred);
+
/*
* There must be no failure case after here, since rebinding
* takes care of subsystems' refcounts, which are explicitly
* dropped in the failure exit path.
*/
- /* EBUSY should be the only error here */
- BUG_ON(ret);
-
list_add(&root->root_list, &cgroup_roots);
cgroup_root_count++;
- sb->s_root->d_fsdata = root_cgrp;
- root->top_cgroup.dentry = sb->s_root;
-
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
@@ -1689,9 +1674,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
BUG_ON(!list_empty(&root_cgrp->children));
BUG_ON(root->number_of_cgroups != 1);
- cred = override_creds(&init_cred);
- cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
- revert_creds(cred);
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
@@ -1711,15 +1693,16 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
}
}
-
- /* no subsys rebinding, so refcounts don't change */
- drop_parsed_module_refcounts(opts.subsys_mask);
}
kfree(opts.release_agent);
kfree(opts.name);
return dget(sb->s_root);
+ rm_base_files:
+ free_cgrp_cset_links(&tmp_links);
+ cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
+ revert_creds(cred);
unlock_drop:
cgroup_exit_root_id(root);
mutex_unlock(&cgroup_root_mutex);
@@ -1727,8 +1710,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_unlock(&inode->i_mutex);
drop_new_super:
deactivate_locked_super(sb);
- drop_modules:
- drop_parsed_module_refcounts(opts.subsys_mask);
out_err:
kfree(opts.release_agent);
kfree(opts.name);
@@ -1746,6 +1727,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
BUG_ON(root->number_of_cgroups != 1);
BUG_ON(!list_empty(&cgrp->children));
+ mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
@@ -1778,6 +1760,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
simple_xattrs_free(&cgrp->xattrs);
@@ -1889,7 +1872,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path);
struct task_and_cgroup {
struct task_struct *task;
struct cgroup *cgrp;
- struct css_set *cg;
+ struct css_set *cset;
};
struct cgroup_taskset {
@@ -1939,18 +1922,20 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
- * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * cgroup_taskset_cur_css - return the matching css for the current task
* @tset: taskset of interest
+ * @subsys_id: the ID of the target subsystem
*
- * Return the cgroup for the current (last returned) task of @tset. This
- * function must be preceded by either cgroup_taskset_first() or
- * cgroup_taskset_next().
+ * Return the css for the current (last returned) task of @tset for
+ * subsystem specified by @subsys_id. This function must be preceded by
+ * either cgroup_taskset_first() or cgroup_taskset_next().
*/
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+ int subsys_id)
{
- return tset->cur_cgrp;
+ return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]);
}
-EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);
/**
* cgroup_taskset_size - return the number of tasks in taskset
@@ -2054,7 +2039,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
/* @tsk either already exited or can't exit until the end */
if (tsk->flags & PF_EXITING)
- continue;
+ goto next;
/* as per above, nr_threads may decrease, but not increase. */
BUG_ON(i >= group_size);
@@ -2062,7 +2047,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
ent.cgrp = task_cgroup_from_root(tsk, root);
/* nothing to do if this task is already in the cgroup */
if (ent.cgrp == cgrp)
- continue;
+ goto next;
/*
* saying GFP_ATOMIC has no effect here because we did prealloc
* earlier, but it's good form to communicate our expectations.
@@ -2070,7 +2055,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
retval = flex_array_put(group, i, &ent, GFP_ATOMIC);
BUG_ON(retval != 0);
i++;
-
+ next:
if (!threadgroup)
break;
} while_each_thread(leader, tsk);
@@ -2089,8 +2074,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
* step 1: check that we can legitimately attach to the cgroup.
*/
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss->can_attach) {
- retval = ss->can_attach(cgrp, &tset);
+ retval = ss->can_attach(css, &tset);
if (retval) {
failed_ss = ss;
goto out_cancel_attach;
@@ -2107,8 +2094,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
tc = flex_array_get(group, i);
old_cset = task_css_set(tc->task);
- tc->cg = find_css_set(old_cset, cgrp);
- if (!tc->cg) {
+ tc->cset = find_css_set(old_cset, cgrp);
+ if (!tc->cset) {
retval = -ENOMEM;
goto out_put_css_set_refs;
}
@@ -2121,7 +2108,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
*/
for (i = 0; i < group_size; i++) {
tc = flex_array_get(group, i);
- cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
+ cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
}
/* nothing is sensitive to fork() after this point. */
@@ -2129,8 +2116,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
* step 4: do subsystem attach callbacks.
*/
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss->attach)
- ss->attach(cgrp, &tset);
+ ss->attach(css, &tset);
}
/*
@@ -2141,18 +2130,20 @@ out_put_css_set_refs:
if (retval) {
for (i = 0; i < group_size; i++) {
tc = flex_array_get(group, i);
- if (!tc->cg)
+ if (!tc->cset)
break;
- put_css_set(tc->cg);
+ put_css_set(tc->cset);
}
}
out_cancel_attach:
if (retval) {
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss == failed_ss)
break;
if (ss->cancel_attach)
- ss->cancel_attach(cgrp, &tset);
+ ss->cancel_attach(css, &tset);
}
}
out_free_group_list:
@@ -2253,9 +2244,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
mutex_lock(&cgroup_mutex);
for_each_active_root(root) {
- struct cgroup *from_cg = task_cgroup_from_root(from, root);
+ struct cgroup *from_cgrp = task_cgroup_from_root(from, root);
- retval = cgroup_attach_task(from_cg, tsk, false);
+ retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
break;
}
@@ -2265,34 +2256,38 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
-static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+static int cgroup_tasks_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 pid)
{
- return attach_task_by_pid(cgrp, pid, false);
+ return attach_task_by_pid(css->cgroup, pid, false);
}
-static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
+static int cgroup_procs_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 tgid)
{
- return attach_task_by_pid(cgrp, tgid, true);
+ return attach_task_by_pid(css->cgroup, tgid, true);
}
-static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buffer)
{
- BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+ BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
if (strlen(buffer) >= PATH_MAX)
return -EINVAL;
- if (!cgroup_lock_live_group(cgrp))
+ if (!cgroup_lock_live_group(css->cgroup))
return -ENODEV;
mutex_lock(&cgroup_root_mutex);
- strcpy(cgrp->root->release_agent_path, buffer);
+ strcpy(css->cgroup->root->release_agent_path, buffer);
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
return 0;
}
-static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
+ struct cgroup *cgrp = css->cgroup;
+
if (!cgroup_lock_live_group(cgrp))
return -ENODEV;
seq_puts(seq, cgrp->root->release_agent_path);
@@ -2301,20 +2296,20 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
-static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
- seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+ seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
return 0;
}
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64
-static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ const char __user *userbuf, size_t nbytes,
+ loff_t *unused_ppos)
{
char buffer[CGROUP_LOCAL_BUFFER_SIZE];
int retval = 0;
@@ -2332,22 +2327,22 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
u64 val = simple_strtoull(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
- retval = cft->write_u64(cgrp, cft, val);
+ retval = cft->write_u64(css, cft, val);
} else {
s64 val = simple_strtoll(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
- retval = cft->write_s64(cgrp, cft, val);
+ retval = cft->write_s64(css, cft, val);
}
if (!retval)
retval = nbytes;
return retval;
}
-static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ const char __user *userbuf, size_t nbytes,
+ loff_t *unused_ppos)
{
char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
int retval = 0;
@@ -2370,7 +2365,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
}
buffer[nbytes] = 0; /* nul-terminate */
- retval = cft->write_string(cgrp, cft, strstrip(buffer));
+ retval = cft->write_string(css, cft, strstrip(buffer));
if (!retval)
retval = nbytes;
out:
@@ -2380,65 +2375,60 @@ out:
}
static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+ struct cgroup_subsys_state *css = cfe->css;
- if (cgroup_is_dead(cgrp))
- return -ENODEV;
if (cft->write)
- return cft->write(cgrp, cft, file, buf, nbytes, ppos);
+ return cft->write(css, cft, file, buf, nbytes, ppos);
if (cft->write_u64 || cft->write_s64)
- return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
if (cft->write_string)
- return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
if (cft->trigger) {
- int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+ int ret = cft->trigger(css, (unsigned int)cft->private);
return ret ? ret : nbytes;
}
return -EINVAL;
}
-static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
{
char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- u64 val = cft->read_u64(cgrp, cft);
+ u64 val = cft->read_u64(css, cft);
int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
-static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
{
char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- s64 val = cft->read_s64(cgrp, cft);
+ s64 val = cft->read_s64(css, cft);
int len = sprintf(tmp, "%lld\n", (long long) val);
return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
static ssize_t cgroup_file_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
-
- if (cgroup_is_dead(cgrp))
- return -ENODEV;
+ struct cgroup_subsys_state *css = cfe->css;
if (cft->read)
- return cft->read(cgrp, cft, file, buf, nbytes, ppos);
+ return cft->read(css, cft, file, buf, nbytes, ppos);
if (cft->read_u64)
- return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
if (cft->read_s64)
- return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
return -EINVAL;
}
@@ -2447,11 +2437,6 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
* supports string->u64 maps, but can be extended in future.
*/
-struct cgroup_seqfile_state {
- struct cftype *cft;
- struct cgroup *cgroup;
-};
-
static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
struct seq_file *sf = cb->state;
@@ -2460,69 +2445,86 @@ static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
- struct cgroup_seqfile_state *state = m->private;
- struct cftype *cft = state->cft;
+ struct cfent *cfe = m->private;
+ struct cftype *cft = cfe->type;
+ struct cgroup_subsys_state *css = cfe->css;
+
if (cft->read_map) {
struct cgroup_map_cb cb = {
.fill = cgroup_map_add,
.state = m,
};
- return cft->read_map(state->cgroup, cft, &cb);
+ return cft->read_map(css, cft, &cb);
}
- return cft->read_seq_string(state->cgroup, cft, m);
-}
-
-static int cgroup_seqfile_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- kfree(seq->private);
- return single_release(inode, file);
+ return cft->read_seq_string(css, cft, m);
}
static const struct file_operations cgroup_seqfile_operations = {
.read = seq_read,
.write = cgroup_file_write,
.llseek = seq_lseek,
- .release = cgroup_seqfile_release,
+ .release = single_release,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
+ struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
+ struct cgroup_subsys_state *css;
int err;
- struct cftype *cft;
err = generic_file_open(inode, file);
if (err)
return err;
- cft = __d_cft(file->f_dentry);
- if (cft->read_map || cft->read_seq_string) {
- struct cgroup_seqfile_state *state;
+ /*
+ * If the file belongs to a subsystem, pin the css. Will be
+ * unpinned either on open failure or release. This ensures that
+ * @css stays alive for all file operations.
+ */
+ rcu_read_lock();
+ css = cgroup_css(cgrp, cft->ss);
+ if (cft->ss && !css_tryget(css))
+ css = NULL;
+ rcu_read_unlock();
- state = kzalloc(sizeof(*state), GFP_USER);
- if (!state)
- return -ENOMEM;
+ if (!css)
+ return -ENODEV;
- state->cft = cft;
- state->cgroup = __d_cgrp(file->f_dentry->d_parent);
+ /*
+ * @cfe->css is used by read/write/close to determine the
+ * associated css. @file->private_data would be a better place but
+ * that's already used by seqfile. Multiple accessors may use it
+ * simultaneously which is okay as the association never changes.
+ */
+ WARN_ON_ONCE(cfe->css && cfe->css != css);
+ cfe->css = css;
+
+ if (cft->read_map || cft->read_seq_string) {
file->f_op = &cgroup_seqfile_operations;
- err = single_open(file, cgroup_seqfile_show, state);
- if (err < 0)
- kfree(state);
- } else if (cft->open)
+ err = single_open(file, cgroup_seqfile_show, cfe);
+ } else if (cft->open) {
err = cft->open(inode, file);
- else
- err = 0;
+ }
+ if (css->ss && err)
+ css_put(css);
return err;
}
static int cgroup_file_release(struct inode *inode, struct file *file)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup_subsys_state *css = cfe->css;
+ int ret = 0;
+
if (cft->release)
- return cft->release(inode, file);
- return 0;
+ ret = cft->release(inode, file);
+ if (css->ss)
+ css_put(css);
+ return ret;
}
/*
@@ -2736,8 +2738,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
return mode;
}
-static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype *cft)
+static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
struct dentry *dir = cgrp->dentry;
struct cgroup *parent = __d_cgrp(dir);
@@ -2747,8 +2748,9 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
umode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
- if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
- strcpy(name, subsys->name);
+ if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
+ !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
+ strcpy(name, cft->ss->name);
strcat(name, ".");
}
strcat(name, cft->name);
@@ -2782,11 +2784,25 @@ out:
return error;
}
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype cfts[], bool is_add)
+/**
+ * cgroup_addrm_files - add or remove files to a cgroup directory
+ * @cgrp: the target cgroup
+ * @cfts: array of cftypes to be added
+ * @is_add: whether to add or remove
+ *
+ * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
+ * For removals, this function never fails. If addition fails, this
+ * function doesn't remove files already added. The caller is responsible
+ * for cleaning up.
+ */
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add)
{
struct cftype *cft;
- int err, ret = 0;
+ int ret;
+
+ lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
+ lockdep_assert_held(&cgroup_mutex);
for (cft = cfts; cft->name[0] != '\0'; cft++) {
/* does cft->flags tell us to skip this file on @cgrp? */
@@ -2798,16 +2814,17 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
continue;
if (is_add) {
- err = cgroup_add_file(cgrp, subsys, cft);
- if (err)
+ ret = cgroup_add_file(cgrp, cft);
+ if (ret) {
pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
- cft->name, err);
- ret = err;
+ cft->name, ret);
+ return ret;
+ }
} else {
cgroup_rm_file(cgrp, cft);
}
}
- return ret;
+ return 0;
}
static void cgroup_cfts_prepare(void)
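
A sketch of the cleanup contract documented for cgroup_addrm_files() above (hypothetical in-file caller; the mount path's rm_base_files label elsewhere in this diff follows the same pattern):

static int add_files_or_back_out(struct cgroup *cgrp, struct cftype cfts[])
{
	int ret;

	ret = cgroup_addrm_files(cgrp, cfts, true);	/* add */
	if (ret)
		/* addition stopped part-way; removal never fails */
		cgroup_addrm_files(cgrp, cfts, false);
	return ret;
}
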
@@ -2816,28 +2833,30 @@ static void cgroup_cfts_prepare(void)
/*
* Thanks to the entanglement with vfs inode locking, we can't walk
* the existing cgroups under cgroup_mutex and create files.
- * Instead, we use cgroup_for_each_descendant_pre() and drop RCU
- * read lock before calling cgroup_addrm_files().
+ * Instead, we use css_for_each_descendant_pre() and drop RCU read
+ * lock before calling cgroup_addrm_files().
*/
mutex_lock(&cgroup_mutex);
}
-static void cgroup_cfts_commit(struct cgroup_subsys *ss,
- struct cftype *cfts, bool is_add)
+static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
__releases(&cgroup_mutex)
{
LIST_HEAD(pending);
- struct cgroup *cgrp, *root = &ss->root->top_cgroup;
+ struct cgroup_subsys *ss = cfts[0].ss;
+ struct cgroup *root = &ss->root->top_cgroup;
struct super_block *sb = ss->root->sb;
struct dentry *prev = NULL;
struct inode *inode;
+ struct cgroup_subsys_state *css;
u64 update_before;
+ int ret = 0;
/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
if (!cfts || ss->root == &cgroup_dummy_root ||
!atomic_inc_not_zero(&sb->s_active)) {
mutex_unlock(&cgroup_mutex);
- return;
+ return 0;
}
/*
@@ -2849,17 +2868,11 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
mutex_unlock(&cgroup_mutex);
- /* @root always needs to be updated */
- inode = root->dentry->d_inode;
- mutex_lock(&inode->i_mutex);
- mutex_lock(&cgroup_mutex);
- cgroup_addrm_files(root, ss, cfts, is_add);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
-
/* add/rm files for all cgroups created before */
rcu_read_lock();
- cgroup_for_each_descendant_pre(cgrp, root) {
+ css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
+ struct cgroup *cgrp = css->cgroup;
+
if (cgroup_is_dead(cgrp))
continue;
@@ -2873,15 +2886,18 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
- cgroup_addrm_files(cgrp, ss, cfts, is_add);
+ ret = cgroup_addrm_files(cgrp, cfts, is_add);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
rcu_read_lock();
+ if (ret)
+ break;
}
rcu_read_unlock();
dput(prev);
deactivate_super(sb);
+ return ret;
}
/**
@@ -2901,49 +2917,56 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype_set *set;
+ struct cftype *cft;
+ int ret;
set = kzalloc(sizeof(*set), GFP_KERNEL);
if (!set)
return -ENOMEM;
+ for (cft = cfts; cft->name[0] != '\0'; cft++)
+ cft->ss = ss;
+
cgroup_cfts_prepare();
set->cfts = cfts;
list_add_tail(&set->node, &ss->cftsets);
- cgroup_cfts_commit(ss, cfts, true);
-
- return 0;
+ ret = cgroup_cfts_commit(cfts, true);
+ if (ret)
+ cgroup_rm_cftypes(cfts);
+ return ret;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
/**
* cgroup_rm_cftypes - remove an array of cftypes from a subsystem
- * @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
- * Unregister @cfts from @ss. Files described by @cfts are removed from
- * all existing cgroups to which @ss is attached and all future cgroups
- * won't have them either. This function can be called anytime whether @ss
- * is attached or not.
+ * Unregister @cfts. Files described by @cfts are removed from all
+ * existing cgroups and all future cgroups won't have them either. This
+ * function can be called anytime whether @cfts' subsys is attached or not.
*
* Returns 0 on successful unregistration, -ENOENT if @cfts is not
- * registered with @ss.
+ * registered.
*/
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+int cgroup_rm_cftypes(struct cftype *cfts)
{
struct cftype_set *set;
+ if (!cfts || !cfts[0].ss)
+ return -ENOENT;
+
cgroup_cfts_prepare();
- list_for_each_entry(set, &ss->cftsets, node) {
+ list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
if (set->cfts == cfts) {
list_del(&set->node);
kfree(set);
- cgroup_cfts_commit(ss, cfts, false);
+ cgroup_cfts_commit(cfts, false);
return 0;
}
}
- cgroup_cfts_commit(ss, NULL, false);
+ cgroup_cfts_commit(NULL, false);
return -ENOENT;
}
@@ -2966,34 +2989,10 @@ int cgroup_task_count(const struct cgroup *cgrp)
}
/*
- * Advance a list_head iterator. The iterator should be positioned at
- * the start of a css_set
- */
-static void cgroup_advance_iter(struct cgroup *cgrp, struct cgroup_iter *it)
-{
- struct list_head *l = it->cset_link;
- struct cgrp_cset_link *link;
- struct css_set *cset;
-
- /* Advance to the next non-empty css_set */
- do {
- l = l->next;
- if (l == &cgrp->cset_links) {
- it->cset_link = NULL;
- return;
- }
- link = list_entry(l, struct cgrp_cset_link, cset_link);
- cset = link->cset;
- } while (list_empty(&cset->tasks));
- it->cset_link = l;
- it->task = cset->tasks.next;
-}
-
-/*
- * To reduce the fork() overhead for systems that are not actually
- * using their cgroups capability, we don't maintain the lists running
- * through each css_set to its tasks until we see the list actually
- * used - in other words after the first call to cgroup_iter_start().
+ * To reduce the fork() overhead for systems that are not actually using
+ * their cgroups capability, we don't maintain the lists running through
+ * each css_set to its tasks until we see the list actually used - in other
+ * words after the first call to css_task_iter_start().
*/
static void cgroup_enable_task_cg_lists(void)
{
@@ -3024,16 +3023,21 @@ static void cgroup_enable_task_cg_lists(void)
}
/**
- * cgroup_next_sibling - find the next sibling of a given cgroup
- * @pos: the current cgroup
+ * css_next_child - find the next child of a given css
+ * @pos_css: the current position (%NULL to initiate traversal)
+ * @parent_css: css whose children to walk
*
- * This function returns the next sibling of @pos and should be called
- * under RCU read lock. The only requirement is that @pos is accessible.
- * The next sibling is guaranteed to be returned regardless of @pos's
- * state.
+ * This function returns the next child of @parent_css and should be called
+ * under RCU read lock. The only requirement is that @parent_css and
+ * @pos_css are accessible. The next sibling is guaranteed to be returned
+ * regardless of their states.
*/
-struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_next_child(struct cgroup_subsys_state *pos_css,
+ struct cgroup_subsys_state *parent_css)
{
+ struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
+ struct cgroup *cgrp = parent_css->cgroup;
struct cgroup *next;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3048,78 +3052,81 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
* safe to dereference from this RCU critical section. If
* ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
* to be visible as %true here.
+ *
+ * If @pos is dead, its next pointer can't be dereferenced;
+ * however, as each cgroup is given a monotonically increasing
+ * unique serial number and always appended to the sibling list,
+ * the next one can be found by walking the parent's children until
+ * we see a cgroup with higher serial number than @pos's. While
+ * this path can be slower, it's taken only when either the current
+ * cgroup is removed or iteration and removal race.
*/
- if (likely(!cgroup_is_dead(pos))) {
+ if (!pos) {
+ next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
+ } else if (likely(!cgroup_is_dead(pos))) {
next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
- if (&next->sibling != &pos->parent->children)
- return next;
- return NULL;
+ } else {
+ list_for_each_entry_rcu(next, &cgrp->children, sibling)
+ if (next->serial_nr > pos->serial_nr)
+ break;
}
- /*
- * Can't dereference the next pointer. Each cgroup is given a
- * monotonically increasing unique serial number and always
- * appended to the sibling list, so the next one can be found by
- * walking the parent's children until we see a cgroup with higher
- * serial number than @pos's.
- *
- * While this path can be slow, it's taken only when either the
- * current cgroup is removed or iteration and removal race.
- */
- list_for_each_entry_rcu(next, &pos->parent->children, sibling)
- if (next->serial_nr > pos->serial_nr)
- return next;
- return NULL;
+ if (&next->sibling == &cgrp->children)
+ return NULL;
+
+ return cgroup_css(next, parent_css->ss);
}
-EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+EXPORT_SYMBOL_GPL(css_next_child);
/**
- * cgroup_next_descendant_pre - find the next descendant for pre-order walk
+ * css_next_descendant_pre - find the next descendant for pre-order walk
* @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
*
- * To be used by cgroup_for_each_descendant_pre(). Find the next
- * descendant to visit for pre-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_pre(). Find the next descendant
+ * to visit for pre-order traversal of @root's descendants. @root is
+ * included in the iteration and the first node to be visited.
*
* While this function requires RCU read locking, it doesn't require the
* whole traversal to be contained in a single RCU critical section. This
* function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
*/
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
- struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *root)
{
- struct cgroup *next;
+ struct cgroup_subsys_state *next;
WARN_ON_ONCE(!rcu_read_lock_held());
- /* if first iteration, pretend we just visited @cgroup */
+ /* if first iteration, visit @root */
if (!pos)
- pos = cgroup;
+ return root;
/* visit the first child if exists */
- next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
+ next = css_next_child(NULL, pos);
if (next)
return next;
/* no child, visit my or the closest ancestor's next sibling */
- while (pos != cgroup) {
- next = cgroup_next_sibling(pos);
+ while (pos != root) {
+ next = css_next_child(pos, css_parent(pos));
if (next)
return next;
- pos = pos->parent;
+ pos = css_parent(pos);
}
return NULL;
}
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
+EXPORT_SYMBOL_GPL(css_next_descendant_pre);
/**
- * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
- * @pos: cgroup of interest
+ * css_rightmost_descendant - return the rightmost descendant of a css
+ * @pos: css of interest
*
- * Return the rightmost descendant of @pos. If there's no descendant,
- * @pos is returned. This can be used during pre-order traversal to skip
+ * Return the rightmost descendant of @pos. If there's no descendant, @pos
+ * is returned. This can be used during pre-order traversal to skip
* subtree of @pos.
*
* While this function requires RCU read locking, it doesn't require the
@@ -3127,9 +3134,10 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
* function will return the correct rightmost descendant as long as @pos is
* accessible.
*/
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
- struct cgroup *last, *tmp;
+ struct cgroup_subsys_state *last, *tmp;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3137,82 +3145,136 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
last = pos;
/* ->prev isn't RCU safe, walk ->next till the end */
pos = NULL;
- list_for_each_entry_rcu(tmp, &last->children, sibling)
+ css_for_each_child(tmp, last)
pos = tmp;
} while (pos);
return last;
}
-EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+EXPORT_SYMBOL_GPL(css_rightmost_descendant);
-static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
+static struct cgroup_subsys_state *
+css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
- struct cgroup *last;
+ struct cgroup_subsys_state *last;
do {
last = pos;
- pos = list_first_or_null_rcu(&pos->children, struct cgroup,
- sibling);
+ pos = css_next_child(NULL, pos);
} while (pos);
return last;
}
/**
- * cgroup_next_descendant_post - find the next descendant for post-order walk
+ * css_next_descendant_post - find the next descendant for post-order walk
* @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
*
- * To be used by cgroup_for_each_descendant_post(). Find the next
- * descendant to visit for post-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_post(). Find the next descendant
+ * to visit for post-order traversal of @root's descendants. @root is
+ * included in the iteration and the last node to be visited.
*
* While this function requires RCU read locking, it doesn't require the
* whole traversal to be contained in a single RCU critical section. This
* function will return the correct next descendant as long as both @pos
* and @cgroup are accessible and @pos is a descendant of @cgroup.
*/
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
- struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *root)
{
- struct cgroup *next;
+ struct cgroup_subsys_state *next;
WARN_ON_ONCE(!rcu_read_lock_held());
- /* if first iteration, visit the leftmost descendant */
- if (!pos) {
- next = cgroup_leftmost_descendant(cgroup);
- return next != cgroup ? next : NULL;
- }
+ /* if first iteration, visit leftmost descendant which may be @root */
+ if (!pos)
+ return css_leftmost_descendant(root);
+
+ /* if we visited @root, we're done */
+ if (pos == root)
+ return NULL;
/* if there's an unvisited sibling, visit its leftmost descendant */
- next = cgroup_next_sibling(pos);
+ next = css_next_child(pos, css_parent(pos));
if (next)
- return cgroup_leftmost_descendant(next);
+ return css_leftmost_descendant(next);
/* no sibling left, visit parent */
- next = pos->parent;
- return next != cgroup ? next : NULL;
+ return css_parent(pos);
+}
+EXPORT_SYMBOL_GPL(css_next_descendant_post);
+
+/**
+ * css_advance_task_iter - advance a task itererator to the next css_set
+ * @it: the iterator to advance
+ *
+ * Advance @it to the next css_set to walk.
+ */
+static void css_advance_task_iter(struct css_task_iter *it)
+{
+ struct list_head *l = it->cset_link;
+ struct cgrp_cset_link *link;
+ struct css_set *cset;
+
+ /* Advance to the next non-empty css_set */
+ do {
+ l = l->next;
+ if (l == &it->origin_css->cgroup->cset_links) {
+ it->cset_link = NULL;
+ return;
+ }
+ link = list_entry(l, struct cgrp_cset_link, cset_link);
+ cset = link->cset;
+ } while (list_empty(&cset->tasks));
+ it->cset_link = l;
+ it->task = cset->tasks.next;
}
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
-void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_start - initiate task iteration
+ * @css: the css to walk tasks of
+ * @it: the task iterator to use
+ *
+ * Initiate iteration through the tasks of @css. The caller can call
+ * css_task_iter_next() to walk through the tasks until the function
+ * returns NULL. On completion of iteration, css_task_iter_end() must be
+ * called.
+ *
+ * Note that this function acquires a lock which is released when the
+ * iteration finishes. The caller can't sleep while iteration is in
+ * progress.
+ */
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it)
__acquires(css_set_lock)
{
/*
- * The first time anyone tries to iterate across a cgroup,
- * we need to enable the list linking each css_set to its
- * tasks, and fix up all existing tasks.
+ * The first time anyone tries to iterate across a css, we need to
+ * enable the list linking each css_set to its tasks, and fix up
+ * all existing tasks.
*/
if (!use_task_css_set_links)
cgroup_enable_task_cg_lists();
read_lock(&css_set_lock);
- it->cset_link = &cgrp->cset_links;
- cgroup_advance_iter(cgrp, it);
+
+ it->origin_css = css;
+ it->cset_link = &css->cgroup->cset_links;
+
+ css_advance_task_iter(it);
}
-struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
- struct cgroup_iter *it)
+/**
+ * css_task_iter_next - return the next task for the iterator
+ * @it: the task iterator being iterated
+ *
+ * The "next" function for task iteration. @it should have been
+ * initialized via css_task_iter_start(). Returns NULL when the iteration
+ * reaches the end.
+ */
+struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
struct task_struct *res;
struct list_head *l = it->task;
@@ -3226,16 +3288,24 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
l = l->next;
link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
if (l == &link->cset->tasks) {
- /* We reached the end of this task list - move on to
- * the next cg_cgroup_link */
- cgroup_advance_iter(cgrp, it);
+ /*
+ * We reached the end of this task list - move on to the
+ * next cgrp_cset_link.
+ */
+ css_advance_task_iter(it);
} else {
it->task = l;
}
return res;
}
-void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_end - finish task iteration
+ * @it: the task iterator to finish
+ *
+ * Finish task iteration started by css_task_iter_start().
+ */
+void css_task_iter_end(struct css_task_iter *it)
__releases(css_set_lock)
{
read_unlock(&css_set_lock);
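For illustration only (not part of the patch): a minimal sketch of the start/next/end contract documented above. count_css_tasks() is a hypothetical helper; note that the caller may not sleep while the iteration holds css_set_lock.

        #include <linux/cgroup.h>
        #include <linux/sched.h>

        static int count_css_tasks(struct cgroup_subsys_state *css)
        {
                struct css_task_iter it;
                struct task_struct *task;
                int n = 0;

                css_task_iter_start(css, &it);
                while ((task = css_task_iter_next(&it)))
                        n++;                    /* no sleeping inside this loop */
                css_task_iter_end(&it);

                return n;
        }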
@@ -3276,46 +3346,49 @@ static inline int started_after(void *p1, void *p2)
}
/**
- * cgroup_scan_tasks - iterate though all the tasks in a cgroup
- * @scan: struct cgroup_scanner containing arguments for the scan
+ * css_scan_tasks - iterate through all the tasks in a css
+ * @css: the css to iterate tasks of
+ * @test: optional test callback
+ * @process: process callback
+ * @data: data passed to @test and @process
+ * @heap: optional pre-allocated heap used for task iteration
+ *
+ * Iterate through all the tasks in @css, calling @test for each, and if it
+ * returns %true, call @process for it also.
*
- * Arguments include pointers to callback functions test_task() and
- * process_task().
- * Iterate through all the tasks in a cgroup, calling test_task() for each,
- * and if it returns true, call process_task() for it also.
- * The test_task pointer may be NULL, meaning always true (select all tasks).
- * Effectively duplicates cgroup_iter_{start,next,end}()
- * but does not lock css_set_lock for the call to process_task().
- * The struct cgroup_scanner may be embedded in any structure of the caller's
- * creation.
- * It is guaranteed that process_task() will act on every task that
- * is a member of the cgroup for the duration of this call. This
- * function may or may not call process_task() for tasks that exit
- * or move to a different cgroup during the call, or are forked or
- * move into the cgroup during the call.
+ * @test may be NULL, meaning always true (select all tasks), which
+ * effectively duplicates css_task_iter_{start,next,end}() but does not
+ * lock css_set_lock for the call to @process.
*
- * Note that test_task() may be called with locks held, and may in some
- * situations be called multiple times for the same task, so it should
- * be cheap.
- * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
- * pre-allocated and will be used for heap operations (and its "gt" member will
- * be overwritten), else a temporary heap will be used (allocation of which
- * may cause this function to fail).
+ * It is guaranteed that @process will act on every task that is a member
+ * of @css for the duration of this call. This function may or may not
+ * call @process for tasks that exit or move to a different css during the
+ * call, or are forked or move into the css during the call.
+ *
+ * Note that @test may be called with locks held, and may in some
+ * situations be called multiple times for the same task, so it should be
+ * cheap.
+ *
+ * If @heap is non-NULL, a heap has been pre-allocated and will be used for
+ * heap operations (and its "gt" member will be overwritten), else a
+ * temporary heap will be used (allocation of which may cause this function
+ * to fail).
*/
-int cgroup_scan_tasks(struct cgroup_scanner *scan)
+int css_scan_tasks(struct cgroup_subsys_state *css,
+ bool (*test)(struct task_struct *, void *),
+ void (*process)(struct task_struct *, void *),
+ void *data, struct ptr_heap *heap)
{
int retval, i;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *p, *dropped;
/* Never dereference latest_task, since it's not refcounted */
struct task_struct *latest_task = NULL;
struct ptr_heap tmp_heap;
- struct ptr_heap *heap;
struct timespec latest_time = { 0, 0 };
- if (scan->heap) {
+ if (heap) {
/* The caller supplied our heap and pre-allocated its memory */
- heap = scan->heap;
heap->gt = &started_after;
} else {
/* We need to allocate our own heap memory */
@@ -3328,25 +3401,24 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
again:
/*
- * Scan tasks in the cgroup, using the scanner's "test_task" callback
- * to determine which are of interest, and using the scanner's
- * "process_task" callback to process any of them that need an update.
- * Since we don't want to hold any locks during the task updates,
- * gather tasks to be processed in a heap structure.
- * The heap is sorted by descending task start time.
- * If the statically-sized heap fills up, we overflow tasks that
- * started later, and in future iterations only consider tasks that
- * started after the latest task in the previous pass. This
+ * Scan tasks in the css, using the @test callback to determine
+ * which are of interest, and invoking @process callback on the
+ * ones which need an update. Since we don't want to hold any
+ * locks during the task updates, gather tasks to be processed in a
+ * heap structure. The heap is sorted by descending task start
+ * time. If the statically-sized heap fills up, we overflow tasks
+ * that started later, and in future iterations only consider tasks
+ * that started after the latest task in the previous pass. This
* guarantees forward progress and that we don't miss any tasks.
*/
heap->size = 0;
- cgroup_iter_start(scan->cg, &it);
- while ((p = cgroup_iter_next(scan->cg, &it))) {
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
/*
* Only affect tasks that qualify per the caller's callback,
* if he provided one
*/
- if (scan->test_task && !scan->test_task(p, scan))
+ if (test && !test(p, data))
continue;
/*
* Only process tasks that started after the last task
@@ -3374,7 +3446,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
* the heap and wasn't inserted
*/
}
- cgroup_iter_end(scan->cg, &it);
+ css_task_iter_end(&it);
if (heap->size) {
for (i = 0; i < heap->size; i++) {
@@ -3384,7 +3456,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
latest_task = q;
}
/* Process the task per the caller's callback */
- scan->process_task(q, scan);
+ process(q, data);
put_task_struct(q);
}
/*
@@ -3401,10 +3473,9 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
return 0;
}
-static void cgroup_transfer_one_task(struct task_struct *task,
- struct cgroup_scanner *scan)
+static void cgroup_transfer_one_task(struct task_struct *task, void *data)
{
- struct cgroup *new_cgroup = scan->data;
+ struct cgroup *new_cgroup = data;
mutex_lock(&cgroup_mutex);
cgroup_attach_task(new_cgroup, task, false);
@@ -3418,15 +3489,8 @@ static void cgroup_transfer_one_task(struct task_struct *task,
*/
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
- struct cgroup_scanner scan;
-
- scan.cg = from;
- scan.test_task = NULL; /* select all tasks in cgroup */
- scan.process_task = cgroup_transfer_one_task;
- scan.heap = NULL;
- scan.data = to;
-
- return cgroup_scan_tasks(&scan);
+ return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
+ to, NULL);
}
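cgroup_transfer_tasks() above passes a NULL @test; for illustration, a sketch (not part of the patch) of a caller that filters with a non-NULL @test. Both callbacks and signal_stopped_tasks() are hypothetical; @test runs with iteration locks possibly held and so stays cheap, while @process is invoked without css_set_lock.

        #include <linux/cgroup.h>
        #include <linux/sched.h>
        #include <linux/signal.h>

        static bool my_test(struct task_struct *task, void *data)
        {
                return task->state == TASK_STOPPED;     /* cheap, locks may be held */
        }

        static void my_process(struct task_struct *task, void *data)
        {
                send_sig(*(int *)data, task, 0);        /* css_set_lock not held here */
        }

        static int signal_stopped_tasks(struct cgroup_subsys_state *css, int sig)
        {
                /* NULL @heap: css_scan_tasks() allocates a temporary one */
                return css_scan_tasks(css, my_test, my_process, &sig, NULL);
        }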
/*
@@ -3468,7 +3532,7 @@ struct cgroup_pidlist {
/* pointer to the cgroup we belong to, for list removal purposes */
struct cgroup *owner;
/* protects the other fields */
- struct rw_semaphore mutex;
+ struct rw_semaphore rwsem;
};
/*
@@ -3541,7 +3605,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
struct pid_namespace *ns = task_active_pid_ns(current);
/*
- * We can't drop the pidlist_mutex before taking the l->mutex in case
+ * We can't drop the pidlist_mutex before taking the l->rwsem in case
* the last ref-holder is trying to remove l from the list at the same
* time. Holding the pidlist_mutex precludes somebody taking whichever
* list we find out from under us - compare release_pid_array().
@@ -3550,7 +3614,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
list_for_each_entry(l, &cgrp->pidlists, links) {
if (l->key.type == type && l->key.ns == ns) {
/* make sure l doesn't vanish out from under us */
- down_write(&l->mutex);
+ down_write(&l->rwsem);
mutex_unlock(&cgrp->pidlist_mutex);
return l;
}
@@ -3561,8 +3625,8 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
mutex_unlock(&cgrp->pidlist_mutex);
return l;
}
- init_rwsem(&l->mutex);
- down_write(&l->mutex);
+ init_rwsem(&l->rwsem);
+ down_write(&l->rwsem);
l->key.type = type;
l->key.ns = get_pid_ns(ns);
l->owner = cgrp;
@@ -3580,7 +3644,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
pid_t *array;
int length;
int pid, n = 0; /* used for populating the array */
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *tsk;
struct cgroup_pidlist *l;
@@ -3595,8 +3659,8 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
if (!array)
return -ENOMEM;
/* now, populate the array */
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ css_task_iter_start(&cgrp->dummy_css, &it);
+ while ((tsk = css_task_iter_next(&it))) {
if (unlikely(n == length))
break;
/* get tgid or pid for procs or tasks file respectively */
@@ -3607,7 +3671,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
if (pid > 0) /* make sure to only use valid results */
array[n++] = pid;
}
- cgroup_iter_end(cgrp, &it);
+ css_task_iter_end(&it);
length = n;
/* now sort & (if procs) strip out duplicates */
sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -3623,7 +3687,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
l->list = array;
l->length = length;
l->use_count++;
- up_write(&l->mutex);
+ up_write(&l->rwsem);
*lp = l;
return 0;
}
@@ -3641,7 +3705,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
int ret = -EINVAL;
struct cgroup *cgrp;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *tsk;
/*
@@ -3655,8 +3719,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
ret = 0;
cgrp = dentry->d_fsdata;
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ css_task_iter_start(&cgrp->dummy_css, &it);
+ while ((tsk = css_task_iter_next(&it))) {
switch (tsk->state) {
case TASK_RUNNING:
stats->nr_running++;
@@ -3676,7 +3740,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
break;
}
}
- cgroup_iter_end(cgrp, &it);
+ css_task_iter_end(&it);
err:
return ret;
@@ -3701,7 +3765,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
int index = 0, pid = *pos;
int *iter;
- down_read(&l->mutex);
+ down_read(&l->rwsem);
if (pid) {
int end = l->length;
@@ -3728,7 +3792,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
struct cgroup_pidlist *l = s->private;
- up_read(&l->mutex);
+ up_read(&l->rwsem);
}
static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3774,7 +3838,7 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
* pidlist_mutex, we have to take pidlist_mutex first.
*/
mutex_lock(&l->owner->pidlist_mutex);
- down_write(&l->mutex);
+ down_write(&l->rwsem);
BUG_ON(!l->use_count);
if (!--l->use_count) {
/* we're the last user if refcount is 0; remove and free */
@@ -3782,12 +3846,12 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
mutex_unlock(&l->owner->pidlist_mutex);
pidlist_free(l->list);
put_pid_ns(l->key.ns);
- up_write(&l->mutex);
+ up_write(&l->rwsem);
kfree(l);
return;
}
mutex_unlock(&l->owner->pidlist_mutex);
- up_write(&l->mutex);
+ up_write(&l->rwsem);
}
static int cgroup_pidlist_release(struct inode *inode, struct file *file)
@@ -3851,21 +3915,20 @@ static int cgroup_procs_open(struct inode *unused, struct file *file)
return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}
-static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft)
+static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return notify_on_release(cgrp);
+ return notify_on_release(css->cgroup);
}
-static int cgroup_write_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
+static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+ clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
if (val)
- set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
else
- clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
return 0;
}
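For illustration only (not part of the patch): the shape of a u64 knob written against the converted cftype methods, which now receive the css rather than the cgroup. struct my_cgrp_state, to_my_state(), the "limit" file and the missing locking are all hypothetical.

        #include <linux/cgroup.h>

        /* hypothetical per-css state for this sketch */
        struct my_cgrp_state {
                struct cgroup_subsys_state css;
                u64 limit;
        };

        static inline struct my_cgrp_state *to_my_state(struct cgroup_subsys_state *css)
        {
                return container_of(css, struct my_cgrp_state, css);
        }

        static u64 my_limit_read(struct cgroup_subsys_state *css, struct cftype *cft)
        {
                return to_my_state(css)->limit;
        }

        static int my_limit_write(struct cgroup_subsys_state *css, struct cftype *cft,
                                  u64 val)
        {
                to_my_state(css)->limit = val;  /* real code adds its own locking */
                return 0;
        }

        static struct cftype my_files[] = {
                {
                        .name = "limit",
                        .read_u64 = my_limit_read,
                        .write_u64 = my_limit_write,
                },
                { }     /* terminate */
        };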
@@ -3895,18 +3958,18 @@ static void cgroup_event_remove(struct work_struct *work)
{
struct cgroup_event *event = container_of(work, struct cgroup_event,
remove);
- struct cgroup *cgrp = event->cgrp;
+ struct cgroup_subsys_state *css = event->css;
remove_wait_queue(event->wqh, &event->wait);
- event->cft->unregister_event(cgrp, event->cft, event->eventfd);
+ event->cft->unregister_event(css, event->cft, event->eventfd);
/* Notify userspace the event is going away. */
eventfd_signal(event->eventfd, 1);
eventfd_ctx_put(event->eventfd);
kfree(event);
- cgroup_dput(cgrp);
+ css_put(css);
}
/*
@@ -3919,7 +3982,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
{
struct cgroup_event *event = container_of(wait,
struct cgroup_event, wait);
- struct cgroup *cgrp = event->cgrp;
+ struct cgroup *cgrp = event->css->cgroup;
unsigned long flags = (unsigned long)key;
if (flags & POLLHUP) {
@@ -3963,14 +4026,15 @@ static void cgroup_event_ptable_queue_proc(struct file *file,
* Input must be in format '<event_fd> <control_fd> <args>'.
* Interpretation of args is defined by control file implementation.
*/
-static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
+ struct cftype *cft, const char *buffer)
{
- struct cgroup_event *event = NULL;
- struct cgroup *cgrp_cfile;
+ struct cgroup *cgrp = dummy_css->cgroup;
+ struct cgroup_event *event;
+ struct cgroup_subsys_state *cfile_css;
unsigned int efd, cfd;
- struct file *efile = NULL;
- struct file *cfile = NULL;
+ struct fd efile;
+ struct fd cfile;
char *endp;
int ret;
@@ -3987,109 +4051,113 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return -ENOMEM;
- event->cgrp = cgrp;
+
INIT_LIST_HEAD(&event->list);
init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
INIT_WORK(&event->remove, cgroup_event_remove);
- efile = eventfd_fget(efd);
- if (IS_ERR(efile)) {
- ret = PTR_ERR(efile);
- goto fail;
+ efile = fdget(efd);
+ if (!efile.file) {
+ ret = -EBADF;
+ goto out_kfree;
}
- event->eventfd = eventfd_ctx_fileget(efile);
+ event->eventfd = eventfd_ctx_fileget(efile.file);
if (IS_ERR(event->eventfd)) {
ret = PTR_ERR(event->eventfd);
- goto fail;
+ goto out_put_efile;
}
- cfile = fget(cfd);
- if (!cfile) {
+ cfile = fdget(cfd);
+ if (!cfile.file) {
ret = -EBADF;
- goto fail;
+ goto out_put_eventfd;
}
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
- ret = inode_permission(file_inode(cfile), MAY_READ);
+ ret = inode_permission(file_inode(cfile.file), MAY_READ);
if (ret < 0)
- goto fail;
+ goto out_put_cfile;
- event->cft = __file_cft(cfile);
+ event->cft = __file_cft(cfile.file);
if (IS_ERR(event->cft)) {
ret = PTR_ERR(event->cft);
- goto fail;
+ goto out_put_cfile;
+ }
+
+ if (!event->cft->ss) {
+ ret = -EBADF;
+ goto out_put_cfile;
}
/*
- * The file to be monitored must be in the same cgroup as
- * cgroup.event_control is.
+ * Determine the css of @cfile, verify it belongs to the same
+ * cgroup as cgroup.event_control, and associate @event with it.
+ * Remaining events are automatically removed on cgroup destruction
+ * but the removal is asynchronous, so take an extra ref.
*/
- cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
- if (cgrp_cfile != cgrp) {
- ret = -EINVAL;
- goto fail;
- }
+ rcu_read_lock();
+
+ ret = -EINVAL;
+ event->css = cgroup_css(cgrp, event->cft->ss);
+ cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss);
+ if (event->css && event->css == cfile_css && css_tryget(event->css))
+ ret = 0;
+
+ rcu_read_unlock();
+ if (ret)
+ goto out_put_cfile;
if (!event->cft->register_event || !event->cft->unregister_event) {
ret = -EINVAL;
- goto fail;
+ goto out_put_css;
}
- ret = event->cft->register_event(cgrp, event->cft,
+ ret = event->cft->register_event(event->css, event->cft,
event->eventfd, buffer);
if (ret)
- goto fail;
+ goto out_put_css;
- efile->f_op->poll(efile, &event->pt);
-
- /*
- * Events should be removed after rmdir of cgroup directory, but before
- * destroying subsystem state objects. Let's take reference to cgroup
- * directory dentry to do that.
- */
- dget(cgrp->dentry);
+ efile.file->f_op->poll(efile.file, &event->pt);
spin_lock(&cgrp->event_list_lock);
list_add(&event->list, &cgrp->event_list);
spin_unlock(&cgrp->event_list_lock);
- fput(cfile);
- fput(efile);
+ fdput(cfile);
+ fdput(efile);
return 0;
-fail:
- if (cfile)
- fput(cfile);
-
- if (event && event->eventfd && !IS_ERR(event->eventfd))
- eventfd_ctx_put(event->eventfd);
-
- if (!IS_ERR_OR_NULL(efile))
- fput(efile);
-
+out_put_css:
+ css_put(event->css);
+out_put_cfile:
+ fdput(cfile);
+out_put_eventfd:
+ eventfd_ctx_put(event->eventfd);
+out_put_efile:
+ fdput(efile);
+out_kfree:
kfree(event);
return ret;
}
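For illustration only (not part of the patch): the userspace side of the '<event_fd> <control_fd> <args>' protocol parsed above, using the memcg usage-threshold file as the monitored control file. The mount path, group name and 64M threshold are assumptions, and error handling is omitted.

        #include <stdio.h>
        #include <string.h>
        #include <stdint.h>
        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/eventfd.h>

        int main(void)
        {
                int efd = eventfd(0, 0);
                int cfd = open("/sys/fs/cgroup/memory/mygrp/memory.usage_in_bytes",
                               O_RDONLY);
                int ecfd = open("/sys/fs/cgroup/memory/mygrp/cgroup.event_control",
                                O_WRONLY);
                char buf[64];
                uint64_t ticks;

                /* arm a 64M usage threshold: "<event_fd> <control_fd> <args>" */
                snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, 64ULL << 20);
                write(ecfd, buf, strlen(buf));

                read(efd, &ticks, sizeof(ticks));       /* blocks until crossed */
                printf("threshold crossed %llu time(s)\n",
                       (unsigned long long)ticks);
                return 0;
        }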
-static u64 cgroup_clone_children_read(struct cgroup *cgrp,
- struct cftype *cft)
+static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}
-static int cgroup_clone_children_write(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
+static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
if (val)
- set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
else
- clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
return 0;
}
@@ -4148,36 +4216,34 @@ static struct cftype cgroup_base_files[] = {
};
/**
- * cgroup_populate_dir - selectively creation of files in a directory
+ * cgroup_populate_dir - create subsys files in a cgroup directory
* @cgrp: target cgroup
- * @base_files: true if the base files should be added
* @subsys_mask: mask of the subsystem ids whose files should be added
+ *
+ * On failure, no file is added.
*/
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask)
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
- int err;
struct cgroup_subsys *ss;
-
- if (base_files) {
- err = cgroup_addrm_files(cgrp, NULL, cgroup_base_files, true);
- if (err < 0)
- return err;
- }
+ int i, ret = 0;
/* process cftsets of each subsystem */
- for_each_root_subsys(cgrp->root, ss) {
+ for_each_subsys(ss, i) {
struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
+
+ if (!test_bit(i, &subsys_mask))
continue;
- list_for_each_entry(set, &ss->cftsets, node)
- cgroup_addrm_files(cgrp, ss, set->cfts, true);
+ list_for_each_entry(set, &ss->cftsets, node) {
+ ret = cgroup_addrm_files(cgrp, set->cfts, true);
+ if (ret < 0)
+ goto err;
+ }
}
/* This cgroup is ready now */
for_each_root_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
struct css_id *id = rcu_dereference_protected(css->id, true);
/*
@@ -4190,14 +4256,57 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
}
return 0;
+err:
+ cgroup_clear_dir(cgrp, subsys_mask);
+ return ret;
}
-static void css_dput_fn(struct work_struct *work)
+/*
+ * css destruction is four-stage process.
+ *
+ * 1. Destruction starts. Killing of the percpu_ref is initiated.
+ * Implemented in kill_css().
+ *
+ * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
+ * and thus css_tryget() is guaranteed to fail, the css can be offlined
+ * by invoking offline_css(). After offlining, the base ref is put.
+ * Implemented in css_killed_work_fn().
+ *
+ * 3. When the percpu_ref reaches zero, the only possible remaining
+ * accessors are inside RCU read sections. css_release() schedules the
+ * RCU callback.
+ *
+ * 4. After the grace period, the css can be freed. Implemented in
+ * css_free_work_fn().
+ *
+ * It is actually hairier because both steps 2 and 4 require process context
+ * and thus involve punting to css->destroy_work, adding two additional
+ * steps to the already complex sequence.
+ */
+static void css_free_work_fn(struct work_struct *work)
{
struct cgroup_subsys_state *css =
- container_of(work, struct cgroup_subsys_state, dput_work);
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+ struct cgroup *cgrp = css->cgroup;
+
+ if (css->parent)
+ css_put(css->parent);
- cgroup_dput(css->cgroup);
+ css->ss->css_free(css);
+ cgroup_dput(cgrp);
+}
+
+static void css_free_rcu_fn(struct rcu_head *rcu_head)
+{
+ struct cgroup_subsys_state *css =
+ container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
+
+ /*
+ * css holds an extra ref to @cgrp->dentry which is put on the last
+ * css_put(). dput() requires process context which we don't have.
+ */
+ INIT_WORK(&css->destroy_work, css_free_work_fn);
+ schedule_work(&css->destroy_work);
}
static void css_release(struct percpu_ref *ref)
@@ -4205,49 +4314,47 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- schedule_work(&css->dput_work);
+ call_rcu(&css->rcu_head, css_free_rcu_fn);
}
-static void init_cgroup_css(struct cgroup_subsys_state *css,
- struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
+ struct cgroup *cgrp)
{
css->cgroup = cgrp;
+ css->ss = ss;
css->flags = 0;
css->id = NULL;
- if (cgrp == cgroup_dummy_top)
+
+ if (cgrp->parent)
+ css->parent = cgroup_css(cgrp->parent, ss);
+ else
css->flags |= CSS_ROOT;
- BUG_ON(cgrp->subsys[ss->subsys_id]);
- cgrp->subsys[ss->subsys_id] = css;
- /*
- * css holds an extra ref to @cgrp->dentry which is put on the last
- * css_put(). dput() requires process context, which css_put() may
- * be called without. @css->dput_work will be used to invoke
- * dput() asynchronously from css_put().
- */
- INIT_WORK(&css->dput_work, css_dput_fn);
+ BUG_ON(cgroup_css(cgrp, ss));
}
-/* invoke ->post_create() on a new CSS and mark it online if successful */
-static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
+/* invoke ->css_online() on a new CSS and mark it online if successful */
+static int online_css(struct cgroup_subsys_state *css)
{
+ struct cgroup_subsys *ss = css->ss;
int ret = 0;
lockdep_assert_held(&cgroup_mutex);
if (ss->css_online)
- ret = ss->css_online(cgrp);
- if (!ret)
- cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE;
+ ret = ss->css_online(css);
+ if (!ret) {
+ css->flags |= CSS_ONLINE;
+ css->cgroup->nr_css++;
+ rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css);
+ }
return ret;
}
-/* if the CSS is online, invoke ->pre_destory() on it and mark it offline */
-static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
- __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
+/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
+static void offline_css(struct cgroup_subsys_state *css)
{
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys *ss = css->ss;
lockdep_assert_held(&cgroup_mutex);
@@ -4255,9 +4362,11 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
return;
if (ss->css_offline)
- ss->css_offline(cgrp);
+ ss->css_offline(css);
- cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
+ css->flags &= ~CSS_ONLINE;
+ css->cgroup->nr_css--;
+ RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
}
/*
@@ -4271,6 +4380,7 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
umode_t mode)
{
+ struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
struct cgroup *cgrp;
struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
@@ -4288,7 +4398,11 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_free_cgrp;
rcu_assign_pointer(cgrp->name, name);
- cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
if (cgrp->id < 0)
goto err_free_name;
@@ -4317,6 +4431,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
cgrp->dentry = dentry;
cgrp->parent = parent;
+ cgrp->dummy_css.parent = &parent->dummy_css;
cgrp->root = parent->root;
if (notify_on_release(parent))
@@ -4328,22 +4443,21 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
for_each_root_subsys(root, ss) {
struct cgroup_subsys_state *css;
- css = ss->css_alloc(cgrp);
+ css = ss->css_alloc(cgroup_css(parent, ss));
if (IS_ERR(css)) {
err = PTR_ERR(css);
goto err_free_all;
}
+ css_ar[ss->subsys_id] = css;
err = percpu_ref_init(&css->refcnt, css_release);
- if (err) {
- ss->css_free(cgrp);
+ if (err)
goto err_free_all;
- }
- init_cgroup_css(css, ss, cgrp);
+ init_css(css, ss, cgrp);
if (ss->use_id) {
- err = alloc_css_id(ss, parent, cgrp);
+ err = alloc_css_id(css);
if (err)
goto err_free_all;
}
@@ -4365,16 +4479,22 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
root->number_of_cgroups++;
- /* each css holds a ref to the cgroup's dentry */
- for_each_root_subsys(root, ss)
+ /* each css holds a ref to the cgroup's dentry and the parent css */
+ for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
dget(dentry);
+ css_get(css->parent);
+ }
/* hold a ref to the parent's dentry */
dget(parent->dentry);
/* creation succeeded, notify subsystems */
for_each_root_subsys(root, ss) {
- err = online_css(ss, cgrp);
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+ err = online_css(css);
if (err)
goto err_destroy;
@@ -4388,7 +4508,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
}
}
- err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
+ idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+
+ err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+ if (err)
+ goto err_destroy;
+
+ err = cgroup_populate_dir(cgrp, root->subsys_mask);
if (err)
goto err_destroy;
@@ -4399,18 +4525,18 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
err_free_all:
for_each_root_subsys(root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
if (css) {
percpu_ref_cancel_init(&css->refcnt);
- ss->css_free(cgrp);
+ ss->css_free(css);
}
}
mutex_unlock(&cgroup_mutex);
/* Release the reference count that we took on the superblock */
deactivate_super(sb);
err_free_id:
- ida_simple_remove(&root->cgroup_ida, cgrp->id);
+ idr_remove(&root->cgroup_idr, cgrp->id);
err_free_name:
kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
@@ -4432,22 +4558,84 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}
-static void cgroup_css_killed(struct cgroup *cgrp)
+/*
+ * This is called when the refcnt of a css is confirmed to be killed.
+ * css_tryget() is now guaranteed to fail.
+ */
+static void css_killed_work_fn(struct work_struct *work)
{
- if (!atomic_dec_and_test(&cgrp->css_kill_cnt))
- return;
+ struct cgroup_subsys_state *css =
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+ struct cgroup *cgrp = css->cgroup;
- /* percpu ref's of all css's are killed, kick off the next step */
- INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn);
- schedule_work(&cgrp->destroy_work);
+ mutex_lock(&cgroup_mutex);
+
+ /*
+ * css_tryget() is guaranteed to fail now. Tell subsystems to
+ * initiate destruction.
+ */
+ offline_css(css);
+
+ /*
+ * If @cgrp is marked dead, it's waiting for refs of all css's to
+ * be disabled before proceeding to the second phase of cgroup
+ * destruction. If we are the last one, kick it off.
+ */
+ if (!cgrp->nr_css && cgroup_is_dead(cgrp))
+ cgroup_destroy_css_killed(cgrp);
+
+ mutex_unlock(&cgroup_mutex);
+
+ /*
+ * Put the css refs from kill_css(). Each css holds an extra
+ * reference to the cgroup's dentry and cgroup removal proceeds
+ * regardless of css refs. On the last put of each css, whenever
+ * that may be, the extra dentry ref is put so that dentry
+ * destruction happens only after all css's are released.
+ */
+ css_put(css);
}
-static void css_ref_killed_fn(struct percpu_ref *ref)
+/* css kill confirmation processing requires process context, bounce */
+static void css_killed_ref_fn(struct percpu_ref *ref)
{
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- cgroup_css_killed(css->cgroup);
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ schedule_work(&css->destroy_work);
+}
+
+/**
+ * kill_css - destroy a css
+ * @css: css to destroy
+ *
+ * This function initiates destruction of @css by removing cgroup interface
+ * files and putting its base reference. ->css_offline() will be invoked
+ * asynchronously once css_tryget() is guaranteed to fail and when the
+ * reference count reaches zero, @css will be released.
+ */
+static void kill_css(struct cgroup_subsys_state *css)
+{
+ cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+
+ /*
+ * Killing would put the base ref, but we need to keep it alive
+ * until after ->css_offline().
+ */
+ css_get(css);
+
+ /*
+ * cgroup core guarantees that, by the time ->css_offline() is
+ * invoked, no new css reference will be given out via
+ * css_tryget(). We can't simply call percpu_ref_kill() and
+ * proceed to offlining css's because percpu_ref_kill() doesn't
+ * guarantee that the ref is seen as killed on all CPUs on return.
+ *
+ * Use percpu_ref_kill_and_confirm() to get notifications as each
+ * css is confirmed to be seen as killed on all CPUs.
+ */
+ percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}
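For illustration only (not part of the patch): the generic percpu_ref_kill_and_confirm() pattern kill_css() builds on. The confirmation callback runs in atomic context once no CPU can succeed a tryget, so it bounces to a workqueue. struct my_obj and its callbacks are hypothetical, and destroy_work is assumed to have been set up with INIT_WORK() at allocation time.

        #include <linux/percpu-refcount.h>
        #include <linux/workqueue.h>

        struct my_obj {
                struct percpu_ref ref;
                struct work_struct destroy_work;        /* INIT_WORK()ed at alloc time */
        };

        static void my_confirm_kill(struct percpu_ref *ref)
        {
                struct my_obj *obj = container_of(ref, struct my_obj, ref);

                /* every CPU now sees the ref as dead; finish in process context */
                schedule_work(&obj->destroy_work);
        }

        static void my_begin_destroy(struct my_obj *obj)
        {
                /*
                 * On return, percpu_ref_tryget(&obj->ref) may still transiently
                 * succeed on some CPUs; my_confirm_kill() runs once it cannot.
                 */
                percpu_ref_kill_and_confirm(&obj->ref, my_confirm_kill);
        }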
/**
@@ -4513,41 +4701,19 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
return -EBUSY;
/*
- * Block new css_tryget() by killing css refcnts. cgroup core
- * guarantees that, by the time ->css_offline() is invoked, no new
- * css reference will be given out via css_tryget(). We can't
- * simply call percpu_ref_kill() and proceed to offlining css's
- * because percpu_ref_kill() doesn't guarantee that the ref is seen
- * as killed on all CPUs on return.
- *
- * Use percpu_ref_kill_and_confirm() to get notifications as each
- * css is confirmed to be seen as killed on all CPUs. The
- * notification callback keeps track of the number of css's to be
- * killed and schedules cgroup_offline_fn() to perform the rest of
- * destruction once the percpu refs of all css's are confirmed to
- * be killed.
+ * Initiate massacre of all css's. cgroup_destroy_css_killed()
+ * will be invoked to perform the rest of destruction once the
+ * percpu refs of all css's are confirmed to be killed.
*/
- atomic_set(&cgrp->css_kill_cnt, 1);
- for_each_root_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
-
- /*
- * Killing would put the base ref, but we need to keep it
- * alive until after ->css_offline.
- */
- percpu_ref_get(&css->refcnt);
-
- atomic_inc(&cgrp->css_kill_cnt);
- percpu_ref_kill_and_confirm(&css->refcnt, css_ref_killed_fn);
- }
- cgroup_css_killed(cgrp);
+ for_each_root_subsys(cgrp->root, ss)
+ kill_css(cgroup_css(cgrp, ss));
/*
* Mark @cgrp dead. This prevents further task migration and child
* creation by disabling cgroup_lock_live_group(). Note that
- * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to
+ * CGRP_DEAD assertion is depended upon by css_next_child() to
* resume iteration after dropping RCU read lock. See
- * cgroup_next_sibling() for details.
+ * css_next_child() for details.
*/
set_bit(CGRP_DEAD, &cgrp->flags);
@@ -4558,9 +4724,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
raw_spin_unlock(&release_list_lock);
/*
- * Remove @cgrp directory. The removal puts the base ref but we
- * aren't quite done with @cgrp yet, so hold onto it.
+ * If @cgrp has css's attached, the second stage of cgroup
+ * destruction is kicked off from css_killed_work_fn() after the
+ * refs of all attached css's are killed. If @cgrp doesn't have
+ * any css, we kick it off here.
*/
+ if (!cgrp->nr_css)
+ cgroup_destroy_css_killed(cgrp);
+
+ /*
+ * Clear the base files and remove @cgrp directory. The removal
+ * puts the base ref but we aren't quite done with @cgrp yet, so
+ * hold onto it.
+ */
+ cgroup_addrm_files(cgrp, cgroup_base_files, false);
dget(d);
cgroup_d_remove_dir(d);
@@ -4580,50 +4757,36 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
};
/**
- * cgroup_offline_fn - the second step of cgroup destruction
+ * cgroup_destroy_css_killed - the second step of cgroup destruction
* @work: cgroup->destroy_free_work
*
* This function is invoked from a work item for a cgroup which is being
- * destroyed after the percpu refcnts of all css's are guaranteed to be
- * seen as killed on all CPUs, and performs the rest of destruction. This
- * is the second step of destruction described in the comment above
- * cgroup_destroy_locked().
+ * destroyed after all css's are offlined and performs the rest of
+ * destruction. This is the second step of destruction described in the
+ * comment above cgroup_destroy_locked().
*/
-static void cgroup_offline_fn(struct work_struct *work)
+static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
- struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
struct cgroup *parent = cgrp->parent;
struct dentry *d = cgrp->dentry;
- struct cgroup_subsys *ss;
- mutex_lock(&cgroup_mutex);
+ lockdep_assert_held(&cgroup_mutex);
- /*
- * css_tryget() is guaranteed to fail now. Tell subsystems to
- * initate destruction.
- */
- for_each_root_subsys(cgrp->root, ss)
- offline_css(ss, cgrp);
+ /* delete this cgroup from parent->children */
+ list_del_rcu(&cgrp->sibling);
/*
- * Put the css refs from cgroup_destroy_locked(). Each css holds
- * an extra reference to the cgroup's dentry and cgroup removal
- * proceeds regardless of css refs. On the last put of each css,
- * whenever that may be, the extra dentry ref is put so that dentry
- * destruction happens only after all css's are released.
+ * We should remove the cgroup object from idr before its grace
+ * period starts, so we won't be looking up a cgroup while the
+ * cgroup is being freed.
*/
- for_each_root_subsys(cgrp->root, ss)
- css_put(cgrp->subsys[ss->subsys_id]);
-
- /* delete this cgroup from parent->children */
- list_del_rcu(&cgrp->sibling);
+ idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
dput(d);
set_bit(CGRP_RELEASABLE, &parent->flags);
check_for_release(parent);
-
- mutex_unlock(&cgroup_mutex);
}
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
@@ -4646,6 +4809,11 @@ static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
* deregistration.
*/
if (ss->base_cftypes) {
+ struct cftype *cft;
+
+ for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
+ cft->ss = ss;
+
ss->base_cftset.cfts = ss->base_cftypes;
list_add_tail(&ss->base_cftset.node, &ss->cftsets);
}
@@ -4665,10 +4833,10 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
/* Create the top cgroup state for this subsystem */
list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
ss->root = &cgroup_dummy_root;
- css = ss->css_alloc(cgroup_dummy_top);
+ css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
- init_cgroup_css(css, ss, cgroup_dummy_top);
+ init_css(css, ss, cgroup_dummy_top);
/* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
@@ -4683,7 +4851,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
* need to invoke fork callbacks here. */
BUG_ON(!list_empty(&init_task.tasks));
- BUG_ON(online_css(ss, cgroup_dummy_top));
+ BUG_ON(online_css(css));
mutex_unlock(&cgroup_mutex);
@@ -4744,7 +4912,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
* struct, so this can happen first (i.e. before the dummy root
* attachment).
*/
- css = ss->css_alloc(cgroup_dummy_top);
+ css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
if (IS_ERR(css)) {
/* failure case - need to deassign the cgroup_subsys[] slot. */
cgroup_subsys[ss->subsys_id] = NULL;
@@ -4756,8 +4924,8 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
ss->root = &cgroup_dummy_root;
/* our new subsystem will be attached to the dummy hierarchy. */
- init_cgroup_css(css, ss, cgroup_dummy_top);
- /* init_idr must be after init_cgroup_css because it sets css->id. */
+ init_css(css, ss, cgroup_dummy_top);
+ /* init_idr must be after init_css() because it sets css->id. */
if (ss->use_id) {
ret = cgroup_init_idr(ss, css);
if (ret)
@@ -4787,7 +4955,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
}
write_unlock(&css_set_lock);
- ret = online_css(ss, cgroup_dummy_top);
+ ret = online_css(css);
if (ret)
goto err_unload;
@@ -4819,14 +4987,14 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
/*
* we shouldn't be called if the subsystem is in use, and the use of
- * try_module_get in parse_cgroupfs_options should ensure that it
+ * try_module_get() in rebind_subsystems() should ensure that it
* doesn't start being used while we're killing it off.
*/
BUG_ON(ss->root != &cgroup_dummy_root);
mutex_lock(&cgroup_mutex);
- offline_css(ss, cgroup_dummy_top);
+ offline_css(cgroup_css(cgroup_dummy_top, ss));
if (ss->use_id)
idr_destroy(&ss->idr);
@@ -4860,8 +5028,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
* the cgrp->subsys pointer to find their state. note that this
* also takes care of freeing the css_id.
*/
- ss->css_free(cgroup_dummy_top);
- cgroup_dummy_top->subsys[ss->subsys_id] = NULL;
+ ss->css_free(cgroup_css(cgroup_dummy_top, ss));
+ RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
mutex_unlock(&cgroup_mutex);
}
@@ -4943,6 +5111,10 @@ int __init cgroup_init(void)
BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));
+ err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
+ 0, 1, GFP_KERNEL);
+ BUG_ON(err < 0);
+
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
@@ -5099,7 +5271,7 @@ void cgroup_fork(struct task_struct *child)
* Adds the task to the list running through its css_set if necessary and
* call the subsystem fork() callbacks. Has to be after the task is
* visible on the task list in case we race with the first call to
- * cgroup_iter_start() - to guarantee that the new task ends up on its
+ * css_task_iter_start() - to guarantee that the new task ends up on its
* list.
*/
void cgroup_post_fork(struct task_struct *child)
@@ -5212,10 +5384,10 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
*/
for_each_builtin_subsys(ss, i) {
if (ss->exit) {
- struct cgroup *old_cgrp = cset->subsys[i]->cgroup;
- struct cgroup *cgrp = task_cgroup(tsk, i);
+ struct cgroup_subsys_state *old_css = cset->subsys[i];
+ struct cgroup_subsys_state *css = task_css(tsk, i);
- ss->exit(cgrp, old_cgrp, tsk);
+ ss->exit(css, old_css, tsk);
}
}
}
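For illustration only (not part of the patch): a subsystem ->exit() handler written against the css-based signature used above. my_css() is the hypothetical container_of() helper from the earlier sketch, and the counter update stands in for real accounting (which would need the subsystem's own locking).

        static void my_exit(struct cgroup_subsys_state *css,
                            struct cgroup_subsys_state *old_css,
                            struct task_struct *task)
        {
                /* account the exiting task against the css it is leaving */
                my_css(old_css)->nr_events++;
        }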
@@ -5474,20 +5646,16 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
return 0;
}
-static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
- struct cgroup *child)
+static int alloc_css_id(struct cgroup_subsys_state *child_css)
{
- int subsys_id, i, depth = 0;
- struct cgroup_subsys_state *parent_css, *child_css;
+ struct cgroup_subsys_state *parent_css = css_parent(child_css);
struct css_id *child_id, *parent_id;
+ int i, depth;
- subsys_id = ss->subsys_id;
- parent_css = parent->subsys[subsys_id];
- child_css = child->subsys[subsys_id];
parent_id = rcu_dereference_protected(parent_css->id, true);
depth = parent_id->depth + 1;
- child_id = get_new_cssid(ss, depth);
+ child_id = get_new_cssid(child_css->ss, depth);
if (IS_ERR(child_id))
return PTR_ERR(child_id);
@@ -5525,31 +5693,56 @@ struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
}
EXPORT_SYMBOL_GPL(css_lookup);
-/*
- * get corresponding css from file open on cgroupfs directory
+/**
+ * css_from_dir - get corresponding css from the dentry of a cgroup dir
+ * @dentry: directory dentry of interest
+ * @ss: subsystem of interest
+ *
+ * Must be called under RCU read lock. The caller is responsible for
+ * pinning the returned css if it needs to be accessed outside the RCU
+ * critical section.
*/
-struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
+struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss)
{
struct cgroup *cgrp;
- struct inode *inode;
- struct cgroup_subsys_state *css;
- inode = file_inode(f);
- /* check in cgroup filesystem dir */
- if (inode->i_op != &cgroup_dir_inode_operations)
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* is @dentry a cgroup dir? */
+ if (!dentry->d_inode ||
+ dentry->d_inode->i_op != &cgroup_dir_inode_operations)
return ERR_PTR(-EBADF);
- if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
- return ERR_PTR(-EINVAL);
+ cgrp = __d_cgrp(dentry);
+ return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT);
+}
- /* get cgroup */
- cgrp = __d_cgrp(f->f_dentry);
- css = cgrp->subsys[id];
- return css ? css : ERR_PTR(-ENOENT);
+/**
+ * css_from_id - lookup css by id
+ * @id: the cgroup id
+ * @ss: cgroup subsys to be looked into
+ *
+ * Returns the css if there's valid one with @id, otherwise returns NULL.
+ * Should be called under rcu_read_lock().
+ */
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
+{
+ struct cgroup *cgrp;
+
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&cgroup_mutex),
+ "css_from_id() needs proper protection");
+
+ cgrp = idr_find(&ss->root->cgroup_idr, id);
+ if (cgrp)
+ return cgroup_css(cgrp, ss);
+ return NULL;
}
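For illustration only (not part of the patch): looking up and pinning a css with the new helper. css_from_id() must be called under rcu_read_lock() (or with cgroup_mutex held), and the result has to be pinned with css_tryget() before the lock is dropped. my_subsys is a hypothetical cgroup_subsys.

        #include <linux/cgroup.h>
        #include <linux/rcupdate.h>

        extern struct cgroup_subsys my_subsys;  /* hypothetical subsystem */

        static struct cgroup_subsys_state *my_get_css_by_id(int id)
        {
                struct cgroup_subsys_state *css;

                rcu_read_lock();
                css = css_from_id(id, &my_subsys);
                if (css && !css_tryget(css))
                        css = NULL;             /* lost the race with destruction */
                rcu_read_unlock();

                return css;                     /* caller drops with css_put() */
        }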
#ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
@@ -5559,22 +5752,24 @@ static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
return css;
}
-static void debug_css_free(struct cgroup *cgrp)
+static void debug_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgrp->subsys[debug_subsys_id]);
+ kfree(css);
}
-static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return cgroup_task_count(cgrp);
+ return cgroup_task_count(css->cgroup);
}
-static u64 current_css_set_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 current_css_set_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
return (u64)(unsigned long)current->cgroups;
}
-static u64 current_css_set_refcount_read(struct cgroup *cgrp,
+static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
u64 count;
@@ -5585,7 +5780,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp,
return count;
}
-static int current_css_set_cg_links_read(struct cgroup *cgrp,
+static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
struct cftype *cft,
struct seq_file *seq)
{
@@ -5612,14 +5807,13 @@ static int current_css_set_cg_links_read(struct cgroup *cgrp,
}
#define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup *cgrp,
- struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_css_links_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
struct cgrp_cset_link *link;
read_lock(&css_set_lock);
- list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+ list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
int count = 0;
@@ -5638,9 +5832,9 @@ static int cgroup_css_links_read(struct cgroup *cgrp,
return 0;
}
-static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+ return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}
static struct cftype debug_files[] = {
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 75dda1ea5026..f0ff64d0ebaa 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -45,25 +45,19 @@ struct freezer {
spinlock_t lock;
};
-static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
+static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgroup, freezer_subsys_id),
- struct freezer, css);
+ return css ? container_of(css, struct freezer, css) : NULL;
}
static inline struct freezer *task_freezer(struct task_struct *task)
{
- return container_of(task_subsys_state(task, freezer_subsys_id),
- struct freezer, css);
+ return css_freezer(task_css(task, freezer_subsys_id));
}
static struct freezer *parent_freezer(struct freezer *freezer)
{
- struct cgroup *pcg = freezer->css.cgroup->parent;
-
- if (pcg)
- return cgroup_freezer(pcg);
- return NULL;
+ return css_freezer(css_parent(&freezer->css));
}
bool cgroup_freezing(struct task_struct *task)
@@ -92,7 +86,8 @@ static const char *freezer_state_strs(unsigned int state)
struct cgroup_subsys freezer_subsys;
-static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct freezer *freezer;
@@ -105,22 +100,22 @@ static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
}
/**
- * freezer_css_online - commit creation of a freezer cgroup
- * @cgroup: cgroup being created
+ * freezer_css_online - commit creation of a freezer css
+ * @css: css being created
*
- * We're committing to creation of @cgroup. Mark it online and inherit
+ * We're committing to creation of @css. Mark it online and inherit
* parent's freezing state while holding both parent's and our
* freezer->lock.
*/
-static int freezer_css_online(struct cgroup *cgroup)
+static int freezer_css_online(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);
/*
* The following double locking and freezing state inheritance
* guarantee that @cgroup can never escape ancestors' freezing
- * states. See cgroup_for_each_descendant_pre() for details.
+ * states. See css_for_each_descendant_pre() for details.
*/
if (parent)
spin_lock_irq(&parent->lock);
@@ -141,15 +136,15 @@ static int freezer_css_online(struct cgroup *cgroup)
}
/**
- * freezer_css_offline - initiate destruction of @cgroup
- * @cgroup: cgroup being destroyed
+ * freezer_css_offline - initiate destruction of a freezer css
+ * @css: css being destroyed
*
- * @cgroup is going away. Mark it dead and decrement system_freezing_count
- * if it was holding one.
+ * @css is going away. Mark it dead and decrement system_freezing_count if
+ * it was holding one.
*/
-static void freezer_css_offline(struct cgroup *cgroup)
+static void freezer_css_offline(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
spin_lock_irq(&freezer->lock);
@@ -161,9 +156,9 @@ static void freezer_css_offline(struct cgroup *cgroup)
spin_unlock_irq(&freezer->lock);
}
-static void freezer_css_free(struct cgroup *cgroup)
+static void freezer_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgroup_freezer(cgroup));
+ kfree(css_freezer(css));
}
/*
@@ -175,25 +170,26 @@ static void freezer_css_free(struct cgroup *cgroup)
* @freezer->lock. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
-static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_subsys_state *new_css,
+ struct cgroup_taskset *tset)
{
- struct freezer *freezer = cgroup_freezer(new_cgrp);
+ struct freezer *freezer = css_freezer(new_css);
struct task_struct *task;
bool clear_frozen = false;
spin_lock_irq(&freezer->lock);
/*
- * Make the new tasks conform to the current state of @new_cgrp.
+ * Make the new tasks conform to the current state of @new_css.
* For simplicity, when migrating any task to a FROZEN cgroup, we
* revert it to FREEZING and let update_if_frozen() determine the
* correct state later.
*
- * Tasks in @tset are on @new_cgrp but may not conform to its
+ * Tasks in @tset are on @new_css but may not conform to its
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
- cgroup_taskset_for_each(task, new_cgrp, tset) {
+ cgroup_taskset_for_each(task, new_css, tset) {
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
@@ -231,7 +227,7 @@ static void freezer_fork(struct task_struct *task)
* The root cgroup is non-freezable, so we can skip the
* following check.
*/
- if (!freezer->css.cgroup->parent)
+ if (!parent_freezer(freezer))
goto out;
spin_lock_irq(&freezer->lock);
@@ -244,7 +240,7 @@ out:
/**
* update_if_frozen - update whether a cgroup finished freezing
- * @cgroup: cgroup of interest
+ * @css: css of interest
*
* Once FREEZING is initiated, transition to FROZEN is lazily updated by
* calling this function. If the current state is FREEZING but not FROZEN,
@@ -255,14 +251,14 @@ out:
* update_if_frozen() on all descendants prior to invoking this function.
*
* Task states and freezer state might disagree while tasks are being
- * migrated into or out of @cgroup, so we can't verify task states against
+ * migrated into or out of @css, so we can't verify task states against
* @freezer state here. See freezer_attach() for details.
*/
-static void update_if_frozen(struct cgroup *cgroup)
+static void update_if_frozen(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
- struct cgroup *pos;
- struct cgroup_iter it;
+ struct freezer *freezer = css_freezer(css);
+ struct cgroup_subsys_state *pos;
+ struct css_task_iter it;
struct task_struct *task;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -274,8 +270,8 @@ static void update_if_frozen(struct cgroup *cgroup)
goto out_unlock;
/* are all (live) children frozen? */
- cgroup_for_each_child(pos, cgroup) {
- struct freezer *child = cgroup_freezer(pos);
+ css_for_each_child(pos, css) {
+ struct freezer *child = css_freezer(pos);
if ((child->state & CGROUP_FREEZER_ONLINE) &&
!(child->state & CGROUP_FROZEN))
@@ -283,9 +279,9 @@ static void update_if_frozen(struct cgroup *cgroup)
}
/* are all tasks frozen? */
- cgroup_iter_start(cgroup, &it);
+ css_task_iter_start(css, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
+ while ((task = css_task_iter_next(&it))) {
if (freezing(task)) {
/*
* freezer_should_skip() indicates that the task
@@ -300,52 +296,49 @@ static void update_if_frozen(struct cgroup *cgroup)
freezer->state |= CGROUP_FROZEN;
out_iter_end:
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
out_unlock:
spin_unlock_irq(&freezer->lock);
}
-static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *m)
{
- struct cgroup *pos;
+ struct cgroup_subsys_state *pos;
rcu_read_lock();
/* update states bottom-up */
- cgroup_for_each_descendant_post(pos, cgroup)
+ css_for_each_descendant_post(pos, css)
update_if_frozen(pos);
- update_if_frozen(cgroup);
rcu_read_unlock();
- seq_puts(m, freezer_state_strs(cgroup_freezer(cgroup)->state));
+ seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
return 0;
}
static void freeze_cgroup(struct freezer *freezer)
{
- struct cgroup *cgroup = freezer->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it)))
+ css_task_iter_start(&freezer->css, &it);
+ while ((task = css_task_iter_next(&it)))
freeze_task(task);
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
static void unfreeze_cgroup(struct freezer *freezer)
{
- struct cgroup *cgroup = freezer->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it)))
+ css_task_iter_start(&freezer->css, &it);
+ while ((task = css_task_iter_next(&it)))
__thaw_task(task);
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
/**
@@ -395,12 +388,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
*/
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
- struct cgroup *pos;
-
- /* update @freezer */
- spin_lock_irq(&freezer->lock);
- freezer_apply_state(freezer, freeze, CGROUP_FREEZING_SELF);
- spin_unlock_irq(&freezer->lock);
+ struct cgroup_subsys_state *pos;
/*
* Update all its descendants in pre-order traversal. Each
@@ -408,24 +396,33 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
* CGROUP_FREEZING_PARENT.
*/
rcu_read_lock();
- cgroup_for_each_descendant_pre(pos, freezer->css.cgroup) {
- struct freezer *pos_f = cgroup_freezer(pos);
+ css_for_each_descendant_pre(pos, &freezer->css) {
+ struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);
- /*
- * Our update to @parent->state is already visible which is
- * all we need. No need to lock @parent. For more info on
- * synchronization, see freezer_post_create().
- */
spin_lock_irq(&pos_f->lock);
- freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING,
- CGROUP_FREEZING_PARENT);
+
+ if (pos_f == freezer) {
+ freezer_apply_state(pos_f, freeze,
+ CGROUP_FREEZING_SELF);
+ } else {
+ /*
+ * Our update to @parent->state is already visible
+ * which is all we need. No need to lock @parent.
+ * For more info on synchronization, see
+ * freezer_post_create().
+ */
+ freezer_apply_state(pos_f,
+ parent->state & CGROUP_FREEZING,
+ CGROUP_FREEZING_PARENT);
+ }
+
spin_unlock_irq(&pos_f->lock);
}
rcu_read_unlock();
}
-static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
bool freeze;
@@ -437,20 +434,22 @@ static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
else
return -EINVAL;
- freezer_change_state(cgroup_freezer(cgroup), freeze);
+ freezer_change_state(css_freezer(css), freeze);
return 0;
}
-static u64 freezer_self_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}
-static u64 freezer_parent_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}
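The freezer conversion above is still driven entirely through the cgroup filesystem, so the new css-based handlers can be exercised from user space. A minimal sketch follows; the v1 mount point /sys/fs/cgroup/freezer and the group name "demo" are assumptions, while freezer.state and its FROZEN/THAWED values belong to this controller.

/*
 * Minimal sketch (user space): drive freezer_write()/freezer_read() above
 * through the cgroup v1 freezer files. Mount point and group name are
 * illustrative assumptions; error handling is abbreviated.
 */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *state = "/sys/fs/cgroup/freezer/demo/freezer.state";
	char buf[32] = "";
	FILE *f;

	write_str(state, "FROZEN");	/* freezer_change_state(freezer, true) */

	f = fopen(state, "r");		/* freezer_read(): THAWED/FREEZING/FROZEN */
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("state: %s", buf);
		fclose(f);
	}

	write_str(state, "THAWED");	/* freezer_change_state(freezer, false) */
	return 0;
}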
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 383f8231e436..859c8dfd78a1 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -20,26 +20,46 @@
#include <linux/hardirq.h>
#include <linux/export.h>
-DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
- .active = true,
-#endif
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/context_tracking.h>
+
+struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(context_tracking_enabled);
+
+DEFINE_PER_CPU(struct context_tracking, context_tracking);
+EXPORT_SYMBOL_GPL(context_tracking);
+
+void context_tracking_cpu_set(int cpu)
+{
+ if (!per_cpu(context_tracking.active, cpu)) {
+ per_cpu(context_tracking.active, cpu) = true;
+ static_key_slow_inc(&context_tracking_enabled);
+ }
+}
/**
- * user_enter - Inform the context tracking that the CPU is going to
- * enter userspace mode.
+ * context_tracking_user_enter - Inform the context tracking that the CPU is going to
+ * enter userspace mode.
*
* This function must be called right before we switch from the kernel
* to userspace, when it's guaranteed the remaining kernel instructions
* to execute won't use any RCU read side critical section because this
* function sets RCU in extended quiescent state.
*/
-void user_enter(void)
+void context_tracking_user_enter(void)
{
unsigned long flags;
/*
+ * Repeat the user_enter() check here because some archs may be calling
+ * this from asm and if no CPU needs context tracking, they shouldn't
+ * go further. Repeat the check here until they support the static key
+ * check.
+ */
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
+ /*
* Some contexts may involve an exception occurring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
@@ -54,17 +74,32 @@ void user_enter(void)
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
- if (__this_cpu_read(context_tracking.active) &&
- __this_cpu_read(context_tracking.state) != IN_USER) {
+ if (__this_cpu_read(context_tracking.state) != IN_USER) {
+ if (__this_cpu_read(context_tracking.active)) {
+ trace_user_enter(0);
+ /*
+ * At this stage, only low level arch entry code remains and
+ * then we'll run in userspace. We can assume there won't be
+ * any RCU read-side critical section until the next call to
+ * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+ * on the tick.
+ */
+ vtime_user_enter(current);
+ rcu_user_enter();
+ }
/*
- * At this stage, only low level arch entry code remains and
- * then we'll run in userspace. We can assume there won't be
- * any RCU read-side critical section until the next call to
- * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
- * on the tick.
+ * Even if context tracking is disabled on this CPU, because it's outside
+ * the full dynticks mask for example, we still have to keep track of the
+ * context transitions and states to prevent inconsistency on those of
+ * other CPUs.
+ * If a task triggers an exception in userspace, sleep on the exception
+ * handler and then migrate to another CPU, that new CPU must know where
+ * the exception returns by the time we call exception_exit().
+ * This information can only be provided by the previous CPU when it called
+ * exception_enter().
+ * OTOH we can spare the calls to vtime and RCU when context_tracking.active
+ * is false because we know that CPU is not tickless.
*/
- vtime_user_enter(current);
- rcu_user_enter();
__this_cpu_write(context_tracking.state, IN_USER);
}
local_irq_restore(flags);
@@ -87,10 +122,9 @@ void user_enter(void)
*/
void __sched notrace preempt_schedule_context(void)
{
- struct thread_info *ti = current_thread_info();
enum ctx_state prev_ctx;
- if (likely(ti->preempt_count || irqs_disabled()))
+ if (likely(!preemptible()))
return;
/*
@@ -112,8 +146,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
/**
- * user_exit - Inform the context tracking that the CPU is
- * exiting userspace mode and entering the kernel.
+ * context_tracking_user_exit - Inform the context tracking that the CPU is
+ * exiting userspace mode and entering the kernel.
*
* This function must be called after we entered the kernel from userspace
* before any use of RCU read side critical section. This potentially includes
@@ -122,47 +156,34 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void user_exit(void)
+void context_tracking_user_exit(void)
{
unsigned long flags;
+ if (!static_key_false(&context_tracking_enabled))
+ return;
+
if (in_interrupt())
return;
local_irq_save(flags);
if (__this_cpu_read(context_tracking.state) == IN_USER) {
- /*
- * We are going to run code that may use RCU. Inform
- * RCU core about that (ie: we may need the tick again).
- */
- rcu_user_exit();
- vtime_user_exit(current);
+ if (__this_cpu_read(context_tracking.active)) {
+ /*
+ * We are going to run code that may use RCU. Inform
+ * RCU core about that (ie: we may need the tick again).
+ */
+ rcu_user_exit();
+ vtime_user_exit(current);
+ trace_user_exit(0);
+ }
__this_cpu_write(context_tracking.state, IN_KERNEL);
}
local_irq_restore(flags);
}
-void guest_enter(void)
-{
- if (vtime_accounting_enabled())
- vtime_guest_enter(current);
- else
- __guest_enter();
-}
-EXPORT_SYMBOL_GPL(guest_enter);
-
-void guest_exit(void)
-{
- if (vtime_accounting_enabled())
- vtime_guest_exit(current);
- else
- __guest_exit();
-}
-EXPORT_SYMBOL_GPL(guest_exit);
-
-
/**
- * context_tracking_task_switch - context switch the syscall callbacks
+ * __context_tracking_task_switch - context switch the syscall callbacks
* @prev: the task that is being switched out
* @next: the task that is being switched in
*
@@ -174,11 +195,19 @@ EXPORT_SYMBOL_GPL(guest_exit);
* migrate to some CPU that doesn't do the context tracking. As such the TIF
* flag may not be desired there.
*/
-void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
+void __context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
- if (__this_cpu_read(context_tracking.active)) {
- clear_tsk_thread_flag(prev, TIF_NOHZ);
- set_tsk_thread_flag(next, TIF_NOHZ);
- }
+ clear_tsk_thread_flag(prev, TIF_NOHZ);
+ set_tsk_thread_flag(next, TIF_NOHZ);
}
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+void __init context_tracking_init(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ context_tracking_cpu_set(cpu);
+}
+#endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b2b227b82123..d7f07a2da5a6 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
* get_online_cpus() not an api which is called all that often.
*
*/
-static void cpu_hotplug_begin(void)
+void cpu_hotplug_begin(void)
{
cpu_hotplug.active_writer = current;
@@ -127,7 +127,7 @@ static void cpu_hotplug_begin(void)
}
}
-static void cpu_hotplug_done(void)
+void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock);
@@ -154,10 +154,7 @@ void cpu_hotplug_enable(void)
cpu_maps_update_done();
}
-#else /* #if CONFIG_HOTPLUG_CPU */
-static void cpu_hotplug_begin(void) {}
-static void cpu_hotplug_done(void) {}
-#endif /* #else #if CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
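cpu_hotplug_begin()/cpu_hotplug_done() become globally visible here, but the section they guard is most easily reached from user space through sysfs. A rough sketch, assuming CPU 1 exists, is hot-pluggable and the caller is root:

/*
 * Minimal sketch: trigger the hotplug path serialized by
 * cpu_hotplug_begin()/cpu_hotplug_done() by offlining and re-onlining CPU 1.
 */
#include <stdio.h>

static int set_online(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu1/online";

	set_online(path, "0");	/* cpu_down() takes the hotplug lock */
	set_online(path, "1");	/* cpu_up() does the same on the way back */
	return 0;
}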
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ea1966db34f2..6bf981e13c43 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -68,10 +68,6 @@
*/
int number_of_cpusets __read_mostly;
-/* Forward declare cgroup structures */
-struct cgroup_subsys cpuset_subsys;
-struct cpuset;
-
/* See "Frequency meter" comments, below. */
struct fmeter {
@@ -115,27 +111,20 @@ struct cpuset {
int relax_domain_level;
};
-/* Retrieve the cpuset for a cgroup */
-static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
+static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
- struct cpuset, css);
+ return css ? container_of(css, struct cpuset, css) : NULL;
}
/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
- return container_of(task_subsys_state(task, cpuset_subsys_id),
- struct cpuset, css);
+ return css_cs(task_css(task, cpuset_subsys_id));
}
-static inline struct cpuset *parent_cs(const struct cpuset *cs)
+static inline struct cpuset *parent_cs(struct cpuset *cs)
{
- struct cgroup *pcgrp = cs->css.cgroup->parent;
-
- if (pcgrp)
- return cgroup_cs(pcgrp);
- return NULL;
+ return css_cs(css_parent(&cs->css));
}
#ifdef CONFIG_NUMA
@@ -212,29 +201,30 @@ static struct cpuset top_cpuset = {
/**
* cpuset_for_each_child - traverse online children of a cpuset
* @child_cs: loop cursor pointing to the current child
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @parent_cs: target cpuset to walk children of
*
* Walk @child_cs through the online children of @parent_cs. Must be used
* with RCU read locked.
*/
-#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs) \
- cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup) \
- if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
+ css_for_each_child((pos_css), &(parent_cs)->css) \
+ if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
/**
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
* @des_cs: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @root_cs: target cpuset to walk descendants of
*
* Walk @des_cs through the online descendants of @root_cs. Must be used
- * with RCU read locked. The caller may modify @pos_cgrp by calling
- * cgroup_rightmost_descendant() to skip subtree.
+ * with RCU read locked. The caller may modify @pos_css by calling
+ * css_rightmost_descendant() to skip subtree. @root_cs is included in the
+ * iteration and the first node to be visited.
*/
-#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs) \
- cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
- if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
+ css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
+ if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
/*
* There are two global mutexes guarding cpuset structures - cpuset_mutex
@@ -320,8 +310,7 @@ static struct file_system_type cpuset_fs_type = {
*
* Call with callback_mutex held.
*/
-static void guarantee_online_cpus(const struct cpuset *cs,
- struct cpumask *pmask)
+static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
cs = parent_cs(cs);
@@ -339,7 +328,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
*
* Call with callback_mutex held.
*/
-static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
+static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
cs = parent_cs(cs);
@@ -384,7 +373,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
* alloc_trial_cpuset - allocate a trial cpuset
* @cs: the cpuset that the trial cpuset duplicates
*/
-static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
+static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
struct cpuset *trial;
@@ -431,9 +420,9 @@ static void free_trial_cpuset(struct cpuset *trial)
* Return 0 if valid, -errno if not.
*/
-static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
+static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
- struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
struct cpuset *c, *par;
int ret;
@@ -441,7 +430,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
/* Each of our child cpusets must be a subset of us */
ret = -EBUSY;
- cpuset_for_each_child(c, cgrp, cur)
+ cpuset_for_each_child(c, css, cur)
if (!is_cpuset_subset(c, trial))
goto out;
@@ -462,7 +451,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
* overlap
*/
ret = -EINVAL;
- cpuset_for_each_child(c, cgrp, par) {
+ cpuset_for_each_child(c, css, par) {
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -515,13 +504,16 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
struct cpuset *root_cs)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs)
+ continue;
+
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+ pos_css = css_rightmost_descendant(pos_css);
continue;
}
@@ -596,7 +588,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
doms = NULL;
dattr = NULL;
@@ -625,7 +617,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
csn = 0;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
+ cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
+ if (cp == &top_cpuset)
+ continue;
/*
* Continue traversing beyond @cp iff @cp has some CPUs and
* isn't load balancing. The former is obvious. The
@@ -642,7 +636,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
csa[csn++] = cp;
/* skip @cp's subtree */
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+ pos_css = css_rightmost_descendant(pos_css);
}
rcu_read_unlock();
@@ -837,52 +831,45 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
/**
* cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
* @tsk: task to test
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
*
- * Called by cgroup_scan_tasks() for each task in a cgroup whose
- * cpus_allowed mask needs to be changed.
+ * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
+ * mask needs to be changed.
*
* We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point.
*/
-static void cpuset_change_cpumask(struct task_struct *tsk,
- struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
{
- struct cpuset *cpus_cs;
+ struct cpuset *cs = data;
+ struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
- cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
* Called with cpuset_mutex held
*
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
- struct cgroup_scanner scan;
-
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_cpumask;
- scan.heap = heap;
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
}
/*
* update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
* @root_cs: the root cpuset of the hierarchy
* @update_root: update root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
*
* This will update cpumasks of tasks in @root_cs and all other empty cpusets
* which take on cpumask of @root_cs.
@@ -893,17 +880,19 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
bool update_root, struct ptr_heap *heap)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
-
- if (update_root)
- update_tasks_cpumask(root_cs, heap);
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
- /* skip the whole subtree if @cp have some CPU */
- if (!cpumask_empty(cp->cpus_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
- continue;
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs) {
+ if (!update_root)
+ continue;
+ } else {
+ /* skip the whole subtree if @cp has some CPUs */
+ if (!cpumask_empty(cp->cpus_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
}
if (!css_tryget(&cp->css))
continue;
@@ -1059,20 +1048,24 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
task_unlock(tsk);
}
+struct cpuset_change_nodemask_arg {
+ struct cpuset *cs;
+ nodemask_t *newmems;
+};
+
/*
* Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
* of it to cpuset's new mems_allowed, and migrate pages to new nodes if
* memory_migrate flag is set. Called with cpuset_mutex held.
*/
-static void cpuset_change_nodemask(struct task_struct *p,
- struct cgroup_scanner *scan)
+static void cpuset_change_nodemask(struct task_struct *p, void *data)
{
- struct cpuset *cs = cgroup_cs(scan->cg);
+ struct cpuset_change_nodemask_arg *arg = data;
+ struct cpuset *cs = arg->cs;
struct mm_struct *mm;
int migrate;
- nodemask_t *newmems = scan->data;
- cpuset_change_task_nodemask(p, newmems);
+ cpuset_change_task_nodemask(p, arg->newmems);
mm = get_task_mm(p);
if (!mm)
@@ -1082,7 +1075,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
- cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
+ cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
mmput(mm);
}
@@ -1091,28 +1084,22 @@ static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
- * Called with cpuset_mutex held
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Called with cpuset_mutex held. No return value. It's guaranteed that
+ * css_scan_tasks() always returns 0 if @heap != NULL.
*/
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
- struct cgroup_scanner scan;
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
+ struct cpuset_change_nodemask_arg arg = { .cs = cs,
+ .newmems = &newmems };
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
guarantee_online_mems(mems_cs, &newmems);
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_nodemask;
- scan.heap = heap;
- scan.data = &newmems;
-
/*
* The mpol_rebind_mm() call takes mmap_sem, which we couldn't
* take while holding tasklist_lock. Forks can happen - the
@@ -1123,7 +1110,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
/*
* All the tasks' nodemasks have been updated, update
@@ -1139,7 +1126,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
* @cs: the root cpuset of the hierarchy
* @update_root: update the root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
*
* This will update nodemasks of tasks in @root_cs and all other empty cpusets
* which take on nodemask of @root_cs.
@@ -1150,17 +1137,19 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
bool update_root, struct ptr_heap *heap)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
-
- if (update_root)
- update_tasks_nodemask(root_cs, heap);
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
- /* skip the whole subtree if @cp have some CPU */
- if (!nodes_empty(cp->mems_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
- continue;
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs) {
+ if (!update_root)
+ continue;
+ } else {
+ /* skip the whole subtree if @cp has some memory */
+ if (!nodes_empty(cp->mems_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
}
if (!css_tryget(&cp->css))
continue;
@@ -1267,44 +1256,39 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
return 0;
}
-/*
+/**
* cpuset_change_flag - make a task's spread flags the same as its cpuset's
* @tsk: task to be updated
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
*
- * Called by cgroup_scan_tasks() for each task in a cgroup.
+ * Called by css_scan_tasks() for each task in a cgroup.
*
* We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point.
*/
-static void cpuset_change_flag(struct task_struct *tsk,
- struct cgroup_scanner *scan)
+static void cpuset_change_flag(struct task_struct *tsk, void *data)
{
- cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+ struct cpuset *cs = data;
+
+ cpuset_update_task_spread_flag(cs, tsk);
}
-/*
+/**
* update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
* Called with cpuset_mutex held
*
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
- struct cgroup_scanner scan;
-
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_flag;
- scan.heap = heap;
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
}
/*
@@ -1462,9 +1446,10 @@ static int fmeter_getrate(struct fmeter *fmp)
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct task_struct *task;
int ret;
@@ -1475,11 +1460,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
* flag is set.
*/
ret = -ENOSPC;
- if (!cgroup_sane_behavior(cgrp) &&
+ if (!cgroup_sane_behavior(css->cgroup) &&
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
/*
* Kthreads which disallow setaffinity shouldn't be moved
* to a new cpuset; we don't want to change their cpu
@@ -1508,11 +1493,11 @@ out_unlock:
return ret;
}
-static void cpuset_cancel_attach(struct cgroup *cgrp,
+static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
mutex_lock(&cpuset_mutex);
- cgroup_cs(cgrp)->attach_in_progress--;
+ css_cs(css)->attach_in_progress--;
mutex_unlock(&cpuset_mutex);
}
@@ -1523,16 +1508,18 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
*/
static cpumask_var_t cpus_attach;
-static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct mm_struct *mm;
struct task_struct *task;
struct task_struct *leader = cgroup_taskset_first(tset);
- struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
- struct cpuset *cs = cgroup_cs(cgrp);
- struct cpuset *oldcs = cgroup_cs(oldcgrp);
+ struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
+ cpuset_subsys_id);
+ struct cpuset *cs = css_cs(css);
+ struct cpuset *oldcs = css_cs(oldcss);
struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
@@ -1546,7 +1533,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
@@ -1608,9 +1595,10 @@ typedef enum {
FILE_SPREAD_SLAB,
} cpuset_filetype_t;
-static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
int retval = 0;
@@ -1657,9 +1645,10 @@ out_unlock:
return retval;
}
-static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
+static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
int retval = -ENODEV;
@@ -1683,10 +1672,10 @@ out_unlock:
/*
* Common handling for a write to a "cpus" or "mems" file.
*/
-static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cpuset_write_resmask(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct cpuset *trialcs;
int retval = -ENODEV;
@@ -1765,13 +1754,12 @@ static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
return count;
}
-static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
- struct cftype *cft,
- struct file *file,
- char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
char *page;
ssize_t retval = 0;
@@ -1801,9 +1789,9 @@ out:
return retval;
}
-static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_CPU_EXCLUSIVE:
@@ -1832,9 +1820,9 @@ static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
return 0;
}
-static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
@@ -1949,11 +1937,12 @@ static struct cftype files[] = {
* cgrp: control group that the new cpuset will be part of
*/
-static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuset *cs;
- if (!cgrp->parent)
+ if (!parent_css)
return &top_cpuset.css;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -1973,12 +1962,12 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
return &cs->css;
}
-static int cpuset_css_online(struct cgroup *cgrp)
+static int cpuset_css_online(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
- struct cgroup *pos_cg;
+ struct cgroup_subsys_state *pos_css;
if (!parent)
return 0;
@@ -1993,7 +1982,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
number_of_cpusets++;
- if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
+ if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
/*
@@ -2010,7 +1999,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
* (and likewise for mems) to the new cgroup.
*/
rcu_read_lock();
- cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+ cpuset_for_each_child(tmp_cs, pos_css, parent) {
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
rcu_read_unlock();
goto out_unlock;
@@ -2027,9 +2016,15 @@ out_unlock:
return 0;
}
-static void cpuset_css_offline(struct cgroup *cgrp)
+/*
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains_locked().
+ */
+
+static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
mutex_lock(&cpuset_mutex);
@@ -2042,15 +2037,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
mutex_unlock(&cpuset_mutex);
}
-/*
- * If the cpuset being removed has its flag 'sched_load_balance'
- * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_locked().
- */
-
-static void cpuset_css_free(struct cgroup *cgrp)
+static void cpuset_css_free(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
free_cpumask_var(cs->cpus_allowed);
kfree(cs);
@@ -2257,11 +2246,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
struct cpuset *cs;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
- if (!css_tryget(&cs->css))
+ cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
+ if (cs == &top_cpuset || !css_tryget(&cs->css))
continue;
rcu_read_unlock();
@@ -2350,7 +2339,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
- const struct cpuset *cpus_cs;
+ struct cpuset *cpus_cs;
rcu_read_lock();
cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
@@ -2423,7 +2412,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
* callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
* (an unusual configuration), then returns the root cpuset.
*/
-static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
+static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
cs = parent_cs(cs);
@@ -2493,7 +2482,7 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
*/
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- const struct cpuset *cs; /* current cpuset ancestors */
+ struct cpuset *cs; /* current cpuset ancestors */
int allowed; /* is allocation in zone z allowed? */
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
@@ -2731,7 +2720,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
goto out_free;
rcu_read_lock();
- css = task_subsys_state(tsk, cpuset_subsys_id);
+ css = task_css(tsk, cpuset_subsys_id);
retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
rcu_read_unlock();
if (retval < 0)
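The converted cpuset handlers (cpuset_write_resmask(), cpuset_write_u64(), cpuset_can_attach()/cpuset_attach()) remain reachable through the usual cgroup v1 files. A rough user-space sketch; the mount point /sys/fs/cgroup/cpuset and the group name "demo" are assumptions:

/*
 * Minimal sketch: create a cpuset, populate cpus/mems, then attach the
 * current task. Writes to cpuset.cpus/cpuset.mems land in
 * cpuset_write_resmask(); writing to "tasks" goes through
 * cpuset_can_attach()/cpuset_attach(). Error handling is abbreviated.
 */
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (f) {
		fputs(val, f);
		fclose(f);
	}
}

int main(void)
{
	char buf[64];

	mkdir("/sys/fs/cgroup/cpuset/demo", 0755);

	/* cpus and mems must be non-empty before any task can be attached */
	write_file("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", "0-1");
	write_file("/sys/fs/cgroup/cpuset/demo/cpuset.mems", "0");

	snprintf(buf, sizeof(buf), "%d", getpid());
	write_file("/sys/fs/cgroup/cpuset/demo/tasks", buf);

	return 0;
}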
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index c77206184b8b..97b67df8fbfe 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -116,6 +116,9 @@ int get_callchain_buffers(void)
err = alloc_callchain_buffers();
exit:
+ if (err)
+ atomic_dec(&nr_callchain_events);
+
mutex_unlock(&callchain_mutex);
return err;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f86599e8c123..953c14348375 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -145,6 +145,7 @@ static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
+static atomic_t nr_freq_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
@@ -340,8 +341,8 @@ struct perf_cgroup {
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
- return container_of(task_subsys_state(task, perf_subsys_id),
- struct perf_cgroup, css);
+ return container_of(task_css(task, perf_subsys_id),
+ struct perf_cgroup, css);
}
static inline bool
@@ -591,7 +592,9 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
if (!f.file)
return -EBADF;
- css = cgroup_css_from_dir(f.file, perf_subsys_id);
+ rcu_read_lock();
+
+ css = css_from_dir(f.file->f_dentry, &perf_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
@@ -617,6 +620,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
ret = -EINVAL;
}
out:
+ rcu_read_unlock();
fdput(f);
return ret;
}
@@ -869,12 +873,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
WARN_ON(!irqs_disabled());
- if (list_empty(&cpuctx->rotation_list)) {
- int was_empty = list_empty(head);
+ if (list_empty(&cpuctx->rotation_list))
list_add(&cpuctx->rotation_list, head);
- if (was_empty)
- tick_nohz_full_kick();
- }
}
static void get_ctx(struct perf_event_context *ctx)
@@ -1216,6 +1216,9 @@ static void perf_event__id_header_size(struct perf_event *event)
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
+
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
@@ -2712,7 +2715,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
hwc = &event->hw;
- if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+ if (hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
@@ -2811,10 +2814,11 @@ done:
#ifdef CONFIG_NO_HZ_FULL
bool perf_event_can_stop_tick(void)
{
- if (list_empty(&__get_cpu_var(rotation_list)))
- return true;
- else
+ if (atomic_read(&nr_freq_events) ||
+ __this_cpu_read(perf_throttled_count))
return false;
+ else
+ return true;
}
#endif
@@ -3128,36 +3132,63 @@ static void free_event_rcu(struct rcu_head *head)
static void ring_buffer_put(struct ring_buffer *rb);
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
-static void free_event(struct perf_event *event)
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
{
- irq_work_sync(&event->pending);
+ if (event->parent)
+ return;
+
+ if (has_branch_stack(event)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
+ atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+ }
+ if (is_cgroup_event(event))
+ atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+ if (event->parent)
+ return;
+
+ if (event->attach_state & PERF_ATTACH_TASK)
+ static_key_slow_dec_deferred(&perf_sched_events);
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_dec(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_dec(&nr_comm_events);
+ if (event->attr.task)
+ atomic_dec(&nr_task_events);
+ if (event->attr.freq)
+ atomic_dec(&nr_freq_events);
+ if (is_cgroup_event(event))
+ static_key_slow_dec_deferred(&perf_sched_events);
+ if (has_branch_stack(event))
+ static_key_slow_dec_deferred(&perf_sched_events);
+
+ unaccount_event_cpu(event, event->cpu);
+}
+static void __free_event(struct perf_event *event)
+{
if (!event->parent) {
- if (event->attach_state & PERF_ATTACH_TASK)
- static_key_slow_dec_deferred(&perf_sched_events);
- if (event->attr.mmap || event->attr.mmap_data)
- atomic_dec(&nr_mmap_events);
- if (event->attr.comm)
- atomic_dec(&nr_comm_events);
- if (event->attr.task)
- atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
- if (is_cgroup_event(event)) {
- atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
- static_key_slow_dec_deferred(&perf_sched_events);
- }
-
- if (has_branch_stack(event)) {
- static_key_slow_dec_deferred(&perf_sched_events);
- /* is system-wide event */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- atomic_dec(&per_cpu(perf_branch_stack_events,
- event->cpu));
- }
- }
}
+ if (event->destroy)
+ event->destroy(event);
+
+ if (event->ctx)
+ put_ctx(event->ctx);
+
+ call_rcu(&event->rcu_head, free_event_rcu);
+}
+static void free_event(struct perf_event *event)
+{
+ irq_work_sync(&event->pending);
+
+ unaccount_event(event);
+
if (event->rb) {
struct ring_buffer *rb;
@@ -3180,13 +3211,8 @@ static void free_event(struct perf_event *event)
if (is_cgroup_event(event))
perf_detach_cgroup(event);
- if (event->destroy)
- event->destroy(event);
- if (event->ctx)
- put_ctx(event->ctx);
-
- call_rcu(&event->rcu_head, free_event_rcu);
+ __free_event(event);
}
int perf_event_release_kernel(struct perf_event *event)
@@ -3544,6 +3570,15 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
+ case PERF_EVENT_IOC_ID:
+ {
+ u64 id = primary_event_id(event);
+
+ if (copy_to_user((void __user *)arg, &id, sizeof(id)))
+ return -EFAULT;
+ return 0;
+ }
+
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
@@ -3625,6 +3660,26 @@ static void calc_timer_values(struct perf_event *event,
*running = ctx_time - event->tstamp_running;
}
+static void perf_event_init_userpage(struct perf_event *event)
+{
+ struct perf_event_mmap_page *userpg;
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
+ userpg = rb->user_page;
+
+ /* Allow new userspace to detect that bit 0 is deprecated */
+ userpg->cap_bit0_is_deprecated = 1;
+ userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
+
+unlock:
+ rcu_read_unlock();
+}
+
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
@@ -3641,6 +3696,10 @@ void perf_event_update_userpage(struct perf_event *event)
u64 enabled, running, now;
rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
@@ -3651,12 +3710,8 @@ void perf_event_update_userpage(struct perf_event *event)
* NMI context
*/
calc_timer_values(event, &now, &enabled, &running);
- rb = rcu_dereference(event->rb);
- if (!rb)
- goto unlock;
userpg = rb->user_page;
-
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
@@ -4009,6 +4064,7 @@ again:
ring_buffer_attach(event, rb);
rcu_assign_pointer(event->rb, rb);
+ perf_event_init_userpage(event);
perf_event_update_userpage(event);
unlock:
@@ -4251,7 +4307,7 @@ static void __perf_event_header__init_id(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_clock();
- if (sample_type & PERF_SAMPLE_ID)
+ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
@@ -4290,6 +4346,9 @@ static void __perf_event__output_id_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ perf_output_put(handle, data->id);
}
void perf_event__output_id_sample(struct perf_event *event,
@@ -4355,7 +4414,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
- if (sub != event)
+ if ((sub != event) &&
+ (sub->state == PERF_EVENT_STATE_ACTIVE))
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
@@ -4402,6 +4462,9 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, *header);
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ perf_output_put(handle, data->id);
+
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
@@ -4462,20 +4525,6 @@ void perf_output_sample(struct perf_output_handle *handle,
}
}
- if (!event->attr.watermark) {
- int wakeup_events = event->attr.wakeup_events;
-
- if (wakeup_events) {
- struct ring_buffer *rb = handle->rb;
- int events = local_inc_return(&rb->events);
-
- if (events >= wakeup_events) {
- local_sub(wakeup_events, &rb->events);
- local_inc(&rb->wakeup);
- }
- }
- }
-
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
@@ -4511,16 +4560,31 @@ void perf_output_sample(struct perf_output_handle *handle,
}
}
- if (sample_type & PERF_SAMPLE_STACK_USER)
+ if (sample_type & PERF_SAMPLE_STACK_USER) {
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
+ }
if (sample_type & PERF_SAMPLE_WEIGHT)
perf_output_put(handle, data->weight);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
+
+ if (!event->attr.watermark) {
+ int wakeup_events = event->attr.wakeup_events;
+
+ if (wakeup_events) {
+ struct ring_buffer *rb = handle->rb;
+ int events = local_inc_return(&rb->events);
+
+ if (events >= wakeup_events) {
+ local_sub(wakeup_events, &rb->events);
+ local_inc(&rb->wakeup);
+ }
+ }
+ }
}
void perf_prepare_sample(struct perf_event_header *header,
@@ -4680,12 +4744,10 @@ perf_event_read_event(struct perf_event *event,
perf_output_end(&handle);
}
-typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
static void
perf_event_aux_ctx(struct perf_event_context *ctx,
- perf_event_aux_match_cb match,
perf_event_aux_output_cb output,
void *data)
{
@@ -4696,15 +4758,12 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
continue;
if (!event_filter_match(event))
continue;
- if (match(event, data))
- output(event, data);
+ output(event, data);
}
}
static void
-perf_event_aux(perf_event_aux_match_cb match,
- perf_event_aux_output_cb output,
- void *data,
+perf_event_aux(perf_event_aux_output_cb output, void *data,
struct perf_event_context *task_ctx)
{
struct perf_cpu_context *cpuctx;
@@ -4717,7 +4776,7 @@ perf_event_aux(perf_event_aux_match_cb match,
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
- perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+ perf_event_aux_ctx(&cpuctx->ctx, output, data);
if (task_ctx)
goto next;
ctxn = pmu->task_ctx_nr;
@@ -4725,14 +4784,14 @@ perf_event_aux(perf_event_aux_match_cb match,
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
- perf_event_aux_ctx(ctx, match, output, data);
+ perf_event_aux_ctx(ctx, output, data);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
if (task_ctx) {
preempt_disable();
- perf_event_aux_ctx(task_ctx, match, output, data);
+ perf_event_aux_ctx(task_ctx, output, data);
preempt_enable();
}
rcu_read_unlock();
@@ -4741,7 +4800,7 @@ next:
/*
* task tracking -- fork/exit
*
- * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
+ * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
*/
struct perf_task_event {
@@ -4759,6 +4818,13 @@ struct perf_task_event {
} event_id;
};
+static int perf_event_task_match(struct perf_event *event)
+{
+ return event->attr.comm || event->attr.mmap ||
+ event->attr.mmap2 || event->attr.mmap_data ||
+ event->attr.task;
+}
+
static void perf_event_task_output(struct perf_event *event,
void *data)
{
@@ -4768,6 +4834,9 @@ static void perf_event_task_output(struct perf_event *event,
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
+ if (!perf_event_task_match(event))
+ return;
+
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
@@ -4790,13 +4859,6 @@ out:
task_event->event_id.header.size = size;
}
-static int perf_event_task_match(struct perf_event *event,
- void *data __maybe_unused)
-{
- return event->attr.comm || event->attr.mmap ||
- event->attr.mmap_data || event->attr.task;
-}
-
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
@@ -4825,8 +4887,7 @@ static void perf_event_task(struct task_struct *task,
},
};
- perf_event_aux(perf_event_task_match,
- perf_event_task_output,
+ perf_event_aux(perf_event_task_output,
&task_event,
task_ctx);
}
@@ -4853,6 +4914,11 @@ struct perf_comm_event {
} event_id;
};
+static int perf_event_comm_match(struct perf_event *event)
+{
+ return event->attr.comm;
+}
+
static void perf_event_comm_output(struct perf_event *event,
void *data)
{
@@ -4862,6 +4928,9 @@ static void perf_event_comm_output(struct perf_event *event,
int size = comm_event->event_id.header.size;
int ret;
+ if (!perf_event_comm_match(event))
+ return;
+
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
@@ -4883,12 +4952,6 @@ out:
comm_event->event_id.header.size = size;
}
-static int perf_event_comm_match(struct perf_event *event,
- void *data __maybe_unused)
-{
- return event->attr.comm;
-}
-
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
char comm[TASK_COMM_LEN];
@@ -4903,8 +4966,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
- perf_event_aux(perf_event_comm_match,
- perf_event_comm_output,
+ perf_event_aux(perf_event_comm_output,
comm_event,
NULL);
}
@@ -4955,6 +5017,9 @@ struct perf_mmap_event {
const char *file_name;
int file_size;
+ int maj, min;
+ u64 ino;
+ u64 ino_generation;
struct {
struct perf_event_header header;
@@ -4967,6 +5032,17 @@ struct perf_mmap_event {
} event_id;
};
+static int perf_event_mmap_match(struct perf_event *event,
+ void *data)
+{
+ struct perf_mmap_event *mmap_event = data;
+ struct vm_area_struct *vma = mmap_event->vma;
+ int executable = vma->vm_flags & VM_EXEC;
+
+ return (!executable && event->attr.mmap_data) ||
+ (executable && (event->attr.mmap || event->attr.mmap2));
+}
+
static void perf_event_mmap_output(struct perf_event *event,
void *data)
{
@@ -4976,6 +5052,17 @@ static void perf_event_mmap_output(struct perf_event *event,
int size = mmap_event->event_id.header.size;
int ret;
+ if (!perf_event_mmap_match(event, data))
+ return;
+
+ if (event->attr.mmap2) {
+ mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
+ mmap_event->event_id.header.size += sizeof(mmap_event->maj);
+ mmap_event->event_id.header.size += sizeof(mmap_event->min);
+ mmap_event->event_id.header.size += sizeof(mmap_event->ino);
+ mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
+ }
+
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
@@ -4986,6 +5073,14 @@ static void perf_event_mmap_output(struct perf_event *event,
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
+
+ if (event->attr.mmap2) {
+ perf_output_put(&handle, mmap_event->maj);
+ perf_output_put(&handle, mmap_event->min);
+ perf_output_put(&handle, mmap_event->ino);
+ perf_output_put(&handle, mmap_event->ino_generation);
+ }
+
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
@@ -4996,21 +5091,12 @@ out:
mmap_event->event_id.header.size = size;
}
-static int perf_event_mmap_match(struct perf_event *event,
- void *data)
-{
- struct perf_mmap_event *mmap_event = data;
- struct vm_area_struct *vma = mmap_event->vma;
- int executable = vma->vm_flags & VM_EXEC;
-
- return (!executable && event->attr.mmap_data) ||
- (executable && event->attr.mmap);
-}
-
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
+ int maj = 0, min = 0;
+ u64 ino = 0, gen = 0;
unsigned int size;
char tmp[16];
char *buf = NULL;
@@ -5019,6 +5105,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
memset(tmp, 0, sizeof(tmp));
if (file) {
+ struct inode *inode;
+ dev_t dev;
/*
* d_path works from the end of the rb backwards, so we
* need to add enough zero bytes after the string to handle
@@ -5034,6 +5122,13 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
+ inode = file_inode(vma->vm_file);
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ gen = inode->i_generation;
+ maj = MAJOR(dev);
+ min = MINOR(dev);
+
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
@@ -5064,14 +5159,17 @@ got_name:
mmap_event->file_name = name;
mmap_event->file_size = size;
+ mmap_event->maj = maj;
+ mmap_event->min = min;
+ mmap_event->ino = ino;
+ mmap_event->ino_generation = gen;
if (!(vma->vm_flags & VM_EXEC))
mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
- perf_event_aux(perf_event_mmap_match,
- perf_event_mmap_output,
+ perf_event_aux(perf_event_mmap_output,
mmap_event,
NULL);
@@ -5101,6 +5199,10 @@ void perf_event_mmap(struct vm_area_struct *vma)
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
+ /* .maj (attr_mmap2 only) */
+ /* .min (attr_mmap2 only) */
+ /* .ino (attr_mmap2 only) */
+ /* .ino_generation (attr_mmap2 only) */
};
perf_event_mmap_event(&mmap_event);
@@ -5178,6 +5280,7 @@ static int __perf_event_overflow(struct perf_event *event,
__this_cpu_inc(perf_throttled_count);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
+ tick_nohz_full_kick();
ret = 1;
}
}
@@ -6443,6 +6546,44 @@ unlock:
return pmu;
}
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+ if (event->parent)
+ return;
+
+ if (has_branch_stack(event)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
+ atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+ }
+ if (is_cgroup_event(event))
+ atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void account_event(struct perf_event *event)
+{
+ if (event->parent)
+ return;
+
+ if (event->attach_state & PERF_ATTACH_TASK)
+ static_key_slow_inc(&perf_sched_events.key);
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_inc(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_inc(&nr_comm_events);
+ if (event->attr.task)
+ atomic_inc(&nr_task_events);
+ if (event->attr.freq) {
+ if (atomic_inc_return(&nr_freq_events) == 1)
+ tick_nohz_full_kick_all();
+ }
+ if (has_branch_stack(event))
+ static_key_slow_inc(&perf_sched_events.key);
+ if (is_cgroup_event(event))
+ static_key_slow_inc(&perf_sched_events.key);
+
+ account_event_cpu(event, event->cpu);
+}
+
/*
* Allocate and initialize a event structure
*/
@@ -6457,7 +6598,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
- long err;
+ long err = -EINVAL;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
@@ -6540,49 +6681,35 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
* we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
- goto done;
+ goto err_ns;
pmu = perf_init_event(event);
-
-done:
- err = 0;
if (!pmu)
- err = -EINVAL;
- else if (IS_ERR(pmu))
+ goto err_ns;
+ else if (IS_ERR(pmu)) {
err = PTR_ERR(pmu);
-
- if (err) {
- if (event->ns)
- put_pid_ns(event->ns);
- kfree(event);
- return ERR_PTR(err);
+ goto err_ns;
}
if (!event->parent) {
- if (event->attach_state & PERF_ATTACH_TASK)
- static_key_slow_inc(&perf_sched_events.key);
- if (event->attr.mmap || event->attr.mmap_data)
- atomic_inc(&nr_mmap_events);
- if (event->attr.comm)
- atomic_inc(&nr_comm_events);
- if (event->attr.task)
- atomic_inc(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
- if (err) {
- free_event(event);
- return ERR_PTR(err);
- }
- }
- if (has_branch_stack(event)) {
- static_key_slow_inc(&perf_sched_events.key);
- if (!(event->attach_state & PERF_ATTACH_TASK))
- atomic_inc(&per_cpu(perf_branch_stack_events,
- event->cpu));
+ if (err)
+ goto err_pmu;
}
}
return event;
+
+err_pmu:
+ if (event->destroy)
+ event->destroy(event);
+err_ns:
+ if (event->ns)
+ put_pid_ns(event->ns);
+ kfree(event);
+
+ return ERR_PTR(err);
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
@@ -6640,6 +6767,10 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
if (ret)
return -EFAULT;
+ /* disabled for now */
+ if (attr->mmap2)
+ return -EINVAL;
+
if (attr->__reserved_1)
return -EINVAL;
@@ -6864,17 +6995,14 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
- if (err)
- goto err_alloc;
- /*
- * one more event:
- * - that has cgroup constraint on event->cpu
- * - that may need work on context switch
- */
- atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
- static_key_slow_inc(&perf_sched_events.key);
+ if (err) {
+ __free_event(event);
+ goto err_task;
+ }
}
+ account_event(event);
+
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -7070,6 +7198,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err;
}
+ account_event(event);
+
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -7106,18 +7236,20 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event);
+ unaccount_event_cpu(event, src_cpu);
put_ctx(src_ctx);
- list_add(&event->event_entry, &events);
+ list_add(&event->migrate_entry, &events);
}
mutex_unlock(&src_ctx->mutex);
synchronize_rcu();
mutex_lock(&dst_ctx->mutex);
- list_for_each_entry_safe(event, tmp, &events, event_entry) {
- list_del(&event->event_entry);
+ list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
+ list_del(&event->migrate_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
+ account_event_cpu(event, dst_cpu);
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
@@ -7798,7 +7930,8 @@ unlock:
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
+static struct cgroup_subsys_state *
+perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct perf_cgroup *jc;
@@ -7815,11 +7948,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
return &jc->css;
}
-static void perf_cgroup_css_free(struct cgroup *cont)
+static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct perf_cgroup *jc;
- jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
- struct perf_cgroup, css);
+ struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
+
free_percpu(jc->info);
kfree(jc);
}
@@ -7831,15 +7963,17 @@ static int __perf_cgroup_move(void *info)
return 0;
}
-static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset)
+ cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}
-static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void perf_cgroup_exit(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
struct task_struct *task)
{
/*
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index cd55144270b5..9c2ddfbf4525 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -87,10 +87,31 @@ again:
goto out;
/*
- * Publish the known good head. Rely on the full barrier implied
- * by atomic_dec_and_test() order the rb->head read and this
- * write.
+ * Since the mmap() consumer (userspace) can run on a different CPU:
+ *
+ * kernel user
+ *
+ * READ ->data_tail READ ->data_head
+ * smp_mb() (A) smp_rmb() (C)
+ * WRITE $data READ $data
+ * smp_wmb() (B) smp_mb() (D)
+ * STORE ->data_head WRITE ->data_tail
+ *
+ * Where A pairs with D, and B pairs with C.
+ *
+ * I don't think A needs to be a full barrier because we won't in fact
+ * write data until we see the store from userspace. So we simply don't
+ * issue the data WRITE until we observe it. Be conservative for now.
+ *
+ * OTOH, D needs to be a full barrier since it separates the data READ
+ * from the tail WRITE.
+ *
+ * For B a WMB is sufficient since it separates two WRITEs, and for C
+ * an RMB is sufficient since it separates two READs.
+ *
+ * See perf_output_begin().
*/
+ smp_wmb();
rb->user_page->data_head = head;
/*
@@ -154,9 +175,11 @@ int perf_output_begin(struct perf_output_handle *handle,
* Userspace could choose to issue a mb() before updating the
* tail pointer. So that all reads will be completed before the
* write is issued.
+ *
+ * See perf_output_put_handle().
*/
tail = ACCESS_ONCE(rb->user_page->data_tail);
- smp_rmb();
+ smp_mb();
offset = head = local_read(&rb->head);
head += size;
if (unlikely(!perf_output_space(rb, tail, offset, head)))
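The barrier-pairing comment above implies a matching protocol on the mmap() consumer side. As a rough illustration only (not part of this patch), a userspace reader honouring barriers C and D might look like the sketch below; the perf_event_mmap_page and perf_event_header fields are the real uapi ones, but the helper names and the simplistic no-wrap record handling are assumptions.

#include <linux/perf_event.h>
#include <stdint.h>
#include <stddef.h>

/* Sketch of a userspace ring-buffer consumer pairing barriers C and D. */
static void drain_ring(struct perf_event_mmap_page *pg, char *data,
		       uint64_t mask, void (*handle)(void *ev))
{
	/* C: acquire-load of data_head orders it before the data reads. */
	uint64_t head = __atomic_load_n(&pg->data_head, __ATOMIC_ACQUIRE);
	uint64_t tail = pg->data_tail;

	while (tail < head) {
		struct perf_event_header *ev =
			(void *)(data + (tail & mask));
		/* Assumes records do not wrap; real code must handle that. */
		handle(ev);
		tail += ev->size;
	}

	/* D: release-store of data_tail orders it after the data reads. */
	__atomic_store_n(&pg->data_tail, tail, __ATOMIC_RELEASE);
}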
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f3569747d629..ad8e1bdca70e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
tmp = ri;
ri = ri->next;
kfree(tmp);
+ utask->depth--;
if (!chained)
break;
-
- utask->depth--;
-
BUG_ON(!ri);
}
diff --git a/kernel/extable.c b/kernel/extable.c
index 67460b93b1a1..832cb28105bb 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@ u32 __initdata main_extable_sort_needed = 1;
/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
- if (main_extable_sort_needed) {
+ if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
pr_notice("Sorting __ex_table...\n");
sort_extable(__start___ex_table, __stop___ex_table);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index bf46287c91a4..086fe73ad6bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
struct rb_node **rb_link, *rb_parent;
int retval;
unsigned long charge;
- struct mempolicy *pol;
uprobe_start_dup_mmap();
down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
goto fail_nomem;
*tmp = *mpnt;
INIT_LIST_HEAD(&tmp->anon_vma_chain);
- pol = mpol_dup(vma_policy(mpnt));
- retval = PTR_ERR(pol);
- if (IS_ERR(pol))
+ retval = vma_dup_policy(mpnt, tmp);
+ if (retval)
goto fail_nomem_policy;
- vma_set_policy(tmp, pol);
tmp->vm_mm = mm;
if (anon_vma_fork(tmp, mpnt))
goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@ out:
uprobe_end_dup_mmap();
return retval;
fail_nomem_anon_vma_fork:
- mpol_put(pol);
+ mpol_put(vma_policy(tmp));
fail_nomem_policy:
kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
@@ -522,7 +519,7 @@ static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
spin_lock_init(&mm->ioctx_lock);
- INIT_HLIST_HEAD(&mm->ioctx_list);
+ mm->ioctx_table = NULL;
#endif
}
@@ -1173,13 +1170,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
return ERR_PTR(-EINVAL);
/*
- * If the new process will be in a different pid namespace
- * don't allow the creation of threads.
+ * If the new process will be in a different pid or user namespace
+ * do not allow it to share a thread group or signal handlers or
+ * parent with the forking task.
*/
- if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
- (task_active_pid_ns(current) !=
- current->nsproxy->pid_ns_for_children))
- return ERR_PTR(-EINVAL);
+ if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+ if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+ (task_active_pid_ns(current) !=
+ current->nsproxy->pid_ns_for_children))
+ return ERR_PTR(-EINVAL);
+ }
retval = security_task_create(clone_flags);
if (retval)
@@ -1576,15 +1576,6 @@ long do_fork(unsigned long clone_flags,
long nr;
/*
- * Do some preliminary argument and permissions checking before we
- * actually start allocating stuff
- */
- if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
- if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
- return -EINVAL;
- }
-
- /*
* Determine whether and which event to report to ptracer. When
* called from kernel_thread or CLONE_UNTRACED is explicitly
* requested, no event is reported; otherwise, report if the event
@@ -1825,11 +1816,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
if (unshare_flags & CLONE_NEWUSER)
unshare_flags |= CLONE_THREAD | CLONE_FS;
/*
- * If unsharing a pid namespace must also unshare the thread.
- */
- if (unshare_flags & CLONE_NEWPID)
- unshare_flags |= CLONE_THREAD;
- /*
* If unsharing a thread from a thread group, must also unshare vm.
*/
if (unshare_flags & CLONE_THREAD)
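For illustration only (not part of the patch): with the check above, a clone() that requests a new user or pid namespace while sharing signal handlers or the parent is refused at fork time. A hypothetical userspace probe:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	size_t sz = 64 * 1024;
	char *stack = malloc(sz);

	/* CLONE_SIGHAND requires CLONE_VM; adding CLONE_NEWUSER now makes
	 * copy_process() return -EINVAL with this patch.  Stack grows down
	 * on most architectures, hence stack + sz. */
	if (clone(child_fn, stack + sz,
		  CLONE_VM | CLONE_SIGHAND | CLONE_NEWUSER | SIGCHLD,
		  NULL) == -1)
		printf("clone: %s (expected EINVAL)\n", strerror(errno));

	free(stack);
	return 0;
}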
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index 9bd0934f6c33..7a7d2ee96d42 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -74,7 +74,7 @@ static int __init gcov_persist_setup(char *str)
{
unsigned long val;
- if (strict_strtoul(str, 0, &val)) {
+ if (kstrtoul(str, 0, &val)) {
pr_warning("invalid gcov_persist parameter '%s'\n", str);
return 0;
}
diff --git a/kernel/groups.c b/kernel/groups.c
index 6b2588dd04ff..90cf1c38c8ea 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -233,7 +233,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!nsown_capable(CAP_SETGID))
+ if (!ns_capable(current_user_ns(), CAP_SETGID))
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 6df614912b9d..3e97fb126e6b 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
+#include <linux/utsname.h>
/*
* The number of tasks checked:
@@ -99,10 +100,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* Ok, the task did not get scheduled for more than 2 minutes,
* complain:
*/
- printk(KERN_ERR "INFO: task %s:%d blocked for more than "
- "%ld seconds.\n", t->comm, t->pid, timeout);
- printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
- " disables this message.\n");
+ pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+ t->comm, t->pid, timeout);
+ pr_err(" %s %s %.*s\n",
+ print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+ " disables this message.\n");
sched_show_task(t);
debug_show_held_locks(t);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index d1a758bc972a..4a1fef09f658 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -1,15 +1,4 @@
-# Select this to activate the generic irq options below
-config HAVE_GENERIC_HARDIRQS
- bool
-
-if HAVE_GENERIC_HARDIRQS
menu "IRQ subsystem"
-#
-# Interrupt subsystem related configuration options
-#
-config GENERIC_HARDIRQS
- def_bool y
-
# Options selectable by the architecture code
# Make sparse irq Kconfig switch below available
@@ -84,4 +73,3 @@ config SPARSE_IRQ
If you don't know what to do here, say N.
endmenu
-endif
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 60f48fa0fd0d..297a9247a3b3 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -13,6 +13,7 @@
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
#ifdef HAVE_JUMP_LABEL
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f7b55ba745..2a74f307c5ec 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1474,11 +1474,8 @@ static int __init __parse_crashkernel(char *cmdline,
if (first_colon && (!first_space || first_colon < first_space))
return parse_crashkernel_mem(ck_cmdline, system_ram,
crash_size, crash_base);
- else
- return parse_crashkernel_simple(ck_cmdline, crash_size,
- crash_base);
- return 0;
+ return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
diff --git a/kernel/kmod.c b/kernel/kmod.c
index fb326365b694..b086006c59e7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -571,6 +571,10 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
DECLARE_COMPLETION_ONSTACK(done);
int retval = 0;
+ if (!sub_info->path) {
+ call_usermodehelper_freeinfo(sub_info);
+ return -EINVAL;
+ }
helper_lock();
if (!khelper_wq || usermodehelper_disabled) {
retval = -EBUSY;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498d665c..a0d367a49122 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
struct kprobe_insn_page {
struct list_head list;
kprobe_opcode_t *insns; /* Page of instruction slots */
+ struct kprobe_insn_cache *cache;
int nused;
int ngarbage;
char slot_used[];
@@ -121,12 +122,6 @@ struct kprobe_insn_page {
(offsetof(struct kprobe_insn_page, slot_used) + \
(sizeof(char) * (slots)))
-struct kprobe_insn_cache {
- struct list_head pages; /* list of kprobe_insn_page */
- size_t insn_size; /* size of instruction slot */
- int nr_garbage;
-};
-
static int slots_per_page(struct kprobe_insn_cache *c)
{
return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +133,20 @@ enum kprobe_slot_state {
SLOT_USED = 2,
};
-static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+static void *alloc_insn_page(void)
+{
+ return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+ module_free(NULL, page);
+}
+
+struct kprobe_insn_cache kprobe_insn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+ .alloc = alloc_insn_page,
+ .free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
.insn_size = MAX_INSN_SIZE,
.nr_garbage = 0,
@@ -150,10 +157,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
* __get_insn_slot() - Find a slot on an executable page for an instruction.
* We allocate an executable page if there's no room on existing ones.
*/
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
struct kprobe_insn_page *kip;
+ kprobe_opcode_t *slot = NULL;
+ mutex_lock(&c->mutex);
retry:
list_for_each_entry(kip, &c->pages, list) {
if (kip->nused < slots_per_page(c)) {
@@ -162,7 +171,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
if (kip->slot_used[i] == SLOT_CLEAN) {
kip->slot_used[i] = SLOT_USED;
kip->nused++;
- return kip->insns + (i * c->insn_size);
+ slot = kip->insns + (i * c->insn_size);
+ goto out;
}
}
/* kip->nused is broken. Fix it. */
@@ -178,37 +188,29 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
/* All out of space. Need to allocate a new page. */
kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
if (!kip)
- return NULL;
+ goto out;
/*
* Use module_alloc so this page is within +/- 2GB of where the
* kernel image and loaded module images reside. This is required
* so x86_64 can correctly handle the %rip-relative fixups.
*/
- kip->insns = module_alloc(PAGE_SIZE);
+ kip->insns = c->alloc();
if (!kip->insns) {
kfree(kip);
- return NULL;
+ goto out;
}
INIT_LIST_HEAD(&kip->list);
memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
kip->slot_used[0] = SLOT_USED;
kip->nused = 1;
kip->ngarbage = 0;
+ kip->cache = c;
list_add(&kip->list, &c->pages);
- return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
- kprobe_opcode_t *ret = NULL;
-
- mutex_lock(&kprobe_insn_mutex);
- ret = __get_insn_slot(&kprobe_insn_slots);
- mutex_unlock(&kprobe_insn_mutex);
-
- return ret;
+ slot = kip->insns;
+out:
+ mutex_unlock(&c->mutex);
+ return slot;
}
/* Return 1 if all garbage is collected, otherwise 0. */
@@ -225,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
*/
if (!list_is_singular(&kip->list)) {
list_del(&kip->list);
- module_free(NULL, kip->insns);
+ kip->cache->free(kip->insns);
kfree(kip);
}
return 1;
@@ -255,11 +257,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
return 0;
}
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
- kprobe_opcode_t *slot, int dirty)
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+ kprobe_opcode_t *slot, int dirty)
{
struct kprobe_insn_page *kip;
+ mutex_lock(&c->mutex);
list_for_each_entry(kip, &c->pages, list) {
long idx = ((long)slot - (long)kip->insns) /
(c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +275,25 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
collect_garbage_slots(c);
} else
collect_one_slot(kip, idx);
- return;
+ goto out;
}
}
/* Could not free this slot. */
WARN_ON(1);
+out:
+ mutex_unlock(&c->mutex);
}
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
- mutex_lock(&kprobe_insn_mutex);
- __free_insn_slot(&kprobe_insn_slots, slot, dirty);
- mutex_unlock(&kprobe_insn_mutex);
-}
#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+ .mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+ .alloc = alloc_insn_page,
+ .free = free_insn_page,
.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
/* .insn_size is initialized later */
.nr_garbage = 0,
};
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
- kprobe_opcode_t *ret = NULL;
-
- mutex_lock(&kprobe_optinsn_mutex);
- ret = __get_insn_slot(&kprobe_optinsn_slots);
- mutex_unlock(&kprobe_optinsn_mutex);
-
- return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
- mutex_lock(&kprobe_optinsn_mutex);
- __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
- mutex_unlock(&kprobe_optinsn_mutex);
-}
#endif
#endif
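A rough illustration of the refactor above (not in the patch): with the allocator callbacks now embedded in struct kprobe_insn_cache, another cache can be declared by filling in its own alloc/free hooks, exactly as kprobe_insn_slots does. The cache name and hook names below are hypothetical; the sketch assumes the kprobes internals (linux/kprobes.h, linux/moduleloader.h) are in scope.

/* Hypothetical extra insn cache reusing the shared slot management. */
static void *my_alloc_insn_page(void)
{
	return module_alloc(PAGE_SIZE);
}

static void my_free_insn_page(void *page)
{
	module_free(NULL, page);
}

struct kprobe_insn_cache my_insn_slots = {
	.mutex = __MUTEX_INITIALIZER(my_insn_slots.mutex),
	.alloc = my_alloc_insn_page,
	.free = my_free_insn_page,
	.pages = LIST_HEAD_INIT(my_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};

/* Slots then come and go through the generic helpers:
 *   kprobe_opcode_t *slot = __get_insn_slot(&my_insn_slots);
 *   __free_insn_slot(&my_insn_slots, slot, 0);
 */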
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 6ada93c23a9a..9659d38e008f 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -113,7 +113,7 @@ static ssize_t kexec_crash_size_store(struct kobject *kobj,
unsigned long cnt;
int ret;
- if (strict_strtoul(buf, 0, &cnt))
+ if (kstrtoul(buf, 0, &cnt))
return -EINVAL;
ret = crash_shrink_memory(cnt);
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a7..86ae2aebf004 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
arch_spinlock_t *lock;
preempt_disable();
- rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_lock(lock);
}
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
{
arch_spinlock_t *lock;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_unlock(lock);
preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
arch_spinlock_t *lock;
preempt_disable();
- rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_lock(lock);
}
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
arch_spinlock_t *lock;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_unlock(lock);
preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
int i;
preempt_disable();
- rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
{
int i;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 2b6e69909c39..7cbd4507a7e6 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -18,14 +18,14 @@
struct key *modsign_keyring;
-extern __initdata const u8 modsign_certificate_list[];
-extern __initdata const u8 modsign_certificate_list_end[];
+extern __initconst const u8 modsign_certificate_list[];
+extern __initconst const u8 modsign_certificate_list_end[];
/*
* We need to make sure ccache doesn't cache the .o file as it doesn't notice
* if modsign.pub changes.
*/
-static __initdata const char annoy_ccache[] = __TIME__ "foo";
+static __initconst const char annoy_ccache[] = __TIME__ "foo";
/*
* Load the compiled-in keys
diff --git a/kernel/module.c b/kernel/module.c
index 206915830d29..dc582749fa13 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -136,6 +136,7 @@ static int param_set_bool_enable_only(const char *val,
}
static const struct kernel_param_ops param_ops_bool_enable_only = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bool_enable_only,
.get = param_get_bool,
};
@@ -603,7 +604,7 @@ static void setup_modinfo_##field(struct module *mod, const char *s) \
static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
struct module_kobject *mk, char *buffer) \
{ \
- return sprintf(buffer, "%s\n", mk->mod->field); \
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
} \
static int modinfo_##field##_exists(struct module *mod) \
{ \
@@ -1611,6 +1612,14 @@ static void module_remove_modinfo_attrs(struct module *mod)
kfree(mod->modinfo_attrs);
}
+static void mod_kobject_put(struct module *mod)
+{
+ DECLARE_COMPLETION_ONSTACK(c);
+ mod->mkobj.kobj_completion = &c;
+ kobject_put(&mod->mkobj.kobj);
+ wait_for_completion(&c);
+}
+
static int mod_sysfs_init(struct module *mod)
{
int err;
@@ -1638,7 +1647,7 @@ static int mod_sysfs_init(struct module *mod)
err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
"%s", mod->name);
if (err)
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
/* delay uevent until full sysfs population */
out:
@@ -1682,7 +1691,7 @@ out_unreg_param:
out_unreg_holders:
kobject_put(mod->holders_dir);
out_unreg:
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
out:
return err;
}
@@ -1691,7 +1700,7 @@ static void mod_sysfs_fini(struct module *mod)
{
remove_notes_attrs(mod);
remove_sect_attrs(mod);
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
}
#else /* !CONFIG_SYSFS */
@@ -2540,21 +2549,20 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
/* Sets info->hdr and info->len. */
static int copy_module_from_fd(int fd, struct load_info *info)
{
- struct file *file;
+ struct fd f = fdget(fd);
int err;
struct kstat stat;
loff_t pos;
ssize_t bytes = 0;
- file = fget(fd);
- if (!file)
+ if (!f.file)
return -ENOEXEC;
- err = security_kernel_module_from_file(file);
+ err = security_kernel_module_from_file(f.file);
if (err)
goto out;
- err = vfs_getattr(&file->f_path, &stat);
+ err = vfs_getattr(&f.file->f_path, &stat);
if (err)
goto out;
@@ -2577,7 +2585,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
pos = 0;
while (pos < stat.size) {
- bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
+ bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
stat.size - pos);
if (bytes < 0) {
vfree(info->hdr);
@@ -2591,7 +2599,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
info->len = pos;
out:
- fput(file);
+ fdput(f);
return err;
}
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a52ee7bb830d..d24105b1b794 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
*/
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
+ struct task_struct *owner;
int retval = 1;
rcu_read_lock();
- if (lock->owner)
- retval = lock->owner->on_cpu;
+ owner = ACCESS_ONCE(lock->owner);
+ if (owner)
+ retval = owner->on_cpu;
rcu_read_unlock();
/*
* if lock->owner is not set, the mutex owner may have just acquired
@@ -408,7 +410,7 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct lockdep_map *nest_lock, unsigned long ip,
- struct ww_acquire_ctx *ww_ctx)
+ struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct task_struct *task = current;
struct mutex_waiter waiter;
@@ -448,7 +450,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
struct task_struct *owner;
struct mspin_node node;
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* performed the optimistic spinning cannot be done.
*/
if (ACCESS_ONCE(ww->ctx))
- break;
+ goto slowpath;
}
/*
@@ -472,13 +474,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
owner = ACCESS_ONCE(lock->owner);
if (owner && !mutex_spin_on_owner(lock, owner)) {
mspin_unlock(MLOCK(lock), &node);
- break;
+ goto slowpath;
}
if ((atomic_read(&lock->count) == 1) &&
(atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
lock_acquired(&lock->dep_map, ip);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
+ if (use_ww_ctx) {
struct ww_mutex *ww;
ww = container_of(lock, struct ww_mutex, base);
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* the owner complete.
*/
if (!owner && (need_resched() || rt_task(task)))
- break;
+ goto slowpath;
/*
* The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
#endif
spin_lock_mutex(&lock->wait_lock, flags);
+ /* once more, can we acquire the lock? */
+ if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+ goto skip_wait;
+
debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
@@ -520,9 +526,6 @@ slowpath:
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
- if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
- goto done;
-
lock_contended(&lock->dep_map, ip);
for (;;) {
@@ -536,7 +539,7 @@ slowpath:
* other waiters:
*/
if (MUTEX_SHOW_NO_WAITER(lock) &&
- (atomic_xchg(&lock->count, -1) == 1))
+ (atomic_xchg(&lock->count, -1) == 1))
break;
/*
@@ -548,7 +551,7 @@ slowpath:
goto err;
}
- if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
+ if (use_ww_ctx && ww_ctx->acquired > 0) {
ret = __mutex_lock_check_stamp(lock, ww_ctx);
if (ret)
goto err;
@@ -561,24 +564,25 @@ slowpath:
schedule_preempt_disabled();
spin_lock_mutex(&lock->wait_lock, flags);
}
+ mutex_remove_waiter(lock, &waiter, current_thread_info());
+ /* set it to 0 if there are no waiters left: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+ debug_mutex_free_waiter(&waiter);
-done:
+skip_wait:
+ /* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
- /* got the lock - rejoice! */
- mutex_remove_waiter(lock, &waiter, current_thread_info());
mutex_set_owner(lock);
- if (!__builtin_constant_p(ww_ctx == NULL)) {
- struct ww_mutex *ww = container_of(lock,
- struct ww_mutex,
- base);
+ if (use_ww_ctx) {
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct mutex_waiter *cur;
/*
* This branch gets optimized out for the common case,
* and is only important for ww_mutex_lock.
*/
-
ww_mutex_lock_acquired(ww, ww_ctx);
ww->ctx = ww_ctx;
@@ -592,15 +596,8 @@ done:
}
}
- /* set it to 0 if there are no waiters left: */
- if (likely(list_empty(&lock->wait_list)))
- atomic_set(&lock->count, 0);
-
spin_unlock_mutex(&lock->wait_lock, flags);
-
- debug_mutex_free_waiter(&waiter);
preempt_enable();
-
return 0;
err:
@@ -618,7 +615,7 @@ mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -628,7 +625,7 @@ _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
might_sleep();
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
- 0, nest, _RET_IP_, NULL);
+ 0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);
@@ -638,7 +635,7 @@ mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_KILLABLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);
@@ -647,7 +644,7 @@ mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
might_sleep();
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
- subclass, NULL, _RET_IP_, NULL);
+ subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -685,7 +682,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -700,7 +697,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
- 0, &ctx->dep_map, _RET_IP_, ctx);
+ 0, &ctx->dep_map, _RET_IP_, ctx, 1);
if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
@@ -812,28 +809,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
struct mutex *lock = container_of(lock_count, struct mutex, count);
__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_KILLABLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, NULL);
+ NULL, _RET_IP_, NULL, 0);
}
static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
static noinline int __sched
@@ -841,7 +838,7 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
struct ww_acquire_ctx *ctx)
{
return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
- NULL, _RET_IP_, ctx);
+ NULL, _RET_IP_, ctx, 1);
}
#endif
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 997cbb951a3b..8e7811086b82 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -126,22 +126,16 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
struct nsproxy *old_ns = tsk->nsproxy;
struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
struct nsproxy *new_ns;
- int err = 0;
- if (!old_ns)
+ if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
+ CLONE_NEWPID | CLONE_NEWNET)))) {
+ get_nsproxy(old_ns);
return 0;
-
- get_nsproxy(old_ns);
-
- if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
- CLONE_NEWPID | CLONE_NEWNET)))
- return 0;
-
- if (!ns_capable(user_ns, CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto out;
}
+ if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+ return -EPERM;
+
/*
* CLONE_NEWIPC must detach from the undolist: after switching
* to a new ipc namespace, the semaphore arrays from the old
@@ -149,22 +143,16 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
* means share undolist with parent, so we must forbid using
* it along with CLONE_NEWIPC.
*/
- if ((flags & CLONE_NEWIPC) && (flags & CLONE_SYSVSEM)) {
- err = -EINVAL;
- goto out;
- }
+ if ((flags & (CLONE_NEWIPC | CLONE_SYSVSEM)) ==
+ (CLONE_NEWIPC | CLONE_SYSVSEM))
+ return -EINVAL;
new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
- if (IS_ERR(new_ns)) {
- err = PTR_ERR(new_ns);
- goto out;
- }
+ if (IS_ERR(new_ns))
+ return PTR_ERR(new_ns);
tsk->nsproxy = new_ns;
-
-out:
- put_nsproxy(old_ns);
- return err;
+ return 0;
}
void free_nsproxy(struct nsproxy *ns)
diff --git a/kernel/padata.c b/kernel/padata.c
index 072f4ee4eb89..07af2c95dcfe 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -846,6 +846,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
if (!pinst_has_cpu(pinst, cpu))
break;
mutex_lock(&pinst->lock);
@@ -857,6 +859,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
if (!pinst_has_cpu(pinst, cpu))
break;
mutex_lock(&pinst->lock);
@@ -865,22 +869,6 @@ static int padata_cpu_callback(struct notifier_block *nfb,
if (err)
return notifier_from_errno(err);
break;
-
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- if (!pinst_has_cpu(pinst, cpu))
- break;
- mutex_lock(&pinst->lock);
- __padata_remove_cpu(pinst, cpu);
- mutex_unlock(&pinst->lock);
-
- case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- if (!pinst_has_cpu(pinst, cpu))
- break;
- mutex_lock(&pinst->lock);
- __padata_add_cpu(pinst, cpu);
- mutex_unlock(&pinst->lock);
}
return NOTIFY_OK;
@@ -1086,18 +1074,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
pinst->flags = 0;
-#ifdef CONFIG_HOTPLUG_CPU
- pinst->cpu_notifier.notifier_call = padata_cpu_callback;
- pinst->cpu_notifier.priority = 0;
- register_hotcpu_notifier(&pinst->cpu_notifier);
-#endif
-
put_online_cpus();
BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
kobject_init(&pinst->kobj, &padata_attr_type);
mutex_init(&pinst->lock);
+#ifdef CONFIG_HOTPLUG_CPU
+ pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+ pinst->cpu_notifier.priority = 0;
+ register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
return pinst;
err_free_masks:
diff --git a/kernel/panic.c b/kernel/panic.c
index 801864600514..b6c482ccc5db 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -123,10 +123,14 @@ void panic(const char *fmt, ...)
*/
smp_send_stop();
- kmsg_dump(KMSG_DUMP_PANIC);
-
+ /*
+ * Run any panic handlers, including those that might need to
+ * add information to the kmsg dump output.
+ */
atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+ kmsg_dump(KMSG_DUMP_PANIC);
+
bust_spinlocks(0);
if (!panic_blink)
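As a hedged illustration of why the reordering above matters (not part of the patch): a module that registers a panic notifier like the sketch below now has its output included in the kmsg dump, because the notifier chain runs before kmsg_dump(). The driver and function names are hypothetical.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int my_panic_notify(struct notifier_block *nb,
			   unsigned long event, void *msg)
{
	/* Printed before kmsg_dump(), so it lands in the dump output. */
	pr_emerg("mydrv: last known device state ...\n");
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_nb = {
	.notifier_call = my_panic_notify,
};

static int __init mydrv_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &my_panic_nb);
	return 0;
}

static void __exit mydrv_exit(void)
{
	atomic_notifier_chain_unregister(&panic_notifier_list, &my_panic_nb);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");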
diff --git a/kernel/params.c b/kernel/params.c
index 440e65d1a544..c00d5b502aa4 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -103,8 +103,8 @@ static int parse_one(char *param,
|| params[i].level > max_level)
return 0;
/* No one handled NULL, so do it here. */
- if (!val && params[i].ops->set != param_set_bool
- && params[i].ops->set != param_set_bint)
+ if (!val &&
+ !(params[i].ops->flags & KERNEL_PARAM_FL_NOARG))
return -EINVAL;
pr_debug("handling %s with %p\n", param,
params[i].ops->set);
@@ -241,7 +241,8 @@ int parse_args(const char *doing,
} \
int param_get_##name(char *buffer, const struct kernel_param *kp) \
{ \
- return sprintf(buffer, format, *((type *)kp->arg)); \
+ return scnprintf(buffer, PAGE_SIZE, format, \
+ *((type *)kp->arg)); \
} \
struct kernel_param_ops param_ops_##name = { \
.set = param_set_##name, \
@@ -252,13 +253,13 @@ int parse_args(const char *doing,
EXPORT_SYMBOL(param_ops_##name)
-STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
-STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol);
-STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol);
-STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul);
+STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtol);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtol);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtol);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
int param_set_charp(const char *val, const struct kernel_param *kp)
{
@@ -285,7 +286,7 @@ EXPORT_SYMBOL(param_set_charp);
int param_get_charp(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "%s", *((char **)kp->arg));
+ return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
}
EXPORT_SYMBOL(param_get_charp);
@@ -320,6 +321,7 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
EXPORT_SYMBOL(param_get_bool);
struct kernel_param_ops param_ops_bool = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bool,
.get = param_get_bool,
};
@@ -370,6 +372,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
EXPORT_SYMBOL(param_set_bint);
struct kernel_param_ops param_ops_bint = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bint,
.get = param_get_int,
};
@@ -827,7 +830,7 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
struct module_version_attribute *vattr =
container_of(mattr, struct module_version_attribute, mattr);
- return sprintf(buf, "%s\n", vattr->version);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
}
extern const struct module_version_attribute *__start___modver[];
@@ -912,7 +915,14 @@ static const struct kset_uevent_ops module_uevent_ops = {
struct kset *module_kset;
int module_sysfs_initialized;
+static void module_kobj_release(struct kobject *kobj)
+{
+ struct module_kobject *mk = to_module_kobject(kobj);
+ complete(mk->kobj_completion);
+}
+
struct kobj_type module_ktype = {
+ .release = module_kobj_release,
.sysfs_ops = &module_sysfs_ops,
};
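For illustration only (the parameter and helper names are hypothetical): the new KERNEL_PARAM_FL_NOARG flag replaces the hard-coded param_set_bool/param_set_bint checks in parse_one(), so any ops that tolerate a missing "=value" can now opt in the same way param_ops_bool does above.

/* Hypothetical custom module parameter that may be passed without a value. */
static int myflag_set(const char *val, const struct kernel_param *kp)
{
	/* val may legitimately be NULL because of KERNEL_PARAM_FL_NOARG. */
	return param_set_bool(val, kp);
}

static struct kernel_param_ops param_ops_myflag = {
	.flags = KERNEL_PARAM_FL_NOARG,
	.set = myflag_set,
	.get = param_get_bool,
};

static bool myflag;
module_param_cb(myflag, &param_ops_myflag, &myflag, 0644);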
diff --git a/kernel/pid.c b/kernel/pid.c
index 66505c1dfc51..9b9a26698144 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -265,6 +265,7 @@ void free_pid(struct pid *pid)
struct pid_namespace *ns = upid->ns;
hlist_del_rcu(&upid->pid_chain);
switch(--ns->nr_hashed) {
+ case 2:
case 1:
/* When all that is left in the pid namespace
* is the reaper wake up the reaper. The reaper
@@ -272,6 +273,11 @@ void free_pid(struct pid *pid)
*/
wake_up_process(ns->child_reaper);
break;
+ case PIDNS_HASH_ADDING:
+ /* Handle a fork failure of the first process */
+ WARN_ON(ns->child_reaper);
+ ns->nr_hashed = 0;
+ /* fall through */
case 0:
schedule_work(&ns->proc_work);
break;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 601bb361c235..42086551a24a 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -329,7 +329,7 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
struct pid_namespace *ancestor, *new = ns;
if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
- !nsown_capable(CAP_SYS_ADMIN))
+ !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
/*
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b26f5f1e773e..0121dab83f43 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -39,7 +39,7 @@ static int resume_delay;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
-int in_suspend __nosavedata;
+__visible int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
@@ -644,22 +644,23 @@ int hibernate(void)
if (error)
goto Exit;
- /* Allocate memory management structures */
- error = create_basic_memory_bitmaps();
- if (error)
- goto Exit;
-
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
error = freeze_processes();
if (error)
- goto Free_bitmaps;
+ goto Exit;
+
+ lock_device_hotplug();
+ /* Allocate memory management structures */
+ error = create_basic_memory_bitmaps();
+ if (error)
+ goto Thaw;
error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
if (error || freezer_test_done)
- goto Thaw;
+ goto Free_bitmaps;
if (in_suspend) {
unsigned int flags = 0;
@@ -682,14 +683,14 @@ int hibernate(void)
pr_debug("PM: Image restored successfully.\n");
}
+ Free_bitmaps:
+ free_basic_memory_bitmaps();
Thaw:
+ unlock_device_hotplug();
thaw_processes();
/* Don't bother checking whether freezer_test_done is true */
freezer_test_done = false;
-
- Free_bitmaps:
- free_basic_memory_bitmaps();
Exit:
pm_notifier_call_chain(PM_POST_HIBERNATION);
pm_restore_console();
@@ -806,21 +807,20 @@ static int software_resume(void)
pm_prepare_console();
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
if (error)
- goto close_finish;
-
- error = create_basic_memory_bitmaps();
- if (error)
- goto close_finish;
+ goto Close_Finish;
pr_debug("PM: Preparing processes for restore.\n");
error = freeze_processes();
- if (error) {
- swsusp_close(FMODE_READ);
- goto Done;
- }
+ if (error)
+ goto Close_Finish;
pr_debug("PM: Loading hibernation image.\n");
+ lock_device_hotplug();
+ error = create_basic_memory_bitmaps();
+ if (error)
+ goto Thaw;
+
error = swsusp_read(&flags);
swsusp_close(FMODE_READ);
if (!error)
@@ -828,9 +828,10 @@ static int software_resume(void)
printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
swsusp_free();
- thaw_processes();
- Done:
free_basic_memory_bitmaps();
+ Thaw:
+ unlock_device_hotplug();
+ thaw_processes();
Finish:
pm_notifier_call_chain(PM_POST_RESTORE);
pm_restore_console();
@@ -840,12 +841,12 @@ static int software_resume(void)
mutex_unlock(&pm_mutex);
pr_debug("PM: Hibernation image not present or could not be loaded.\n");
return error;
-close_finish:
+ Close_Finish:
swsusp_close(FMODE_READ);
goto Finish;
}
-late_initcall(software_resume);
+late_initcall_sync(software_resume);
static const char * const hibernation_modes[] = {
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 349587bb03e1..98c3b34a4cff 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -352,7 +352,7 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
struct mem_extent *ext, *cur, *aux;
zone_start = zone->zone_start_pfn;
- zone_end = zone->zone_start_pfn + zone->spanned_pages;
+ zone_end = zone_end_pfn(zone);
list_for_each_entry(ext, list, hook)
if (zone_start <= ext->end)
@@ -743,7 +743,10 @@ int create_basic_memory_bitmaps(void)
struct memory_bitmap *bm1, *bm2;
int error = 0;
- BUG_ON(forbidden_pages_map || free_pages_map);
+ if (forbidden_pages_map && free_pages_map)
+ return 0;
+ else
+ BUG_ON(forbidden_pages_map || free_pages_map);
bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
if (!bm1)
@@ -884,7 +887,7 @@ static unsigned int count_highmem_pages(void)
continue;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_highmem_page(zone, pfn))
n++;
@@ -948,7 +951,7 @@ static unsigned int count_data_pages(void)
continue;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_page(zone, pfn))
n++;
@@ -1041,7 +1044,7 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
unsigned long max_zone_pfn;
mark_free_pages(zone);
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (page_is_saveable(zone, pfn))
memory_bm_set_bit(orig_bm, pfn);
@@ -1093,7 +1096,7 @@ void swsusp_free(void)
unsigned long pfn, max_zone_pfn;
for_each_populated_zone(zone) {
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn)) {
struct page *page = pfn_to_page(pfn);
@@ -1755,7 +1758,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm)
/* Clear page flags */
for_each_populated_zone(zone) {
- max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+ max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
swsusp_unset_page_free(pfn_to_page(pfn));
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ece04223bb1e..62ee437b5c7e 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -210,6 +210,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_wake;
}
+ ftrace_stop();
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -232,6 +233,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
Enable_cpus:
enable_nonboot_cpus();
+ ftrace_start();
Platform_wake:
if (need_suspend_ops(state) && suspend_ops->wake)
@@ -265,7 +267,6 @@ int suspend_devices_and_enter(suspend_state_t state)
goto Close;
}
suspend_console();
- ftrace_stop();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
@@ -285,7 +286,6 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
- ftrace_start();
resume_console();
Close:
if (need_suspend_ops(state) && suspend_ops->end)
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4ed81e74f86f..957f06164ad1 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -39,6 +39,7 @@ static struct snapshot_data {
char frozen;
char ready;
char platform_support;
+ bool free_bitmaps;
} snapshot_state;
atomic_t snapshot_device_available = ATOMIC_INIT(1);
@@ -60,11 +61,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
error = -ENOSYS;
goto Unlock;
}
- if(create_basic_memory_bitmaps()) {
- atomic_inc(&snapshot_device_available);
- error = -ENOMEM;
- goto Unlock;
- }
nonseekable_open(inode, filp);
data = &snapshot_state;
filp->private_data = data;
@@ -87,13 +83,16 @@ static int snapshot_open(struct inode *inode, struct file *filp)
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+ if (!error) {
+ error = create_basic_memory_bitmaps();
+ data->free_bitmaps = !error;
+ }
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
- if (error) {
- free_basic_memory_bitmaps();
+ if (error)
atomic_inc(&snapshot_device_available);
- }
+
data->frozen = 0;
data->ready = 0;
data->platform_support = 0;
@@ -111,12 +110,14 @@ static int snapshot_release(struct inode *inode, struct file *filp)
lock_system_sleep();
swsusp_free();
- free_basic_memory_bitmaps();
data = filp->private_data;
free_all_swap_pages(data->swap);
if (data->frozen) {
pm_restore_gfp_mask();
+ free_basic_memory_bitmaps();
thaw_processes();
+ } else if (data->free_bitmaps) {
+ free_basic_memory_bitmaps();
}
pm_notifier_call_chain(data->mode == O_RDONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
@@ -207,6 +208,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
+ lock_device_hotplug();
data = filp->private_data;
switch (cmd) {
@@ -220,14 +222,23 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
printk("done.\n");
error = freeze_processes();
- if (!error)
+ if (error)
+ break;
+
+ error = create_basic_memory_bitmaps();
+ if (error)
+ thaw_processes();
+ else
data->frozen = 1;
+
break;
case SNAPSHOT_UNFREEZE:
if (!data->frozen || data->ready)
break;
pm_restore_gfp_mask();
+ free_basic_memory_bitmaps();
+ data->free_bitmaps = false;
thaw_processes();
data->frozen = 0;
break;
@@ -371,6 +382,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
}
+ unlock_device_hotplug();
mutex_unlock(&pm_mutex);
return error;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 5b5a7080e2a5..b4e8500afdb3 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2226,6 +2226,13 @@ void register_console(struct console *newcon)
struct console *bcon = NULL;
struct console_cmdline *c;
+ if (console_drivers)
+ for_each_console(bcon)
+ if (WARN(bcon == newcon,
+ "console '%s%d' already registered\n",
+ bcon->name, bcon->index))
+ return;
+
/*
* before we register a new CON_BOOT console, make sure we don't
* already have a valid console
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a146ee327f6a..dd562e9aa2c8 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -236,7 +236,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
*/
int dumpable = 0;
/* Don't let security modules deny introspection */
- if (task == current)
+ if (same_thread_group(task, current))
return 0;
rcu_read_lock();
tcred = __task_cred(task);
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 7f8e7590e3e5..77131966c4ad 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -67,12 +67,15 @@
extern struct debug_obj_descr rcuhead_debug_descr;
-static inline void debug_rcu_head_queue(struct rcu_head *head)
+static inline int debug_rcu_head_queue(struct rcu_head *head)
{
- debug_object_activate(head, &rcuhead_debug_descr);
+ int r1;
+
+ r1 = debug_object_activate(head, &rcuhead_debug_descr);
debug_object_active_state(head, &rcuhead_debug_descr,
STATE_RCU_HEAD_READY,
STATE_RCU_HEAD_QUEUED);
+ return r1;
}
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
@@ -83,8 +86,9 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void debug_rcu_head_queue(struct rcu_head *head)
+static inline int debug_rcu_head_queue(struct rcu_head *head)
{
+ return 0;
}
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
@@ -94,7 +98,7 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
extern void kfree(const void *);
-static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
+static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
unsigned long offset = (unsigned long)head->func;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cce6ba8bbace..b02a339836b4 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,7 +122,7 @@ struct lockdep_map rcu_sched_lock_map =
STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
-int debug_lockdep_rcu_enabled(void)
+int notrace debug_lockdep_rcu_enabled(void)
{
return rcu_scheduler_active && debug_locks &&
current->lockdep_recursion == 0;
@@ -212,43 +212,6 @@ static inline void debug_rcu_head_free(struct rcu_head *head)
}
/*
- * fixup_init is called when:
- * - an active object is initialized
- */
-static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
-{
- struct rcu_head *head = addr;
-
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_init(head, &rcuhead_debug_descr);
- return 1;
- default:
- return 0;
- }
-}
-
-/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown object is activated (might be a statically initialized object)
@@ -268,69 +231,8 @@ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
debug_object_init(head, &rcuhead_debug_descr);
debug_object_activate(head, &rcuhead_debug_descr);
return 0;
-
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_activate(head, &rcuhead_debug_descr);
- return 1;
default:
- return 0;
- }
-}
-
-/*
- * fixup_free is called when:
- * - an active object is freed
- */
-static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
-{
- struct rcu_head *head = addr;
-
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_free(head, &rcuhead_debug_descr);
return 1;
- default:
- return 0;
}
}
@@ -369,15 +271,13 @@ EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
struct debug_obj_descr rcuhead_debug_descr = {
.name = "rcu_head",
- .fixup_init = rcuhead_fixup_init,
.fixup_activate = rcuhead_fixup_activate,
- .fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
-void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
+void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old, unsigned long c)
{
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index aa344111de3e..9ed6075dc562 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -264,7 +264,7 @@ void rcu_check_callbacks(int cpu, int user)
*/
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
- char *rn = NULL;
+ const char *rn = NULL;
struct rcu_head *next, *list;
unsigned long flags;
RCU_TRACE(int cb_count = 0);
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 0cd385acccfa..280d06cae352 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -36,7 +36,7 @@ struct rcu_ctrlblk {
RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
- RCU_TRACE(char *name); /* Name of RCU type. */
+ RCU_TRACE(const char *name); /* Name of RCU type. */
};
/* Definition for rcupdate control block. */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index f4871e52c546..be63101c6175 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -52,72 +52,78 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
-static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
-static int nfakewriters = 4; /* # fake writer threads */
-static int stat_interval = 60; /* Interval between stats, in seconds. */
- /* Zero means "only at end of test". */
-static bool verbose; /* Print more debug info. */
-static bool test_no_idle_hz = true;
- /* Test RCU support for tickless idle CPUs. */
-static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
-static int stutter = 5; /* Start/stop testing interval (in sec) */
-static int irqreader = 1; /* RCU readers from irq (timers). */
-static int fqs_duration; /* Duration of bursts (us), 0 to disable. */
-static int fqs_holdoff; /* Hold time within burst (us). */
-static int fqs_stutter = 3; /* Wait time between bursts (s). */
-static int n_barrier_cbs; /* Number of callbacks to test RCU barriers. */
-static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */
-static int onoff_holdoff; /* Seconds after boot before CPU hotplugs. */
-static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */
-static int stall_cpu; /* CPU-stall duration (s). 0 for no stall. */
-static int stall_cpu_holdoff = 10; /* Time to wait until stall (s). */
-static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
-static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
-static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
-static char *torture_type = "rcu"; /* What RCU implementation to torture. */
-
-module_param(nreaders, int, 0444);
-MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0444);
-MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0644);
-MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0444);
-MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0444);
-MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0444);
-MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(stutter, int, 0444);
-MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
-module_param(irqreader, int, 0444);
-MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+static int fqs_duration;
module_param(fqs_duration, int, 0444);
-MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
+MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
+static int fqs_holdoff;
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
+static int fqs_stutter = 3;
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+static bool gp_exp;
+module_param(gp_exp, bool, 0444);
+MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
+static bool gp_normal;
+module_param(gp_normal, bool, 0444);
+MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
+static int irqreader = 1;
+module_param(irqreader, int, 0444);
+MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+static int n_barrier_cbs;
module_param(n_barrier_cbs, int, 0444);
MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
-module_param(onoff_interval, int, 0444);
-MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+static int nfakewriters = 4;
+module_param(nfakewriters, int, 0444);
+MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
+static int nreaders = -1;
+module_param(nreaders, int, 0444);
+MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
+static int object_debug;
+module_param(object_debug, int, 0444);
+MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
+static int onoff_holdoff;
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
+static int onoff_interval;
+module_param(onoff_interval, int, 0444);
+MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+static int shuffle_interval = 3;
+module_param(shuffle_interval, int, 0444);
+MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
+static int shutdown_secs;
module_param(shutdown_secs, int, 0444);
-MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
+MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
+static int stall_cpu;
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
+static int stall_cpu_holdoff = 10;
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
+static int stat_interval = 60;
+module_param(stat_interval, int, 0644);
+MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
+static int stutter = 5;
+module_param(stutter, int, 0444);
+MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
+static int test_boost = 1;
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
-module_param(test_boost_interval, int, 0444);
-MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
+static int test_boost_duration = 4;
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
+static int test_boost_interval = 7;
+module_param(test_boost_interval, int, 0444);
+MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
+static bool test_no_idle_hz = true;
+module_param(test_no_idle_hz, bool, 0444);
+MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
+static bool verbose;
+module_param(verbose, bool, 0444);
+MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
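As a usage sketch only (the parameter names come from the declarations above; the exact command line is assumed rather than taken from the patch), an expedited-grace-period run against the rcu_bh flavor could be started with something like:

	modprobe rcutorture torture_type=rcu_bh gp_exp=1 verbose=1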
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
@@ -267,7 +273,7 @@ rcutorture_shutdown_notify(struct notifier_block *unused1,
* Absorb kthreads into a kernel function that won't return, so that
* they won't ever access module text or data again.
*/
-static void rcutorture_shutdown_absorb(char *title)
+static void rcutorture_shutdown_absorb(const char *title)
{
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
pr_notice(
@@ -337,7 +343,7 @@ rcu_random(struct rcu_random_state *rrsp)
}
static void
-rcu_stutter_wait(char *title)
+rcu_stutter_wait(const char *title)
{
while (stutter_pause_test || !rcutorture_runnable) {
if (rcutorture_runnable)
@@ -360,13 +366,14 @@ struct rcu_torture_ops {
int (*completed)(void);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
+ void (*exp_sync)(void);
void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
void (*cb_barrier)(void);
void (*fqs)(void);
int (*stats)(char *page);
int irq_capable;
int can_boost;
- char *name;
+ const char *name;
};
static struct rcu_torture_ops *cur_ops;
@@ -443,81 +450,27 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
-static struct rcu_torture_ops rcu_ops = {
- .init = NULL,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferred_free = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .call = call_rcu,
- .cb_barrier = rcu_barrier,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .name = "rcu"
-};
-
-static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
-{
- int i;
- struct rcu_torture *rp;
- struct rcu_torture *rp1;
-
- cur_ops->sync();
- list_add(&p->rtort_free, &rcu_torture_removed);
- list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- list_del(&rp->rtort_free);
- rcu_torture_free(rp);
- }
- }
-}
-
static void rcu_sync_torture_init(void)
{
INIT_LIST_HEAD(&rcu_torture_removed);
}
-static struct rcu_torture_ops rcu_sync_ops = {
+static struct rcu_torture_ops rcu_ops = {
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.completed = rcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
+ .deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .name = "rcu_sync"
-};
-
-static struct rcu_torture_ops rcu_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_expedited,
- .call = NULL,
- .cb_barrier = NULL,
+ .exp_sync = synchronize_rcu_expedited,
+ .call = call_rcu,
+ .cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
- .name = "rcu_expedited"
+ .name = "rcu"
};
/*
@@ -546,13 +499,14 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
}
static struct rcu_torture_ops rcu_bh_ops = {
- .init = NULL,
+ .init = rcu_sync_torture_init,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
+ .exp_sync = synchronize_rcu_bh_expedited,
.call = call_rcu_bh,
.cb_barrier = rcu_barrier_bh,
.fqs = rcu_bh_force_quiescent_state,
@@ -561,38 +515,6 @@ static struct rcu_torture_ops rcu_bh_ops = {
.name = "rcu_bh"
};
-static struct rcu_torture_ops rcu_bh_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_bh,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "rcu_bh_sync"
-};
-
-static struct rcu_torture_ops rcu_bh_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_bh_expedited,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "rcu_bh_expedited"
-};
-
/*
* Definitions for srcu torture testing.
*/
@@ -667,6 +589,11 @@ static int srcu_torture_stats(char *page)
return cnt;
}
+static void srcu_torture_synchronize_expedited(void)
+{
+ synchronize_srcu_expedited(&srcu_ctl);
+}
+
static struct rcu_torture_ops srcu_ops = {
.init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock,
@@ -675,45 +602,13 @@ static struct rcu_torture_ops srcu_ops = {
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
+ .exp_sync = srcu_torture_synchronize_expedited,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
.name = "srcu"
};
-static struct rcu_torture_ops srcu_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = srcu_torture_read_lock,
- .read_delay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .call = NULL,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu_sync"
-};
-
-static void srcu_torture_synchronize_expedited(void)
-{
- synchronize_srcu_expedited(&srcu_ctl);
-}
-
-static struct rcu_torture_ops srcu_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = srcu_torture_read_lock,
- .read_delay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize_expedited,
- .call = NULL,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu_expedited"
-};
-
/*
* Definitions for sched torture testing.
*/
@@ -742,6 +637,8 @@ static struct rcu_torture_ops sched_ops = {
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
+ .exp_sync = synchronize_sched_expedited,
+ .call = call_rcu_sched,
.cb_barrier = rcu_barrier_sched,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -749,35 +646,6 @@ static struct rcu_torture_ops sched_ops = {
.name = "sched"
};
-static struct rcu_torture_ops sched_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_sched,
- .cb_barrier = NULL,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .name = "sched_sync"
-};
-
-static struct rcu_torture_ops sched_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_sched_expedited,
- .cb_barrier = NULL,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "sched_expedited"
-};
-
/*
* RCU torture priority-boost testing. Runs one real-time thread per
* CPU for moderate bursts, repeatedly registering RCU callbacks and
@@ -927,9 +795,10 @@ rcu_torture_fqs(void *arg)
static int
rcu_torture_writer(void *arg)
{
+ bool exp;
int i;
- long oldbatch = rcu_batches_completed();
struct rcu_torture *rp;
+ struct rcu_torture *rp1;
struct rcu_torture *old_rp;
static DEFINE_RCU_RANDOM(rand);
@@ -954,10 +823,33 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- cur_ops->deferred_free(old_rp);
+ if (gp_normal == gp_exp)
+ exp = !!(rcu_random(&rand) & 0x80);
+ else
+ exp = gp_exp;
+ if (!exp) {
+ cur_ops->deferred_free(old_rp);
+ } else {
+ cur_ops->exp_sync();
+ list_add(&old_rp->rtort_free,
+ &rcu_torture_removed);
+ list_for_each_entry_safe(rp, rp1,
+ &rcu_torture_removed,
+ rtort_free) {
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >=
+ RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+ rcu_torture_free(rp);
+ }
+ }
+ }
}
rcutorture_record_progress(++rcu_torture_current_version);
- oldbatch = cur_ops->completed();
rcu_stutter_wait("rcu_torture_writer");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
@@ -983,10 +875,18 @@ rcu_torture_fakewriter(void *arg)
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
if (cur_ops->cb_barrier != NULL &&
- rcu_random(&rand) % (nfakewriters * 8) == 0)
+ rcu_random(&rand) % (nfakewriters * 8) == 0) {
cur_ops->cb_barrier();
- else
+ } else if (gp_normal == gp_exp) {
+ if (rcu_random(&rand) & 0x80)
+ cur_ops->sync();
+ else
+ cur_ops->exp_sync();
+ } else if (gp_normal) {
cur_ops->sync();
+ } else {
+ cur_ops->exp_sync();
+ }
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
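Reading directly from the writer and fake-writer logic above (not from any separate documentation), the two new parameters combine as follows: setting only gp_exp forces the expedited primitives, setting only gp_normal forces the normal callback/synchronize paths, and when the two are equal (both left at their default or both set) each update flips a pseudo-random coin between the two.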
@@ -1364,7 +1264,7 @@ rcu_torture_stutter(void *arg)
}
static inline void
-rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
+rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s: nreaders=%d nfakewriters=%d "
@@ -1534,7 +1434,13 @@ rcu_torture_onoff(void *arg)
torture_type, cpu);
starttime = jiffies;
n_online_attempts++;
- if (cpu_up(cpu) == 0) {
+ ret = cpu_up(cpu);
+ if (ret) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "rcu_torture_onoff task: online %d failed: errno %d\n",
+ torture_type, cpu, ret);
+ } else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: onlined %d\n",
@@ -1934,6 +1840,62 @@ rcu_torture_cleanup(void)
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+static void rcu_torture_leak_cb(struct rcu_head *rhp)
+{
+}
+
+static void rcu_torture_err_cb(struct rcu_head *rhp)
+{
+ /*
+ * This -might- happen due to race conditions, but is unlikely.
+ * The scenario that leads to this happening is that the
+ * first of the pair of duplicate callbacks is queued,
+ * someone else starts a grace period that includes that
+ * callback, then the second of the pair must wait for the
+ * next grace period. Unlikely, but can happen. If it
+ * does happen, the debug-objects subsystem won't have splatted.
+ */
+ pr_alert("rcutorture: duplicated callback was invoked.\n");
+}
+#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+/*
+ * Verify that double-free causes debug-objects to complain, but only
+ * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
+ * cannot be carried out.
+ */
+static void rcu_test_debug_objects(void)
+{
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+ struct rcu_head rh1;
+ struct rcu_head rh2;
+
+ init_rcu_head_on_stack(&rh1);
+ init_rcu_head_on_stack(&rh2);
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
+
+ /* Try to queue the rh2 pair of callbacks for the same grace period. */
+ preempt_disable(); /* Prevent preemption from interrupting test. */
+ rcu_read_lock(); /* Make it impossible to finish a grace period. */
+ call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
+ local_irq_disable(); /* Make it harder to start a new grace period. */
+ call_rcu(&rh2, rcu_torture_leak_cb);
+ call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ local_irq_enable();
+ rcu_read_unlock();
+ preempt_enable();
+
+ /* Wait for them all to get done so we can safely return. */
+ rcu_barrier();
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
+ destroy_rcu_head_on_stack(&rh1);
+ destroy_rcu_head_on_stack(&rh2);
+#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+ pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
+#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+}
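Assuming a kernel configured with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y, the check above can be exercised through the object_debug module parameter introduced earlier, for example (sketch only):

	modprobe rcutorture object_debug=1

With debug objects disabled, the function only prints the "not testing" notice instead.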
+
static int __init
rcu_torture_init(void)
{
@@ -1941,11 +1903,9 @@ rcu_torture_init(void)
int cpu;
int firsterr = 0;
int retval;
- static struct rcu_torture_ops *torture_ops[] =
- { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
- &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
- &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
+ static struct rcu_torture_ops *torture_ops[] = {
+ &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+ };
mutex_lock(&fullstop_mutex);
@@ -2163,6 +2123,8 @@ rcu_torture_init(void)
firsterr = retval;
goto unwind;
}
+ if (object_debug)
+ rcu_test_debug_objects();
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
return 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 068de3a93606..32618b3fe4e6 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -53,18 +53,38 @@
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
+#include <linux/ftrace_event.h>
+#include <linux/suspend.h>
#include "rcutree.h"
#include <trace/events/rcu.h>
#include "rcu.h"
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)
+
/* Data structures. */
static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-#define RCU_STATE_INITIALIZER(sname, sabbr, cr) { \
+/*
+ * In order to export the rcu_state name to the tracing tools, it
+ * needs to be added in the __tracepoint_string section.
+ * This requires defining a separate variable tp_<sname>_varname
+ * that points to the string being used, and this will allow
+ * the tracing userspace tools to be able to decipher the string
+ * address to the matching string.
+ */
+#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
+static char sname##_varname[] = #sname; \
+static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
+struct rcu_state sname##_state = { \
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
@@ -75,16 +95,13 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
- .name = #sname, \
+ .name = sname##_varname, \
.abbr = sabbr, \
-}
-
-struct rcu_state rcu_sched_state =
- RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
-DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
+}; \
+DEFINE_PER_CPU(struct rcu_data, sname##_data)
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
+RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);
@@ -178,7 +195,7 @@ void rcu_sched_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
@@ -187,7 +204,7 @@ void rcu_bh_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
@@ -198,16 +215,20 @@ void rcu_bh_qs(int cpu)
*/
void rcu_note_context_switch(int cpu)
{
- trace_rcu_utilization("Start context switch");
+ trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
- trace_rcu_utilization("End context switch");
+ trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+ .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};
static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
@@ -226,7 +247,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_qs_rnp(struct rcu_state *rsp,
+ int (*f)(struct rcu_data *rsp, bool *isidle,
+ unsigned long *maxj),
+ bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);
@@ -345,11 +369,11 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
{
- trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
- trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+ trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
@@ -411,6 +435,7 @@ void rcu_idle_enter(void)
local_irq_save(flags);
rcu_eqs_enter(false);
+ rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -428,27 +453,6 @@ void rcu_user_enter(void)
{
rcu_eqs_enter(1);
}
-
-/**
- * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
- * after the current irq returns.
- *
- * This is similar to rcu_user_enter() but in the context of a non-nesting
- * irq. After this call, RCU enters into idle mode when the interrupt
- * returns.
- */
-void rcu_user_enter_after_irq(void)
-{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- /* Ensure this irq is interrupting a non-idle RCU state. */
- WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
- rdtp->dynticks_nesting = 1;
- local_irq_restore(flags);
-}
#endif /* CONFIG_RCU_USER_QS */
/**
@@ -479,9 +483,10 @@ void rcu_irq_exit(void)
rdtp->dynticks_nesting--;
WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
if (rdtp->dynticks_nesting)
- trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_enter_common(rdtp, oldval, true);
+ rcu_sysidle_enter(rdtp, 1);
local_irq_restore(flags);
}
@@ -501,11 +506,11 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
- trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
- trace_rcu_dyntick("Error on exit: not idle task",
+ trace_rcu_dyntick(TPS("Error on exit: not idle task"),
oldval, rdtp->dynticks_nesting);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -550,6 +555,7 @@ void rcu_idle_exit(void)
local_irq_save(flags);
rcu_eqs_exit(false);
+ rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -565,28 +571,6 @@ void rcu_user_exit(void)
{
rcu_eqs_exit(1);
}
-
-/**
- * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
- * idle mode after the current non-nesting irq returns.
- *
- * This is similar to rcu_user_exit() but in the context of an irq.
- * This is called when the irq has interrupted a userspace RCU idle mode
- * context. When the current non-nesting interrupt returns after this call,
- * the CPU won't restore the RCU idle mode.
- */
-void rcu_user_exit_after_irq(void)
-{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- /* Ensure we are interrupting an RCU idle mode. */
- WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
- rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
- local_irq_restore(flags);
-}
#endif /* CONFIG_RCU_USER_QS */
/**
@@ -620,9 +604,10 @@ void rcu_irq_enter(void)
rdtp->dynticks_nesting++;
WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
if (oldval)
- trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_exit_common(rdtp, oldval, true);
+ rcu_sysidle_exit(rdtp, 1);
local_irq_restore(flags);
}
@@ -746,9 +731,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
*/
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int dyntick_save_progress_counter(struct rcu_data *rdp,
+ bool *isidle, unsigned long *maxj)
{
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ rcu_sysidle_check_cpu(rdp, isidle, maxj);
return (rdp->dynticks_snap & 0x1) == 0;
}
@@ -758,7 +745,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
* idle state since the last call to dyntick_save_progress_counter()
* for this same CPU, or by virtue of having been offline.
*/
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+ bool *isidle, unsigned long *maxj)
{
unsigned int curr;
unsigned int snap;
@@ -775,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* of the current RCU grace period.
*/
if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
rdp->dynticks_fqs++;
return 1;
}
@@ -795,7 +783,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
return 0; /* Grace period is not old enough. */
barrier();
if (cpu_is_offline(rdp->cpu)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
rdp->offline_fqs++;
return 1;
}
@@ -1032,7 +1020,7 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
* rcu_nocb_wait_gp().
*/
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c, char *s)
+ unsigned long c, const char *s)
{
trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
rnp->completed, c, rnp->level,
@@ -1058,9 +1046,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
* grace period is already marked as needed, return to the caller.
*/
c = rcu_cbs_completed(rdp->rsp, rnp);
- trace_rcu_future_gp(rnp, rdp, c, "Startleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
if (rnp->need_future_gp[c & 0x1]) {
- trace_rcu_future_gp(rnp, rdp, c, "Prestartleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
return c;
}
@@ -1074,7 +1062,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
if (rnp->gpnum != rnp->completed ||
ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
rnp->need_future_gp[c & 0x1]++;
- trace_rcu_future_gp(rnp, rdp, c, "Startedleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
return c;
}
@@ -1102,7 +1090,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
* recorded, trace and leave.
*/
if (rnp_root->need_future_gp[c & 0x1]) {
- trace_rcu_future_gp(rnp, rdp, c, "Prestartedroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
goto unlock_out;
}
@@ -1111,9 +1099,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
/* If a grace period is not already in progress, start one. */
if (rnp_root->gpnum != rnp_root->completed) {
- trace_rcu_future_gp(rnp, rdp, c, "Startedleafroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
} else {
- trace_rcu_future_gp(rnp, rdp, c, "Startedroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
}
unlock_out:
@@ -1137,7 +1125,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
rcu_nocb_gp_cleanup(rsp, rnp);
rnp->need_future_gp[c & 0x1] = 0;
needmore = rnp->need_future_gp[(c + 1) & 0x1];
- trace_rcu_future_gp(rnp, rdp, c, needmore ? "CleanupMore" : "Cleanup");
+ trace_rcu_future_gp(rnp, rdp, c,
+ needmore ? TPS("CleanupMore") : TPS("Cleanup"));
return needmore;
}
@@ -1205,9 +1194,9 @@ static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
/* Trace depending on how much we were able to accelerate. */
if (!*rdp->nxttail[RCU_WAIT_TAIL])
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
else
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
}
/*
@@ -1273,7 +1262,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
/* Remember that we saw this grace-period completion. */
rdp->completed = rnp->completed;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
}
if (rdp->gpnum != rnp->gpnum) {
@@ -1283,7 +1272,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
* go looking for one.
*/
rdp->gpnum = rnp->gpnum;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
rdp->passed_quiesce = 0;
rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
zero_cpu_stall_ticks(rdp);
@@ -1315,6 +1304,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
+ rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
rsp->gp_flags = 0; /* Clear all flags: New grace period. */
@@ -1326,7 +1316,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
- trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
+ trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
record_gp_stall_check_time(rsp);
raw_spin_unlock_irq(&rnp->lock);
@@ -1379,16 +1369,25 @@ static int rcu_gp_init(struct rcu_state *rsp)
int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
int fqs_state = fqs_state_in;
+ bool isidle = false;
+ unsigned long maxj;
struct rcu_node *rnp = rcu_get_root(rsp);
rsp->n_force_qs++;
if (fqs_state == RCU_SAVE_DYNTICK) {
/* Collect dyntick-idle snapshots. */
- force_qs_rnp(rsp, dyntick_save_progress_counter);
+ if (is_sysidle_rcu_state(rsp)) {
+ isidle = 1;
+ maxj = jiffies - ULONG_MAX / 4;
+ }
+ force_qs_rnp(rsp, dyntick_save_progress_counter,
+ &isidle, &maxj);
+ rcu_sysidle_report_gp(rsp, isidle, maxj);
fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
- force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+ isidle = 0;
+ force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
}
/* Clear flag to prevent immediate re-entry. */
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -1448,7 +1447,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rcu_nocb_gp_set(rnp, nocb);
rsp->completed = rsp->gpnum; /* Declare grace period done. */
- trace_rcu_grace_period(rsp->name, rsp->completed, "end");
+ trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
rsp->fqs_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
@@ -1558,10 +1557,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
/*
* We can't do wakeups while holding the rnp->lock, as that
- * could cause possible deadlocks with the rq->lock. Deter
- * the wakeup to interrupt context.
+ * could cause possible deadlocks with the rq->lock. Defer
+ * the wakeup to interrupt context. And don't bother waking
+ * up the running kthread.
*/
- irq_work_queue(&rsp->wakeup_work);
+ if (current != rsp->gp_kthread)
+ irq_work_queue(&rsp->wakeup_work);
}
/*
@@ -1857,7 +1858,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
RCU_TRACE(mask = rdp->grpmask);
trace_rcu_grace_period(rsp->name,
rnp->gpnum + 1 - !!(rnp->qsmask & mask),
- "cpuofl");
+ TPS("cpuofl"));
}
/*
@@ -2044,7 +2045,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
*/
void rcu_check_callbacks(int cpu, int user)
{
- trace_rcu_utilization("Start scheduler-tick");
+ trace_rcu_utilization(TPS("Start scheduler-tick"));
increment_cpu_stall_ticks();
if (user || rcu_is_cpu_rrupt_from_idle()) {
@@ -2077,7 +2078,7 @@ void rcu_check_callbacks(int cpu, int user)
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_core();
- trace_rcu_utilization("End scheduler-tick");
+ trace_rcu_utilization(TPS("End scheduler-tick"));
}
/*
@@ -2087,7 +2088,10 @@ void rcu_check_callbacks(int cpu, int user)
*
* The caller must have suppressed start of new grace periods.
*/
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
+static void force_qs_rnp(struct rcu_state *rsp,
+ int (*f)(struct rcu_data *rsp, bool *isidle,
+ unsigned long *maxj),
+ bool *isidle, unsigned long *maxj)
{
unsigned long bit;
int cpu;
@@ -2110,9 +2114,12 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
- if ((rnp->qsmask & bit) != 0 &&
- f(per_cpu_ptr(rsp->rda, cpu)))
- mask |= bit;
+ if ((rnp->qsmask & bit) != 0) {
+ if ((rnp->qsmaskinit & bit) != 0)
+ *isidle = 0;
+ if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
+ mask |= bit;
+ }
}
if (mask != 0) {
@@ -2208,10 +2215,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
if (cpu_is_offline(smp_processor_id()))
return;
- trace_rcu_utilization("Start RCU core");
+ trace_rcu_utilization(TPS("Start RCU core"));
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
- trace_rcu_utilization("End RCU core");
+ trace_rcu_utilization(TPS("End RCU core"));
}
/*
@@ -2287,6 +2294,13 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
}
/*
+ * RCU callback function to leak a callback.
+ */
+static void rcu_leak_callback(struct rcu_head *rhp)
+{
+}
+
+/*
* Helper function for call_rcu() and friends. The cpu argument will
* normally be -1, indicating "currently running CPU". It may specify
* a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
@@ -2300,7 +2314,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_data *rdp;
WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
- debug_rcu_head_queue(head);
+ if (debug_rcu_head_queue(head)) {
+ /* Probable double call_rcu(), so leak the callback. */
+ ACCESS_ONCE(head->func) = rcu_leak_callback;
+ WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
+ return;
+ }
head->func = func;
head->next = NULL;
@@ -2720,7 +2739,7 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
* Helper function for _rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
-static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
int cpu, unsigned long done)
{
trace_rcu_barrier(rsp->name, s, cpu,
@@ -2785,9 +2804,20 @@ static void _rcu_barrier(struct rcu_state *rsp)
* transition. The "if" expression below therefore rounds the old
* value up to the next even number and adds two before comparing.
*/
- snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+ snap_done = rsp->n_barrier_done;
_rcu_barrier_trace(rsp, "Check", -1, snap_done);
- if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+
+ /*
+ * If the value in snap is odd, we needed to wait for the current
+ * rcu_barrier() to complete, then wait for the next one, in other
+ * words, we need the value of snap_done to be three larger than
+ * the value of snap. On the other hand, if the value in snap is
+ * even, we only had to wait for the next rcu_barrier() to complete,
+ * in other words, we need the value of snap_done to be only two
+ * greater than the value of snap. The "(snap + 3) & ~0x1" computes
+ * this for us (thank you, Linus!).
+ */
+ if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rsp->barrier_mutex);
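To make the rounding rule in the comment above concrete, here is a tiny standalone check (illustrative only, not part of the patch; the snapshot values are invented for the example):

	#include <assert.h>

	int main(void)
	{
		unsigned long snap;

		/* Odd snapshot: a barrier is in flight, so the waiter needs
		 * the current barrier plus the next one, i.e. snap + 3. */
		snap = 5;
		assert(((snap + 3) & ~0x1UL) == snap + 3);	/* target 8 */

		/* Even snapshot: only the next barrier is needed, snap + 2. */
		snap = 4;
		assert(((snap + 3) & ~0x1UL) == snap + 2);	/* target 6 */

		return 0;
	}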
@@ -2930,6 +2960,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->blimit = blimit;
init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+ rcu_sysidle_init_percpu_data(rdp->dynticks);
atomic_set(&rdp->dynticks->dynticks,
(atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -2952,7 +2983,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->completed = rnp->completed;
rdp->passed_quiesce = 0;
rdp->qs_pending = 0;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
}
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
@@ -2982,7 +3013,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
- trace_rcu_utilization("Start CPU hotplug");
+ trace_rcu_utilization(TPS("Start CPU hotplug"));
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
@@ -3011,7 +3042,26 @@ static int rcu_cpu_notify(struct notifier_block *self,
default:
break;
}
- trace_rcu_utilization("End CPU hotplug");
+ trace_rcu_utilization(TPS("End CPU hotplug"));
+ return NOTIFY_OK;
+}
+
+static int rcu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+ rcu_expedited = 1;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ rcu_expedited = 0;
+ break;
+ default:
+ break;
+ }
return NOTIFY_OK;
}
@@ -3256,6 +3306,7 @@ void __init rcu_init(void)
* or the scheduler are operational.
*/
cpu_notifier(rcu_cpu_notify, 0);
+ pm_notifier(rcu_pm_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
}
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b3832581043c..5f97eab602cd 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,6 +88,14 @@ struct rcu_dynticks {
/* Process level is worth LLONG_MAX/2. */
int dynticks_nmi_nesting; /* Track NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ long long dynticks_idle_nesting;
+ /* irq/process nesting level from idle. */
+ atomic_t dynticks_idle; /* Even value for idle, else odd. */
+ /* "Idle" excludes userspace execution. */
+ unsigned long dynticks_idle_jiffies;
+ /* End of last non-NMI non-idle period. */
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
bool all_lazy; /* Are all CPU's CBs lazy? */
unsigned long nonlazy_posted;
@@ -445,7 +453,7 @@ struct rcu_state {
/* for CPU stalls. */
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
- char *name; /* Name of structure. */
+ const char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
struct list_head flavors; /* List of RCU flavors. */
struct irq_work wakeup_work; /* Postponed wakeups */
@@ -545,6 +553,15 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
static void rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj);
+static bool is_sysidle_rcu_state(struct rcu_state *rsp);
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj);
+static void rcu_bind_gp_kthread(void);
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 769e12e3151b..130c97b027f2 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -28,7 +28,7 @@
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
-#include <linux/tick.h>
+#include "time/tick-internal.h"
#define RCU_KTHREAD_PRIO 1
@@ -110,9 +110,7 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_TREE_PREEMPT_RCU
-struct rcu_state rcu_preempt_state =
- RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
+RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state = &rcu_preempt_state;
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
@@ -169,7 +167,7 @@ static void rcu_preempt_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
@@ -388,7 +386,7 @@ void rcu_read_unlock_special(struct task_struct *t)
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
t->rcu_blocked_node = NULL;
- trace_rcu_unlock_preempted_task("rcu_preempt",
+ trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
rnp->gpnum, t->pid);
if (&t->rcu_node_entry == rnp->gp_tasks)
rnp->gp_tasks = np;
@@ -412,7 +410,7 @@ void rcu_read_unlock_special(struct task_struct *t)
*/
empty_exp_now = !rcu_preempted_readers_exp(rnp);
if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
- trace_rcu_quiescent_state_report("preempt_rcu",
+ trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
rnp->gpnum,
0, rnp->qsmask,
rnp->level,
@@ -1250,12 +1248,12 @@ static int rcu_boost_kthread(void *arg)
int spincnt = 0;
int more2boost;
- trace_rcu_utilization("Start boost kthread@init");
+ trace_rcu_utilization(TPS("Start boost kthread@init"));
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
- trace_rcu_utilization("End boost kthread@rcu_wait");
+ trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
- trace_rcu_utilization("Start boost kthread@rcu_wait");
+ trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1264,14 +1262,14 @@ static int rcu_boost_kthread(void *arg)
spincnt = 0;
if (spincnt > 10) {
rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization("End boost kthread@rcu_yield");
+ trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
schedule_timeout_interruptible(2);
- trace_rcu_utilization("Start boost kthread@rcu_yield");
+ trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
spincnt = 0;
}
}
/* NOTREACHED */
- trace_rcu_utilization("End boost kthread@notreached");
+ trace_rcu_utilization(TPS("End boost kthread@notreached"));
return 0;
}
@@ -1419,7 +1417,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
int spincnt;
for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization("Start CPU kthread@rcu_wait");
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
local_bh_disable();
*statusp = RCU_KTHREAD_RUNNING;
this_cpu_inc(rcu_cpu_kthread_loops);
@@ -1431,15 +1429,15 @@ static void rcu_cpu_kthread(unsigned int cpu)
rcu_kthread_do_work();
local_bh_enable();
if (*workp == 0) {
- trace_rcu_utilization("End CPU kthread@rcu_wait");
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
*statusp = RCU_KTHREAD_WAITING;
return;
}
}
*statusp = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization("Start CPU kthread@rcu_yield");
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
schedule_timeout_interruptible(2);
- trace_rcu_utilization("End CPU kthread@rcu_yield");
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
*statusp = RCU_KTHREAD_WAITING;
}
@@ -2202,7 +2200,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
* Wait for the grace period. Do so interruptibly to avoid messing
* up the load average.
*/
- trace_rcu_future_gp(rnp, rdp, c, "StartWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
wait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1],
@@ -2210,9 +2208,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
if (likely(d))
break;
flush_signals(current);
- trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
}
- trace_rcu_future_gp(rnp, rdp, c, "EndWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
smp_mb(); /* Ensure that CB invocation happens after GP end. */
}
@@ -2375,3 +2373,425 @@ static void rcu_kick_nohz_cpu(int cpu)
smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}
+
+
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+
+/*
+ * Define RCU flavor that holds sysidle state. This needs to be the
+ * most active flavor of RCU.
+ */
+#ifdef CONFIG_PREEMPT_RCU
+static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+static int full_sysidle_state; /* Current system-idle state. */
+#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
+#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
+#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */
+#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */
+#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */
+
+/*
+ * Invoked to note exit from irq or task transition to idle. Note that
+ * usermode execution does -not- count as idle here! After all, we want
+ * to detect full-system idle states, not RCU quiescent states and grace
+ * periods. The caller must have disabled interrupts.
+ */
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+ unsigned long j;
+
+ /* Adjust nesting, check for fully idle. */
+ if (irq) {
+ rdtp->dynticks_idle_nesting--;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+ if (rdtp->dynticks_idle_nesting != 0)
+ return; /* Still not fully idle. */
+ } else {
+ if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
+ DYNTICK_TASK_NEST_VALUE) {
+ rdtp->dynticks_idle_nesting = 0;
+ } else {
+ rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+ return; /* Still not fully idle. */
+ }
+ }
+
+ /* Record start of fully idle period. */
+ j = jiffies;
+ ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+ smp_mb__before_atomic_inc();
+ atomic_inc(&rdtp->dynticks_idle);
+ smp_mb__after_atomic_inc();
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
+}
+
+/*
+ * Unconditionally force exit from full system-idle state. This is
+ * invoked when a normal CPU exits idle, but must be called separately
+ * for the timekeeping CPU (tick_do_timer_cpu). The reason for this
+ * is that the timekeeping CPU is permitted to take scheduling-clock
+ * interrupts while the system is in system-idle state, and of course
+ * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
+ * interrupt from any other type of interrupt.
+ */
+void rcu_sysidle_force_exit(void)
+{
+ int oldstate = ACCESS_ONCE(full_sysidle_state);
+ int newoldstate;
+
+ /*
+ * Each pass through the following loop attempts to exit full
+ * system-idle state. If contention proves to be a problem,
+ * a trylock-based contention tree could be used here.
+ */
+ while (oldstate > RCU_SYSIDLE_SHORT) {
+ newoldstate = cmpxchg(&full_sysidle_state,
+ oldstate, RCU_SYSIDLE_NOT);
+ if (oldstate == newoldstate &&
+ oldstate == RCU_SYSIDLE_FULL_NOTED) {
+ rcu_kick_nohz_cpu(tick_do_timer_cpu);
+ return; /* We cleared it, done! */
+ }
+ oldstate = newoldstate;
+ }
+ smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
+}
+
+/*
+ * Invoked to note entry to irq or task transition from idle. Note that
+ * usermode execution does -not- count as idle here! The caller must
+ * have disabled interrupts.
+ */
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+ /* Adjust nesting, check for already non-idle. */
+ if (irq) {
+ rdtp->dynticks_idle_nesting++;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+ if (rdtp->dynticks_idle_nesting != 1)
+ return; /* Already non-idle. */
+ } else {
+ /*
+ * Allow for irq misnesting. Yes, it really is possible
+ * to enter an irq handler then never leave it, and maybe
+ * also vice versa. Handle both possibilities.
+ */
+ if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
+ rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+ return; /* Already non-idle. */
+ } else {
+ rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
+ }
+ }
+
+ /* Record end of idle period. */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&rdtp->dynticks_idle);
+ smp_mb__after_atomic_inc();
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
+
+ /*
+ * If we are the timekeeping CPU, we are permitted to be non-idle
+ * during a system-idle state. This must be the case, because
+ * the timekeeping CPU has to take scheduling-clock interrupts
+ * during the time that the system is transitioning to full
+ * system-idle state. This means that the timekeeping CPU must
+ * invoke rcu_sysidle_force_exit() directly if it does anything
+ * more than take a scheduling-clock interrupt.
+ */
+ if (smp_processor_id() == tick_do_timer_cpu)
+ return;
+
+ /* Update system-idle state: We are clearly no longer fully idle! */
+ rcu_sysidle_force_exit();
+}
+
+/*
+ * Check to see if the current CPU is idle. Note that usermode execution
+ * does not count as idle. The caller must have disabled interrupts.
+ */
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj)
+{
+ int cur;
+ unsigned long j;
+ struct rcu_dynticks *rdtp = rdp->dynticks;
+
+ /*
+ * If some other CPU has already reported non-idle, if this is
+ * not the flavor of RCU that tracks sysidle state, or if this
+ * is an offline or the timekeeping CPU, nothing to do.
+ */
+ if (!*isidle || rdp->rsp != rcu_sysidle_state ||
+ cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
+ return;
+ if (rcu_gp_in_progress(rdp->rsp))
+ WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+
+ /* Pick up current idle and NMI-nesting counter and check. */
+ cur = atomic_read(&rdtp->dynticks_idle);
+ if (cur & 0x1) {
+ *isidle = false; /* We are not idle! */
+ return;
+ }
+ smp_mb(); /* Read counters before timestamps. */
+
+ /* Pick up timestamps. */
+ j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
+ /* If this CPU entered idle more recently, update maxj timestamp. */
+ if (ULONG_CMP_LT(*maxj, j))
+ *maxj = j;
+}
+
+/*
+ * Is this the flavor of RCU that is handling full-system idle?
+ */
+static bool is_sysidle_rcu_state(struct rcu_state *rsp)
+{
+ return rsp == rcu_sysidle_state;
+}
+
+/*
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
+ * timekeeping CPU.
+ */
+static void rcu_bind_gp_kthread(void)
+{
+ int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return;
+ if (raw_smp_processor_id() != cpu)
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+}
+
+/*
+ * Return a delay in jiffies based on the number of CPUs, rcu_node
+ * leaf fanout, and jiffies tick rate. The idea is to allow larger
+ * systems more time to transition to full-idle state in order to
+ * avoid the cache thrashing that otherwise occurs on the state variable.

+ * Really small systems (less than a couple of tens of CPUs) should
+ * instead use a single global atomically incremented counter, and later
+ * versions of this will automatically reconfigure themselves accordingly.
+ */
+static unsigned long rcu_sysidle_delay(void)
+{
+ if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
+ return 0;
+ return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
+}
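As a worked example with assumed values (none of them taken from the patch: HZ=1000, rcu_fanout_leaf=16, nr_cpu_ids=64), the delay evaluates to DIV_ROUND_UP(64 * 1000, 16 * 1000) = 4 jiffies, i.e. roughly 4 ms per state-machine step; a 16-CPU system under the same assumptions gets DIV_ROUND_UP(16000, 16000) = 1 jiffy, and any system at or below CONFIG_NO_HZ_FULL_SYSIDLE_SMALL CPUs skips the delay entirely.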
+
+/*
+ * Advance the full-system-idle state. This is invoked when all of
+ * the non-timekeeping CPUs are idle.
+ */
+static void rcu_sysidle(unsigned long j)
+{
+ /* Check the current state. */
+ switch (ACCESS_ONCE(full_sysidle_state)) {
+ case RCU_SYSIDLE_NOT:
+
+ /* First time all are idle, so note a short idle period. */
+ ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+ break;
+
+ case RCU_SYSIDLE_SHORT:
+
+ /*
+ * Idle for a bit, time to advance to next state?
+ * cmpxchg failure means race with non-idle, let them win.
+ */
+ if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
+ (void)cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
+ break;
+
+ case RCU_SYSIDLE_LONG:
+
+ /*
+ * Do an additional check pass before advancing to full.
+ * cmpxchg failure means race with non-idle, let them win.
+ */
+ if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
+ (void)cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Found a non-idle non-timekeeping CPU, so kick the system-idle state
+ * back to the beginning.
+ */
+static void rcu_sysidle_cancel(void)
+{
+ smp_mb();
+ ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+}
+
+/*
+ * Update the sysidle state based on the results of a force-quiescent-state
+ * scan of the CPUs' dyntick-idle state.
+ */
+static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
+ unsigned long maxj, bool gpkt)
+{
+ if (rsp != rcu_sysidle_state)
+ return; /* Wrong flavor, ignore. */
+ if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
+ return; /* Running state machine from timekeeping CPU. */
+ if (isidle)
+ rcu_sysidle(maxj); /* More idle! */
+ else
+ rcu_sysidle_cancel(); /* Idle is over. */
+}
+
+/*
+ * Wrapper for rcu_sysidle_report() when called from the grace-period
+ * kthread's context.
+ */
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj)
+{
+ rcu_sysidle_report(rsp, isidle, maxj, true);
+}
+
+/* Callback and function for forcing an RCU grace period. */
+struct rcu_sysidle_head {
+ struct rcu_head rh;
+ int inuse;
+};
+
+static void rcu_sysidle_cb(struct rcu_head *rhp)
+{
+ struct rcu_sysidle_head *rshp;
+
+ /*
+ * The following memory barrier is needed to replace the
+ * memory barriers that would normally be in the memory
+ * allocator.
+ */
+ smp_mb(); /* grace period precedes setting inuse. */
+
+ rshp = container_of(rhp, struct rcu_sysidle_head, rh);
+ ACCESS_ONCE(rshp->inuse) = 0;
+}
+
+/*
+ * Check to see if the system is fully idle, other than the timekeeping CPU.
+ * The caller must have disabled interrupts.
+ */
+bool rcu_sys_is_idle(void)
+{
+ static struct rcu_sysidle_head rsh;
+ int rss = ACCESS_ONCE(full_sysidle_state);
+
+ if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
+ return false;
+
+ /* Handle small-system case by doing a full scan of CPUs. */
+ if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
+ int oldrss = rss - 1;
+
+ /*
+ * One pass to advance to each state up to _FULL.
+ * Give up if any pass fails to advance the state.
+ */
+ while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
+ int cpu;
+ bool isidle = true;
+ unsigned long maxj = jiffies - ULONG_MAX / 4;
+ struct rcu_data *rdp;
+
+ /* Scan all the CPUs looking for nonidle CPUs. */
+ for_each_possible_cpu(cpu) {
+ rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
+ rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
+ if (!isidle)
+ break;
+ }
+ rcu_sysidle_report(rcu_sysidle_state,
+ isidle, maxj, false);
+ oldrss = rss;
+ rss = ACCESS_ONCE(full_sysidle_state);
+ }
+ }
+
+ /* If this is the first observation of an idle period, record it. */
+ if (rss == RCU_SYSIDLE_FULL) {
+ rss = cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
+ return rss == RCU_SYSIDLE_FULL;
+ }
+
+ smp_mb(); /* ensure rss load happens before later caller actions. */
+
+ /* If already fully idle, tell the caller (in case of races). */
+ if (rss == RCU_SYSIDLE_FULL_NOTED)
+ return true;
+
+ /*
+ * If we aren't there yet, and a grace period is not in flight,
+ * initiate a grace period. Either way, tell the caller that
+ * we are not there yet. We use an xchg() rather than an assignment
+ * to make up for the memory barriers that would otherwise be
+ * provided by the memory allocator.
+ */
+ if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
+ !rcu_gp_in_progress(rcu_sysidle_state) &&
+ !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
+ call_rcu(&rsh.rh, rcu_sysidle_cb);
+ return false;
+}
+
+/*
+ * Initialize dynticks sysidle state for CPUs coming online.
+ */
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+ rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
+}
+
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj)
+{
+}
+
+static bool is_sysidle_rcu_state(struct rcu_state *rsp)
+{
+ return false;
+}
+
+static void rcu_bind_gp_kthread(void)
+{
+}
+
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj)
+{
+}
+
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
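The state machine above only moves forward while every non-timekeeping CPU stays idle, and rcu_sysidle_cancel() snaps it back to RCU_SYSIDLE_NOT as soon as one wakes. Below is a minimal userspace model of that progression; the enum mirrors the states named above, but the delay checks and the cmpxchg races are deliberately left out.

#include <stdio.h>

enum state { SYSIDLE_NOT, SYSIDLE_SHORT, SYSIDLE_LONG,
	     SYSIDLE_FULL, SYSIDLE_FULL_NOTED };

/* One force-quiescent-state scan: advance while everything stays idle,
 * snap back to NOT the moment a non-timekeeping CPU is seen running. */
static enum state step(enum state s, int all_idle)
{
	if (!all_idle)
		return SYSIDLE_NOT;               /* rcu_sysidle_cancel() */
	if (s < SYSIDLE_FULL)
		return (enum state)(s + 1);       /* NOT -> SHORT -> LONG -> FULL */
	return s;
}

int main(void)
{
	enum state s = SYSIDLE_NOT;
	int scans[] = { 1, 1, 1, 0, 1 };

	for (int i = 0; i < 5; i++) {
		s = step(s, scans[i]);
		printf("scan %d -> state %d\n", i, s);
	}
	return 0;
}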
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 269ed9384cc4..f813b3474646 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,14 @@ EXPORT_SYMBOL(cad_pid);
#endif
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
-int reboot_default;
+/*
+ * This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line). This is needed so that we can
+ * suppress DMI scanning for reboot quirks. Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+int reboot_default = 1;
int reboot_cpu;
enum reboot_type reboot_type = BOOT_ACPI;
int reboot_force;
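The comment above explains that reboot_default stays set until reboot= appears on the command line, which is what later lets the DMI reboot-quirk tables be overridden. The sketch below is a hypothetical userspace illustration of that flag handshake; parse_reboot_arg() and the single-letter mode selection are invented for the example and are not the kernel's parser.

#include <stdio.h>
#include <string.h>

static int reboot_default = 1;   /* mirrors the kernel variable above */
static char reboot_type = 'a';   /* 'a' standing in for BOOT_ACPI, illustrative only */

static void parse_reboot_arg(const char *arg)   /* hypothetical helper */
{
	if (strncmp(arg, "reboot=", 7) == 0) {
		reboot_default = 0;      /* the user chose explicitly */
		reboot_type = arg[7];    /* crude: first letter selects the mode */
	}
}

int main(void)
{
	parse_reboot_arg("reboot=b");
	printf("default=%d type=%c\n", reboot_default, reboot_type);
	return 0;
}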
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index ff55247e7049..4aa8a305aede 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -17,8 +17,8 @@
void res_counter_init(struct res_counter *counter, struct res_counter *parent)
{
spin_lock_init(&counter->lock);
- counter->limit = RESOURCE_MAX;
- counter->soft_limit = RESOURCE_MAX;
+ counter->limit = RES_COUNTER_MAX;
+ counter->soft_limit = RES_COUNTER_MAX;
counter->parent = parent;
}
@@ -178,23 +178,30 @@ u64 res_counter_read_u64(struct res_counter *counter, int member)
#endif
int res_counter_memparse_write_strategy(const char *buf,
- unsigned long long *res)
+ unsigned long long *resp)
{
char *end;
+ unsigned long long res;
- /* return RESOURCE_MAX(unlimited) if "-1" is specified */
+ /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */
if (*buf == '-') {
- *res = simple_strtoull(buf + 1, &end, 10);
- if (*res != 1 || *end != '\0')
+ res = simple_strtoull(buf + 1, &end, 10);
+ if (res != 1 || *end != '\0')
return -EINVAL;
- *res = RESOURCE_MAX;
+ *resp = RES_COUNTER_MAX;
return 0;
}
- *res = memparse(buf, &end);
+ res = memparse(buf, &end);
if (*end != '\0')
return -EINVAL;
- *res = PAGE_ALIGN(*res);
+ if (PAGE_ALIGN(res) >= res)
+ res = PAGE_ALIGN(res);
+ else
+ res = RES_COUNTER_MAX;
+
+ *resp = res;
+
return 0;
}
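The hunk above guards against PAGE_ALIGN() wrapping past the top of the unsigned long long range: if rounding up would produce a smaller value, the result saturates to RES_COUNTER_MAX instead. A standalone sketch of that saturating round-up, using an illustrative 4 KiB page size:

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE   4096ULL
#define COUNTER_MAX ULLONG_MAX

static unsigned long long page_align_sat(unsigned long long v)
{
	unsigned long long aligned = (v + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	return aligned >= v ? aligned : COUNTER_MAX;   /* wrapped -> saturate */
}

int main(void)
{
	printf("%llu\n", page_align_sat(5000));              /* 8192 */
	printf("%llu\n", page_align_sat(ULLONG_MAX - 10));   /* saturates */
	return 0;
}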
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 05c39f030314..5ac63c9a995a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -978,13 +978,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq->skip_clock_update = 1;
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1015,18 +1008,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
- struct task_migration_notifier tmn;
-
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
-
- tmn.task = p;
- tmn.from_cpu = task_cpu(p);
- tmn.to_cpu = new_cpu;
-
- atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
@@ -2527,13 +2512,11 @@ void __sched schedule_preempt_disabled(void)
*/
asmlinkage void __sched notrace preempt_schedule(void)
{
- struct thread_info *ti = current_thread_info();
-
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
- if (likely(ti->preempt_count || irqs_disabled()))
+ if (likely(!preemptible()))
return;
do {
@@ -2677,7 +2660,7 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
if (unlikely(!q))
return;
- if (unlikely(!nr_exclusive))
+ if (unlikely(nr_exclusive != 1))
wake_flags = 0;
spin_lock_irqsave(&q->lock, flags);
@@ -4964,7 +4947,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
- SD_SHARE_PKG_RESOURCES);
+ SD_SHARE_PKG_RESOURCES |
+ SD_PREFER_SIBLING);
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
@@ -5133,18 +5117,23 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
* two cpus are in the same cache domain, see cpus_share_cache().
*/
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
int id = cpu;
+ int size = 1;
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
- if (sd)
+ if (sd) {
id = cpumask_first(sched_domain_span(sd));
+ size = cpumask_weight(sched_domain_span(sd));
+ }
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+ per_cpu(sd_llc_size, cpu) = size;
per_cpu(sd_llc_id, cpu) = id;
}
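The new sd_llc_size per-cpu variable caches how many CPUs span this CPU's last-level-cache domain, defaulting to 1 when no such domain exists; wake_wide() in fair.c later uses it as its scaling factor. A rough userspace sketch, with a plain bitmask standing in for the kernel cpumask:

#include <stdio.h>

/* Count of set bits stands in for cpumask_weight() on the LLC span. */
static int span_weight(unsigned long span)
{
	int w = 0;

	for (; span; span &= span - 1)
		w++;
	return w;
}

int main(void)
{
	unsigned long llc_span = 0x0f;   /* CPUs 0-3 share the cache */
	int size = llc_span ? span_weight(llc_span) : 1;

	printf("sd_llc_size = %d\n", size);   /* 4 */
	return 0;
}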
@@ -5168,6 +5157,13 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
tmp->parent = parent->parent;
if (parent->parent)
parent->parent->child = tmp;
+ /*
+ * Transfer SD_PREFER_SIBLING down in case of a
+ * degenerate parent; the spans match for this
+ * so the property transfers.
+ */
+ if (parent->flags & SD_PREFER_SIBLING)
+ tmp->flags |= SD_PREFER_SIBLING;
destroy_sched_domain(parent, cpu);
} else
tmp = tmp->parent;
@@ -6234,8 +6230,9 @@ match1:
;
}
+ n = ndoms_cur;
if (doms_new == NULL) {
- ndoms_cur = 0;
+ n = 0;
doms_new = &fallback_doms;
cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
@@ -6243,7 +6240,7 @@ match1:
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
- for (j = 0; j < ndoms_cur && !new_topology; j++) {
+ for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j])
&& dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
@@ -6815,7 +6812,7 @@ void sched_move_task(struct task_struct *tsk)
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
- tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+ tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
lockdep_is_held(&tsk->sighand->siglock)),
struct task_group, css);
tg = autogroup_task_group(tsk, tg);
@@ -7137,23 +7134,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
#ifdef CONFIG_CGROUP_SCHED
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
- struct task_group, css);
+ return css ? container_of(css, struct task_group, css) : NULL;
}
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
- struct task_group *tg, *parent;
+ struct task_group *parent = css_tg(parent_css);
+ struct task_group *tg;
- if (!cgrp->parent) {
+ if (!parent) {
/* This is early initialization for the top cgroup */
return &root_task_group.css;
}
- parent = cgroup_tg(cgrp->parent);
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
@@ -7161,41 +7157,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
return &tg->css;
}
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
- struct task_group *parent;
+ struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css_parent(css));
- if (!cgrp->parent)
- return 0;
-
- parent = cgroup_tg(cgrp->parent);
- sched_online_group(tg, parent);
+ if (parent)
+ sched_online_group(tg, parent);
return 0;
}
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
sched_destroy_group(tg);
}
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
sched_offline_group(tg);
}
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
- if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+ if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
@@ -7206,18 +7199,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
return 0;
}
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset)
+ cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
}
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
- struct task_struct *task)
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
+ struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
@@ -7231,15 +7224,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 shareval)
{
- return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+ return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
return (u64) scale_load_down(tg->shares);
}
@@ -7361,26 +7355,28 @@ long tg_get_cfs_period(struct task_group *tg)
return cfs_period_us;
}
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return tg_get_cfs_quota(cgroup_tg(cgrp));
+ return tg_get_cfs_quota(css_tg(css));
}
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
- s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, s64 cfs_quota_us)
{
- return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+ return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return tg_get_cfs_period(cgroup_tg(cgrp));
+ return tg_get_cfs_period(css_tg(css));
}
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 cfs_period_us)
{
- return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+ return tg_set_cfs_period(css_tg(css), cfs_period_us);
}
struct cfs_schedulable_data {
@@ -7461,10 +7457,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
return ret;
}
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7477,26 +7473,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
- s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, s64 val)
{
- return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+ return sched_group_set_rt_runtime(css_tg(css), val);
}
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return sched_group_rt_runtime(cgroup_tg(cgrp));
+ return sched_group_rt_runtime(css_tg(css));
}
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
- u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 rt_period_us)
{
- return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+ return sched_group_set_rt_period(css_tg(css), rt_period_us);
}
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return sched_group_rt_period(cgroup_tg(cgrp));
+ return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
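Most of the conversions in this file replace cgroup-based lookups with css_tg(), which recovers the task_group that embeds the passed cgroup_subsys_state and maps a NULL css to a NULL group. A self-contained illustration of that container_of() pattern, with stand-in types rather than the kernel structures:

#include <stddef.h>
#include <stdio.h>

struct css { int id; };                       /* stand-in for cgroup_subsys_state */
struct task_group { long shares; struct css css; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct task_group *css_tg(struct css *css)
{
	return css ? container_of(css, struct task_group, css) : NULL;
}

int main(void)
{
	struct task_group tg = { .shares = 1024 };

	printf("%ld\n", css_tg(&tg.css)->shares);   /* 1024 */
	printf("%p\n", (void *)css_tg(NULL));       /* NULL-safe */
	return 0;
}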
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index dbb7e2cd95eb..f64722ff0299 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -33,30 +33,20 @@ struct cpuacct {
struct kernel_cpustat __percpu *cpustat;
};
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
- struct cpuacct, css);
+ return css ? container_of(css, struct cpuacct, css) : NULL;
}
/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
- return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
-static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
-{
- return cgroup_ca(ca->css.cgroup->parent);
+ return css_ca(task_css(tsk, cpuacct_subsys_id));
}
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
- if (!ca->css.cgroup->parent)
- return NULL;
- return cgroup_ca(ca->css.cgroup->parent);
+ return css_ca(css_parent(&ca->css));
}
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
@@ -66,11 +56,12 @@ static struct cpuacct root_cpuacct = {
};
/* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuacct *ca;
- if (!cgrp->parent)
+ if (!parent_css)
return &root_cpuacct.css;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -96,9 +87,9 @@ out:
}
/* destroy an existing cpu accounting group */
-static void cpuacct_css_free(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
free_percpu(ca->cpustat);
free_percpu(ca->cpuusage);
@@ -141,9 +132,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
}
/* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
u64 totalcpuusage = 0;
int i;
@@ -153,10 +144,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
return totalcpuusage;
}
-static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
- u64 reset)
+static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 reset)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
int err = 0;
int i;
@@ -172,10 +163,10 @@ out:
return err;
}
-static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
+static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m)
{
- struct cpuacct *ca = cgroup_ca(cgroup);
+ struct cpuacct *ca = css_ca(css);
u64 percpu;
int i;
@@ -192,10 +183,10 @@ static const char * const cpuacct_stat_desc[] = {
[CPUACCT_STAT_SYSTEM] = "system",
};
-static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+static int cpuacct_stats_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct cgroup_map_cb *cb)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
int cpu;
s64 val = 0;
@@ -281,7 +272,7 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
while (ca != &root_cpuacct) {
kcpustat = this_cpu_ptr(ca->cpustat);
kcpustat->cpustat[index] += val;
- ca = __parent_ca(ca);
+ ca = parent_ca(ca);
}
rcu_read_unlock();
}
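With __parent_ca() gone, cpuacct_account_field() charges each level by walking parent_ca() until it reaches root_cpuacct. The sketch below models that upward walk with a plain parent pointer; the per-cpu kcpustat indirection is left out.

#include <stdio.h>

struct acct { long long stat; struct acct *parent; };

/* Charge 'val' to a group and every ancestor up to (but excluding) the root. */
static void account_field(struct acct *ca, const struct acct *root, long long val)
{
	while (ca != root) {
		ca->stat += val;
		ca = ca->parent;
	}
}

int main(void)
{
	struct acct root = { 0, NULL };
	struct acct mid  = { 0, &root };
	struct acct leaf = { 0, &mid };

	account_field(&leaf, &root, 5);
	/* leaf=5 mid=5 root=0, matching the kernel loop that stops at root_cpuacct */
	printf("leaf=%lld mid=%lld root=%lld\n", leaf.stat, mid.stat, root.stat);
	return 0;
}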
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a7959e05a9d5..99947919e30b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -121,7 +121,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
* is the only cgroup, then nothing else should be necessary.
*
*/
- __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+ __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
cpuacct_account_field(p, index, tmp);
}
@@ -378,11 +378,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_common_task_switch(struct task_struct *prev)
{
- if (!vtime_accounting_enabled())
- return;
-
if (is_idle_task(prev))
vtime_account_idle(prev);
else
@@ -404,11 +401,8 @@ void vtime_task_switch(struct task_struct *prev)
* vtime_account().
*/
#ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_common_account_irq_enter(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
if (!in_interrupt()) {
/*
* If we interrupted user, context_tracking_in_user()
@@ -428,7 +422,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
}
vtime_account_system(tsk);
}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
@@ -557,16 +551,7 @@ static void cputime_adjust(struct task_cputime *curr,
struct cputime *prev,
cputime_t *ut, cputime_t *st)
{
- cputime_t rtime, stime, utime, total;
-
- if (vtime_accounting_enabled()) {
- *ut = curr->utime;
- *st = curr->stime;
- return;
- }
-
- stime = curr->stime;
- total = stime + curr->utime;
+ cputime_t rtime, stime, utime;
/*
* Tick based cputime accounting depend on random scheduling
@@ -588,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr,
if (prev->stime + prev->utime >= rtime)
goto out;
- if (total) {
+ stime = curr->stime;
+ utime = curr->utime;
+
+ if (utime == 0) {
+ stime = rtime;
+ } else if (stime == 0) {
+ utime = rtime;
+ } else {
+ cputime_t total = stime + utime;
+
stime = scale_stime((__force u64)stime,
(__force u64)rtime, (__force u64)total);
utime = rtime - stime;
- } else {
- stime = rtime;
- utime = 0;
}
/*
@@ -664,23 +655,17 @@ static void __vtime_account_system(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
write_sequnlock(&tsk->vtime_seqlock);
}
-void vtime_account_irq_exit(struct task_struct *tsk)
+void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
+ __vtime_account_system(tsk);
if (context_tracking_in_user())
tsk->vtime_snap_whence = VTIME_USER;
- __vtime_account_system(tsk);
write_sequnlock(&tsk->vtime_seqlock);
}
@@ -688,12 +673,8 @@ void vtime_account_user(struct task_struct *tsk)
{
cputime_t delta_cpu;
- if (!vtime_accounting_enabled())
- return;
-
- delta_cpu = get_vtime_delta(tsk);
-
write_seqlock(&tsk->vtime_seqlock);
+ delta_cpu = get_vtime_delta(tsk);
tsk->vtime_snap_whence = VTIME_SYS;
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
write_sequnlock(&tsk->vtime_seqlock);
@@ -701,22 +682,27 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_user_enter(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
- tsk->vtime_snap_whence = VTIME_USER;
__vtime_account_system(tsk);
+ tsk->vtime_snap_whence = VTIME_USER;
write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_guest_enter(struct task_struct *tsk)
{
+ /*
+ * The flags must be updated under the lock with
+ * the vtime_snap flush and update.
+ * That enforces the right ordering and update-sequence
+ * synchronization against the reader (task_gtime()),
+ * which can thus safely catch up with a tickless delta.

+ */
write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
current->flags |= PF_VCPU;
write_sequnlock(&tsk->vtime_seqlock);
}
+EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
@@ -725,6 +711,7 @@ void vtime_guest_exit(struct task_struct *tsk)
current->flags &= ~PF_VCPU;
write_sequnlock(&tsk->vtime_seqlock);
}
+EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
@@ -733,11 +720,6 @@ void vtime_account_idle(struct task_struct *tsk)
account_idle_time(delta_cpu);
}
-bool vtime_accounting_enabled(void)
-{
- return context_tracking_active();
-}
-
void arch_vtime_task_switch(struct task_struct *prev)
{
write_seqlock(&prev->vtime_seqlock);
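cputime_adjust() now rescales the tick-sampled stime/utime so that they sum to the precise rtime, short-circuiting the all-system and all-user cases. A userspace sketch of that arithmetic, using plain 64-bit division in place of scale_stime():

#include <stdint.h>
#include <stdio.h>

static void adjust(uint64_t rtime, uint64_t stime, uint64_t utime,
		   uint64_t *out_ut, uint64_t *out_st)
{
	if (utime == 0) {
		stime = rtime;                       /* all system time */
	} else if (stime == 0) {
		utime = rtime;                       /* all user time */
	} else {
		uint64_t total = stime + utime;

		stime = stime * rtime / total;       /* keep the observed ratio */
		utime = rtime - stime;
	}
	*out_st = stime;
	*out_ut = utime;
}

int main(void)
{
	uint64_t ut, st;

	adjust(1000, 300, 700, &ut, &st);
	printf("ut=%llu st=%llu\n", (unsigned long long)ut,
	       (unsigned long long)st);              /* ut=700 st=300 */
	return 0;
}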
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e076bddd4c66..196559994f7c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -124,7 +124,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
SEQ_printf(m, " ");
SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
- p->comm, p->pid,
+ p->comm, task_pid_nr(p),
SPLIT_NS(p->se.vruntime),
(long long)(p->nvcsw + p->nivcsw),
p->prio);
@@ -289,7 +289,7 @@ do { \
P(nr_load_updates);
P(nr_uninterruptible);
PN(next_balance);
- P(curr->pid);
+ SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
PN(clock);
P(cpu_load[0]);
P(cpu_load[1]);
@@ -492,7 +492,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
- SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
SEQ_printf(m,
"---------------------------------------------------------"
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 68f1609ca149..7c70201fbc61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3018,6 +3018,23 @@ static unsigned long cpu_avg_load_per_task(int cpu)
return 0;
}
+static void record_wakee(struct task_struct *p)
+{
+ /*
+ * Rough decay (wiping) for cost saving; don't worry
+ * about the boundary, a really active task won't care
+ * about the loss.
+ */
+ if (jiffies > current->wakee_flip_decay_ts + HZ) {
+ current->wakee_flips = 0;
+ current->wakee_flip_decay_ts = jiffies;
+ }
+
+ if (current->last_wakee != p) {
+ current->last_wakee = p;
+ current->wakee_flips++;
+ }
+}
static void task_waking_fair(struct task_struct *p)
{
@@ -3038,6 +3055,7 @@ static void task_waking_fair(struct task_struct *p)
#endif
se->vruntime -= min_vruntime;
+ record_wakee(p);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3156,6 +3174,28 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
#endif
+static int wake_wide(struct task_struct *p)
+{
+ int factor = this_cpu_read(sd_llc_size);
+
+ /*
+ * This is the switching frequency: it can mean many wakees or
+ * rapid switching. Using the LLC size as the factor automatically
+ * adjusts the looseness, so a bigger node leads to more pulling.
+ */
+ if (p->wakee_flips > factor) {
+ /*
+ * The wakee is somewhat hot and needs a certain amount of CPU
+ * resource, so if the waker is far hotter, prefer to leave
+ * the wakee alone.
+ */
+ if (current->wakee_flips > (factor * p->wakee_flips))
+ return 1;
+ }
+
+ return 0;
+}
+
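record_wakee() and wake_wide() above implement the wakee-flip heuristic: flips decay roughly once per second, and a wakeup is treated as "wide" once both waker and wakee flip partners faster than the LLC size can absorb. A userspace model follows; the time source, the llc_size value and the task structure are stand-ins, not the scheduler's.

#include <stdio.h>
#include <time.h>

struct task { unsigned int flips; time_t decay_ts; const struct task *last; };

static void record_wakee(struct task *waker, const struct task *wakee)
{
	time_t now = time(NULL);

	if (now > waker->decay_ts + 1) {        /* roughly one second, ~HZ above */
		waker->flips = 0;
		waker->decay_ts = now;
	}
	if (waker->last != wakee) {
		waker->last = wakee;
		waker->flips++;
	}
}

static int wake_wide(const struct task *waker, const struct task *wakee,
		     unsigned int llc_size)
{
	return wakee->flips > llc_size &&
	       waker->flips > llc_size * wakee->flips;
}

int main(void)
{
	struct task waker = { .flips = 0, .decay_ts = 0, .last = NULL };
	struct task wakee = { .flips = 5 };
	struct task partners[64];

	/* The waker switches between many partners: its flips climb quickly. */
	waker.decay_ts = time(NULL);            /* no decay during this burst */
	for (int i = 0; i < 64; i++)
		record_wakee(&waker, &partners[i]);

	printf("waker flips=%u wide=%d\n", waker.flips,
	       wake_wide(&waker, &wakee, 4));   /* 64, wide=1 since 64 > 4*5 */
	return 0;
}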
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
s64 this_load, load;
@@ -3165,6 +3205,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
unsigned long weight;
int balanced;
+ /*
+ * If we wake multiple tasks be careful to not bounce
+ * ourselves around too much.
+ */
+ if (wake_wide(p))
+ return 0;
+
idx = sd->wake_idx;
this_cpu = smp_processor_id();
prev_cpu = task_cpu(p);
@@ -4172,47 +4219,48 @@ static void update_blocked_averages(int cpu)
}
/*
- * Compute the cpu's hierarchical load factor for each task group.
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
-static int tg_load_down(struct task_group *tg, void *data)
+static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
- unsigned long load;
- long cpu = (long)data;
-
- if (!tg->parent) {
- load = cpu_rq(cpu)->avg.load_avg_contrib;
- } else {
- load = tg->parent->cfs_rq[cpu]->h_load;
- load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
- tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
- }
-
- tg->cfs_rq[cpu]->h_load = load;
-
- return 0;
-}
-
-static void update_h_load(long cpu)
-{
- struct rq *rq = cpu_rq(cpu);
+ struct rq *rq = rq_of(cfs_rq);
+ struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
unsigned long now = jiffies;
+ unsigned long load;
- if (rq->h_load_throttle == now)
+ if (cfs_rq->last_h_load_update == now)
return;
- rq->h_load_throttle = now;
+ cfs_rq->h_load_next = NULL;
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_load_next = se;
+ if (cfs_rq->last_h_load_update == now)
+ break;
+ }
- rcu_read_lock();
- walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
- rcu_read_unlock();
+ if (!se) {
+ cfs_rq->h_load = cfs_rq->runnable_load_avg;
+ cfs_rq->last_h_load_update = now;
+ }
+
+ while ((se = cfs_rq->h_load_next) != NULL) {
+ load = cfs_rq->h_load;
+ load = div64_ul(load * se->avg.load_avg_contrib,
+ cfs_rq->runnable_load_avg + 1);
+ cfs_rq = group_cfs_rq(se);
+ cfs_rq->h_load = load;
+ cfs_rq->last_h_load_update = now;
+ }
}
static unsigned long task_h_load(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ update_cfs_rq_h_load(cfs_rq);
return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
cfs_rq->runnable_load_avg + 1);
}
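update_cfs_rq_h_load() computes the hierarchical load top-down: starting from the root runnable load, each level is scaled by that entity's share of its parent's runnable load. The sketch below replays that per-level scaling over a small array; all values are illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* contrib[i] / (runnable[i] + 1): entity i's share at level i. */
	uint64_t contrib[]  = { 512, 256 };
	uint64_t runnable[] = { 1024, 512 };
	uint64_t h_load = 2048;   /* root cfs_rq: h_load = runnable_load_avg */

	for (int i = 0; i < 2; i++)
		h_load = h_load * contrib[i] / (runnable[i] + 1);

	/* roughly a quarter of the root load with these shares */
	printf("leaf h_load = %llu\n", (unsigned long long)h_load);
	return 0;
}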
@@ -4221,10 +4269,6 @@ static inline void update_blocked_averages(int cpu)
{
}
-static inline void update_h_load(long cpu)
-{
-}
-
static unsigned long task_h_load(struct task_struct *p)
{
return p->se.avg.load_avg_contrib;
@@ -4233,50 +4277,56 @@ static unsigned long task_h_load(struct task_struct *p)
/********** Helpers for find_busiest_group ************************/
/*
- * sd_lb_stats - Structure to store the statistics of a sched_domain
- * during load balancing.
- */
-struct sd_lb_stats {
- struct sched_group *busiest; /* Busiest group in this sd */
- struct sched_group *this; /* Local group in this sd */
- unsigned long total_load; /* Total load of all groups in sd */
- unsigned long total_pwr; /* Total power of all groups in sd */
- unsigned long avg_load; /* Average load across all groups in sd */
-
- /** Statistics of this group */
- unsigned long this_load;
- unsigned long this_load_per_task;
- unsigned long this_nr_running;
- unsigned long this_has_capacity;
- unsigned int this_idle_cpus;
-
- /* Statistics of the busiest group */
- unsigned int busiest_idle_cpus;
- unsigned long max_load;
- unsigned long busiest_load_per_task;
- unsigned long busiest_nr_running;
- unsigned long busiest_group_capacity;
- unsigned long busiest_has_capacity;
- unsigned int busiest_group_weight;
-
- int group_imb; /* Is there imbalance in this sd */
-};
-
-/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long sum_nr_running; /* Nr tasks running in the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
- unsigned long group_capacity;
- unsigned long idle_cpus;
- unsigned long group_weight;
+ unsigned long load_per_task;
+ unsigned long group_power;
+ unsigned int sum_nr_running; /* Nr tasks running in the group */
+ unsigned int group_capacity;
+ unsigned int idle_cpus;
+ unsigned int group_weight;
int group_imb; /* Is there an imbalance in the group ? */
int group_has_capacity; /* Is there extra capacity in the group? */
};
+/*
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
+ */
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *local; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_pwr; /* Total power of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
+ struct sg_lb_stats local_stat; /* Statistics of the local group */
+};
+
+static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
+{
+ /*
+ * Skimp on the clearing to avoid duplicate work. We can avoid clearing
+ * local_stat because update_sg_lb_stats() does a full clear/assignment.
+ * We must however clear busiest_stat::avg_load because
+ * update_sd_pick_busiest() reads this before assignment.
+ */
+ *sds = (struct sd_lb_stats){
+ .busiest = NULL,
+ .local = NULL,
+ .total_load = 0UL,
+ .total_pwr = 0UL,
+ .busiest_stat = {
+ .avg_load = 0UL,
+ },
+ };
+}
+
/**
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
@@ -4460,33 +4510,99 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
return 0;
}
+/*
+ * Group imbalance indicates (and tries to solve) the problem where balancing
+ * groups is inadequate due to tsk_cpus_allowed() constraints.
+ *
+ * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
+ * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
+ * Something like:
+ *
+ * { 0 1 2 3 } { 4 5 6 7 }
+ * * * * *
+ *
+ * If we were to balance group-wise we'd place two tasks in the first group and
+ * two tasks in the second group. Clearly this is undesired as it will overload
+ * cpu 3 and leave one of the cpus in the second group unused.
+ *
+ * The current solution to this issue is detecting the skew in the first group
+ * by noticing it has a cpu that is overloaded while the remaining cpus are
+ * idle -- or rather, there's a distinct imbalance in the cpus; see
+ * sg_imbalanced().
+ *
+ * When this is detected, the group becomes a candidate for busiest; see
+ * update_sd_pick_busiest(). Also, calculate_imbalance() and
+ * find_busiest_group() skip some of the usual balance conditions to allow it
+ * to create an effective group imbalance.
+ *
+ * This is a somewhat tricky proposition since the next run might not find the
+ * group imbalance and decide the groups need to be balanced again. A most
+ * subtle and fragile situation.
+ */
+
+struct sg_imb_stats {
+ unsigned long max_nr_running, min_nr_running;
+ unsigned long max_cpu_load, min_cpu_load;
+};
+
+static inline void init_sg_imb_stats(struct sg_imb_stats *sgi)
+{
+ sgi->max_cpu_load = sgi->max_nr_running = 0UL;
+ sgi->min_cpu_load = sgi->min_nr_running = ~0UL;
+}
+
+static inline void
+update_sg_imb_stats(struct sg_imb_stats *sgi,
+ unsigned long load, unsigned long nr_running)
+{
+ if (load > sgi->max_cpu_load)
+ sgi->max_cpu_load = load;
+ if (sgi->min_cpu_load > load)
+ sgi->min_cpu_load = load;
+
+ if (nr_running > sgi->max_nr_running)
+ sgi->max_nr_running = nr_running;
+ if (sgi->min_nr_running > nr_running)
+ sgi->min_nr_running = nr_running;
+}
+
+static inline int
+sg_imbalanced(struct sg_lb_stats *sgs, struct sg_imb_stats *sgi)
+{
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of a task.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ if ((sgi->max_cpu_load - sgi->min_cpu_load) >= sgs->load_per_task &&
+ (sgi->max_nr_running - sgi->min_nr_running) > 1)
+ return 1;
+
+ return 0;
+}
+
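sg_imbalanced() condenses the old inline test: a group counts as imbalanced when the per-cpu load spread reaches at least one average task and the nr_running spread exceeds one. A standalone restatement of the same check with plain parameters and illustrative numbers:

#include <stdio.h>

static int sg_imbalanced(unsigned long max_load, unsigned long min_load,
			 unsigned long max_nr, unsigned long min_nr,
			 unsigned long load_per_task)
{
	return (max_load - min_load) >= load_per_task &&
	       (max_nr - min_nr) > 1;
}

int main(void)
{
	/* One cpu overloaded (3 tasks, load 3072) while the others sit idle. */
	printf("%d\n", sg_imbalanced(3072, 0, 3, 0, 1024));    /* 1 */
	printf("%d\n", sg_imbalanced(1100, 1000, 1, 1, 1024)); /* 0 */
	return 0;
}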
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
* @group: sched_group whose statistics are to be updated.
* @load_idx: Load index of sched_domain of this_cpu for load calc.
* @local_group: Does group contain this_cpu.
- * @balance: Should we balance.
* @sgs: variable to hold the statistics for this group.
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
- int local_group, int *balance, struct sg_lb_stats *sgs)
+ int local_group, struct sg_lb_stats *sgs)
{
- unsigned long nr_running, max_nr_running, min_nr_running;
- unsigned long load, max_cpu_load, min_cpu_load;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long avg_load_per_task = 0;
+ struct sg_imb_stats sgi;
+ unsigned long nr_running;
+ unsigned long load;
int i;
- if (local_group)
- balance_cpu = group_balance_cpu(group);
-
- /* Tally up the load of all CPUs in the group */
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
- max_nr_running = 0;
- min_nr_running = ~0UL;
+ init_sg_imb_stats(&sgi);
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
@@ -4495,24 +4611,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
/* Bias balancing toward cpus of our domain */
if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu &&
- cpumask_test_cpu(i, sched_group_mask(group))) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
-
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
-
- if (nr_running > max_nr_running)
- max_nr_running = nr_running;
- if (min_nr_running > nr_running)
- min_nr_running = nr_running;
+ update_sg_imb_stats(&sgi, load, nr_running);
}
sgs->group_load += load;
@@ -4522,46 +4624,25 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->idle_cpus++;
}
- /*
- * First idle cpu or the first cpu(busiest) in this sched group
- * is eligible for doing load balancing at this and above
- * domains. In the newly idle case, we will allow all the cpu's
- * to do the newly idle load balance.
- */
- if (local_group) {
- if (env->idle != CPU_NEWLY_IDLE) {
- if (balance_cpu != env->dst_cpu) {
- *balance = 0;
- return;
- }
- update_group_power(env->sd, env->dst_cpu);
- } else if (time_after_eq(jiffies, group->sgp->next_update))
- update_group_power(env->sd, env->dst_cpu);
- }
+ if (local_group && (env->idle != CPU_NEWLY_IDLE ||
+ time_after_eq(jiffies, group->sgp->next_update)))
+ update_group_power(env->sd, env->dst_cpu);
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
+ sgs->group_power = group->sgp->power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of a task.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
if (sgs->sum_nr_running)
- avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+ sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
- (max_nr_running - min_nr_running) > 1)
- sgs->group_imb = 1;
+ sgs->group_imb = sg_imbalanced(sgs, &sgi);
+
+ sgs->group_capacity =
+ DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
- SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(env->sd, group);
+
sgs->group_weight = group->group_weight;
if (sgs->group_capacity > sgs->sum_nr_running)
@@ -4586,7 +4667,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
struct sched_group *sg,
struct sg_lb_stats *sgs)
{
- if (sgs->avg_load <= sds->max_load)
+ if (sgs->avg_load <= sds->busiest_stat.avg_load)
return false;
if (sgs->sum_nr_running > sgs->group_capacity)
@@ -4619,11 +4700,11 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env,
- int *balance, struct sd_lb_stats *sds)
+ struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
- struct sg_lb_stats sgs;
+ struct sg_lb_stats tmp_sgs;
int load_idx, prefer_sibling = 0;
if (child && child->flags & SD_PREFER_SIBLING)
@@ -4632,17 +4713,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
load_idx = get_sd_load_idx(env->sd, env->idle);
do {
+ struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
- memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
-
- if (local_group && !(*balance))
- return;
+ if (local_group) {
+ sds->local = sg;
+ sgs = &sds->local_stat;
+ }
- sds->total_load += sgs.group_load;
- sds->total_pwr += sg->sgp->power;
+ memset(sgs, 0, sizeof(*sgs));
+ update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
/*
* In case the child domain prefers tasks go to siblings
@@ -4654,26 +4735,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* heaviest group when it is already under-utilized (possible
* with a large weight task outweighs the tasks on the system).
*/
- if (prefer_sibling && !local_group && sds->this_has_capacity)
- sgs.group_capacity = min(sgs.group_capacity, 1UL);
+ if (prefer_sibling && !local_group &&
+ sds->local && sds->local_stat.group_has_capacity)
+ sgs->group_capacity = min(sgs->group_capacity, 1U);
- if (local_group) {
- sds->this_load = sgs.avg_load;
- sds->this = sg;
- sds->this_nr_running = sgs.sum_nr_running;
- sds->this_load_per_task = sgs.sum_weighted_load;
- sds->this_has_capacity = sgs.group_has_capacity;
- sds->this_idle_cpus = sgs.idle_cpus;
- } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
- sds->max_load = sgs.avg_load;
+ /* Now, start updating sd_lb_stats */
+ sds->total_load += sgs->group_load;
+ sds->total_pwr += sgs->group_power;
+
+ if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
- sds->busiest_nr_running = sgs.sum_nr_running;
- sds->busiest_idle_cpus = sgs.idle_cpus;
- sds->busiest_group_capacity = sgs.group_capacity;
- sds->busiest_load_per_task = sgs.sum_weighted_load;
- sds->busiest_has_capacity = sgs.group_has_capacity;
- sds->busiest_group_weight = sgs.group_weight;
- sds->group_imb = sgs.group_imb;
+ sds->busiest_stat = *sgs;
}
sg = sg->next;
@@ -4718,7 +4790,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
return 0;
env->imbalance = DIV_ROUND_CLOSEST(
- sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
+ sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
+ SCHED_POWER_SCALE);
return 1;
}
@@ -4736,24 +4809,23 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
unsigned long tmp, pwr_now = 0, pwr_move = 0;
unsigned int imbn = 2;
unsigned long scaled_busy_load_per_task;
+ struct sg_lb_stats *local, *busiest;
- if (sds->this_nr_running) {
- sds->this_load_per_task /= sds->this_nr_running;
- if (sds->busiest_load_per_task >
- sds->this_load_per_task)
- imbn = 1;
- } else {
- sds->this_load_per_task =
- cpu_avg_load_per_task(env->dst_cpu);
- }
+ local = &sds->local_stat;
+ busiest = &sds->busiest_stat;
+
+ if (!local->sum_nr_running)
+ local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
+ else if (busiest->load_per_task > local->load_per_task)
+ imbn = 1;
- scaled_busy_load_per_task = sds->busiest_load_per_task
- * SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->sgp->power;
+ scaled_busy_load_per_task =
+ (busiest->load_per_task * SCHED_POWER_SCALE) /
+ busiest->group_power;
- if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
- (scaled_busy_load_per_task * imbn)) {
- env->imbalance = sds->busiest_load_per_task;
+ if (busiest->avg_load + scaled_busy_load_per_task >=
+ local->avg_load + (scaled_busy_load_per_task * imbn)) {
+ env->imbalance = busiest->load_per_task;
return;
}
@@ -4763,34 +4835,37 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
* moving them.
*/
- pwr_now += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load);
+ pwr_now += busiest->group_power *
+ min(busiest->load_per_task, busiest->avg_load);
+ pwr_now += local->group_power *
+ min(local->load_per_task, local->avg_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->sgp->power;
- if (sds->max_load > tmp)
- pwr_move += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load - tmp);
+ tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
+ busiest->group_power;
+ if (busiest->avg_load > tmp) {
+ pwr_move += busiest->group_power *
+ min(busiest->load_per_task,
+ busiest->avg_load - tmp);
+ }
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->sgp->power <
- sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->sgp->power) /
- sds->this->sgp->power;
- else
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->sgp->power;
- pwr_move += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load + tmp);
+ if (busiest->avg_load * busiest->group_power <
+ busiest->load_per_task * SCHED_POWER_SCALE) {
+ tmp = (busiest->avg_load * busiest->group_power) /
+ local->group_power;
+ } else {
+ tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
+ local->group_power;
+ }
+ pwr_move += local->group_power *
+ min(local->load_per_task, local->avg_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
/* Move if we gain throughput */
if (pwr_move > pwr_now)
- env->imbalance = sds->busiest_load_per_task;
+ env->imbalance = busiest->load_per_task;
}
/**
@@ -4802,11 +4877,18 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
unsigned long max_pull, load_above_capacity = ~0UL;
+ struct sg_lb_stats *local, *busiest;
+
+ local = &sds->local_stat;
+ busiest = &sds->busiest_stat;
- sds->busiest_load_per_task /= sds->busiest_nr_running;
- if (sds->group_imb) {
- sds->busiest_load_per_task =
- min(sds->busiest_load_per_task, sds->avg_load);
+ if (busiest->group_imb) {
+ /*
+ * In the group_imb case we cannot rely on group-wide averages
+ * to ensure cpu-load equilibrium, look at wider averages. XXX
+ */
+ busiest->load_per_task =
+ min(busiest->load_per_task, sds->avg_load);
}
/*
@@ -4814,21 +4896,23 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* max load less than avg load (as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (sds->max_load < sds->avg_load) {
+ if (busiest->avg_load <= sds->avg_load ||
+ local->avg_load >= sds->avg_load) {
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
- if (!sds->group_imb) {
+ if (!busiest->group_imb) {
/*
* Don't want to pull so many tasks that a group would go idle.
+ * Except of course for the group_imb case, since then we might
+ * have to drop below capacity to reach cpu-load equilibrium.
*/
- load_above_capacity = (sds->busiest_nr_running -
- sds->busiest_group_capacity);
+ load_above_capacity =
+ (busiest->sum_nr_running - busiest->group_capacity);
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-
- load_above_capacity /= sds->busiest->sgp->power;
+ load_above_capacity /= busiest->group_power;
}
/*
@@ -4838,15 +4922,14 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* we also don't want to reduce the group load below the group capacity
* (so that we can implement power-savings policies etc). Thus we look
* for the minimum possible imbalance.
- * Be careful of negative numbers as they'll appear as very large values
- * with unsigned longs.
*/
- max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
+ max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- env->imbalance = min(max_pull * sds->busiest->sgp->power,
- (sds->avg_load - sds->this_load) * sds->this->sgp->power)
- / SCHED_POWER_SCALE;
+ env->imbalance = min(
+ max_pull * busiest->group_power,
+ (sds->avg_load - local->avg_load) * local->group_power
+ ) / SCHED_POWER_SCALE;
/*
* if *imbalance is less than the average load per runnable task
@@ -4854,9 +4937,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* a think about bumping its value to force at least one task to be
* moved
*/
- if (env->imbalance < sds->busiest_load_per_task)
+ if (env->imbalance < busiest->load_per_task)
return fix_small_imbalance(env, sds);
-
}
/******* find_busiest_group() helpers end here *********************/
@@ -4872,69 +4954,62 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* to restore balance.
*
* @env: The load balancing environment.
- * @balance: Pointer to a variable indicating if this_cpu
- * is the appropriate cpu to perform load balancing at this_level.
*
* Return: - The busiest group if imbalance exists.
* - If no imbalance and user has opted for power-savings balance,
* return the least loaded group whose CPUs can be
* put to idle by rebalancing its tasks onto our group.
*/
-static struct sched_group *
-find_busiest_group(struct lb_env *env, int *balance)
+static struct sched_group *find_busiest_group(struct lb_env *env)
{
+ struct sg_lb_stats *local, *busiest;
struct sd_lb_stats sds;
- memset(&sds, 0, sizeof(sds));
+ init_sd_lb_stats(&sds);
/*
* Compute the various statistics relevant for load balancing at
* this level.
*/
- update_sd_lb_stats(env, balance, &sds);
-
- /*
- * this_cpu is not the appropriate cpu to perform load balancing at
- * this level.
- */
- if (!(*balance))
- goto ret;
+ update_sd_lb_stats(env, &sds);
+ local = &sds.local_stat;
+ busiest = &sds.busiest_stat;
if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
check_asym_packing(env, &sds))
return sds.busiest;
/* There is no busy sibling group to pull tasks from */
- if (!sds.busiest || sds.busiest_nr_running == 0)
+ if (!sds.busiest || busiest->sum_nr_running == 0)
goto out_balanced;
sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
/*
* If the busiest group is imbalanced the below checks don't
- * work because they assumes all things are equal, which typically
+ * work because they assume all things are equal, which typically
* isn't true due to cpus_allowed constraints and the like.
*/
- if (sds.group_imb)
+ if (busiest->group_imb)
goto force_balance;
/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
- !sds.busiest_has_capacity)
+ if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
+ !busiest->group_has_capacity)
goto force_balance;
/*
* If the local group is more busy than the selected busiest group
* don't try and pull any tasks.
*/
- if (sds.this_load >= sds.max_load)
+ if (local->avg_load >= busiest->avg_load)
goto out_balanced;
/*
* Don't pull any tasks if this group is already above the domain
* average load.
*/
- if (sds.this_load >= sds.avg_load)
+ if (local->avg_load >= sds.avg_load)
goto out_balanced;
if (env->idle == CPU_IDLE) {
@@ -4944,15 +5019,16 @@ find_busiest_group(struct lb_env *env, int *balance)
* there is no imbalance between this and busiest group
* wrt to idle cpu's, it is balanced.
*/
- if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
- sds.busiest_nr_running <= sds.busiest_group_weight)
+ if ((local->idle_cpus < busiest->idle_cpus) &&
+ busiest->sum_nr_running <= busiest->group_weight)
goto out_balanced;
} else {
/*
* In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
* imbalance_pct to be conservative.
*/
- if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
+ if (100 * busiest->avg_load <=
+ env->sd->imbalance_pct * local->avg_load)
goto out_balanced;
}
@@ -4962,7 +5038,6 @@ force_balance:
return sds.busiest;
out_balanced:
-ret:
env->imbalance = 0;
return NULL;
}
@@ -4974,10 +5049,10 @@ static struct rq *find_busiest_queue(struct lb_env *env,
struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
- unsigned long max_load = 0;
+ unsigned long busiest_load = 0, busiest_power = 1;
int i;
- for_each_cpu(i, sched_group_cpus(group)) {
+ for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long power = power_of(i);
unsigned long capacity = DIV_ROUND_CLOSEST(power,
SCHED_POWER_SCALE);
@@ -4986,9 +5061,6 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- if (!cpumask_test_cpu(i, env->cpus))
- continue;
-
rq = cpu_rq(i);
wl = weighted_cpuload(i);
@@ -5004,11 +5076,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
* the weighted_cpuload() scaled with the cpu power, so that
* the load can be moved away from the cpu that is potentially
* running at a lower capacity.
+ *
+ * Thus we're looking for max(wl_i / power_i); cross-multiplying
+ * to rid ourselves of the division works out to:
+ * wl_i * power_j > wl_j * power_i, where j is our
+ * previous maximum.
*/
- wl = (wl * SCHED_POWER_SCALE) / power;
-
- if (wl > max_load) {
- max_load = wl;
+ if (wl * busiest_power > busiest_load * power) {
+ busiest_load = wl;
+ busiest_power = power;
busiest = rq;
}
}
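The comment above describes the cross-multiplication trick: rather than dividing each candidate's weighted load by its power, find_busiest_queue() compares wl_i * power_j against wl_j * power_i for the running maximum j. A sketch of that selection over illustrative arrays:

#include <stdio.h>

int main(void)
{
	unsigned long wl[]    = { 2048, 3072, 1536 };   /* weighted loads */
	unsigned long power[] = { 1024, 2048, 512 };    /* cpu powers */
	unsigned long busiest_load = 0, busiest_power = 1;
	int busiest = -1;

	for (int i = 0; i < 3; i++) {
		/* wl[i]/power[i] > busiest_load/busiest_power, without dividing */
		if (wl[i] * busiest_power > busiest_load * power[i]) {
			busiest_load = wl[i];
			busiest_power = power[i];
			busiest = i;
		}
	}
	/* index 2 wins: 1536/512 = 3 is the largest load-to-power ratio */
	printf("busiest cpu index = %d\n", busiest);
	return 0;
}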
@@ -5045,13 +5121,47 @@ static int need_active_balance(struct lb_env *env)
static int active_load_balance_cpu_stop(void *data);
+static int should_we_balance(struct lb_env *env)
+{
+ struct sched_group *sg = env->sd->groups;
+ struct cpumask *sg_cpus, *sg_mask;
+ int cpu, balance_cpu = -1;
+
+ /*
+ * In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
+ */
+ if (env->idle == CPU_NEWLY_IDLE)
+ return 1;
+
+ sg_cpus = sched_group_cpus(sg);
+ sg_mask = sched_group_mask(sg);
+ /* Try to find first idle cpu */
+ for_each_cpu_and(cpu, sg_cpus, env->cpus) {
+ if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+ continue;
+
+ balance_cpu = cpu;
+ break;
+ }
+
+ if (balance_cpu == -1)
+ balance_cpu = group_balance_cpu(sg);
+
+ /*
+ * First idle cpu or the first cpu (busiest) in this sched group
+ * is eligible for doing load balancing at this and above domains.
+ */
+ return balance_cpu == env->dst_cpu;
+}
+
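should_we_balance() lets exactly one CPU per group run this balance pass: the first idle CPU in the group's balance mask, or the group's designated balance CPU if none is idle. The sketch below models the masks as plain bitmaps and the fallback as the lowest CPU in the group, which is a simplification of group_balance_cpu().

#include <stdio.h>

/* Pick the balancer: first idle cpu in the mask, else the lowest cpu set.
 * Assumes a non-empty group mask; __builtin_ctzl() is a GCC/Clang builtin. */
static int pick_balance_cpu(unsigned long group_mask, unsigned long idle_mask)
{
	unsigned long idle_in_group = group_mask & idle_mask;
	unsigned long pick = idle_in_group ? idle_in_group : group_mask;

	return __builtin_ctzl(pick);    /* lowest set bit */
}

static int should_we_balance(int this_cpu, unsigned long group_mask,
			     unsigned long idle_mask)
{
	return pick_balance_cpu(group_mask, idle_mask) == this_cpu;
}

int main(void)
{
	unsigned long group = 0x0f;   /* cpus 0-3 */
	unsigned long idle  = 0x04;   /* cpu 2 is idle */

	printf("%d\n", should_we_balance(2, group, idle));   /* 1 */
	printf("%d\n", should_we_balance(0, group, idle));   /* 0 */
	return 0;
}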
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance)
+ int *continue_balancing)
{
int ld_moved, cur_ld_moved, active_balance = 0;
struct sched_group *group;
@@ -5081,11 +5191,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(&env, balance);
-
- if (*balance == 0)
+ if (!should_we_balance(&env)) {
+ *continue_balancing = 0;
goto out_balanced;
+ }
+ group = find_busiest_group(&env);
if (!group) {
schedstat_inc(sd, lb_nobusyg[idle]);
goto out_balanced;
@@ -5114,7 +5225,6 @@ redo:
env.src_rq = busiest;
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
- update_h_load(env.src_cpu);
more_balance:
local_irq_save(flags);
double_rq_lock(env.dst_rq, busiest);
@@ -5298,7 +5408,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
rcu_read_lock();
for_each_domain(this_cpu, sd) {
unsigned long interval;
- int balance = 1;
+ int continue_balancing = 1;
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
@@ -5306,7 +5416,8 @@ void idle_balance(int this_cpu, struct rq *this_rq)
if (sd->flags & SD_BALANCE_NEWIDLE) {
/* If we've pulled tasks over stop searching: */
pulled_task = load_balance(this_cpu, this_rq,
- sd, CPU_NEWLY_IDLE, &balance);
+ sd, CPU_NEWLY_IDLE,
+ &continue_balancing);
}
interval = msecs_to_jiffies(sd->balance_interval);
@@ -5544,7 +5655,7 @@ void update_max_interval(void)
*/
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
- int balance = 1;
+ int continue_balancing = 1;
struct rq *rq = cpu_rq(cpu);
unsigned long interval;
struct sched_domain *sd;
@@ -5576,7 +5687,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance)) {
+ if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
* The LBF_SOME_PINNED logic could have changed
* env->dst_cpu, so we can't know our idle
@@ -5599,7 +5710,7 @@ out:
* CPU in our sched group which is doing load balancing more
* actively.
*/
- if (!balance)
+ if (!continue_balancing)
break;
}
rcu_read_unlock();
@@ -5818,11 +5929,15 @@ static void task_fork_fair(struct task_struct *p)
cfs_rq = task_cfs_rq(current);
curr = cfs_rq->curr;
- if (unlikely(task_cpu(p) != this_cpu)) {
- rcu_read_lock();
- __set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- }
+ /*
+ * Not only the cpu but also the task_group of the parent might have
+ * been changed after parent->se.parent,cfs_rq were copied to
+ * child->se.parent,cfs_rq. So call __set_task_cpu() to make the
+ * child's copies point to valid ones.
+ */
+ rcu_read_lock();
+ __set_task_cpu(p, this_cpu);
+ rcu_read_unlock();
update_curr(cfs_rq);
@@ -5895,11 +6010,9 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
* and ensure we don't carry in an old decay_count if we
* switch back.
*/
- if (p->se.avg.decay_count) {
- struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
- __synchronize_entity_decay(&p->se);
- subtract_blocked_load_contrib(cfs_rq,
- p->se.avg.load_avg_contrib);
+ if (se->avg.decay_count) {
+ __synchronize_entity_decay(se);
+ subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
}
#endif
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef0a7b2439dd..b3c5653e1dca 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -285,7 +285,6 @@ struct cfs_rq {
/* Required to track per-cpu representation of a task_group */
u32 tg_runnable_contrib;
unsigned long tg_load_contrib;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
* h_load = weight * f(tg)
@@ -294,6 +293,9 @@ struct cfs_rq {
* this group.
*/
unsigned long h_load;
+ u64 last_h_load_update;
+ struct sched_entity *h_load_next;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -429,9 +431,6 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-#ifdef CONFIG_SMP
- unsigned long h_load_throttle;
-#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
@@ -595,6 +594,7 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
}
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
struct sched_group_power {
@@ -665,9 +665,9 @@ extern int group_balance_cpu(struct sched_group *sg);
/*
* Return the group to which this task belongs.
*
- * We cannot use task_subsys_state() and friends because the cgroup
- * subsystem changes that value before the cgroup_subsys::attach() method
- * is called, therefore we cannot pin it and might observe the wrong value.
+ * We cannot use task_css() and friends because the cgroup subsystem
+ * changes that value before the cgroup_subsys::attach() method is called,
+ * therefore we cannot pin it and might observe the wrong value.
*
* The same is true for autogroup's p->signal->autogroup->tg, the autogroup
* core changes this before calling sched_move_task().
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 5aef494fc8b4..c7edee71bce8 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -104,8 +104,9 @@ static inline void sched_info_queued(struct task_struct *t)
}
/*
- * Called when a process ceases being the active-running process, either
- * voluntarily or involuntarily. Now we can calculate how long we ran.
+ * Called when a process ceases being the active-running process involuntarily
+ * due, typically, to expiring its time slice (this may also be called when
+ * switching to the idle task). Now we can calculate how long we ran.
* Also, if the process is still in the TASK_RUNNING state, call
* sched_info_queued() to mark that it has now again started waiting on
* the runqueue.
diff --git a/kernel/signal.c b/kernel/signal.c
index 50e41075ac77..ded28b91fa53 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3394,7 +3394,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
- ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
if (ret)
return -EFAULT;
sigset_from_compat(&new_ka.sa.sa_mask, &mask);
@@ -3406,7 +3406,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
&oact->sa_handler);
ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
- ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
&oact->sa_restorer);
diff --git a/kernel/smp.c b/kernel/smp.c
index fe9f773d7114..0564571dcdf7 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -48,10 +48,13 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
cpu_to_node(cpu)))
return notifier_from_errno(-ENOMEM);
if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
- cpu_to_node(cpu)))
+ cpu_to_node(cpu))) {
+ free_cpumask_var(cfd->cpumask);
return notifier_from_errno(-ENOMEM);
+ }
cfd->csd = alloc_percpu(struct call_single_data);
if (!cfd->csd) {
+ free_cpumask_var(cfd->cpumask_ipi);
free_cpumask_var(cfd->cpumask);
return notifier_from_errno(-ENOMEM);
}
@@ -186,25 +189,13 @@ void generic_smp_call_function_single_interrupt(void)
while (!list_empty(&list)) {
struct call_single_data *csd;
- unsigned int csd_flags;
csd = list_entry(list.next, struct call_single_data, list);
list_del(&csd->list);
- /*
- * 'csd' can be invalid after this call if flags == 0
- * (when called through generic_exec_single()),
- * so save them away before making the call:
- */
- csd_flags = csd->flags;
-
csd->func(csd->info);
- /*
- * Unlocked CSDs are valid through generic_exec_single():
- */
- if (csd_flags & CSD_FLAG_LOCK)
- csd_unlock(csd);
+ csd_unlock(csd);
}
}
@@ -278,8 +269,6 @@ EXPORT_SYMBOL(smp_call_function_single);
* @wait: If true, wait until function has completed.
*
* Returns 0 on success, else a negative status code (if no cpus were online).
- * Note that @wait will be implicitly turned on in case of allocation failures,
- * since we fall back to on-stack allocation.
*
* Selection preference:
* 1) current cpu if in @mask
@@ -586,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu);
*
* If @wait is true, then returns once @func has returned.
*
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. The
+ * exception is that it may be used during early boot while
+ * early_boot_irqs_disabled is set.
*/
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
void *info, bool wait)
@@ -596,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(cpu, mask)) {
- local_irq_disable();
+ unsigned long flags;
+ local_irq_save(flags);
func(info);
- local_irq_enable();
+ local_irq_restore(flags);
}
put_cpu();
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index be3d3514c325..d7d498d8cc4f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -328,10 +328,19 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
- if (!force_irqthreads)
- __do_softirq();
- else
+ if (!force_irqthreads) {
+ /*
+ * We can safely execute softirq on the current stack if
+ * it is the irq stack, because it should be near empty
+ * at this stage. But we have no way to know if the arch
+ * calls irq_exit() on the irq stack. So call softirq
+ * in its own stack to prevent from any overrun on top
+ * of a potentially deep task stack.
+ */
+ do_softirq();
+ } else {
wakeup_softirqd();
+ }
}
static inline void tick_irq_exit(void)
@@ -876,7 +885,6 @@ int __init __weak early_irq_init(void)
return 0;
}
-#ifdef CONFIG_GENERIC_HARDIRQS
int __init __weak arch_probe_nr_irqs(void)
{
return NR_IRQS_LEGACY;
@@ -886,4 +894,3 @@ int __init __weak arch_early_irq_init(void)
{
return 0;
}
-#endif
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5cdd8065a3ce..4b082b5cac9e 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -34,6 +34,20 @@
#else
#define raw_read_can_lock(l) read_can_lock(l)
#define raw_write_can_lock(l) write_can_lock(l)
+
+/*
+ * Some architectures can relax in favour of the CPU owning the lock.
+ */
+#ifndef arch_read_relax
+# define arch_read_relax(l) cpu_relax()
+#endif
+#ifndef arch_write_relax
+# define arch_write_relax(l) cpu_relax()
+#endif
+#ifndef arch_spin_relax
+# define arch_spin_relax(l) cpu_relax()
+#endif
+
/*
* We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function
diff --git a/kernel/sys.c b/kernel/sys.c
index 771129b299f8..c18ecca575b4 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -337,7 +337,7 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
if (rgid != (gid_t) -1) {
if (gid_eq(old->gid, krgid) ||
gid_eq(old->egid, krgid) ||
- nsown_capable(CAP_SETGID))
+ ns_capable(old->user_ns, CAP_SETGID))
new->gid = krgid;
else
goto error;
@@ -346,7 +346,7 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
if (gid_eq(old->gid, kegid) ||
gid_eq(old->egid, kegid) ||
gid_eq(old->sgid, kegid) ||
- nsown_capable(CAP_SETGID))
+ ns_capable(old->user_ns, CAP_SETGID))
new->egid = kegid;
else
goto error;
@@ -387,7 +387,7 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
old = current_cred();
retval = -EPERM;
- if (nsown_capable(CAP_SETGID))
+ if (ns_capable(old->user_ns, CAP_SETGID))
new->gid = new->egid = new->sgid = new->fsgid = kgid;
else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
new->egid = new->fsgid = kgid;
@@ -471,7 +471,7 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
new->uid = kruid;
if (!uid_eq(old->uid, kruid) &&
!uid_eq(old->euid, kruid) &&
- !nsown_capable(CAP_SETUID))
+ !ns_capable(old->user_ns, CAP_SETUID))
goto error;
}
@@ -480,7 +480,7 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
if (!uid_eq(old->uid, keuid) &&
!uid_eq(old->euid, keuid) &&
!uid_eq(old->suid, keuid) &&
- !nsown_capable(CAP_SETUID))
+ !ns_capable(old->user_ns, CAP_SETUID))
goto error;
}
@@ -534,7 +534,7 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
old = current_cred();
retval = -EPERM;
- if (nsown_capable(CAP_SETUID)) {
+ if (ns_capable(old->user_ns, CAP_SETUID)) {
new->suid = new->uid = kuid;
if (!uid_eq(kuid, old->uid)) {
retval = set_user(new);
@@ -591,7 +591,7 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
old = current_cred();
retval = -EPERM;
- if (!nsown_capable(CAP_SETUID)) {
+ if (!ns_capable(old->user_ns, CAP_SETUID)) {
if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
@@ -673,7 +673,7 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
old = current_cred();
retval = -EPERM;
- if (!nsown_capable(CAP_SETGID)) {
+ if (!ns_capable(old->user_ns, CAP_SETGID)) {
if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
goto error;
@@ -744,7 +744,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
- nsown_capable(CAP_SETUID)) {
+ ns_capable(old->user_ns, CAP_SETUID)) {
if (!uid_eq(kuid, old->fsuid)) {
new->fsuid = kuid;
if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
@@ -783,7 +783,7 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
- nsown_capable(CAP_SETGID)) {
+ ns_capable(old->user_ns, CAP_SETGID)) {
if (!gid_eq(kgid, old->fsgid)) {
new->fsgid = kgid;
goto change_okay;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 07f6fc468e17..b2f06f3c6a3f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1225,7 +1225,7 @@ static struct ctl_table vm_table[] = {
.data = &hugepages_treat_as_movable,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = hugetlb_treat_movable_handler,
+ .proc_handler = proc_dointvec,
},
{
.procname = "nr_overcommit_hugepages",
@@ -1471,14 +1471,14 @@ static struct ctl_table fs_table[] = {
{
.procname = "inode-nr",
.data = &inodes_stat,
- .maxlen = 2*sizeof(int),
+ .maxlen = 2*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_inodes,
},
{
.procname = "inode-state",
.data = &inodes_stat,
- .maxlen = 7*sizeof(int),
+ .maxlen = 7*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_inodes,
},
@@ -1508,7 +1508,7 @@ static struct ctl_table fs_table[] = {
{
.procname = "dentry-state",
.data = &dentry_stat,
- .maxlen = 6*sizeof(int),
+ .maxlen = 6*sizeof(long),
.mode = 0444,
.proc_handler = proc_nr_dentry,
},
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 65bd3c92d6f3..8727032e3a6f 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -4,6 +4,23 @@
static struct callback_head work_exited; /* all we need is ->next == NULL */
+/**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+ * @work: the callback to run
+ * @notify: send the notification if true
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify.
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task returns from kernel
+ * mode or exits.
+ *
+ * This is like the signal handler which runs in kernel mode, but it doesn't
+ * try to wake up the @task.
+ *
+ * RETURNS:
+ * 0 if it succeeds, or -ESRCH if the @task is exiting/exited.
+ */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
@@ -21,11 +38,22 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
return 0;
}
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from the queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
struct callback_head **pprev = &task->task_works;
- struct callback_head *work = NULL;
+ struct callback_head *work;
unsigned long flags;
/*
* If cmpxchg() fails we continue without updating pprev.
@@ -35,7 +63,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
*/
raw_spin_lock_irqsave(&task->pi_lock, flags);
while ((work = ACCESS_ONCE(*pprev))) {
- read_barrier_depends();
+ smp_read_barrier_depends();
if (work->func != func)
pprev = &work->next;
else if (cmpxchg(pprev, work, work->next) == work)
@@ -46,6 +74,14 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
return work;
}
+/**
+ * task_work_run - execute the works added by task_work_add()
+ *
+ * Flush the pending works. Should be used by the core kernel code.
+ * Called before the task returns to user mode or stops, or when
+ * it exits. In the latter case task_work_add() can no longer add
+ * new work after task_work_run() returns.
+ */
void task_work_run(void)
{
struct task_struct *task = current;
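The task_work kerneldoc added above describes the whole API surface. Below is a minimal in-kernel usage sketch; it is an illustration only, not part of this patch, and the my_twork_func/queue_my_twork names are invented:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

/* Runs in the context of the target task, at the kernel/user boundary. */
static void my_twork_func(struct callback_head *head)
{
	pr_info("task_work ran in %s (pid %d)\n", current->comm, current->pid);
	kfree(head);
}

/* Queue the callback on @task; 0 on success, -ESRCH if @task is exiting. */
static int queue_my_twork(struct task_struct *task)
{
	struct callback_head *head = kmalloc(sizeof(*head), GFP_KERNEL);

	if (!head)
		return -ENOMEM;
	init_task_work(head, my_twork_func);
	if (task_work_add(task, head, true)) {	/* true == notify the task */
		kfree(head);
		return -ESRCH;
	}
	return 0;
}

Passing notify=true prods @task so that the callback runs soon after it next crosses the kernel/user boundary, as described in the kerneldoc above.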
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 70f27e89012b..2b62fe86f9ec 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -105,7 +105,6 @@ config NO_HZ_FULL
select RCU_USER_QS
select RCU_NOCB_CPU
select VIRT_CPU_ACCOUNTING_GEN
- select CONTEXT_TRACKING_FORCE
select IRQ_WORK
help
Adaptively try to shutdown the tick whenever possible, even when
@@ -134,6 +133,56 @@ config NO_HZ_FULL_ALL
Note the boot CPU will still be kept outside the range to
handle the timekeeping duty.
+config NO_HZ_FULL_SYSIDLE
+ bool "Detect full-system idle state for full dynticks system"
+ depends on NO_HZ_FULL
+ default n
+ help
+ At least one CPU must keep the scheduling-clock tick running for
+ timekeeping purposes whenever there is a non-idle CPU, where
+ "non-idle" also includes dynticks CPUs as long as they are
+ running non-idle tasks. Because the underlying adaptive-tick
+ support cannot distinguish between all CPUs being idle and
+ all CPUs each running a single task in dynticks mode, the
+ underlying support simply ensures that there is always a CPU
+ handling the scheduling-clock tick, whether or not all CPUs
+ are idle. This Kconfig option enables scalable detection of
+ the all-CPUs-idle state, thus allowing the scheduling-clock
+ tick to be disabled when all CPUs are idle. Note that scalable
+ detection of the all-CPUs-idle state means that larger systems
+ will be slower to declare the all-CPUs-idle state.
+
+ Say Y if you would like to help debug all-CPUs-idle detection.
+
+ Say N if you are unsure.
+
+config NO_HZ_FULL_SYSIDLE_SMALL
+ int "Number of CPUs above which large-system approach is used"
+ depends on NO_HZ_FULL_SYSIDLE
+ range 1 NR_CPUS
+ default 8
+ help
+ The full-system idle detection mechanism takes a lazy approach
+ on large systems, as is required to attain decent scalability.
+ However, on smaller systems, scalability is not anywhere near as
+ large a concern as is energy efficiency. The sysidle subsystem
+ therefore uses a fast but non-scalable algorithm for small
+ systems and a lazier but scalable algorithm for large systems.
+ This Kconfig parameter defines the number of CPUs in the largest
+ system that will be considered to be "small".
+
+ The default value will be fine in most cases. Battery-powered
+ systems that (1) enable NO_HZ_FULL_SYSIDLE, (2) have larger
+ numbers of CPUs, and (3) are suffering from battery-lifetime
+ problems due to long sysidle latencies might wish to experiment
+ with larger values for this Kconfig parameter. On the other
+ hand, they might be even better served by disabling NO_HZ_FULL
+ entirely, given that NO_HZ_FULL is intended for HPC and
+ real-time workloads that at present do not tend to be run on
+ battery-powered systems.
+
+ Take the default if you are unsure.
+
config NO_HZ
bool "Old Idle dynticks config"
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 38959c866789..662c5798a685 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -33,29 +33,64 @@ struct ce_unbind {
int res;
};
-/**
- * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
- * @latch: value to convert
- * @evt: pointer to clock event device descriptor
- *
- * Math helper, returns latch value converted to nanoseconds (bound checked)
- */
-u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
+ bool ismax)
{
u64 clc = (u64) latch << evt->shift;
+ u64 rnd;
if (unlikely(!evt->mult)) {
evt->mult = 1;
WARN_ON(1);
}
+ rnd = (u64) evt->mult - 1;
+
+ /*
+ * Upper bound sanity check. If the backwards conversion is
+ * not equal to latch, we know that the above shift overflowed.
+ */
+ if ((clc >> evt->shift) != (u64)latch)
+ clc = ~0ULL;
+
+ /*
+ * Scaled math oddities:
+ *
+ * For mult <= (1 << shift) we can safely add mult - 1 to
+ * prevent integer rounding loss. So the backwards conversion
+ * from nsec to device ticks will be correct.
+ *
+ * For mult > (1 << shift), i.e. device frequency is > 1GHz we
+ * need to be careful. Adding mult - 1 will result in a value
+ * which when converted back to device ticks can be larger
+ * than latch by up to (mult - 1) >> shift. For the min_delta
+ * calculation we still want to apply this in order to stay
+ * above the minimum device ticks limit. For the upper limit
+ * we would end up with a latch value larger than the upper
+ * limit of the device, so we omit the add to stay below the
+ * device upper boundary.
+ *
+ * Also omit the add if it would overflow the u64 boundary.
+ */
+ if ((~0ULL - clc > rnd) &&
+ (!ismax || evt->mult <= (1U << evt->shift)))
+ clc += rnd;
do_div(clc, evt->mult);
- if (clc < 1000)
- clc = 1000;
- if (clc > KTIME_MAX)
- clc = KTIME_MAX;
- return clc;
+ /* Deltas less than 1usec are pointless noise */
+ return clc > 1000 ? clc : 1000;
+}
+
+/**
+ * clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
+ * @latch: value to convert
+ * @evt: pointer to clock event device descriptor
+ *
+ * Math helper, returns latch value converted to nanoseconds (bound checked)
+ */
+u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
+{
+ return cev_delta2ns(latch, evt, false);
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
@@ -380,8 +415,8 @@ void clockevents_config(struct clock_event_device *dev, u32 freq)
sec = 600;
clockevents_calc_mult_shift(dev, freq, sec);
- dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
- dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
+ dev->min_delta_ns = cev_delta2ns(dev->min_delta_ticks, dev, false);
+ dev->max_delta_ns = cev_delta2ns(dev->max_delta_ticks, dev, true);
}
/**
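The rounding rules spelled out in the cev_delta2ns() comment above are easier to see with concrete numbers. The stand-alone user-space sketch below mirrors that arithmetic; the mult/shift/latch values are invented for illustration (a hypothetical ~4GHz device with a 16-bit counter), not taken from real hardware:

#include <stdint.h>
#include <stdio.h>

/* Mirrors cev_delta2ns(): add (mult - 1) before dividing, except for the
 * max conversion on devices where mult > (1 << shift). */
static uint64_t delta2ns(uint64_t latch, uint32_t mult, uint32_t shift, int ismax)
{
	uint64_t clc = latch << shift;
	uint64_t rnd = (uint64_t)mult - 1;

	if ((clc >> shift) != latch)
		clc = ~0ULL;				/* shift overflowed */
	if ((~0ULL - clc > rnd) && (!ismax || mult <= (1U << shift)))
		clc += rnd;
	clc /= mult;
	return clc > 1000 ? clc : 1000;			/* at least 1 usec */
}

int main(void)
{
	uint32_t mult = 4U << 24, shift = 24;	/* ~4 GHz: mult > (1 << shift) */
	uint64_t latch = 0xffff;		/* 16-bit counter limit */
	uint64_t min_ns = delta2ns(latch, mult, shift, 0);
	uint64_t max_ns = delta2ns(latch, mult, shift, 1);

	/* Converting back to device ticks the way the core does (ns*mult>>shift)
	 * shows why the add must be skipped for the upper limit. */
	printf("min-style: %llu ns -> %llu ticks\n",
	       (unsigned long long)min_ns,
	       (unsigned long long)((min_ns * mult) >> shift));
	printf("max-style: %llu ns -> %llu ticks (device limit %llu)\n",
	       (unsigned long long)max_ns,
	       (unsigned long long)((max_ns * mult) >> shift),
	       (unsigned long long)latch);
	return 0;
}

With these values the min-style conversion rounds up to 16384 ns (65536 ticks, never below the requested minimum), while the max-style conversion stays at 16383 ns (65532 ticks, never above the device limit of 65535).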
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 8f5b3b98577b..bb2215174f05 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
}
-static void notify_cmos_timer(void)
+void ntp_notify_cmos_timer(void)
{
schedule_delayed_work(&sync_cmos_work, 0);
}
#else
-static inline void notify_cmos_timer(void) { }
+void ntp_notify_cmos_timer(void) { }
#endif
@@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
if (!(time_status & STA_NANO))
txc->time.tv_usec /= NSEC_PER_USEC;
- notify_cmos_timer();
-
return result;
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e8a1516cc0a3..3612fc77f834 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -23,6 +23,7 @@
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/perf_event.h>
+#include <linux/context_tracking.h>
#include <asm/irq_regs.h>
@@ -148,8 +149,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
}
#ifdef CONFIG_NO_HZ_FULL
-static cpumask_var_t nohz_full_mask;
-bool have_nohz_full_mask;
+cpumask_var_t tick_nohz_full_mask;
+bool tick_nohz_full_running;
static bool can_stop_full_tick(void)
{
@@ -182,7 +183,7 @@ static bool can_stop_full_tick(void)
* Don't allow the user to think they can get
* full NO_HZ with this machine.
*/
- WARN_ONCE(have_nohz_full_mask,
+ WARN_ONCE(tick_nohz_full_running,
"NO_HZ FULL will not work with unstable sched clock");
return false;
}
@@ -197,7 +198,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
* Re-evaluate the need for the tick on the current CPU
* and restart it if necessary.
*/
-void tick_nohz_full_check(void)
+void __tick_nohz_full_check(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
@@ -211,7 +212,7 @@ void tick_nohz_full_check(void)
static void nohz_full_kick_work_func(struct irq_work *work)
{
- tick_nohz_full_check();
+ __tick_nohz_full_check();
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -230,7 +231,7 @@ void tick_nohz_full_kick(void)
static void nohz_full_kick_ipi(void *info)
{
- tick_nohz_full_check();
+ __tick_nohz_full_check();
}
/*
@@ -239,12 +240,13 @@ static void nohz_full_kick_ipi(void *info)
*/
void tick_nohz_full_kick_all(void)
{
- if (!have_nohz_full_mask)
+ if (!tick_nohz_full_running)
return;
preempt_disable();
- smp_call_function_many(nohz_full_mask,
+ smp_call_function_many(tick_nohz_full_mask,
nohz_full_kick_ipi, NULL, false);
+ tick_nohz_full_kick();
preempt_enable();
}
@@ -253,7 +255,7 @@ void tick_nohz_full_kick_all(void)
* It might need the tick due to per task/process properties:
* perf events, posix cpu timers, ...
*/
-void tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(struct task_struct *tsk)
{
unsigned long flags;
@@ -269,31 +271,23 @@ out:
local_irq_restore(flags);
}
-int tick_nohz_full_cpu(int cpu)
-{
- if (!have_nohz_full_mask)
- return 0;
-
- return cpumask_test_cpu(cpu, nohz_full_mask);
-}
-
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
int cpu;
- alloc_bootmem_cpumask_var(&nohz_full_mask);
- if (cpulist_parse(str, nohz_full_mask) < 0) {
+ alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+ if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
return 1;
}
cpu = smp_processor_id();
- if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+ if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
- cpumask_clear_cpu(cpu, nohz_full_mask);
+ cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
- have_nohz_full_mask = true;
+ tick_nohz_full_running = true;
return 1;
}
@@ -311,7 +305,7 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
* If we handle the timekeeping duty for full dynticks CPUs,
* we can't safely shutdown that CPU.
*/
- if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
+ if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
return NOTIFY_BAD;
break;
}
@@ -330,31 +324,34 @@ static int tick_nohz_init_all(void)
int err = -1;
#ifdef CONFIG_NO_HZ_FULL_ALL
- if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+ if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
return err;
}
err = 0;
- cpumask_setall(nohz_full_mask);
- cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
- have_nohz_full_mask = true;
+ cpumask_setall(tick_nohz_full_mask);
+ cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
+ tick_nohz_full_running = true;
#endif
return err;
}
void __init tick_nohz_init(void)
{
- if (!have_nohz_full_mask) {
+ int cpu;
+
+ if (!tick_nohz_full_running) {
if (tick_nohz_init_all() < 0)
return;
}
+ for_each_cpu(cpu, tick_nohz_full_mask)
+ context_tracking_cpu_set(cpu);
+
cpu_notifier(tick_nohz_cpu_down_callback, 0);
- cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
+ cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
}
-#else
-#define have_nohz_full_mask (0)
#endif
/*
@@ -732,7 +729,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
}
- if (have_nohz_full_mask) {
+ if (tick_nohz_full_enabled()) {
/*
* Keep the tick alive to guarantee timekeeping progression
* if there are full dynticks CPUs around
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 48b9fffabdc2..947ba25a95a0 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
write_seqcount_end(&timekeeper_seq);
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+ ntp_notify_cmos_timer();
+
return ret;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a6d098c6df3f..03cf44ac54d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1978,12 +1978,27 @@ int __weak ftrace_arch_code_modify_post_process(void)
void ftrace_modify_all_code(int command)
{
+ int update = command & FTRACE_UPDATE_TRACE_FUNC;
+
+ /*
+ * If the ftrace_caller calls a ftrace_ops func directly,
+ * we need to make sure that it only traces functions it
+ * expects to trace. When doing the switch of functions,
+ * we need to update to the ftrace_ops_list_func first
+ * before the transition between old and new calls are set,
+ * as the ftrace_ops_list_func will check the ops hashes
+ * to make sure the ops have the right functions
+ * traced.
+ */
+ if (update)
+ ftrace_update_ftrace_func(ftrace_ops_list_func);
+
if (command & FTRACE_UPDATE_CALLS)
ftrace_replace_code(1);
else if (command & FTRACE_DISABLE_CALLS)
ftrace_replace_code(0);
- if (command & FTRACE_UPDATE_TRACE_FUNC)
+ if (update && ftrace_trace_function != ftrace_ops_list_func)
ftrace_update_ftrace_func(ftrace_trace_function);
if (command & FTRACE_START_FUNC_RET)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 496f94d57698..7974ba20557d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3166,11 +3166,6 @@ static const struct file_operations show_traces_fops = {
};
/*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
-/*
* The tracer itself will not take this lock, but still we want
* to provide a consistent cpumask to user-space:
*/
@@ -3186,11 +3181,12 @@ static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
+ struct trace_array *tr = file_inode(filp)->i_private;
int len;
mutex_lock(&tracing_cpumask_update_lock);
- len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+ len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
if (count - len < 2) {
count = -EINVAL;
goto out_err;
@@ -3208,7 +3204,7 @@ static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct trace_array *tr = filp->private_data;
+ struct trace_array *tr = file_inode(filp)->i_private;
cpumask_var_t tracing_cpumask_new;
int err, cpu;
@@ -3228,12 +3224,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
* Increase/decrease the disabled counter if we are
* about to flip a bit in the cpumask:
*/
- if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+ if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
}
- if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
cpumask_test_cpu(cpu, tracing_cpumask_new)) {
atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
@@ -3242,7 +3238,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
arch_spin_unlock(&ftrace_max_lock);
local_irq_enable();
- cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+ cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
mutex_unlock(&tracing_cpumask_update_lock);
free_cpumask_var(tracing_cpumask_new);
@@ -3256,9 +3252,10 @@ err_unlock:
}
static const struct file_operations tracing_cpumask_fops = {
- .open = tracing_open_generic,
+ .open = tracing_open_generic_tr,
.read = tracing_cpumask_read,
.write = tracing_cpumask_write,
+ .release = tracing_release_generic_tr,
.llseek = generic_file_llseek,
};
@@ -5938,6 +5935,11 @@ static int new_instance_create(const char *name)
if (!tr->name)
goto out_free_tr;
+ if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+ goto out_free_tr;
+
+ cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
raw_spin_lock_init(&tr->start_lock);
tr->current_trace = &nop_trace;
@@ -5969,6 +5971,7 @@ static int new_instance_create(const char *name)
out_free_tr:
if (tr->trace_buffer.buffer)
ring_buffer_free(tr->trace_buffer.buffer);
+ free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
@@ -6098,6 +6101,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
int cpu;
+ trace_create_file("tracing_cpumask", 0644, d_tracer,
+ tr, &tracing_cpumask_fops);
+
trace_create_file("trace_options", 0644, d_tracer,
tr, &tracing_iter_fops);
@@ -6147,9 +6153,6 @@ static __init int tracer_init_debugfs(void)
init_tracer_debugfs(&global_trace, d_tracer);
- trace_create_file("tracing_cpumask", 0644, d_tracer,
- &global_trace, &tracing_cpumask_fops);
-
trace_create_file("available_tracers", 0444, d_tracer,
&global_trace, &show_traces_fops);
@@ -6371,7 +6374,7 @@ __init static int tracer_alloc_buffers(void)
if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
goto out;
- if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+ if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
goto out_free_buffer_mask;
/* Only allocate trace_printk buffers if a trace_printk exists */
@@ -6386,7 +6389,7 @@ __init static int tracer_alloc_buffers(void)
ring_buf_size = 1;
cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
- cpumask_copy(tracing_cpumask, cpu_all_mask);
+ cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
raw_spin_lock_init(&global_trace.start_lock);
@@ -6441,7 +6444,7 @@ out_free_cpumask:
#ifdef CONFIG_TRACER_MAX_TRACE
free_percpu(global_trace.max_buffer.data);
#endif
- free_cpumask_var(tracing_cpumask);
+ free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
free_cpumask_var(tracing_buffer_mask);
out:
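With tracing_cpumask now living in struct trace_array, every trace instance gets its own tracing_cpumask file via init_tracer_debugfs(). A small user-space sketch of driving it follows; the debugfs mount point and the instance name "demo" are assumptions for illustration, and the instance must already exist (e.g. created with mkdir under instances/):

#include <stdio.h>

int main(void)
{
	/* Per-instance file created by init_tracer_debugfs() above. */
	const char *path =
		"/sys/kernel/debug/tracing/instances/demo/tracing_cpumask";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fputs("3\n", f);	/* hex cpumask: trace only CPUs 0 and 1 */
	return fclose(f) ? 1 : 0;
}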
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index afaae41b0a02..10c86fb7a2b4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -206,6 +206,7 @@ struct trace_array {
struct dentry *event_dir;
struct list_head systems;
struct list_head events;
+ cpumask_var_t tracing_cpumask; /* only trace on set CPUs */
int ref;
};
@@ -1022,6 +1023,9 @@ extern struct list_head ftrace_events;
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
+extern const char *__start___tracepoint_str[];
+extern const char *__stop___tracepoint_str[];
+
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29a7ebcfb426..368a4d50cc30 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1489,12 +1489,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
}
static int
-event_create_dir(struct dentry *parent,
- struct ftrace_event_file *file,
- const struct file_operations *id,
- const struct file_operations *enable,
- const struct file_operations *filter,
- const struct file_operations *format)
+event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
{
struct ftrace_event_call *call = file->event_call;
struct trace_array *tr = file->tr;
@@ -1522,12 +1517,13 @@ event_create_dir(struct dentry *parent,
if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
trace_create_file("enable", 0644, file->dir, file,
- enable);
+ &ftrace_enable_fops);
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
trace_create_file("id", 0444, file->dir,
- (void *)(long)call->event.type, id);
+ (void *)(long)call->event.type,
+ &ftrace_event_id_fops);
#endif
/*
@@ -1544,10 +1540,10 @@ event_create_dir(struct dentry *parent,
}
}
trace_create_file("filter", 0644, file->dir, call,
- filter);
+ &ftrace_event_filter_fops);
trace_create_file("format", 0444, file->dir, call,
- format);
+ &ftrace_event_format_fops);
return 0;
}
@@ -1648,12 +1644,7 @@ trace_create_new_event(struct ftrace_event_call *call,
/* Add an event to a trace directory */
static int
-__trace_add_new_event(struct ftrace_event_call *call,
- struct trace_array *tr,
- const struct file_operations *id,
- const struct file_operations *enable,
- const struct file_operations *filter,
- const struct file_operations *format)
+__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
{
struct ftrace_event_file *file;
@@ -1661,7 +1652,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
if (!file)
return -ENOMEM;
- return event_create_dir(tr->event_dir, file, id, enable, filter, format);
+ return event_create_dir(tr->event_dir, file);
}
/*
@@ -1683,8 +1674,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
}
struct ftrace_module_file_ops;
-static void __add_event_to_tracers(struct ftrace_event_call *call,
- struct ftrace_module_file_ops *file_ops);
+static void __add_event_to_tracers(struct ftrace_event_call *call);
/* Add an additional event_call dynamically */
int trace_add_event_call(struct ftrace_event_call *call)
@@ -1695,7 +1685,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
ret = __register_event(call, NULL);
if (ret >= 0)
- __add_event_to_tracers(call, NULL);
+ __add_event_to_tracers(call);
mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
@@ -1769,100 +1759,21 @@ int trace_remove_event_call(struct ftrace_event_call *call)
#ifdef CONFIG_MODULES
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
- struct list_head list;
- struct module *mod;
- struct file_operations id;
- struct file_operations enable;
- struct file_operations format;
- struct file_operations filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
- /*
- * As event_calls are added in groups by module,
- * when we find one file_ops, we don't need to search for
- * each call in that module, as the rest should be the
- * same. Only search for a new one if the last one did
- * not match.
- */
- if (file_ops && mod == file_ops->mod)
- return file_ops;
-
- list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
- if (file_ops->mod == mod)
- return file_ops;
- }
- return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
- struct ftrace_module_file_ops *file_ops;
-
- /*
- * This is a bit of a PITA. To allow for correct reference
- * counting, modules must "own" their file_operations.
- * To do this, we allocate the file operations that will be
- * used in the event directory.
- */
-
- file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
- if (!file_ops)
- return NULL;
-
- file_ops->mod = mod;
-
- file_ops->id = ftrace_event_id_fops;
- file_ops->id.owner = mod;
-
- file_ops->enable = ftrace_enable_fops;
- file_ops->enable.owner = mod;
-
- file_ops->filter = ftrace_event_filter_fops;
- file_ops->filter.owner = mod;
-
- file_ops->format = ftrace_event_format_fops;
- file_ops->format.owner = mod;
-
- list_add(&file_ops->list, &ftrace_module_file_list);
-
- return file_ops;
-}
-
static void trace_module_add_events(struct module *mod)
{
- struct ftrace_module_file_ops *file_ops = NULL;
struct ftrace_event_call **call, **start, **end;
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
- if (start == end)
- return;
-
- file_ops = trace_create_file_ops(mod);
- if (!file_ops)
- return;
-
for_each_event(call, start, end) {
__register_event(*call, mod);
- __add_event_to_tracers(*call, file_ops);
+ __add_event_to_tracers(*call);
}
}
static void trace_module_remove_events(struct module *mod)
{
- struct ftrace_module_file_ops *file_ops;
struct ftrace_event_call *call, *p;
bool clear_trace = false;
@@ -1874,16 +1785,6 @@ static void trace_module_remove_events(struct module *mod)
__trace_remove_event_call(call);
}
}
-
- /* Now free the file_operations */
- list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
- if (file_ops->mod == mod)
- break;
- }
- if (&file_ops->list != &ftrace_module_file_list) {
- list_del(&file_ops->list);
- kfree(file_ops);
- }
up_write(&trace_event_sem);
/*
@@ -1919,67 +1820,21 @@ static int trace_module_notify(struct notifier_block *self,
return 0;
}
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
- struct trace_array *tr,
- struct ftrace_module_file_ops *file_ops)
-{
- return __trace_add_new_event(call, tr,
- &file_ops->id, &file_ops->enable,
- &file_ops->filter, &file_ops->format);
-}
-
-#else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
- return NULL;
-}
-static inline int trace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
- struct trace_array *tr,
- struct ftrace_module_file_ops *file_ops)
-{
- return -ENODEV;
-}
+static struct notifier_block trace_module_nb = {
+ .notifier_call = trace_module_notify,
+ .priority = 0,
+};
#endif /* CONFIG_MODULES */
/* Create a new event directory structure for a trace directory. */
static void
__trace_add_event_dirs(struct trace_array *tr)
{
- struct ftrace_module_file_ops *file_ops = NULL;
struct ftrace_event_call *call;
int ret;
list_for_each_entry(call, &ftrace_events, list) {
- if (call->mod) {
- /*
- * Directories for events by modules need to
- * keep module ref counts when opened (as we don't
- * want the module to disappear when reading one
- * of these files). The file_ops keep account of
- * the module ref count.
- */
- file_ops = find_ftrace_file_ops(file_ops, call->mod);
- if (!file_ops)
- continue; /* Warn? */
- ret = __trace_add_new_mod_event(call, tr, file_ops);
- if (ret < 0)
- pr_warning("Could not create directory for event %s\n",
- call->name);
- continue;
- }
- ret = __trace_add_new_event(call, tr,
- &ftrace_event_id_fops,
- &ftrace_enable_fops,
- &ftrace_event_filter_fops,
- &ftrace_event_format_fops);
+ ret = __trace_add_new_event(call, tr);
if (ret < 0)
pr_warning("Could not create directory for event %s\n",
call->name);
@@ -2287,11 +2142,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
list_for_each_entry(file, &tr->events, list) {
- ret = event_create_dir(tr->event_dir, file,
- &ftrace_event_id_fops,
- &ftrace_enable_fops,
- &ftrace_event_filter_fops,
- &ftrace_event_format_fops);
+ ret = event_create_dir(tr->event_dir, file);
if (ret < 0)
pr_warning("Could not create directory for event %s\n",
file->event_call->name);
@@ -2332,29 +2183,14 @@ __trace_remove_event_dirs(struct trace_array *tr)
remove_event_file_dir(file);
}
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
- struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
{
struct trace_array *tr;
- list_for_each_entry(tr, &ftrace_trace_arrays, list) {
- if (file_ops)
- __trace_add_new_mod_event(call, tr, file_ops);
- else
- __trace_add_new_event(call, tr,
- &ftrace_event_id_fops,
- &ftrace_enable_fops,
- &ftrace_event_filter_fops,
- &ftrace_event_format_fops);
- }
+ list_for_each_entry(tr, &ftrace_trace_arrays, list)
+ __trace_add_new_event(call, tr);
}
-static struct notifier_block trace_module_nb = {
- .notifier_call = trace_module_notify,
- .priority = 0,
-};
-
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];
@@ -2559,10 +2395,11 @@ static __init int event_trace_init(void)
if (ret)
return ret;
+#ifdef CONFIG_MODULES
ret = register_module_notifier(&trace_module_nb);
if (ret)
pr_warning("Failed to register trace events module notifier\n");
-
+#endif
return 0;
}
early_initcall(event_trace_memsetup);
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index a9077c1b4ad3..2900817ba65c 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -244,12 +244,31 @@ static const char **find_next(void *v, loff_t *pos)
{
const char **fmt = v;
int start_index;
+ int last_index;
start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
if (*pos < start_index)
return __start___trace_bprintk_fmt + *pos;
+ /*
+ * The __tracepoint_str section is treated the same as the
+ * __trace_printk_fmt section. The difference is that the
+ * __trace_printk_fmt section should only be used by trace_printk()
+ * in a debugging environment, because if anything exists in that section
+ * the trace_printk() helper buffers are allocated, which would just
+ * waste space in a production environment.
+ *
+ * The __tracepoint_str sections on the other hand are used by
+ * tracepoints which need to map pointers to their strings to
+ * the ASCII text for userspace.
+ */
+ last_index = start_index;
+ start_index = __stop___tracepoint_str - __start___tracepoint_str;
+
+ if (*pos < last_index + start_index)
+ return __start___tracepoint_str + (*pos - last_index);
+
return find_next_mod_format(start_index, v, fmt, pos);
}
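The indexing in find_next() above walks two linker sections back to back: positions below start_index land in __trace_bprintk_fmt, the next block lands in __tracepoint_str, and everything beyond that falls through to the module formats. A user-space sketch of the same arithmetic, with made-up arrays standing in for the linker-generated section bounds:

#include <stdio.h>

/* Stand-ins for the __start/__stop section symbols used by find_next(). */
static const char *bprintk_fmts[] = { "fmt A", "fmt B" };
static const char *tracepoint_strs[] = { "str X", "str Y", "str Z" };

static const char **lookup(long pos)
{
	long start_index = sizeof(bprintk_fmts) / sizeof(bprintk_fmts[0]);
	long last_index, str_count;

	if (pos < start_index)
		return &bprintk_fmts[pos];

	/* Same remapping as the new block in find_next(). */
	last_index = start_index;
	str_count = sizeof(tracepoint_strs) / sizeof(tracepoint_strs[0]);
	if (pos < last_index + str_count)
		return &tracepoint_strs[pos - last_index];

	return NULL;	/* the kernel would continue with module formats here */
}

int main(void)
{
	const char **fmt;
	long pos;

	for (pos = 0; (fmt = lookup(pos)) != NULL; pos++)
		printf("pos %ld -> %s\n", pos, *fmt);
	return 0;
}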
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8fd03657bc7d..559329d9bd2f 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -200,8 +200,8 @@ extern char *__bad_type_size(void);
#type, #name, offsetof(typeof(trace), name), \
sizeof(trace.name), is_signed_type(type)
-static
-int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
+static int __init
+__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
int i;
int pos = 0;
@@ -228,7 +228,7 @@ int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
return pos;
}
-static int set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
{
char *print_fmt;
int len;
@@ -253,7 +253,7 @@ static int set_syscall_print_fmt(struct ftrace_event_call *call)
return 0;
}
-static void free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
{
struct syscall_metadata *entry = call->data;
@@ -459,7 +459,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
mutex_unlock(&syscall_trace_lock);
}
-static int init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct ftrace_event_call *call)
{
int id;
int num;
diff --git a/kernel/uid16.c b/kernel/uid16.c
index f6c83d7ef000..602e5bbbceff 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
struct group_info *group_info;
int retval;
- if (!nsown_capable(CAP_SETGID))
+ if (!ns_capable(current_user_ns(), CAP_SETGID))
return -EPERM;
if ((unsigned)gidsetsize > NGROUPS_MAX)
return -EINVAL;
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e9faf7..630d72bf7e41 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -10,12 +10,64 @@
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
int wait)
{
+ unsigned long flags;
+
WARN_ON(cpu != 0);
- local_irq_disable();
- (func)(info);
- local_irq_enable();
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
+
+int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ return 0;
+}
+EXPORT_SYMBOL(on_each_cpu);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ unsigned long flags;
+
+ if (cpumask_test_cpu(0, mask)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ if (cond_func(0, info)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
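The UP stubs above keep the calling convention identical to the SMP versions, so callers can be written once. A hedged sketch of how on_each_cpu_cond() is typically used (the demo_* names are invented for illustration):

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/types.h>

/* Predicate: decide per CPU whether func should run there. */
static bool demo_cond(int cpu, void *info)
{
	return (cpu % 2) == 0;		/* even-numbered CPUs only */
}

static void demo_func(void *info)
{
	pr_info("running on CPU %d\n", smp_processor_id());
}

static void demo_run(void)
{
	/*
	 * On SMP this IPIs every online CPU for which demo_cond() returns
	 * true and waits for completion; with the UP stub above it simply
	 * calls demo_func() locally when demo_cond(0, NULL) is true.
	 */
	on_each_cpu_cond(demo_cond, demo_func, NULL, true, GFP_KERNEL);
}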
diff --git a/kernel/user.c b/kernel/user.c
index 69b4c3d48cde..5bbb91988e69 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,8 +51,6 @@ struct user_namespace init_user_ns = {
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.proc_inum = PROC_USER_INIT_INO,
- .may_mount_sysfs = true,
- .may_mount_proc = true,
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 9064b919a406..13fb1134ba58 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -101,8 +101,6 @@ int create_user_ns(struct cred *new)
set_cred_user_ns(new, ns);
- update_mnt_policy(ns);
-
return 0;
}
diff --git a/kernel/utsname.c b/kernel/utsname.c
index 2fc8576efaa8..fd393124e507 100644
--- a/kernel/utsname.c
+++ b/kernel/utsname.c
@@ -114,7 +114,7 @@ static int utsns_install(struct nsproxy *nsproxy, void *new)
struct uts_namespace *ns = new;
if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) ||
- !nsown_capable(CAP_SYS_ADMIN))
+ !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
return -EPERM;
get_uts_ns(ns);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1241d8c91d5e..4431610f049a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -486,7 +486,52 @@ static struct smp_hotplug_thread watchdog_threads = {
.unpark = watchdog_enable,
};
-static int watchdog_enable_all_cpus(void)
+static void restart_watchdog_hrtimer(void *info)
+{
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ int ret;
+
+ /*
+ * No need to cancel and restart hrtimer if it is currently executing
+ * because it will reprogram itself with the new period now.
+ * We should never see it unqueued here because we are running per-cpu
+ * with interrupts disabled.
+ */
+ ret = hrtimer_try_to_cancel(hrtimer);
+ if (ret == 1)
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
+}
+
+static void update_timers(int cpu)
+{
+ struct call_single_data data = {.func = restart_watchdog_hrtimer};
+ /*
+ * Make sure that the perf event counter will adapt to the new
+ * sampling period. Updating the sampling period directly would
+ * be much nicer but we do not have an API for that now so
+ * let's use a big hammer.
+ * Hrtimer will adopt the new period on the next tick but this
+ * might be late already so we have to restart the timer as well.
+ */
+ watchdog_nmi_disable(cpu);
+ __smp_call_function_single(cpu, &data, 1);
+ watchdog_nmi_enable(cpu);
+}
+
+static void update_timers_all_cpus(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ preempt_disable();
+ for_each_online_cpu(cpu)
+ update_timers(cpu);
+ preempt_enable();
+ put_online_cpus();
+}
+
+static int watchdog_enable_all_cpus(bool sample_period_changed)
{
int err = 0;
@@ -496,6 +541,8 @@ static int watchdog_enable_all_cpus(void)
pr_err("Failed to create watchdog threads, disabled\n");
else
watchdog_running = 1;
+ } else if (sample_period_changed) {
+ update_timers_all_cpus();
}
return err;
@@ -520,13 +567,15 @@ int proc_dowatchdog(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
int err, old_thresh, old_enabled;
+ static DEFINE_MUTEX(watchdog_proc_mutex);
+ mutex_lock(&watchdog_proc_mutex);
old_thresh = ACCESS_ONCE(watchdog_thresh);
old_enabled = ACCESS_ONCE(watchdog_user_enabled);
err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (err || !write)
- return err;
+ goto out;
set_sample_period();
/*
@@ -535,7 +584,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
* watchdog_*_all_cpus() function takes care of this.
*/
if (watchdog_user_enabled && watchdog_thresh)
- err = watchdog_enable_all_cpus();
+ err = watchdog_enable_all_cpus(old_thresh != watchdog_thresh);
else
watchdog_disable_all_cpus();
@@ -544,7 +593,8 @@ int proc_dowatchdog(struct ctl_table *table, int write,
watchdog_thresh = old_thresh;
watchdog_user_enabled = old_enabled;
}
-
+out:
+ mutex_unlock(&watchdog_proc_mutex);
return err;
}
#endif /* CONFIG_SYSCTL */
@@ -553,14 +603,6 @@ void __init lockup_detector_init(void)
{
set_sample_period();
-#ifdef CONFIG_NO_HZ_FULL
- if (watchdog_user_enabled) {
- watchdog_user_enabled = 0;
- pr_warning("Disabled lockup detectors by default for full dynticks\n");
- pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
- }
-#endif
-
if (watchdog_user_enabled)
- watchdog_enable_all_cpus();
+ watchdog_enable_all_cpus(false);
}
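The sample_period_changed path above is what a threshold update from user space ends up exercising: writing kernel.watchdog_thresh goes through proc_dowatchdog(), which now serializes on watchdog_proc_mutex and restarts the per-CPU hrtimers via update_timers_all_cpus() when the period changed. A minimal user-space sketch, equivalent to "sysctl -w kernel.watchdog_thresh=20" and requiring root:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/watchdog_thresh", "w");

	if (!f) {
		perror("watchdog_thresh");
		return 1;
	}
	fputs("20\n", f);	/* lockup detection threshold in seconds */
	return fclose(f) ? 1 : 0;
}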
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e93f7b9067d8..987293d03ebc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -16,9 +16,10 @@
*
* This is the generic async execution mechanism. Work items are
* executed in process context. The worker pool is shared and
- * automatically managed. There is one worker pool for each CPU and
- * one extra for works which are better served by workers which are
- * not bound to any specific CPU.
+ * automatically managed. There are two worker pools for each CPU (one for
+ * normal work items and the other for high priority ones) and some extra
+ * pools for workqueues which are not bound to any specific CPU - the
+ * number of these backing pools is dynamic.
*
* Please read Documentation/workqueue.txt for details.
*/
@@ -540,6 +541,8 @@ static int worker_pool_assign_id(struct worker_pool *pool)
* This must be called either with pwq_lock held or sched RCU read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
+ *
+ * Return: The unbound pool_workqueue for @node.
*/
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
int node)
@@ -638,8 +641,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* get_work_pool - return the worker_pool a given work was associated with
* @work: the work item of interest
*
- * Return the worker_pool @work was last associated with. %NULL if none.
- *
* Pools are created and destroyed under wq_pool_mutex, and allows read
* access under sched-RCU read lock. As such, this function should be
* called under wq_pool_mutex or with preemption disabled.
@@ -648,6 +649,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
* mentioned locking is in effect. If the returned pool needs to be used
* beyond the critical section, the caller is responsible for ensuring the
* returned pool is and stays online.
+ *
+ * Return: The worker_pool @work was last associated with. %NULL if none.
*/
static struct worker_pool *get_work_pool(struct work_struct *work)
{
@@ -671,7 +674,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
* get_work_pool_id - return the worker pool ID a given work is associated with
* @work: the work item of interest
*
- * Return the worker_pool ID @work was last associated with.
+ * Return: The worker_pool ID @work was last associated with.
* %WORK_OFFQ_POOL_NONE if none.
*/
static int get_work_pool_id(struct work_struct *work)
@@ -830,7 +833,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu)
* CONTEXT:
* spin_lock_irq(rq->lock)
*
- * RETURNS:
+ * Return:
* Worker task on @cpu to wake up, %NULL if none.
*/
struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
@@ -965,8 +968,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
* CONTEXT:
* spin_lock_irq(pool->lock).
*
- * RETURNS:
- * Pointer to worker which is executing @work if found, NULL
+ * Return:
+ * Pointer to worker which is executing @work if found, %NULL
* otherwise.
*/
static struct worker *find_worker_executing_work(struct worker_pool *pool,
@@ -1154,14 +1157,16 @@ out_put:
* @flags: place to store irq state
*
* Try to grab PENDING bit of @work. This function can handle @work in any
- * stable state - idle, on timer or on worklist. Return values are
+ * stable state - idle, on timer or on worklist.
*
+ * Return:
* 1 if @work was pending and we successfully stole PENDING
* 0 if @work was idle and we claimed PENDING
* -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry
* -ENOENT if someone else is canceling @work, this state may persist
* for arbitrarily long
*
+ * Note:
* On >= 0 return, the caller owns @work's PENDING bit. To avoid getting
* interrupted while holding PENDING and @work off queue, irq must be
* disabled on entry. This, combined with delayed_work->timer being
@@ -1403,10 +1408,10 @@ retry:
* @wq: workqueue to use
* @work: work to queue
*
- * Returns %false if @work was already on a queue, %true otherwise.
- *
* We queue the work to a specific CPU, the caller must ensure it
* can't go away.
+ *
+ * Return: %false if @work was already on a queue, %true otherwise.
*/
bool queue_work_on(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
@@ -1476,7 +1481,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
* @dwork: work to queue
* @delay: number of jiffies to wait before queueing
*
- * Returns %false if @work was already on a queue, %true otherwise. If
+ * Return: %false if @work was already on a queue, %true otherwise. If
* @delay is zero and @dwork is idle, it will be scheduled for immediate
* execution.
*/
@@ -1512,7 +1517,7 @@ EXPORT_SYMBOL(queue_delayed_work_on);
* zero, @work is guaranteed to be scheduled immediately regardless of its
* current state.
*
- * Returns %false if @dwork was idle and queued, %true if @dwork was
+ * Return: %false if @dwork was idle and queued, %true if @dwork was
* pending and its timer was modified.
*
* This function is safe to call from any context including IRQ handler.
@@ -1627,7 +1632,7 @@ static void worker_leave_idle(struct worker *worker)
* Might sleep. Called without any lock but returns with pool->lock
* held.
*
- * RETURNS:
+ * Return:
* %true if the associated pool is online (@worker is successfully
* bound), %false if offline.
*/
@@ -1688,7 +1693,7 @@ static struct worker *alloc_worker(void)
* CONTEXT:
* Might sleep. Does GFP_KERNEL allocations.
*
- * RETURNS:
+ * Return:
* Pointer to the newly created worker.
*/
static struct worker *create_worker(struct worker_pool *pool)
@@ -1788,6 +1793,8 @@ static void start_worker(struct worker *worker)
* @pool: the target pool
*
* Grab the managership of @pool and create and start a new worker for it.
+ *
+ * Return: 0 on success. A negative error code otherwise.
*/
static int create_and_start_worker(struct worker_pool *pool)
{
@@ -1932,7 +1939,7 @@ static void pool_mayday_timeout(unsigned long __pool)
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
*
- * RETURNS:
+ * Return:
* %false if no action was taken and pool->lock stayed locked, %true
* otherwise.
*/
@@ -1989,7 +1996,7 @@ restart:
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Called only from manager.
*
- * RETURNS:
+ * Return:
* %false if no action was taken and pool->lock stayed locked, %true
* otherwise.
*/
@@ -2032,9 +2039,12 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations.
*
- * RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times. Does GFP_KERNEL allocations.
+ * Return:
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing work items. %true if the function released pool->lock
+ * and reacquired it to perform some management function, in which case
+ * the conditions that the caller verified while holding the lock before
+ * calling the function might no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
@@ -2255,6 +2265,8 @@ static void process_scheduled_works(struct worker *worker)
* work items regardless of their specific target workqueue. The only
* exception is work items which belong to workqueues with a rescuer which
* will be explained in rescuer_thread().
+ *
+ * Return: 0
*/
static int worker_thread(void *__worker)
{
@@ -2353,6 +2365,8 @@ sleep:
* those works so that forward progress can be guaranteed.
*
* This should happen rarely.
+ *
+ * Return: 0
*/
static int rescuer_thread(void *__rescuer)
{
@@ -2525,7 +2539,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
* CONTEXT:
* mutex_lock(wq->mutex).
*
- * RETURNS:
+ * Return:
* %true if @flush_color >= 0 and there's something to flush. %false
* otherwise.
*/
@@ -2846,7 +2860,7 @@ static bool __flush_work(struct work_struct *work)
* Wait until @work has finished execution. @work is guaranteed to be idle
* on return if it hasn't been requeued since flush started.
*
- * RETURNS:
+ * Return:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
@@ -2898,7 +2912,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
* The caller must ensure that the workqueue on which @work was last
* queued can't be destroyed before this function returns.
*
- * RETURNS:
+ * Return:
* %true if @work was pending, %false otherwise.
*/
bool cancel_work_sync(struct work_struct *work)
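A hedged teardown sketch for the guarantee above, with hypothetical driver-private names; note that the workqueue may only be destroyed after cancel_work_sync() has returned.

static struct workqueue_struct *example_wq;	/* hypothetical */
static struct work_struct example_work;		/* hypothetical */

static void example_remove(void)
{
	/*
	 * Kill a pending instance and wait for a running one to finish;
	 * use flush_work() instead if a queued instance must still run.
	 */
	if (cancel_work_sync(&example_work))
		pr_debug("example_work was still pending\n");

	/* Only once the work is idle is it safe to tear the queue down. */
	destroy_workqueue(example_wq);
}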
@@ -2915,7 +2929,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
* immediate execution. Like flush_work(), this function only
* considers the last queueing instance of @dwork.
*
- * RETURNS:
+ * Return:
* %true if flush_work() waited for the work to finish execution,
* %false if it was already idle.
*/
@@ -2933,11 +2947,15 @@ EXPORT_SYMBOL(flush_delayed_work);
* cancel_delayed_work - cancel a delayed work
* @dwork: delayed_work to cancel
*
- * Kill off a pending delayed_work. Returns %true if @dwork was pending
- * and canceled; %false if wasn't pending. Note that the work callback
- * function may still be running on return, unless it returns %true and the
- * work doesn't re-arm itself. Explicitly flush or use
- * cancel_delayed_work_sync() to wait on it.
+ * Kill off a pending delayed_work.
+ *
+ * Return: %true if @dwork was pending and canceled; %false if it wasn't
+ * pending.
+ *
+ * Note:
+ * The work callback function may still be running on return, unless
+ * it returns %true and the work doesn't re-arm itself. Explicitly flush or
+ * use cancel_delayed_work_sync() to wait on it.
*
* This function is safe to call from any context including IRQ handler.
*/
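A hedged sketch of the distinction the note above makes, using a hypothetical self-re-arming poller.

static void example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(example_poll, example_poll_fn);

static void example_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ... then re-arm for the next period */
	queue_delayed_work(system_wq, &example_poll, HZ);
}

static void example_stop_fast(void)
{
	/*
	 * Fine from IRQ context, but because the callback re-arms itself
	 * it may still be running (and requeueing) when this returns.
	 */
	cancel_delayed_work(&example_poll);
}

static void example_stop_for_good(void)
{
	/* Sleeps until any running instance has finished and cannot re-arm. */
	cancel_delayed_work_sync(&example_poll);
}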
@@ -2966,7 +2984,7 @@ EXPORT_SYMBOL(cancel_delayed_work);
*
* This is cancel_work_sync() for delayed works.
*
- * RETURNS:
+ * Return:
* %true if @dwork was pending, %false otherwise.
*/
bool cancel_delayed_work_sync(struct delayed_work *dwork)
@@ -2983,7 +3001,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync);
* system workqueue and blocks until all CPUs have completed.
* schedule_on_each_cpu() is very slow.
*
- * RETURNS:
+ * Return:
* 0 on success, -errno on failure.
*/
int schedule_on_each_cpu(work_func_t func)
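A minimal sketch of the API documented above, with a hypothetical per-CPU flush callback.

#include <linux/workqueue.h>
#include <linux/smp.h>

static void example_flush_local(struct work_struct *unused)
{
	pr_debug("flushing per-CPU state on CPU %d\n", raw_smp_processor_id());
}

static int example_flush_all_cpus(void)
{
	/* Sleeps until every online CPU has run the callback; very slow. */
	return schedule_on_each_cpu(example_flush_local);
}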
@@ -3051,7 +3069,7 @@ EXPORT_SYMBOL(flush_scheduled_work);
* Executes the function immediately if process context is available,
* otherwise schedules the function for delayed execution.
*
- * Returns: 0 - function was executed
+ * Return: 0 - function was executed
* 1 - function was scheduled for execution
*/
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
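A hedged sketch of the immediate-or-deferred behaviour described above; struct example_dev and its helpers are hypothetical, and the embedded execute_work must stay alive until any deferred call has run.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_dev {
	struct execute_work release_work;
	/* ... */
};

static void example_release_fn(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, release_work.work);

	kfree(dev);
}

static void example_put(struct example_dev *dev)
{
	/* Returns 0 if run right here in process context, 1 if deferred. */
	execute_in_process_context(example_release_fn, &dev->release_work);
}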
@@ -3095,25 +3113,26 @@ static struct workqueue_struct *dev_to_wq(struct device *dev)
return wq_dev->wq;
}
-static ssize_t wq_per_cpu_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
}
+static DEVICE_ATTR_RO(per_cpu);
-static ssize_t wq_max_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
}
-static ssize_t wq_max_active_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_active_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int val;
@@ -3124,12 +3143,14 @@ static ssize_t wq_max_active_store(struct device *dev,
workqueue_set_max_active(wq, val);
return count;
}
+static DEVICE_ATTR_RW(max_active);
-static struct device_attribute wq_sysfs_attrs[] = {
- __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
- __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
- __ATTR_NULL,
+static struct attribute *wq_sysfs_attrs[] = {
+ &dev_attr_per_cpu.attr,
+ &dev_attr_max_active.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(wq_sysfs);
static ssize_t wq_pool_ids_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -3279,7 +3300,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = {
static struct bus_type wq_subsys = {
.name = "workqueue",
- .dev_attrs = wq_sysfs_attrs,
+ .dev_groups = wq_sysfs_groups,
};
static int __init wq_sysfs_init(void)
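The hunks above convert the workqueue sysfs attributes from an open-coded struct device_attribute array to the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers plus ATTRIBUTE_GROUPS(), wired into the bus through .dev_groups. Below is a generic, hedged sketch of that pattern with a hypothetical "example" subsystem and attribute.

#include <linux/device.h>
#include <linux/sysfs.h>

static int example_value;	/* hypothetical backing state */

/* DEVICE_ATTR_RO(value) expects a show routine literally named value_show(). */
static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", example_value);
}
static DEVICE_ATTR_RO(value);

static struct attribute *example_attrs[] = {
	&dev_attr_value.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* defines example_groups[] from example_attrs[] */

static struct bus_type example_subsys = {
	.name		= "example",
	.dev_groups	= example_groups,
};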
@@ -3308,7 +3329,7 @@ static void wq_device_release(struct device *dev)
* apply_workqueue_attrs() may race against userland updating the
* attributes.
*
- * Returns 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
*/
int workqueue_sysfs_register(struct workqueue_struct *wq)
{
@@ -3401,7 +3422,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs)
* @gfp_mask: allocation mask to use
*
* Allocate a new workqueue_attrs, initialize with default settings and
- * return it. Returns NULL on failure.
+ * return it.
+ *
+ * Return: The newly allocated workqueue_attrs on success. %NULL on failure.
*/
struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
{
@@ -3460,7 +3483,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a,
* @pool: worker_pool to initialize
*
 * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs.
- * Returns 0 on success, -errno on failure. Even on failure, all fields
+ *
+ * Return: 0 on success, -errno on failure. Even on failure, all fields
* inside @pool proper are initialized and put_unbound_pool() can be called
* on @pool safely to release it.
*/
@@ -3567,9 +3591,12 @@ static void put_unbound_pool(struct worker_pool *pool)
* Obtain a worker_pool which has the same attributes as @attrs, bump the
* reference count and return it. If there already is a matching
* worker_pool, it will be used; otherwise, this function attempts to
- * create a new one. On failure, returns NULL.
+ * create a new one.
*
* Should be called with wq_pool_mutex held.
+ *
+ * Return: On success, a worker_pool with the same attributes as @attrs.
+ * On failure, %NULL.
*/
static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
{
@@ -3805,9 +3832,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
*
* Calculate the cpumask a workqueue with @attrs should use on @node. If
* @cpu_going_down is >= 0, that cpu is considered offline during
- * calculation. The result is stored in @cpumask. This function returns
- * %true if the resulting @cpumask is different from @attrs->cpumask,
- * %false if equal.
+ * calculation. The result is stored in @cpumask.
*
* If NUMA affinity is not enabled, @attrs->cpumask is always used. If
* enabled and @node has online CPUs requested by @attrs, the returned
@@ -3816,6 +3841,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq)
*
* The caller is responsible for ensuring that the cpumask of @node stays
* stable.
+ *
+ * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
+ * %false if equal.
*/
static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
int cpu_going_down, cpumask_t *cpumask)
@@ -3869,8 +3897,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
* items finish. Note that a work item which repeatedly requeues itself
* back-to-back will stay on its current pwq.
*
- * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on
- * failure.
+ * Performs GFP_KERNEL allocations.
+ *
+ * Return: 0 on success and -errno on failure.
*/
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs)
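A hedged sketch of the alloc, modify, apply, free sequence that goes with the Return: comment above; the helper is hypothetical and is assumed to be built-in code operating on an unbound, non-ordered workqueue.

#include <linux/workqueue.h>
#include <linux/cpumask.h>

static int example_restrict_wq(struct workqueue_struct *wq, int nice,
			       const struct cpumask *mask)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = nice;
	cpumask_copy(attrs->cpumask, mask);

	/* May sleep; performs GFP_KERNEL allocations as noted above. */
	ret = apply_workqueue_attrs(wq, attrs);

	free_workqueue_attrs(attrs);
	return ret;
}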
@@ -4338,6 +4367,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
*
* Determine whether %current is a workqueue rescuer. Can be used from
* work functions to determine whether it's being run off the rescuer task.
+ *
+ * Return: %true if %current is a workqueue rescuer. %false otherwise.
*/
bool current_is_workqueue_rescuer(void)
{
@@ -4361,7 +4392,7 @@ bool current_is_workqueue_rescuer(void)
* workqueue being congested on one CPU doesn't mean the workqueue is also
* contested on other CPUs / NUMA nodes.
*
- * RETURNS:
+ * Return:
* %true if congested, %false otherwise.
*/
bool workqueue_congested(int cpu, struct workqueue_struct *wq)
@@ -4394,7 +4425,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
* synchronization around this function and the test result is
* unreliable and only useful as advisory hints or for debugging.
*
- * RETURNS:
+ * Return:
* OR'd bitmask of WORK_BUSY_* bits.
*/
unsigned int work_busy(struct work_struct *work)
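A hedged debugging sketch of the advisory bitmask above; the helper is hypothetical and, as the comment says, the state may change the moment it is read.

static void example_report_work(struct work_struct *work)
{
	unsigned int busy = work_busy(work);

	pr_debug("work %p:%s%s\n", work,
		 (busy & WORK_BUSY_PENDING) ? " pending" : "",
		 (busy & WORK_BUSY_RUNNING) ? " running" : "");
}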
@@ -4772,9 +4803,10 @@ static void work_for_cpu_fn(struct work_struct *work)
* @fn: the function to run
* @arg: the function arg
*
- * This will return the value @fn returns.
* It is up to the caller to ensure that the cpu doesn't go offline.
* The caller must not hold any locks which would prevent @fn from completing.
+ *
+ * Return: The value @fn returns.
*/
long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
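A hedged sketch of a caller honouring the two requirements above (keep the CPU online, hold no locks @fn might need); the query function and names are hypothetical.

#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static long example_read_cpu(void *arg)
{
	int *out = arg;

	*out = raw_smp_processor_id();	/* runs on the CPU passed to work_on_cpu() */
	return 0;
}

static long example_query_cpu(int cpu)
{
	int ran_on = -1;
	long ret = -ENODEV;

	get_online_cpus();		/* keep @cpu from going offline */
	if (cpu_online(cpu))
		ret = work_on_cpu(cpu, example_read_cpu, &ran_on);
	put_online_cpus();

	pr_debug("work_on_cpu() ran on CPU %d, returned %ld\n", ran_on, ret);
	return ret;
}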
@@ -4846,7 +4878,7 @@ void freeze_workqueues_begin(void)
* CONTEXT:
* Grabs and releases wq_pool_mutex.
*
- * RETURNS:
+ * Return:
* %true if some freezable workqueues are still busy. %false if freezing
* is complete.
*/