Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.instrumentation |   6
-rw-r--r--  kernel/acct.c                  |   4
-rw-r--r--  kernel/exit.c                  |  11
-rw-r--r--  kernel/fork.c                  |  21
-rw-r--r--  kernel/futex.c                 |  78
-rw-r--r--  kernel/hrtimer.c               |  10
-rw-r--r--  kernel/irq/chip.c              |   9
-rw-r--r--  kernel/kallsyms.c              |   7
-rw-r--r--  kernel/kexec.c                 |   1
-rw-r--r--  kernel/kmod.c                  |  13
-rw-r--r--  kernel/ksysfs.c                |  82
-rw-r--r--  kernel/lockdep.c               |  61
-rw-r--r--  kernel/module.c                | 165
-rw-r--r--  kernel/panic.c                 |  18
-rw-r--r--  kernel/params.c                |  34
-rw-r--r--  kernel/power/disk.c            |  20
-rw-r--r--  kernel/power/main.c            |  26
-rw-r--r--  kernel/power/pm.c              |   4
-rw-r--r--  kernel/power/power.h           |   4
-rw-r--r--  kernel/printk.c                |   2
-rw-r--r--  kernel/ptrace.c                |   6
-rw-r--r--  kernel/rcupdate.c              |   2
-rw-r--r--  kernel/rtmutex-tester.c        |   2
-rw-r--r--  kernel/rwsem.c                 |   5
-rw-r--r--  kernel/sched.c                 | 337
-rw-r--r--  kernel/sched_debug.c           |  18
-rw-r--r--  kernel/sched_fair.c            |  28
-rw-r--r--  kernel/sched_rt.c              |   3
-rw-r--r--  kernel/sched_stats.h           |   3
-rw-r--r--  kernel/sysctl.c                |  20
-rw-r--r--  kernel/sysctl_check.c          |  54
-rw-r--r--  kernel/time/clockevents.c      |   5
-rw-r--r--  kernel/time/clocksource.c      |   2
-rw-r--r--  kernel/time/ntp.c              |   9
-rw-r--r--  kernel/time/tick-broadcast.c   |  56
-rw-r--r--  kernel/time/tick-sched.c       |   2
-rw-r--r--  kernel/time/timekeeping.c      |   2
-rw-r--r--  kernel/timer.c                 |   8
-rw-r--r--  kernel/user.c                  | 114
-rw-r--r--  kernel/utsname_sysctl.c        |   4
-rw-r--r--  kernel/workqueue.c             |   5
41 files changed, 683 insertions(+), 578 deletions(-)
diff --git a/kernel/Kconfig.instrumentation b/kernel/Kconfig.instrumentation
index f5f2c769d95e..468f47ad7503 100644
--- a/kernel/Kconfig.instrumentation
+++ b/kernel/Kconfig.instrumentation
@@ -20,8 +20,8 @@ config PROFILING
config OPROFILE
tristate "OProfile system profiling (EXPERIMENTAL)"
- depends on PROFILING
- depends on ALPHA || ARM || BLACKFIN || X86_32 || IA64 || M32R || MIPS || PARISC || PPC || S390 || SUPERH || SPARC || X86_64
+ depends on PROFILING && !UML
+ depends on ARCH_SUPPORTS_OPROFILE || ALPHA || ARM || BLACKFIN || IA64 || M32R || PARISC || PPC || S390 || SUPERH || SPARC
help
OProfile is a profiling system capable of profiling the
whole system, including the kernel, kernel modules, libraries,
@@ -31,7 +31,7 @@ config OPROFILE
config KPROBES
bool "Kprobes"
- depends on KALLSYMS && MODULES
+ depends on KALLSYMS && MODULES && !UML
depends on X86_32 || IA64 || PPC || S390 || SPARC64 || X86_64 || AVR32
help
Kprobes allows you to trap at almost any kernel address and
diff --git a/kernel/acct.c b/kernel/acct.c
index fce53d8df8a7..521dfa53cb99 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -413,7 +413,7 @@ static u32 encode_float(u64 value)
* The acct_process() call is the workhorse of the process
* accounting system. The struct acct is built here and then written
* into the accounting file. This function should only be called from
- * do_exit().
+ * do_exit() or when switching to a different output file.
*/
/*
@@ -482,7 +482,7 @@ static void do_acct_process(struct file *file)
#endif
#if ACCT_VERSION==3
ac.ac_pid = current->tgid;
- ac.ac_ppid = current->parent->tgid;
+ ac.ac_ppid = current->real_parent->tgid;
#endif
spin_lock_irq(&current->sighand->siglock);
diff --git a/kernel/exit.c b/kernel/exit.c
index cd0f1d4137a7..549c0558ba68 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1357,7 +1357,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
int __user *stat_addr, struct rusage __user *ru)
{
int retval, exit_code;
- struct pid_namespace *ns;
+ pid_t pid;
if (!p->exit_code)
return 0;
@@ -1376,12 +1376,11 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
* keep holding onto the tasklist_lock while we call getrusage and
* possibly take page faults for user memory.
*/
- ns = current->nsproxy->pid_ns;
+ pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
get_task_struct(p);
read_unlock(&tasklist_lock);
if (unlikely(noreap)) {
- pid_t pid = task_pid_nr_ns(p, ns);
uid_t uid = p->uid;
int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
@@ -1389,7 +1388,7 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
if (unlikely(!exit_code) || unlikely(p->exit_state))
goto bail_ref;
return wait_noreap_copyout(p, pid, uid,
- why, (exit_code << 8) | 0x7f,
+ why, exit_code,
infop, ru);
}
@@ -1451,11 +1450,11 @@ bail_ref:
if (!retval && infop)
retval = put_user(exit_code, &infop->si_status);
if (!retval && infop)
- retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid);
+ retval = put_user(pid, &infop->si_pid);
if (!retval && infop)
retval = put_user(p->uid, &infop->si_uid);
if (!retval)
- retval = task_pid_nr_ns(p, ns);
+ retval = pid;
put_task_struct(p);
BUG_ON(!retval);
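
[Editor's note on the pid-caching change above: once tasklist_lock is dropped, the task can be detached from its pid, so the numeric id has to be computed while the lock is still held. A hedged sketch of the snapshot-under-lock pattern, simplified from the hunks above rather than quoted verbatim:]

	read_lock(&tasklist_lock);
	pid = task_pid_nr_ns(p, current->nsproxy->pid_ns); /* safe: lock held */
	get_task_struct(p);            /* pins the struct, not the pid link */
	read_unlock(&tasklist_lock);

	/* from here on, use the cached 'pid'; don't call task_pid_nr_ns(p) */
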
diff --git a/kernel/fork.c b/kernel/fork.c
index 8ca1a14cdc8c..8dd8ff281009 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1292,23 +1292,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
__ptrace_link(p, current->parent);
if (thread_group_leader(p)) {
- if (clone_flags & CLONE_NEWPID) {
+ if (clone_flags & CLONE_NEWPID)
p->nsproxy->pid_ns->child_reaper = p;
- p->signal->tty = NULL;
- set_task_pgrp(p, p->pid);
- set_task_session(p, p->pid);
- attach_pid(p, PIDTYPE_PGID, pid);
- attach_pid(p, PIDTYPE_SID, pid);
- } else {
- p->signal->tty = current->signal->tty;
- set_task_pgrp(p, task_pgrp_nr(current));
- set_task_session(p, task_session_nr(current));
- attach_pid(p, PIDTYPE_PGID,
- task_pgrp(current));
- attach_pid(p, PIDTYPE_SID,
- task_session(current));
- }
+ p->signal->tty = current->signal->tty;
+ set_task_pgrp(p, task_pgrp_nr(current));
+ set_task_session(p, task_session_nr(current));
+ attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
+ attach_pid(p, PIDTYPE_SID, task_session(current));
list_add_tail_rcu(&p->tasks, &init_task.tasks);
__get_cpu_var(process_counts)++;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index 9dc591ab681a..db9824de8bf0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -658,7 +658,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
if (curval == -EFAULT)
ret = -EFAULT;
- if (curval != uval)
+ else if (curval != uval)
ret = -EINVAL;
if (ret) {
spin_unlock(&pi_state->pi_mutex.wait_lock);
@@ -1097,15 +1097,15 @@ static void unqueue_me_pi(struct futex_q *q)
}
/*
- * Fixup the pi_state owner with current.
+ * Fixup the pi_state owner with the new owner.
*
* Must be called with hash bucket lock held and mm->sem held for non
* private futexes.
*/
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
- struct task_struct *curr)
+ struct task_struct *newowner)
{
- u32 newtid = task_pid_vnr(curr) | FUTEX_WAITERS;
+ u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
u32 uval, curval, newval;
int ret;
@@ -1119,12 +1119,12 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
} else
newtid |= FUTEX_OWNER_DIED;
- pi_state->owner = curr;
+ pi_state->owner = newowner;
- spin_lock_irq(&curr->pi_lock);
+ spin_lock_irq(&newowner->pi_lock);
WARN_ON(!list_empty(&pi_state->list));
- list_add(&pi_state->list, &curr->pi_state_list);
- spin_unlock_irq(&curr->pi_lock);
+ list_add(&pi_state->list, &newowner->pi_state_list);
+ spin_unlock_irq(&newowner->pi_lock);
/*
* We own it, so we have to replace the pending owner
@@ -1149,9 +1149,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
/*
* In case we must use restart_block to restart a futex_wait,
- * we encode in the 'arg3' shared capability
+ * we encode in the 'flags' shared capability
*/
-#define ARG3_SHARED 1
+#define FLAGS_SHARED 1
static long futex_wait_restart(struct restart_block *restart);
@@ -1290,12 +1290,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
struct restart_block *restart;
restart = &current_thread_info()->restart_block;
restart->fn = futex_wait_restart;
- restart->arg0 = (unsigned long)uaddr;
- restart->arg1 = (unsigned long)val;
- restart->arg2 = (unsigned long)abs_time;
- restart->arg3 = 0;
+ restart->futex.uaddr = (u32 *)uaddr;
+ restart->futex.val = val;
+ restart->futex.time = abs_time->tv64;
+ restart->futex.flags = 0;
+
if (fshared)
- restart->arg3 |= ARG3_SHARED;
+ restart->futex.flags |= FLAGS_SHARED;
return -ERESTART_RESTARTBLOCK;
}
@@ -1310,15 +1311,15 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
static long futex_wait_restart(struct restart_block *restart)
{
- u32 __user *uaddr = (u32 __user *)restart->arg0;
- u32 val = (u32)restart->arg1;
- ktime_t *abs_time = (ktime_t *)restart->arg2;
+ u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
struct rw_semaphore *fshared = NULL;
+ ktime_t t;
+ t.tv64 = restart->futex.time;
restart->fn = do_no_restart_syscall;
- if (restart->arg3 & ARG3_SHARED)
+ if (restart->futex.flags & FLAGS_SHARED)
fshared = &current->mm->mmap_sem;
- return (long)futex_wait(uaddr, fshared, val, abs_time);
+ return (long)futex_wait(uaddr, fshared, restart->futex.val, &t);
}
@@ -1507,9 +1508,40 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
* when we were on the way back before we locked the
* hash bucket.
*/
- if (q.pi_state->owner == curr &&
- rt_mutex_trylock(&q.pi_state->pi_mutex)) {
- ret = 0;
+ if (q.pi_state->owner == curr) {
+ /*
+ * Try to get the rt_mutex now. This might
+ * fail as some other task acquired the
+ * rt_mutex after we removed ourself from the
+ * rt_mutex waiters list.
+ */
+ if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+ ret = 0;
+ else {
+ /*
+ * pi_state is incorrect, some other
+ * task did a lock steal and we
+ * returned due to timeout or signal
+ * without taking the rt_mutex. Too
+ * late. We can access the
+ * rt_mutex_owner without locking, as
+ * the other task is now blocked on
+ * the hash bucket lock. Fix the state
+ * up.
+ */
+ struct task_struct *owner;
+ int res;
+
+ owner = rt_mutex_owner(&q.pi_state->pi_mutex);
+ res = fixup_pi_state_owner(uaddr, &q, owner);
+
+ WARN_ON(rt_mutex_owner(&q.pi_state->pi_mutex) !=
+ owner);
+
+ /* propagate -EFAULT, if the fixup failed */
+ if (res)
+ ret = res;
+ }
} else {
/*
* Paranoia check. If we did not take the lock
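
[Editor's note on the restart_block conversion above: the untyped arg0..arg3 slots are replaced by named fields, so futex_wait_restart() no longer casts a kernel pointer out of an unsigned long. A sketch of the layout the patch assumes — paraphrased, not a verbatim copy of include/linux/thread_info.h:]

	struct restart_block {
		long (*fn)(struct restart_block *);
		union {
			/* ... other restart users ... */
			struct {
				u32 *uaddr;	/* futex word */
				u32 val;	/* expected value */
				u32 flags;	/* FLAGS_SHARED, ... */
				u64 time;	/* absolute timeout, ktime_t.tv64 */
			} futex;
		};
	};
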
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 22a25142e4cf..f994bb8065e6 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -850,6 +850,14 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
#ifdef CONFIG_TIME_LOW_RES
tim = ktime_add(tim, base->resolution);
#endif
+ /*
+ * Careful here: User space might have asked for a
+ * very long sleep, so the add above might result in a
+ * negative number, which enqueues the timer in front
+ * of the queue.
+ */
+ if (tim.tv64 < 0)
+ tim.tv64 = KTIME_MAX;
}
timer->expires = tim;
@@ -1370,7 +1378,7 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
/*
* Functions related to boot-time initialization:
*/
-static void __devinit init_hrtimers_cpu(int cpu)
+static void __cpuinit init_hrtimers_cpu(int cpu)
{
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
int i;
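
[Editor's note on the clamp added to hrtimer_start() above: adding base->resolution to a huge user-supplied expiry can wrap a signed 64-bit value negative, which would enqueue the timer at the front of the queue. A runnable userspace sketch of the same arithmetic; KTIME_MAX and the scalar ktime representation are simplifying assumptions:]

	#include <stdint.h>
	#include <stdio.h>

	#define KTIME_MAX INT64_MAX

	int main(void)
	{
		int64_t tim = KTIME_MAX - 10;	/* very long sleep from user space */
		int64_t res = 1000000;		/* e.g. a 1 ms low-res clock */

		/* do the add in unsigned space so the wrap is well-defined C */
		tim = (int64_t)((uint64_t)tim + (uint64_t)res);
		if (tim < 0)
			tim = KTIME_MAX;	/* clamp instead of expiring in the past */

		printf("expires = %lld\n", (long long)tim);
		return 0;
	}
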
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 9b5dff6b3f6a..44019ce30a14 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -297,18 +297,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely(desc->status & IRQ_INPROGRESS))
goto out_unlock;
+ desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
kstat_cpu(cpu).irqs[irq]++;
action = desc->action;
- if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
- if (desc->chip->mask)
- desc->chip->mask(irq);
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
- desc->status |= IRQ_PENDING;
+ if (unlikely(!action || (desc->status & IRQ_DISABLED)))
goto out_unlock;
- }
- desc->status &= ~(IRQ_REPLAY | IRQ_WAITING | IRQ_PENDING);
desc->status |= IRQ_INPROGRESS;
spin_unlock(&desc->lock);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 474219a41929..2fc25810509e 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -32,9 +32,14 @@
/* These will be re-linked against their real values during the second link stage */
extern const unsigned long kallsyms_addresses[] __attribute__((weak));
-extern const unsigned long kallsyms_num_syms __attribute__((weak));
extern const u8 kallsyms_names[] __attribute__((weak));
+/* tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV)
+ */
+extern const unsigned long kallsyms_num_syms
+__attribute__((weak, section(".rodata")));
+
extern const u8 kallsyms_token_table[] __attribute__((weak));
extern const u16 kallsyms_token_index[] __attribute__((weak));
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aa74a1ef2da8..9a26eec9eb04 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1404,6 +1404,7 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(list_head, next);
VMCOREINFO_OFFSET(list_head, prev);
VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
+ VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
VMCOREINFO_NUMBER(NR_FREE_PAGES);
arch_crash_save_vmcoreinfo();
diff --git a/kernel/kmod.c b/kernel/kmod.c
index c6a4f8aebeba..bb7df2a28bd7 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -451,13 +451,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
enum umh_wait wait)
{
DECLARE_COMPLETION_ONSTACK(done);
- int retval;
+ int retval = 0;
helper_lock();
- if (sub_info->path[0] == '\0') {
- retval = 0;
+ if (sub_info->path[0] == '\0')
goto out;
- }
if (!khelper_wq || usermodehelper_disabled) {
retval = -EBUSY;
@@ -468,13 +466,14 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info,
sub_info->wait = wait;
queue_work(khelper_wq, &sub_info->work);
- if (wait == UMH_NO_WAIT) /* task has freed sub_info */
- return 0;
+ if (wait == UMH_NO_WAIT) /* task has freed sub_info */
+ goto unlock;
wait_for_completion(&done);
retval = sub_info->retval;
- out:
+out:
call_usermodehelper_freeinfo(sub_info);
+unlock:
helper_unlock();
return retval;
}
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 65daa5373ca6..e53bc30e9ba5 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -17,30 +17,34 @@
#include <linux/sched.h>
#define KERNEL_ATTR_RO(_name) \
-static struct subsys_attribute _name##_attr = __ATTR_RO(_name)
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define KERNEL_ATTR_RW(_name) \
-static struct subsys_attribute _name##_attr = \
+static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
/* current uevent sequence number */
-static ssize_t uevent_seqnum_show(struct kset *kset, char *page)
+static ssize_t uevent_seqnum_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%llu\n", (unsigned long long)uevent_seqnum);
+ return sprintf(buf, "%llu\n", (unsigned long long)uevent_seqnum);
}
KERNEL_ATTR_RO(uevent_seqnum);
/* uevent helper program, used during early boot */
-static ssize_t uevent_helper_show(struct kset *kset, char *page)
+static ssize_t uevent_helper_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%s\n", uevent_helper);
+ return sprintf(buf, "%s\n", uevent_helper);
}
-static ssize_t uevent_helper_store(struct kset *kset, const char *page, size_t count)
+static ssize_t uevent_helper_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
if (count+1 > UEVENT_HELPER_PATH_LEN)
return -ENOENT;
- memcpy(uevent_helper, page, count);
+ memcpy(uevent_helper, buf, count);
uevent_helper[count] = '\0';
if (count && uevent_helper[count-1] == '\n')
uevent_helper[count-1] = '\0';
@@ -50,21 +54,24 @@ KERNEL_ATTR_RW(uevent_helper);
#endif
#ifdef CONFIG_KEXEC
-static ssize_t kexec_loaded_show(struct kset *kset, char *page)
+static ssize_t kexec_loaded_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%d\n", !!kexec_image);
+ return sprintf(buf, "%d\n", !!kexec_image);
}
KERNEL_ATTR_RO(kexec_loaded);
-static ssize_t kexec_crash_loaded_show(struct kset *kset, char *page)
+static ssize_t kexec_crash_loaded_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%d\n", !!kexec_crash_image);
+ return sprintf(buf, "%d\n", !!kexec_crash_image);
}
KERNEL_ATTR_RO(kexec_crash_loaded);
-static ssize_t vmcoreinfo_show(struct kset *kset, char *page)
+static ssize_t vmcoreinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
{
- return sprintf(page, "%lx %x\n",
+ return sprintf(buf, "%lx %x\n",
paddr_vmcoreinfo_note(),
(unsigned int)vmcoreinfo_max_size);
}
@@ -94,8 +101,8 @@ static struct bin_attribute notes_attr = {
.read = &notes_read,
};
-decl_subsys(kernel, NULL, NULL);
-EXPORT_SYMBOL_GPL(kernel_subsys);
+struct kobject *kernel_kobj;
+EXPORT_SYMBOL_GPL(kernel_kobj);
static struct attribute * kernel_attrs[] = {
#if defined(CONFIG_HOTPLUG) && defined(CONFIG_NET)
@@ -116,24 +123,39 @@ static struct attribute_group kernel_attr_group = {
static int __init ksysfs_init(void)
{
- int error = subsystem_register(&kernel_subsys);
- if (!error)
- error = sysfs_create_group(&kernel_subsys.kobj,
- &kernel_attr_group);
+ int error;
- if (!error && notes_size > 0) {
- notes_attr.size = notes_size;
- error = sysfs_create_bin_file(&kernel_subsys.kobj,
- &notes_attr);
+ kernel_kobj = kobject_create_and_add("kernel", NULL);
+ if (!kernel_kobj) {
+ error = -ENOMEM;
+ goto exit;
}
+ error = sysfs_create_group(kernel_kobj, &kernel_attr_group);
+ if (error)
+ goto kset_exit;
- /*
- * Create "/sys/kernel/uids" directory and corresponding root user's
- * directory under it.
- */
- if (!error)
- error = uids_kobject_init();
+ if (notes_size > 0) {
+ notes_attr.size = notes_size;
+ error = sysfs_create_bin_file(kernel_kobj, &notes_attr);
+ if (error)
+ goto group_exit;
+ }
+ /* create the /sys/kernel/uids/ directory */
+ error = uids_sysfs_init();
+ if (error)
+ goto notes_exit;
+
+ return 0;
+
+notes_exit:
+ if (notes_size > 0)
+ sysfs_remove_bin_file(kernel_kobj, &notes_attr);
+group_exit:
+ sysfs_remove_group(kernel_kobj, &kernel_attr_group);
+kset_exit:
+ kobject_put(kernel_kobj);
+exit:
return error;
}
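
[Editor's note: the ksysfs conversion above is the template for the other subsystem conversions in this series — replace a static kset with kobject_create_and_add(), hang attribute groups off it, and unwind with a goto ladder. A minimal sketch for a hypothetical /sys/kernel/example directory; the 'example' names are illustrative, not from the patch:]

	static struct kobject *example_kobj;

	static int __init example_init(void)
	{
		int error;

		example_kobj = kobject_create_and_add("example", kernel_kobj);
		if (!example_kobj)
			return -ENOMEM;

		error = sysfs_create_group(example_kobj, &example_attr_group);
		if (error)
			kobject_put(example_kobj);	/* drops the final reference */
		return error;
	}
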
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ed38bbfc48a3..e2c07ece367d 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2654,10 +2654,15 @@ static void check_flags(unsigned long flags)
if (!debug_locks)
return;
- if (irqs_disabled_flags(flags))
- DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
- else
- DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
+ if (irqs_disabled_flags(flags)) {
+ if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
+ printk("possible reason: unannotated irqs-off.\n");
+ }
+ } else {
+ if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
+ printk("possible reason: unannotated irqs-on.\n");
+ }
+ }
/*
* We dont accurately track softirq state in e.g.
@@ -2927,7 +2932,7 @@ static void zap_class(struct lock_class *class)
}
-static inline int within(void *addr, void *start, unsigned long size)
+static inline int within(const void *addr, void *start, unsigned long size)
{
return addr >= start && addr < start + size;
}
@@ -2938,9 +2943,10 @@ void lockdep_free_key_range(void *start, unsigned long size)
struct list_head *head;
unsigned long flags;
int i;
+ int locked;
raw_local_irq_save(flags);
- graph_lock();
+ locked = graph_lock();
/*
* Unhash all classes that were created by this module:
@@ -2949,12 +2955,16 @@ void lockdep_free_key_range(void *start, unsigned long size)
head = classhash_table + i;
if (list_empty(head))
continue;
- list_for_each_entry_safe(class, next, head, hash_entry)
+ list_for_each_entry_safe(class, next, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
+ else if (within(class->name, start, size))
+ zap_class(class);
+ }
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
raw_local_irq_restore(flags);
}
@@ -2964,6 +2974,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
struct list_head *head;
unsigned long flags;
int i, j;
+ int locked;
raw_local_irq_save(flags);
@@ -2982,7 +2993,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
* Debug check: in the end all mapped classes should
* be gone.
*/
- graph_lock();
+ locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
if (list_empty(head))
@@ -2995,7 +3006,8 @@ void lockdep_reset_lock(struct lockdep_map *lock)
}
}
}
- graph_unlock();
+ if (locked)
+ graph_unlock();
out_restore:
raw_local_irq_restore(flags);
@@ -3054,11 +3066,6 @@ void __init lockdep_info(void)
#endif
}
-static inline int in_range(const void *start, const void *addr, const void *end)
-{
- return addr >= start && addr <= end;
-}
-
static void
print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
const void *mem_to, struct held_lock *hlock)
@@ -3080,6 +3087,13 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
dump_stack();
}
+static inline int not_in_range(const void* mem_from, unsigned long mem_len,
+ const void* lock_from, unsigned long lock_len)
+{
+ return lock_from + lock_len <= mem_from ||
+ mem_from + mem_len <= lock_from;
+}
+
/*
* Called when kernel memory is freed (or unmapped), or if a lock
* is destroyed or reinitialized - this code checks whether there is
@@ -3087,7 +3101,6 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
*/
void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
{
- const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
struct task_struct *curr = current;
struct held_lock *hlock;
unsigned long flags;
@@ -3100,14 +3113,11 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
- lock_from = (void *)hlock->instance;
- lock_to = (void *)(hlock->instance + 1);
-
- if (!in_range(mem_from, lock_from, mem_to) &&
- !in_range(mem_from, lock_to, mem_to))
+ if (not_in_range(mem_from, mem_len, hlock->instance,
+ sizeof(*hlock->instance)))
continue;
- print_freed_lock_bug(curr, mem_from, mem_to, hlock);
+ print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
local_irq_restore(flags);
@@ -3173,6 +3183,13 @@ retry:
printk(" locked it.\n");
do_each_thread(g, p) {
+ /*
+ * It's not reliable to print a task's held locks
+ * if it's not sleeping (or if it's not the current
+ * task):
+ */
+ if (p->state == TASK_RUNNING && p != current)
+ continue;
if (p->lockdep_depth)
lockdep_print_held_locks(p);
if (!unlock)
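
[Editor's note: the in_range() pair removed above missed the case of a lock that completely spans the freed region; not_in_range() is the standard interval-disjointness test — [a, a+alen) and [b, b+blen) are disjoint iff one ends at or before the other begins. A runnable userspace sketch:]

	#include <assert.h>
	#include <stddef.h>

	static int not_in_range(const char *mem_from, size_t mem_len,
				const char *lock_from, size_t lock_len)
	{
		return lock_from + lock_len <= mem_from ||
		       mem_from + mem_len <= lock_from;
	}

	int main(void)
	{
		char buf[64];

		assert(!not_in_range(buf + 8, 4, buf, 64));	/* lock spans mem */
		assert(!not_in_range(buf, 64, buf + 8, 4));	/* lock inside mem */
		assert(not_in_range(buf, 32, buf + 32, 8));	/* adjacent: disjoint */
		return 0;
	}
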
diff --git a/kernel/module.c b/kernel/module.c
index 3202c9950073..dcb8a2cbf75e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -47,8 +47,6 @@
#include <asm/cacheflush.h>
#include <linux/license.h>
-extern int module_sysfs_initialized;
-
#if 0
#define DEBUGP printk
#else
@@ -81,7 +79,8 @@ int unregister_module_notifier(struct notifier_block * nb)
}
EXPORT_SYMBOL(unregister_module_notifier);
-/* We require a truly strong try_module_get() */
+/* We require a truly strong try_module_get(): 0 means failure due to
+ ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
if (mod && mod->state == MODULE_STATE_COMING)
@@ -952,7 +951,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
ret = __find_symbol(name, &owner, &crc,
!(mod->taints & TAINT_PROPRIETARY_MODULE));
if (ret) {
- /* use_module can fail due to OOM, or module unloading */
+ /* use_module can fail due to OOM,
+ or module initialization or unloading */
if (!check_version(sechdrs, versindex, name, mod, crc) ||
!use_module(mod, owner))
ret = 0;
@@ -1120,7 +1120,7 @@ static void add_notes_attrs(struct module *mod, unsigned int nsect,
++loaded;
}
- notes_attrs->dir = kobject_add_dir(&mod->mkobj.kobj, "notes");
+ notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
if (!notes_attrs->dir)
goto out;
@@ -1217,15 +1217,16 @@ int mod_sysfs_init(struct module *mod)
err = -EINVAL;
goto out;
}
- memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
- err = kobject_set_name(&mod->mkobj.kobj, "%s", mod->name);
- if (err)
- goto out;
- kobj_set_kset_s(&mod->mkobj, module_subsys);
mod->mkobj.mod = mod;
- kobject_init(&mod->mkobj.kobj);
+ memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
+ mod->mkobj.kobj.kset = module_kset;
+ err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
+ "%s", mod->name);
+ if (err)
+ kobject_put(&mod->mkobj.kobj);
+ /* delay uevent until full sysfs population */
out:
return err;
}
@@ -1236,12 +1237,7 @@ int mod_sysfs_setup(struct module *mod,
{
int err;
- /* delay uevent until full sysfs population */
- err = kobject_add(&mod->mkobj.kobj);
- if (err)
- goto out;
-
- mod->holders_dir = kobject_add_dir(&mod->mkobj.kobj, "holders");
+ mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
if (!mod->holders_dir) {
err = -ENOMEM;
goto out_unreg;
@@ -1261,11 +1257,9 @@ int mod_sysfs_setup(struct module *mod,
out_unreg_param:
module_param_sysfs_remove(mod);
out_unreg_holders:
- kobject_unregister(mod->holders_dir);
+ kobject_put(mod->holders_dir);
out_unreg:
- kobject_del(&mod->mkobj.kobj);
kobject_put(&mod->mkobj.kobj);
-out:
return err;
}
#endif
@@ -1274,9 +1268,9 @@ static void mod_kobject_remove(struct module *mod)
{
module_remove_modinfo_attrs(mod);
module_param_sysfs_remove(mod);
- kobject_unregister(mod->mkobj.drivers_dir);
- kobject_unregister(mod->holders_dir);
- kobject_unregister(&mod->mkobj.kobj);
+ kobject_put(mod->mkobj.drivers_dir);
+ kobject_put(mod->holders_dir);
+ kobject_put(&mod->mkobj.kobj);
}
/*
@@ -1369,7 +1363,7 @@ dup:
return ret;
}
-/* Change all symbols so that sh_value encodes the pointer directly. */
+/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(Elf_Shdr *sechdrs,
unsigned int symindex,
const char *strtab,
@@ -1882,10 +1876,10 @@ static struct module *load_module(void __user *umod,
/* Now we've moved module, initialize linked lists, etc. */
module_unload_init(mod);
- /* Initialize kobject, so we can reference it. */
+ /* add kobject, so we can reference it. */
err = mod_sysfs_init(mod);
if (err)
- goto cleanup;
+ goto free_unload;
/* Set up license info based on the info section */
set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
@@ -2055,6 +2049,9 @@ static struct module *load_module(void __user *umod,
arch_cleanup:
module_arch_cleanup(mod);
cleanup:
+ kobject_del(&mod->mkobj.kobj);
+ kobject_put(&mod->mkobj.kobj);
+ free_unload:
module_unload_free(mod);
module_free(mod, mod->module_init);
free_core:
@@ -2212,29 +2209,34 @@ static const char *get_ksymbol(struct module *mod,
/* For kallsyms to ask for address resolution. NULL means not found.
We don't lock, as this is used for oops resolution and races are a
lesser concern. */
+/* FIXME: Risky: returns a pointer into a module w/o lock */
const char *module_address_lookup(unsigned long addr,
unsigned long *size,
unsigned long *offset,
char **modname)
{
struct module *mod;
+ const char *ret = NULL;
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size)
|| within(addr, mod->module_core, mod->core_size)) {
if (modname)
*modname = mod->name;
- return get_ksymbol(mod, addr, size, offset);
+ ret = get_ksymbol(mod, addr, size, offset);
+ break;
}
}
- return NULL;
+ preempt_enable();
+ return ret;
}
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2244,12 +2246,12 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)
if (!sym)
goto out;
strlcpy(symname, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2258,7 +2260,7 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (within(addr, mod->module_init, mod->init_size) ||
within(addr, mod->module_core, mod->core_size)) {
@@ -2271,12 +2273,12 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
strlcpy(modname, mod->name, MODULE_NAME_LEN);
if (name)
strlcpy(name, sym, KSYM_NAME_LEN);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
}
out:
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2285,7 +2287,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
{
struct module *mod;
- mutex_lock(&module_mutex);
+ preempt_disable();
list_for_each_entry(mod, &modules, list) {
if (symnum < mod->num_symtab) {
*value = mod->symtab[symnum].st_value;
@@ -2294,12 +2296,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, mod);
- mutex_unlock(&module_mutex);
+ preempt_enable();
return 0;
}
symnum -= mod->num_symtab;
}
- mutex_unlock(&module_mutex);
+ preempt_enable();
return -ERANGE;
}
@@ -2322,6 +2324,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
unsigned long ret = 0;
/* Don't lock: we're in enough trouble already. */
+ preempt_disable();
if ((colon = strchr(name, ':')) != NULL) {
*colon = '\0';
if ((mod = find_module(name)) != NULL)
@@ -2332,6 +2335,7 @@ unsigned long module_kallsyms_lookup_name(const char *name)
if ((ret = mod_find_symname(mod, name)) != 0)
break;
}
+ preempt_enable();
return ret;
}
#endif /* CONFIG_KALLSYMS */
@@ -2493,93 +2497,6 @@ void print_modules(void)
printk("\n");
}
-#ifdef CONFIG_SYSFS
-static char *make_driver_name(struct device_driver *drv)
-{
- char *driver_name;
-
- driver_name = kmalloc(strlen(drv->name) + strlen(drv->bus->name) + 2,
- GFP_KERNEL);
- if (!driver_name)
- return NULL;
-
- sprintf(driver_name, "%s:%s", drv->bus->name, drv->name);
- return driver_name;
-}
-
-static void module_create_drivers_dir(struct module_kobject *mk)
-{
- if (!mk || mk->drivers_dir)
- return;
-
- mk->drivers_dir = kobject_add_dir(&mk->kobj, "drivers");
-}
-
-void module_add_driver(struct module *mod, struct device_driver *drv)
-{
- char *driver_name;
- int no_warn;
- struct module_kobject *mk = NULL;
-
- if (!drv)
- return;
-
- if (mod)
- mk = &mod->mkobj;
- else if (drv->mod_name) {
- struct kobject *mkobj;
-
- /* Lookup built-in module entry in /sys/modules */
- mkobj = kset_find_obj(&module_subsys, drv->mod_name);
- if (mkobj) {
- mk = container_of(mkobj, struct module_kobject, kobj);
- /* remember our module structure */
- drv->mkobj = mk;
- /* kset_find_obj took a reference */
- kobject_put(mkobj);
- }
- }
-
- if (!mk)
- return;
-
- /* Don't check return codes; these calls are idempotent */
- no_warn = sysfs_create_link(&drv->kobj, &mk->kobj, "module");
- driver_name = make_driver_name(drv);
- if (driver_name) {
- module_create_drivers_dir(mk);
- no_warn = sysfs_create_link(mk->drivers_dir, &drv->kobj,
- driver_name);
- kfree(driver_name);
- }
-}
-EXPORT_SYMBOL(module_add_driver);
-
-void module_remove_driver(struct device_driver *drv)
-{
- struct module_kobject *mk = NULL;
- char *driver_name;
-
- if (!drv)
- return;
-
- sysfs_remove_link(&drv->kobj, "module");
-
- if (drv->owner)
- mk = &drv->owner->mkobj;
- else if (drv->mkobj)
- mk = drv->mkobj;
- if (mk && mk->drivers_dir) {
- driver_name = make_driver_name(drv);
- if (driver_name) {
- sysfs_remove_link(mk->drivers_dir, driver_name);
- kfree(driver_name);
- }
- }
-}
-EXPORT_SYMBOL(module_remove_driver);
-#endif
-
#ifdef CONFIG_MODVERSIONS
/* Generate the signature for struct module here, too, for modversions. */
void struct_module(struct module *mod) { return; }
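
[Editor's note on the mutex_lock(&module_mutex) -> preempt_disable() conversions in the kallsyms helpers above: they rely on how modules are unlinked — free_module() removes the module from the list inside stop_machine(), which cannot run while any CPU has preemption disabled, so a preempt-off walk sees a stable list. A simplified kernel-style sketch of the resulting pattern:]

	static struct module *sketch_lookup(unsigned long addr)
	{
		struct module *mod, *ret = NULL;

		preempt_disable();	/* holds off stop_machine, hence unlink */
		list_for_each_entry(mod, &modules, list) {
			if (within(addr, mod->module_core, mod->core_size)) {
				ret = mod;
				break;
			}
		}
		preempt_enable();
		/* the FIXME above applies: 'ret' may be stale once we get here */
		return ret;
	}
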
diff --git a/kernel/panic.c b/kernel/panic.c
index 6f6e03e91595..da4d6bac270e 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -19,6 +19,7 @@
#include <linux/nmi.h>
#include <linux/kexec.h>
#include <linux/debug_locks.h>
+#include <linux/random.h>
int panic_on_oops;
int tainted;
@@ -266,12 +267,29 @@ void oops_enter(void)
}
/*
+ * 64-bit random ID for oopses:
+ */
+static u64 oops_id;
+
+static int init_oops_id(void)
+{
+ if (!oops_id)
+ get_random_bytes(&oops_id, sizeof(oops_id));
+
+ return 0;
+}
+late_initcall(init_oops_id);
+
+/*
* Called when the architecture exits its oops handler, after printing
* everything.
*/
void oops_exit(void)
{
do_oops_enter_exit();
+ init_oops_id();
+ printk(KERN_WARNING "---[ end trace %016llx ]---\n",
+ (unsigned long long)oops_id);
}
#ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/params.c b/kernel/params.c
index 2a4c51487e72..b4da9505f4d2 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -560,11 +560,10 @@ static void __init kernel_param_sysfs_setup(const char *name,
BUG_ON(!mk);
mk->mod = THIS_MODULE;
- kobj_set_kset_s(mk, module_subsys);
- kobject_set_name(&mk->kobj, name);
- kobject_init(&mk->kobj);
- ret = kobject_add(&mk->kobj);
+ mk->kobj.kset = module_kset;
+ ret = kobject_init_and_add(&mk->kobj, &module_ktype, NULL, "%s", name);
if (ret) {
+ kobject_put(&mk->kobj);
printk(KERN_ERR "Module '%s' failed to be added to sysfs, "
"error number %d\n", name, ret);
printk(KERN_ERR "The system will be unstable now.\n");
@@ -679,8 +678,6 @@ static struct sysfs_ops module_sysfs_ops = {
.store = module_attr_store,
};
-static struct kobj_type module_ktype;
-
static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
struct kobj_type *ktype = get_ktype(kobj);
@@ -694,10 +691,10 @@ static struct kset_uevent_ops module_uevent_ops = {
.filter = uevent_filter,
};
-decl_subsys(module, &module_ktype, &module_uevent_ops);
+struct kset *module_kset;
int module_sysfs_initialized;
-static struct kobj_type module_ktype = {
+struct kobj_type module_ktype = {
.sysfs_ops = &module_sysfs_ops,
};
@@ -706,13 +703,11 @@ static struct kobj_type module_ktype = {
*/
static int __init param_sysfs_init(void)
{
- int ret;
-
- ret = subsystem_register(&module_subsys);
- if (ret < 0) {
- printk(KERN_WARNING "%s (%d): subsystem_register error: %d\n",
- __FILE__, __LINE__, ret);
- return ret;
+ module_kset = kset_create_and_add("module", &module_uevent_ops, NULL);
+ if (!module_kset) {
+ printk(KERN_WARNING "%s (%d): error creating kset\n",
+ __FILE__, __LINE__);
+ return -ENOMEM;
}
module_sysfs_initialized = 1;
@@ -722,14 +717,7 @@ static int __init param_sysfs_init(void)
}
subsys_initcall(param_sysfs_init);
-#else
-#if 0
-static struct sysfs_ops module_sysfs_ops = {
- .show = NULL,
- .store = NULL,
-};
-#endif
-#endif
+#endif /* CONFIG_SYSFS */
EXPORT_SYMBOL(param_set_byte);
EXPORT_SYMBOL(param_get_byte);
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 05b64790fe83..b138b431e271 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -567,7 +567,8 @@ static const char * const hibernation_modes[] = {
* supports it (as determined by having hibernation_ops).
*/
-static ssize_t disk_show(struct kset *kset, char *buf)
+static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
int i;
char *start = buf;
@@ -597,7 +598,8 @@ static ssize_t disk_show(struct kset *kset, char *buf)
}
-static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
int error = 0;
int i;
@@ -642,13 +644,15 @@ static ssize_t disk_store(struct kset *kset, const char *buf, size_t n)
power_attr(disk);
-static ssize_t resume_show(struct kset *kset, char *buf)
+static ssize_t resume_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf,"%d:%d\n", MAJOR(swsusp_resume_device),
MINOR(swsusp_resume_device));
}
-static ssize_t resume_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t resume_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
unsigned int maj, min;
dev_t res;
@@ -674,12 +678,14 @@ static ssize_t resume_store(struct kset *kset, const char *buf, size_t n)
power_attr(resume);
-static ssize_t image_size_show(struct kset *kset, char *buf)
+static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%lu\n", image_size);
}
-static ssize_t image_size_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
unsigned long size;
@@ -708,7 +714,7 @@ static struct attribute_group attr_group = {
static int __init pm_disk_init(void)
{
- return sysfs_create_group(&power_subsys.kobj, &attr_group);
+ return sysfs_create_group(power_kobj, &attr_group);
}
core_initcall(pm_disk_init);
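
[Editor's note: every show/store pair in this file is converted the same way, from the kset-based prototype to the kobj_attribute one. A minimal sketch of one read/write attribute under the new API; the 'example' names are illustrative:]

	static int example_value;

	static ssize_t example_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", example_value);
	}

	static ssize_t example_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t n)
	{
		if (sscanf(buf, "%d", &example_value) != 1)
			return -EINVAL;
		return n;
	}

	static struct kobj_attribute example_attr =
		__ATTR(example, 0644, example_show, example_store);
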
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 3cdf95b1dc92..efc08360e627 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -28,6 +28,9 @@ BLOCKING_NOTIFIER_HEAD(pm_chain_head);
DEFINE_MUTEX(pm_mutex);
+unsigned int pm_flags;
+EXPORT_SYMBOL(pm_flags);
+
#ifdef CONFIG_SUSPEND
/* This is just an arbitrary number */
@@ -273,8 +276,7 @@ EXPORT_SYMBOL(pm_suspend);
#endif /* CONFIG_SUSPEND */
-decl_subsys(power,NULL,NULL);
-
+struct kobject *power_kobj;
/**
* state - control system power state.
@@ -287,7 +289,8 @@ decl_subsys(power,NULL,NULL);
* proper enumerated value, and initiates a suspend transition.
*/
-static ssize_t state_show(struct kset *kset, char *buf)
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
char *s = buf;
#ifdef CONFIG_SUSPEND
@@ -308,7 +311,8 @@ static ssize_t state_show(struct kset *kset, char *buf)
return (s - buf);
}
-static ssize_t state_store(struct kset *kset, const char *buf, size_t n)
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
suspend_state_t state = PM_SUSPEND_STANDBY;
@@ -345,13 +349,15 @@ power_attr(state);
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
-static ssize_t pm_trace_show(struct kset *kset, char *buf)
+static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", pm_trace_enabled);
}
static ssize_t
-pm_trace_store(struct kset *kset, const char *buf, size_t n)
+pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
{
int val;
@@ -383,10 +389,10 @@ static struct attribute_group attr_group = {
static int __init pm_init(void)
{
- int error = subsystem_register(&power_subsys);
- if (!error)
- error = sysfs_create_group(&power_subsys.kobj,&attr_group);
- return error;
+ power_kobj = kobject_create_and_add("power", NULL);
+ if (!power_kobj)
+ return -ENOMEM;
+ return sysfs_create_group(power_kobj, &attr_group);
}
core_initcall(pm_init);
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index c50d15266c10..60c73fa670d5 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -27,8 +27,6 @@
#include <linux/interrupt.h>
#include <linux/mutex.h>
-int pm_active;
-
/*
* Locking notes:
* pm_devs_lock can be a semaphore providing pm ops are not called
@@ -204,6 +202,4 @@ int pm_send_all(pm_request_t rqst, void *data)
EXPORT_SYMBOL(pm_register);
EXPORT_SYMBOL(pm_send_all);
-EXPORT_SYMBOL(pm_active);
-
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 195dc4611764..2093c3a9a994 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -54,7 +54,7 @@ extern int pfn_is_nosave(unsigned long);
extern struct mutex pm_mutex;
#define power_attr(_name) \
-static struct subsys_attribute _name##_attr = { \
+static struct kobj_attribute _name##_attr = { \
.attr = { \
.name = __stringify(_name), \
.mode = 0644, \
@@ -63,8 +63,6 @@ static struct subsys_attribute _name##_attr = { \
.store = _name##_store, \
}
-extern struct kset power_subsys;
-
/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
extern int in_suspend;
diff --git a/kernel/printk.c b/kernel/printk.c
index a30fe33de395..89011bf8c106 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -817,7 +817,7 @@ __setup("console=", console_setup);
* commonly to provide a default console (ie from PROM variables) when
* the user has not supplied one.
*/
-int __init add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(char *name, int idx, char *options)
{
struct console_cmdline *c;
int i;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 7c76f2ffaeaa..c25db863081d 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,7 +120,7 @@ int ptrace_check_attach(struct task_struct *child, int kill)
return ret;
}
-static int may_attach(struct task_struct *task)
+int __ptrace_may_attach(struct task_struct *task)
{
/* May we inspect the given task?
* This check is used both for attaching with ptrace
@@ -154,7 +154,7 @@ int ptrace_may_attach(struct task_struct *task)
{
int err;
task_lock(task);
- err = may_attach(task);
+ err = __ptrace_may_attach(task);
task_unlock(task);
return !err;
}
@@ -196,7 +196,7 @@ repeat:
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
goto bad;
- retval = may_attach(task);
+ retval = __ptrace_may_attach(task);
if (retval)
goto bad;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index a66d4d1615f7..f2c1a04e9b18 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -549,7 +549,7 @@ static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
rdp->blimit = blimit;
}
-static void __devinit rcu_online_cpu(int cpu)
+static void __cpuinit rcu_online_cpu(int cpu)
{
struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
index e3055ba69159..092e4c620af9 100644
--- a/kernel/rtmutex-tester.c
+++ b/kernel/rtmutex-tester.c
@@ -394,7 +394,7 @@ static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
static struct sysdev_class rttest_sysclass = {
- set_kset_name("rttest"),
+ .name = "rttest",
};
static int init_test_thread(int id)
diff --git a/kernel/rwsem.c b/kernel/rwsem.c
index 1ec620c03064..cae050b05f5e 100644
--- a/kernel/rwsem.c
+++ b/kernel/rwsem.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rwsem.h>
@@ -15,7 +16,7 @@
/*
* lock for reading
*/
-void down_read(struct rw_semaphore *sem)
+void __sched down_read(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
@@ -42,7 +43,7 @@ EXPORT_SYMBOL(down_read_trylock);
/*
* lock for writing
*/
-void down_write(struct rw_semaphore *sem)
+void __sched down_write(struct rw_semaphore *sem)
{
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
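
[Editor's note: the __sched annotations added above place down_read()/down_write() in the .sched.text section; get_wchan() skips that section when reporting a blocked task's wait channel, so callers of these primitives show up in /proc/<pid>/wchan instead of the primitives themselves. The idiom, sketched — the macro is paraphrased from include/linux/sched.h:]

	/* attach to any function that should be invisible to wchan */
	#define __sched  __attribute__((__section__(".sched.text")))

	void __sched my_wait_helper(struct completion *x)
	{
		wait_for_completion(x);	/* wchan reports our caller, not us */
	}
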
diff --git a/kernel/sched.c b/kernel/sched.c
index 38933cafea8a..e76b11ca6df3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,9 +209,8 @@ static inline struct task_group *task_group(struct task_struct *p)
tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
struct task_group, css);
#else
- tg = &init_task_group;
+ tg = &init_task_group;
#endif
-
return tg;
}
@@ -249,15 +248,16 @@ struct cfs_rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
- /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
+ /*
+ * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
* (like users, containers etc.)
*
* leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
* list is used during load balance.
*/
- struct list_head leaf_cfs_rq_list; /* Better name : task_cfs_rq_list? */
- struct task_group *tg; /* group that "owns" this runqueue */
+ struct list_head leaf_cfs_rq_list;
+ struct task_group *tg; /* group that "owns" this runqueue */
#endif
};
@@ -300,7 +300,7 @@ struct rq {
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
#endif
- struct rt_rq rt;
+ struct rt_rq rt;
/*
* This is part of a global counter where only the total sum
@@ -457,8 +457,8 @@ enum {
SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
SCHED_FEAT_WAKEUP_PREEMPT = 2,
SCHED_FEAT_START_DEBIT = 4,
- SCHED_FEAT_TREE_AVG = 8,
- SCHED_FEAT_APPROX_AVG = 16,
+ SCHED_FEAT_TREE_AVG = 8,
+ SCHED_FEAT_APPROX_AVG = 16,
};
const_debug unsigned int sysctl_sched_features =
@@ -488,7 +488,12 @@ unsigned long long cpu_clock(int cpu)
local_irq_save(flags);
rq = cpu_rq(cpu);
- update_rq_clock(rq);
+ /*
+ * Only call sched_clock() if the scheduler has already been
+ * initialized (some code might call cpu_clock() very early):
+ */
+ if (rq->idle)
+ update_rq_clock(rq);
now = rq->clock;
local_irq_restore(flags);
@@ -503,10 +508,15 @@ EXPORT_SYMBOL_GPL(cpu_clock);
# define finish_arch_switch(prev) do { } while (0)
#endif
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
- return rq->curr == p;
+ return task_current(rq, p);
}
static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
@@ -535,7 +545,7 @@ static inline int task_running(struct rq *rq, struct task_struct *p)
#ifdef CONFIG_SMP
return p->oncpu;
#else
- return rq->curr == p;
+ return task_current(rq, p);
#endif
}
@@ -591,7 +601,7 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
/*
* task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
+ * interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
@@ -658,6 +668,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
struct rq *rq = cpu_rq(smp_processor_id());
u64 now = sched_clock();
+ touch_softlockup_watchdog();
rq->idle_clock += delta_ns;
/*
* Override the previous timestamp and ignore all
@@ -779,7 +790,7 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
* To aid in avoiding the subversion of "niceness" due to uneven distribution
* of tasks with abnormal "nice" values across CPUs the contribution that
* each task makes to its run queue's load is weighted according to its
- * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
* scaled version of the new time slice allocation that they receive on time
* slice expiry etc.
*/
@@ -854,6 +865,12 @@ iter_move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct rq_iterator *iterator);
#endif
+#ifdef CONFIG_CGROUP_CPUACCT
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
+#else
+static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
+#endif
+
#include "sched_stats.h"
#include "sched_idletask.c"
#include "sched_fair.c"
@@ -1848,7 +1865,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
* and do any other architecture-specific cleanup actions.
*
* Note that we may have delayed dropping an mm in context_switch(). If
- * so, we finish that here outside of the runqueue lock. (Doing it
+ * so, we finish that here outside of the runqueue lock. (Doing it
* with the lock held can cause deadlocks; see schedule() for
* details.)
*/
@@ -2130,7 +2147,7 @@ static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
/*
* If dest_cpu is allowed for this process, migrate the task to it.
* This is accomplished by forcing the cpu_allowed mask to only
- * allow dest_cpu, which will force the cpu onto dest_cpu. Then
+ * allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
*/
static void sched_migrate_task(struct task_struct *p, int dest_cpu)
@@ -2575,7 +2592,7 @@ group_next:
* tasks around. Thus we look for the minimum possible imbalance.
* Negative imbalances (*we* are more loaded than anyone else) will
* be counted as no imbalance for these purposes -- we can't fix that
- * by pulling tasks to us. Be careful of negative numbers as they'll
+ * by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
if (max_load <= busiest_load_per_task)
@@ -3010,7 +3027,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
/*
* This condition is "impossible", if it occurs
- * we need to fix it. Originally reported by
+ * we need to fix it. Originally reported by
* Bjorn Helgaas on a 128-cpu setup.
*/
BUG_ON(busiest_rq == target_rq);
@@ -3042,7 +3059,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
#ifdef CONFIG_NO_HZ
static struct {
atomic_t load_balancer;
- cpumask_t cpu_mask;
+ cpumask_t cpu_mask;
} nohz ____cacheline_aligned = {
.load_balancer = ATOMIC_INIT(-1),
.cpu_mask = CPU_MASK_NONE,
@@ -3323,7 +3340,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
rq = task_rq_lock(p, &flags);
ns = p->se.sum_exec_runtime;
- if (rq->curr == p) {
+ if (task_current(rq, p)) {
update_rq_clock(rq);
delta_exec = rq->clock - p->se.exec_start;
if ((s64)delta_exec > 0)
@@ -3546,7 +3563,7 @@ static noinline void __schedule_bug(struct task_struct *prev)
static inline void schedule_debug(struct task_struct *prev)
{
/*
- * Test if we are atomic. Since do_exit() needs to call into
+ * Test if we are atomic. Since do_exit() needs to call into
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
@@ -3668,7 +3685,7 @@ EXPORT_SYMBOL(schedule);
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
- * off of preempt_enable. Kernel preemptions off return from interrupt
+ * off of preempt_enable. Kernel preemptions off return from interrupt
* occur there and call schedule directly.
*/
asmlinkage void __sched preempt_schedule(void)
@@ -3680,7 +3697,7 @@ asmlinkage void __sched preempt_schedule(void)
#endif
/*
* If there is a non-zero preempt_count or interrupts are disabled,
- * we do not want to preempt the current task. Just return..
+ * we do not want to preempt the current task. Just return..
*/
if (likely(ti->preempt_count || irqs_disabled()))
return;
@@ -3766,12 +3783,12 @@ int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
EXPORT_SYMBOL(default_wake_function);
/*
- * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
- * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
+ * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
+ * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
* number) then we wake all the non-exclusive tasks and one exclusive task.
*
* There are circumstances in which we can try to wake a task which has already
- * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
+ * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
@@ -4010,7 +4027,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
oldprio = p->prio;
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
dequeue_task(rq, p, 0);
if (running)
@@ -4321,7 +4338,7 @@ recheck:
}
update_rq_clock(rq);
on_rq = p->se.on_rq;
- running = task_running(rq, p);
+ running = task_current(rq, p);
if (on_rq) {
deactivate_task(rq, p, 0);
if (running)
@@ -4384,8 +4401,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
- struct sched_param __user *param)
+asmlinkage long
+sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
{
/* negative values for policy are not valid */
if (policy < 0)
@@ -4485,7 +4502,7 @@ long sched_setaffinity(pid_t pid, cpumask_t new_mask)
/*
* It is not safe to call set_cpus_allowed with the
- * tasklist_lock held. We will bump the task_struct's
+ * tasklist_lock held. We will bump the task_struct's
* usage count and then drop tasklist_lock.
*/
get_task_struct(p);
@@ -4681,7 +4698,7 @@ EXPORT_SYMBOL(cond_resched);
* cond_resched_lock() - if a reschedule is pending, drop the given lock,
* call schedule, and on return reacquire the lock.
*
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
@@ -4735,7 +4752,7 @@ void __sched yield(void)
EXPORT_SYMBOL(yield);
/*
- * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
* that process accounting knows that this is a task in IO wait state.
*
* But don't do that if it is a deliberate, throttling IO wait (this task
@@ -4844,17 +4861,21 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- if (p->policy == SCHED_FIFO)
- time_slice = 0;
- else if (p->policy == SCHED_RR)
+ /*
+ * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
+ * tasks that are on an otherwise idle runqueue:
+ */
+ time_slice = 0;
+ if (p->policy == SCHED_RR) {
time_slice = DEF_TIMESLICE;
- else {
+ } else {
struct sched_entity *se = &p->se;
unsigned long flags;
struct rq *rq;
rq = task_rq_lock(p, &flags);
- time_slice = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se));
+ if (rq->cfs.load.weight)
+ time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
task_rq_unlock(rq, &flags);
}
read_unlock(&tasklist_lock);
@@ -4897,7 +4918,7 @@ static void show_task(struct task_struct *p)
}
#endif
printk(KERN_CONT "%5lu %5d %6d\n", free,
- task_pid_nr(p), task_pid_nr(p->parent));
+ task_pid_nr(p), task_pid_nr(p->real_parent));
if (state != TASK_RUNNING)
show_stack(p, NULL);
@@ -5040,7 +5061,7 @@ static inline void sched_init_granularity(void)
* is removed from the allowed bitmask.
*
* NOTE: the caller must have a valid reference to the task, the
- * task must not exit() & deallocate itself prematurely. The
+ * task must not exit() & deallocate itself prematurely. The
* call is not atomic; no spinlocks may be held.
*/
int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
@@ -5077,7 +5098,7 @@ out:
EXPORT_SYMBOL_GPL(set_cpus_allowed);
/*
- * Move (not current) task off this cpu, onto dest cpu. We're doing
+ * Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
* attempting to rebalance this task on exec (sched_exec).
@@ -5222,7 +5243,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* Try to stay on the same cpuset, where the
* current cpuset may be a subset of all cpus.
* The cpuset_cpus_allowed_locked() variant of
- * cpuset_cpus_allowed() will not block. It must be
+ * cpuset_cpus_allowed() will not block. It must be
* called within calls to cpuset_lock/cpuset_unlock.
*/
rq = task_rq_lock(p, &flags);
@@ -5235,10 +5256,11 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
* kernel threads (both mm NULL), since they never
* leave kernel.
*/
- if (p->mm && printk_ratelimit())
+ if (p->mm && printk_ratelimit()) {
printk(KERN_INFO "process %d (%s) no "
"longer affine to cpu%d\n",
- task_pid_nr(p), p->comm, dead_cpu);
+ task_pid_nr(p), p->comm, dead_cpu);
+ }
}
} while (!__migrate_task_irq(p, dead_cpu, dest_cpu));
}
@@ -5340,7 +5362,7 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
/*
* Drop lock around migration; if someone else moves it,
- * that's OK. No task can be added to this CPU, so iteration is
+ * that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
spin_unlock_irq(&rq->lock);
@@ -5404,7 +5426,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
/*
* In the intermediate directories, both the child directory and
* procname are dynamically allocated and could fail but the mode
- * will always be set. In the lowest directory the names are
+ * will always be set. In the lowest directory the names are
* static strings and all have proc handlers.
*/
for (entry = *tablep; entry->mode; entry++) {
@@ -5466,7 +5488,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
return table;
}
-static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
{
struct ctl_table *entry, *table;
struct sched_domain *sd;
@@ -5575,7 +5597,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
case CPU_UP_CANCELED_FROZEN:
if (!cpu_rq(cpu)->migration_thread)
break;
- /* Unbind it from offline cpu so it can run. Fall thru. */
+ /* Unbind it from offline cpu so it can run. Fall thru. */
kthread_bind(cpu_rq(cpu)->migration_thread,
any_online_cpu(cpu_online_map));
kthread_stop(cpu_rq(cpu)->migration_thread);
@@ -5602,9 +5624,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);
- /* No need to migrate the tasks: it was best-effort if
- * they didn't take sched_hotcpu_mutex. Just wake up
- * the requestors. */
+ /*
+ * No need to migrate the tasks: it was best-effort if
+ * they didn't take sched_hotcpu_mutex. Just wake up
+ * the requestors.
+ */
spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
@@ -5912,7 +5936,7 @@ init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
* @node: node whose sched_domain we're building
* @used_nodes: nodes already in the sched_domain
*
- * Find the next node to include in a given scheduling domain. Simply
+ * Find the next node to include in a given scheduling domain. Simply
* finds the closest node not already in the @used_nodes map.
*
* Should use nodemask_t.
@@ -5952,7 +5976,7 @@ static int find_next_best_node(int node, unsigned long *used_nodes)
* @node: node whose cpumask we're constructing
* @size: number of nodes to include in this span
*
- * Given a node, construct a good cpumask for its sched_domain to span. It
+ * Given a node, construct a good cpumask for its sched_domain to span. It
* should be one that prevents unnecessary balancing, but also spreads tasks
* out optimally.
*/
@@ -5989,8 +6013,8 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
-static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu);
@@ -6007,8 +6031,8 @@ static DEFINE_PER_CPU(struct sched_group, sched_group_core);
#endif
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
@@ -6019,8 +6043,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
return group;
}
#elif defined(CONFIG_SCHED_MC)
-static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu);
@@ -6031,8 +6055,8 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
-static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+static int
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
{
int group;
#ifdef CONFIG_SCHED_MC
@@ -6212,7 +6236,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
* Allocate the per-node list of sched groups
*/
sched_group_nodes = kcalloc(MAX_NUMNODES, sizeof(struct sched_group *),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
return -ENOMEM;
@@ -6459,7 +6483,7 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
static cpumask_t fallback_doms;
/*
- * Set up scheduler domains and groups. Callers must hold the hotplug lock.
+ * Set up scheduler domains and groups. Callers must hold the hotplug lock.
* For now this just excludes isolated cpus, but could be used to
* exclude other special cases in the future.
*/
@@ -6501,19 +6525,19 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
/*
* Partition sched domains as specified by the 'ndoms_new'
- * cpumasks in the array doms_new[] of cpumasks. This compares
+ * cpumasks in the array doms_new[] of cpumasks. This compares
* doms_new[] to the current sched domain partitioning, doms_cur[].
* It destroys each deleted domain and builds each new domain.
*
* 'doms_new' is an array of cpumask_t's of length 'ndoms_new'.
- * The masks don't intersect (don't overlap.) We should setup one
- * sched domain for each mask. CPUs not in any of the cpumasks will
- * not be load balanced. If the same cpumask appears both in the
+ * The masks don't intersect (don't overlap). We should set up one
+ * sched domain for each mask. CPUs not in any of the cpumasks will
+ * not be load balanced. If the same cpumask appears both in the
* current 'doms_cur' domains and in the new 'doms_new', we can leave
* it as it is.
*
- * The passed in 'doms_new' should be kmalloc'd. This routine takes
- * ownership of it and will kfree it when done with it. If the caller
+ * The passed in 'doms_new' should be kmalloc'd. This routine takes
+ * ownership of it and will kfree it when done with it. If the caller
* failed the kmalloc call, then it can pass in doms_new == NULL,
* and partition_sched_domains() will fallback to the single partition
* 'fallback_doms'.
@@ -6643,7 +6667,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
#endif
/*
- * Force a reinitialization of the sched domains hierarchy. The domains
+ * Force a reinitialization of the sched domains hierarchy. The domains
* and groups cannot be updated in place without racing with the balancing
* code, so we temporarily attach all running cpus to the NULL domain
* which will prevent rebalancing while the sched domains are recalculated.
@@ -6708,9 +6732,6 @@ void __init sched_init_smp(void)
int in_sched_functions(unsigned long addr)
{
- /* Linker adds these: start and end of __sched functions */
- extern char __sched_text_start[], __sched_text_end[];
-
return in_lock_functions(addr) ||
(addr >= (unsigned long)__sched_text_start
&& addr < (unsigned long)__sched_text_end);
@@ -6936,8 +6957,8 @@ struct task_struct *curr_task(int cpu)
* @p: the task pointer to set.
*
* Description: This function must only be used when non-maskable interrupts
- * are serviced on a separate stack. It allows the architecture to switch the
- * notion of the current task on a cpu in a non-blocking manner. This function
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a cpu in a non-blocking manner. This function
* must be called with all CPUs synchronized, and interrupts disabled; the
* caller must save the original value of the current task (see
* curr_task() above) and restore that value before reenabling interrupts and
@@ -7086,7 +7107,7 @@ void sched_move_task(struct task_struct *tsk)
update_rq_clock(rq);
- running = task_running(rq, tsk);
+ running = task_current(rq, tsk);
on_rq = tsk->se.on_rq;
if (on_rq) {
@@ -7132,6 +7153,14 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
int i;
+ /*
+ * A weight of 0 or 1 can cause arithmetic problems.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
+ if (shares < 2)
+ shares = 2;
+
spin_lock(&tg->lock);
if (tg->shares == shares)
goto done;
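The clamp above keeps the group's load weight out of the degenerate 0/1 range; in isolation it is equivalent to this sketch (illustrative helper, not in the patch):

/* Keep group shares >= 2 so later weight divisions stay well-defined. */
static unsigned long clamp_group_shares(unsigned long shares)
{
	return shares < 2 ? 2 : shares;
}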
@@ -7186,16 +7215,17 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
return &tg->css;
}
-static void cpu_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void
+cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
struct task_group *tg = cgroup_tg(cgrp);
sched_destroy_group(tg);
}
-static int cpu_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgrp, struct task_struct *tsk)
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct task_struct *tsk)
{
/* We don't support RT-tasks being in separate groups */
if (tsk->sched_class != &fair_sched_class)
@@ -7224,38 +7254,12 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
return (u64) tg->shares;
}
-static u64 cpu_usage_read(struct cgroup *cgrp, struct cftype *cft)
-{
- struct task_group *tg = cgroup_tg(cgrp);
- unsigned long flags;
- u64 res = 0;
- int i;
-
- for_each_possible_cpu(i) {
- /*
- * Lock to prevent races with updating 64-bit counters
- * on 32-bit arches.
- */
- spin_lock_irqsave(&cpu_rq(i)->lock, flags);
- res += tg->se[i]->sum_exec_runtime;
- spin_unlock_irqrestore(&cpu_rq(i)->lock, flags);
- }
- /* Convert from ns to ms */
- do_div(res, NSEC_PER_MSEC);
-
- return res;
-}
-
static struct cftype cpu_files[] = {
{
.name = "shares",
.read_uint = cpu_shares_read_uint,
.write_uint = cpu_shares_write_uint,
},
- {
- .name = "usage",
- .read_uint = cpu_usage_read,
- },
};
static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
@@ -7275,3 +7279,126 @@ struct cgroup_subsys cpu_cgroup_subsys = {
};
#endif /* CONFIG_FAIR_CGROUP_SCHED */
+
+#ifdef CONFIG_CGROUP_CPUACCT
+
+/*
+ * CPU accounting code for task groups.
+ *
+ * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
+ * (balbir@in.ibm.com).
+ */
+
+/* track cpu usage of a group of tasks */
+struct cpuacct {
+ struct cgroup_subsys_state css;
+ /* cpuusage holds pointer to a u64-type object on every cpu */
+ u64 *cpuusage;
+};
+
+struct cgroup_subsys cpuacct_subsys;
+
+/* return cpu accounting group corresponding to this container */
+static inline struct cpuacct *cgroup_ca(struct cgroup *cont)
+{
+ return container_of(cgroup_subsys_state(cont, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+/* return cpu accounting group to which this task belongs */
+static inline struct cpuacct *task_ca(struct task_struct *tsk)
+{
+ return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
+ struct cpuacct, css);
+}
+
+/* create a new cpu accounting group */
+static struct cgroup_subsys_state *cpuacct_create(
+ struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+
+ if (!ca)
+ return ERR_PTR(-ENOMEM);
+
+ ca->cpuusage = alloc_percpu(u64);
+ if (!ca->cpuusage) {
+ kfree(ca);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return &ca->css;
+}
+
+/* destroy an existing cpu accounting group */
+static void
+cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+
+ free_percpu(ca->cpuusage);
+ kfree(ca);
+}
+
+/* return total cpu usage (in nanoseconds) of a group */
+static u64 cpuusage_read(struct cgroup *cont, struct cftype *cft)
+{
+ struct cpuacct *ca = cgroup_ca(cont);
+ u64 totalcpuusage = 0;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, i);
+
+ /*
+ * Take rq->lock to make 64-bit addition safe on 32-bit
+ * platforms.
+ */
+ spin_lock_irq(&cpu_rq(i)->lock);
+ totalcpuusage += *cpuusage;
+ spin_unlock_irq(&cpu_rq(i)->lock);
+ }
+
+ return totalcpuusage;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "usage",
+ .read_uint = cpuusage_read,
+ },
+};
+
+static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cont)
+{
+ return cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+}
+
+/*
+ * Charge this task's execution time to its accounting group.
+ *
+ * Called with rq->lock held.
+ */
+static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
+{
+ struct cpuacct *ca;
+
+ if (!cpuacct_subsys.active)
+ return;
+
+ ca = task_ca(tsk);
+ if (ca) {
+ u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk));
+
+ *cpuusage += cputime;
+ }
+}
+
+struct cgroup_subsys cpuacct_subsys = {
+ .name = "cpuacct",
+ .create = cpuacct_create,
+ .destroy = cpuacct_destroy,
+ .populate = cpuacct_populate,
+ .subsys_id = cpuacct_subsys_id,
+};
+#endif /* CONFIG_CGROUP_CPUACCT */
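Once the controller is mounted, the total accumulated by cpuacct_charge() is readable from the group's "usage" file. A hedged userspace sketch; the mount point /cgroups/cpuacct is an assumption, not part of the patch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/cgroups/cpuacct/usage", "r"); /* assumed mount point */
	unsigned long long ns;

	if (f && fscanf(f, "%llu", &ns) == 1)
		printf("group cpu time: %llu ns\n", ns); /* value from cpuusage_read() */
	if (f)
		fclose(f);
	return 0;
}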
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index ca198a797bfa..80fbbfc04290 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -31,9 +31,9 @@
/*
* Ease the printing of nsec fields:
*/
-static long long nsec_high(long long nsec)
+static long long nsec_high(unsigned long long nsec)
{
- if (nsec < 0) {
+ if ((long long)nsec < 0) {
nsec = -nsec;
do_div(nsec, 1000000);
return -nsec;
@@ -43,9 +43,9 @@ static long long nsec_high(long long nsec)
return nsec;
}
-static unsigned long nsec_low(long long nsec)
+static unsigned long nsec_low(unsigned long long nsec)
{
- if (nsec < 0)
+ if ((long long)nsec < 0)
nsec = -nsec;
return do_div(nsec, 1000000);
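The casts exist because do_div() insists on an unsigned 64-bit dividend; stripped of that constraint, the split the two helpers implement looks like this (userspace model, not kernel code):

/* Model of nsec_high(): negate, divide, then restore the sign. */
static long long ns_to_ms(long long nsec)
{
	if (nsec < 0)
		return -((-nsec) / 1000000);
	return nsec / 1000000;
}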
@@ -199,7 +199,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Sched Debug Version: v0.06-v22, %s %.*s\n",
+ SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n",
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
@@ -327,10 +327,12 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
avg_atom = -1LL;
avg_per_cpu = p->se.sum_exec_runtime;
- if (p->se.nr_migrations)
- avg_per_cpu = div64_64(avg_per_cpu, p->se.nr_migrations);
- else
+ if (p->se.nr_migrations) {
+ avg_per_cpu = div64_64(avg_per_cpu,
+ p->se.nr_migrations);
+ } else {
avg_per_cpu = -1LL;
+ }
__PN(avg_atom);
__PN(avg_per_cpu);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ee00da284b12..da7c061e7206 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,7 +22,7 @@
/*
* Targeted preemption latency for CPU-bound tasks:
- * (default: 20ms * ilog(ncpus), units: nanoseconds)
+ * (default: 20ms * (1 + ilog(ncpus)), units: nanoseconds)
*
* NOTE: this latency value is not the same as the concept of
* 'timeslice length' - timeslices in CFS are of variable length
@@ -36,14 +36,14 @@ unsigned int sysctl_sched_latency = 20000000ULL;
/*
* Minimal preemption granularity for CPU-bound tasks:
- * (default: 1 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 4 msec * (1 + ilog(ncpus)), units: nanoseconds)
*/
-unsigned int sysctl_sched_min_granularity = 1000000ULL;
+unsigned int sysctl_sched_min_granularity = 4000000ULL;
/*
* is kept at sysctl_sched_latency / sysctl_sched_min_granularity
*/
-static unsigned int sched_nr_latency = 20;
+static unsigned int sched_nr_latency = 5;
/*
* After fork, child runs first. (default) If set to 0 then
@@ -61,7 +61,7 @@ unsigned int __read_mostly sysctl_sched_compat_yield;
/*
* SCHED_BATCH wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
@@ -71,7 +71,7 @@ unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;
/*
* SCHED_OTHER wake-up granularity.
- * (default: 10 msec * ilog(ncpus), units: nanoseconds)
+ * (default: 10 msec * (1 + ilog(ncpus)), units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
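The "(1 + ilog(ncpus))" factor in these comments is the boot-time scaling applied to each default; modelled as a plain C helper (hypothetical, the kernel derives it from the online CPU count):

/* 1 + ilog2(ncpus): 1 cpu -> 1x, 2 cpus -> 2x, 4 cpus -> 3x, 8 cpus -> 4x. */
static unsigned int latency_scale_factor(unsigned int ncpus)
{
	unsigned int factor = 1;

	while (ncpus >>= 1)
		factor++;
	return factor;
}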
@@ -351,6 +351,12 @@ static void update_curr(struct cfs_rq *cfs_rq)
__update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
+
+ if (entity_is_task(curr)) {
+ struct task_struct *curtask = task_of(curr);
+
+ cpuacct_charge(curtask, delta_exec);
+ }
}
static inline void
@@ -505,8 +511,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
if (!initial) {
/* sleeps up to a single latency don't count. */
- if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
- task_of(se)->policy != SCHED_BATCH)
+ if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se))
vruntime -= sysctl_sched_latency;
/* ensure we never gain time by being placed backwards. */
@@ -793,8 +798,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
*/
static void yield_task_fair(struct rq *rq)
{
- struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
- struct sched_entity *rightmost, *se = &rq->curr->se;
+ struct task_struct *curr = rq->curr;
+ struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+ struct sched_entity *rightmost, *se = &curr->se;
/*
* Are we the only task in the tree?
@@ -802,7 +808,7 @@ static void yield_task_fair(struct rq *rq)
if (unlikely(cfs_rq->nr_running == 1))
return;
- if (likely(!sysctl_sched_compat_yield)) {
+ if (likely(!sysctl_sched_compat_yield) && curr->policy != SCHED_BATCH) {
__update_rq_clock(rq);
/*
* Update run-time statistics of the 'current'.
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8abd752a0ebd..9ba3daa03475 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -23,6 +23,7 @@ static void update_curr_rt(struct rq *rq)
curr->se.sum_exec_runtime += delta_exec;
curr->se.exec_start = rq->clock;
+ cpuacct_charge(curr, delta_exec);
}
static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
@@ -207,6 +208,8 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
static void task_tick_rt(struct rq *rq, struct task_struct *p)
{
+ update_curr_rt(rq);
+
/*
* RR tasks need a special form of timeslice management.
* FIFO tasks have no timeslices.
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 630178e53bb6..5b32433e7ee5 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -52,7 +52,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->lb_nobusyq[itype],
sd->lb_nobusyg[itype]);
}
- seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
+ seq_printf(seq,
+ " %u %u %u %u %u %u %u %u %u %u %u %u\n",
sd->alb_count, sd->alb_failed, sd->alb_pushed,
sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0deed82a6156..c68f68dcc605 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -225,10 +225,10 @@ static struct ctl_table root_table[] = {
};
#ifdef CONFIG_SCHED_DEBUG
-static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
-static unsigned long max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
-static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
-static unsigned long max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_sched_granularity_ns = 100000; /* 100 usecs */
+static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */
+static int min_wakeup_granularity_ns; /* 0 usecs */
+static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
#endif
static struct ctl_table kern_table[] = {
@@ -906,11 +906,11 @@ static struct ctl_table vm_table[] = {
},
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "hugetlb_dynamic_pool",
- .data = &hugetlb_dynamic_pool,
- .maxlen = sizeof(hugetlb_dynamic_pool),
+ .procname = "nr_overcommit_hugepages",
+ .data = &nr_overcommit_huge_pages,
+ .maxlen = sizeof(nr_overcommit_huge_pages),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_doulongvec_minmax,
},
#endif
{
@@ -1588,6 +1588,10 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
void unregister_sysctl_table(struct ctl_table_header * header)
{
might_sleep();
+
+ if (header == NULL)
+ return;
+
spin_lock(&sysctl_lock);
start_unregistering(header);
spin_unlock(&sysctl_lock);
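The new NULL check gives unregister_sysctl_table() kfree(NULL)-style semantics, so error paths no longer need their own guard; a caller sketch with illustrative names:

static struct ctl_table_header *my_sysctl_header; /* hypothetical module state */

static void my_module_cleanup(void)
{
	/* safe even if registration failed and left the header NULL */
	unregister_sysctl_table(my_sysctl_header);
}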
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 4abc6d2306f4..a68425a5cc1d 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -96,7 +96,7 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_PTY, "pty", trans_pty_table },
{ KERN_NGROUPS_MAX, "ngroups_max" },
- { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
+ { KERN_SPARC_SCONS_PWROFF, "scons-poweroff" },
{ KERN_HZ_TIMER, "hz_timer" },
{ KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
{ KERN_BOOTLOADER_TYPE, "bootloader_type" },
@@ -140,9 +140,6 @@ static struct trans_ctl_table trans_vm_table[] = {
{ VM_PANIC_ON_OOM, "panic_on_oom" },
{ VM_VDSO_ENABLED, "vdso_enabled" },
{ VM_MIN_SLAB, "min_slab_ratio" },
- { VM_CMM_PAGES, "cmm_pages" },
- { VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
- { VM_CMM_TIMEOUT, "cmm_timeout" },
{}
};
@@ -237,36 +234,6 @@ static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
{}
};
-
-static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
- { NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
- { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
- { NET_IPV4_VS_AMDROPRATE, "am_droprate" },
- { NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
- { NET_IPV4_VS_DROP_PACKET, "drop_packet" },
- { NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
- { NET_IPV4_VS_TO_ES, "timeout_established" },
- { NET_IPV4_VS_TO_SS, "timeout_synsent" },
- { NET_IPV4_VS_TO_SR, "timeout_synrecv" },
- { NET_IPV4_VS_TO_FW, "timeout_finwait" },
- { NET_IPV4_VS_TO_TW, "timeout_timewait" },
- { NET_IPV4_VS_TO_CL, "timeout_close" },
- { NET_IPV4_VS_TO_CW, "timeout_closewait" },
- { NET_IPV4_VS_TO_LA, "timeout_lastack" },
- { NET_IPV4_VS_TO_LI, "timeout_listen" },
- { NET_IPV4_VS_TO_SA, "timeout_synack" },
- { NET_IPV4_VS_TO_UDP, "timeout_udp" },
- { NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
- { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
- { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
- { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
- { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
- { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
- { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
- { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
- {}
-};
-
static struct trans_ctl_table trans_net_neigh_vars_table[] = {
{ NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
{ NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
@@ -341,7 +308,6 @@ static struct trans_ctl_table trans_net_ipv4_table[] = {
{ NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
/* NET_IPV4_FIB_HASH unused */
{ NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
- { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
{ NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
{ NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
@@ -462,7 +428,7 @@ static struct trans_ctl_table trans_net_netrom_table[] = {
{}
};
-static struct trans_ctl_table trans_net_ax25_table[] = {
+static struct trans_ctl_table trans_net_ax25_param_table[] = {
{ NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
{ NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
{ NET_AX25_BACKOFF_TYPE, "backoff_type" },
@@ -480,6 +446,11 @@ static struct trans_ctl_table trans_net_ax25_table[] = {
{}
};
+static struct trans_ctl_table trans_net_ax25_table[] = {
+ { 0, NULL, trans_net_ax25_param_table },
+ {}
+};
+
static struct trans_ctl_table trans_net_bridge_table[] = {
{ NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
{ NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
@@ -1219,16 +1190,6 @@ static struct trans_ctl_table trans_arlan_table[] = {
{}
};
-static struct trans_ctl_table trans_appldata_table[] = {
- { CTL_APPLDATA_TIMER, "timer" },
- { CTL_APPLDATA_INTERVAL, "interval" },
- { CTL_APPLDATA_OS, "os" },
- { CTL_APPLDATA_NET_SUM, "net_sum" },
- { CTL_APPLDATA_MEM, "mem" },
- {}
-
-};
-
static struct trans_ctl_table trans_s390dbf_table[] = {
{ 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
{ 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
@@ -1273,7 +1234,6 @@ static struct trans_ctl_table trans_root_table[] = {
{ CTL_ABI, "abi" },
/* CTL_CPU not used */
{ CTL_ARLAN, "arlan", trans_arlan_table },
- { CTL_APPLDATA, "appldata", trans_appldata_table },
{ CTL_S390DBF, "s390dbf", trans_s390dbf_table },
{ CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
{ CTL_PM, "pm", trans_pm_table },
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 822beebe664a..5fb139fef9fa 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -78,6 +78,11 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
unsigned long long clc;
int64_t delta;
+ if (unlikely(expires.tv64 < 0)) {
+ WARN_ON_ONCE(1);
+ return -ETIME;
+ }
+
delta = ktime_to_ns(ktime_sub(expires, now));
if (delta <= 0)
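The added guard rejects an expiry time that is already negative before any delta math runs; reduced to its essence (illustrative model, not the kernel function):

#include <errno.h>

static int program_event_model(long long expires_ns)
{
	if (expires_ns < 0)
		return -ETIME;	/* mirrors the WARN_ON_ONCE path above */
	return 0;		/* otherwise go on to program the device */
}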
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c8a9d13874df..8d6125ad2cf0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -441,7 +441,7 @@ static SYSDEV_ATTR(available_clocksource, 0600,
sysfs_show_available_clocksources, NULL);
static struct sysdev_class clocksource_sysclass = {
- set_kset_name("clocksource"),
+ .name = "clocksource",
};
static struct sys_device device_clocksource = {
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 14a2ecf2b318..e64efaf957e8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -249,10 +249,12 @@ int do_adjtimex(struct timex *txc)
/* Now we validate the data before disabling interrupts */
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
/* singleshot must not be used with any other mode bits */
- if (txc->modes != ADJ_OFFSET_SINGLESHOT)
+ if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
+ txc->modes != ADJ_OFFSET_SS_READ)
return -EINVAL;
+ }
if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
/* adjustment Offset limited to +- .512 seconds */
@@ -372,7 +374,8 @@ int do_adjtimex(struct timex *txc)
leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
result = TIME_ERROR;
- if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT)
+ if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
+ (txc->modes == ADJ_OFFSET_SS_READ))
txc->offset = save_adjust;
else
txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
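ADJ_OFFSET_SS_READ lets a caller read the remaining singleshot adjustment without applying a new one; a hedged userspace sketch, assuming a libc that already exposes the constant:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = ADJ_OFFSET_SS_READ };

	if (adjtimex(&tx) >= 0)
		printf("pending singleshot offset: %ld\n", tx.offset);
	return 0;
}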
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa82d7bf478a..5b86698faa0b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,45 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
}
/*
- * Reprogram the broadcast device:
- *
- * Called with tick_broadcast_lock held and interrupts disabled.
- */
-static int tick_broadcast_reprogram(void)
-{
- ktime_t expires = { .tv64 = KTIME_MAX };
- struct tick_device *td;
- int cpu;
-
- /*
- * Find the event which expires next:
- */
- for (cpu = first_cpu(tick_broadcast_oneshot_mask); cpu != NR_CPUS;
- cpu = next_cpu(cpu, tick_broadcast_oneshot_mask)) {
- td = &per_cpu(tick_cpu_device, cpu);
- if (td->evtdev->next_event.tv64 < expires.tv64)
- expires = td->evtdev->next_event;
- }
-
- if (expires.tv64 == KTIME_MAX)
- return 0;
-
- return tick_broadcast_set_event(expires, 0);
-}
-
-/*
* Handle oneshot mode broadcasting
*/
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
struct tick_device *td;
cpumask_t mask;
- ktime_t now;
+ ktime_t now, next_event;
int cpu;
spin_lock(&tick_broadcast_lock);
again:
dev->next_event.tv64 = KTIME_MAX;
+ next_event.tv64 = KTIME_MAX;
mask = CPU_MASK_NONE;
now = ktime_get();
/* Find all expired events */
@@ -431,19 +405,31 @@ again:
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64)
cpu_set(cpu, mask);
+ else if (td->evtdev->next_event.tv64 < next_event.tv64)
+ next_event.tv64 = td->evtdev->next_event.tv64;
}
/*
- * Wakeup the cpus which have an expired event. The broadcast
- * device is reprogrammed in the return from idle code.
+ * Wakeup the cpus which have an expired event.
+ */
+ tick_do_broadcast(mask);
+
+ /*
+ * Two reasons to reprogram:
+ *
+ * - The global event did not expire any CPU local
+ * events. This happens in dyntick mode, as the maximum PIT
+ * delta is quite small.
+ *
+ * - There are pending events on sleeping CPUs which were not
+ * in the event mask.
*/
- if (!tick_do_broadcast(mask)) {
+ if (next_event.tv64 != KTIME_MAX) {
/*
- * The global event did not expire any CPU local
- * events. This happens in dyntick mode, as the
- * maximum PIT delta is quite small.
+ * Rearm the broadcast device. If an event expired,
+ * repeat the above.
*/
- if (tick_broadcast_reprogram())
+ if (tick_broadcast_set_event(next_event, 0))
goto again;
}
spin_unlock(&tick_broadcast_lock);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 27a2338deb4a..cb89fa8db110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -133,6 +133,8 @@ void tick_nohz_update_jiffies(void)
if (!ts->tick_stopped)
return;
+ touch_softlockup_watchdog();
+
cpu_clear(cpu, nohz_cpu_mask);
now = ktime_get();
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e5e466b27598..ab46ae8c062b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -335,9 +335,9 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
+ .name = "timekeeping",
.resume = timekeeping_resume,
.suspend = timekeeping_suspend,
- set_kset_name("timekeeping"),
};
static struct sys_device device_timer = {
diff --git a/kernel/timer.c b/kernel/timer.c
index a05817c021d6..2a00c22203f3 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,7 +978,7 @@ asmlinkage long sys_getppid(void)
int pid;
rcu_read_lock();
- pid = task_ppid_nr_ns(current, current->nsproxy->pid_ns);
+ pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns);
rcu_read_unlock();
return pid;
@@ -1219,11 +1219,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
*/
static struct lock_class_key base_lock_keys[NR_CPUS];
-static int __devinit init_timers_cpu(int cpu)
+static int __cpuinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
- static char __devinitdata tvec_base_done[NR_CPUS];
+ static char __cpuinitdata tvec_base_done[NR_CPUS];
if (!tvec_base_done[cpu]) {
static char boot_done;
@@ -1289,7 +1289,7 @@ static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
}
}
-static void __devinit migrate_timers(int cpu)
+static void __cpuinit migrate_timers(int cpu)
{
tvec_base_t *old_base;
tvec_base_t *new_base;
diff --git a/kernel/user.c b/kernel/user.c
index 0f3aa0234107..ab4fd706993b 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -115,7 +115,7 @@ static void sched_switch_user(struct task_struct *p) { }
#if defined(CONFIG_FAIR_USER_SCHED) && defined(CONFIG_SYSFS)
-static struct kobject uids_kobject; /* represents /sys/kernel/uids directory */
+static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);
static inline void uids_mutex_lock(void)
@@ -128,86 +128,83 @@ static inline void uids_mutex_unlock(void)
mutex_unlock(&uids_mutex);
}
-/* return cpu shares held by the user */
-static ssize_t cpu_shares_show(struct kset *kset, char *buffer)
+/* uid directory attributes */
+static ssize_t cpu_shares_show(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ char *buf)
{
- struct user_struct *up = container_of(kset, struct user_struct, kset);
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
- return sprintf(buffer, "%lu\n", sched_group_shares(up->tg));
+ return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}
-/* modify cpu shares held by the user */
-static ssize_t cpu_shares_store(struct kset *kset, const char *buffer,
- size_t size)
+static ssize_t cpu_shares_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t size)
{
- struct user_struct *up = container_of(kset, struct user_struct, kset);
+ struct user_struct *up = container_of(kobj, struct user_struct, kobj);
unsigned long shares;
int rc;
- sscanf(buffer, "%lu", &shares);
+ sscanf(buf, "%lu", &shares);
rc = sched_group_set_shares(up->tg, shares);
return (rc ? rc : size);
}
-static void user_attr_init(struct subsys_attribute *sa, char *name, int mode)
+static struct kobj_attribute cpu_share_attr =
+ __ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
+
+/* default attributes per uid directory */
+static struct attribute *uids_attributes[] = {
+ &cpu_share_attr.attr,
+ NULL
+};
+
+/* the lifetime of user_struct is not managed by the core (for now) */
+static void uids_release(struct kobject *kobj)
{
- sa->attr.name = name;
- sa->attr.mode = mode;
- sa->show = cpu_shares_show;
- sa->store = cpu_shares_store;
+ return;
}
-/* Create "/sys/kernel/uids/<uid>" directory and
- * "/sys/kernel/uids/<uid>/cpu_share" file for this user.
- */
-static int user_kobject_create(struct user_struct *up)
+static struct kobj_type uids_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_attrs = uids_attributes,
+ .release = uids_release,
+};
+
+/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
+static int uids_user_create(struct user_struct *up)
{
- struct kset *kset = &up->kset;
- struct kobject *kobj = &kset->kobj;
+ struct kobject *kobj = &up->kobj;
int error;
- memset(kset, 0, sizeof(struct kset));
- kobj->parent = &uids_kobject; /* create under /sys/kernel/uids dir */
- kobject_set_name(kobj, "%d", up->uid);
- kset_init(kset);
- user_attr_init(&up->user_attr, "cpu_share", 0644);
-
- error = kobject_add(kobj);
- if (error)
+ memset(kobj, 0, sizeof(struct kobject));
+ kobj->kset = uids_kset;
+ error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+ if (error) {
+ kobject_put(kobj);
goto done;
-
- error = sysfs_create_file(kobj, &up->user_attr.attr);
- if (error)
- kobject_del(kobj);
+ }
kobject_uevent(kobj, KOBJ_ADD);
-
done:
return error;
}
-/* create these in sysfs filesystem:
+/* create these entries in sysfs:
* "/sys/kernel/uids" directory
* "/sys/kernel/uids/0" directory (for root user)
* "/sys/kernel/uids/0/cpu_share" file (for root user)
*/
-int __init uids_kobject_init(void)
+int __init uids_sysfs_init(void)
{
- int error;
-
- /* create under /sys/kernel dir */
- uids_kobject.parent = &kernel_subsys.kobj;
- uids_kobject.kset = &kernel_subsys;
- kobject_set_name(&uids_kobject, "uids");
- kobject_init(&uids_kobject);
-
- error = kobject_add(&uids_kobject);
- if (!error)
- error = user_kobject_create(&root_user);
+ uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
+ if (!uids_kset)
+ return -ENOMEM;
- return error;
+ return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
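The conversion follows the then-new kset API: create the kset once at init, then have each per-uid kobject join it via kobject_init_and_add(). The same pattern in miniature, with illustrative names:

static struct kset *example_kset;

static int __init example_init(void)
{
	/* shows up as /sys/kernel/example/ */
	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
	if (!example_kset)
		return -ENOMEM;
	return 0;
}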
@@ -216,7 +213,6 @@ int __init uids_kobject_init(void)
static void remove_user_sysfs_dir(struct work_struct *w)
{
struct user_struct *up = container_of(w, struct user_struct, work);
- struct kobject *kobj = &up->kset.kobj;
unsigned long flags;
int remove_user = 0;
@@ -238,9 +234,9 @@ static void remove_user_sysfs_dir(struct work_struct *w)
if (!remove_user)
goto done;
- sysfs_remove_file(kobj, &up->user_attr.attr);
- kobject_uevent(kobj, KOBJ_REMOVE);
- kobject_del(kobj);
+ kobject_uevent(&up->kobj, KOBJ_REMOVE);
+ kobject_del(&up->kobj);
+ kobject_put(&up->kobj);
sched_destroy_user(up);
key_put(up->uid_keyring);
@@ -267,7 +263,8 @@ static inline void free_user(struct user_struct *up, unsigned long flags)
#else /* CONFIG_FAIR_USER_SCHED && CONFIG_SYSFS */
-static inline int user_kobject_create(struct user_struct *up) { return 0; }
+int uids_sysfs_init(void) { return 0; }
+static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
@@ -324,7 +321,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
struct hlist_head *hashent = uidhashentry(ns, uid);
struct user_struct *up;
- /* Make uid_hash_find() + user_kobject_create() + uid_hash_insert()
+ /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
* atomic.
*/
uids_mutex_lock();
@@ -337,8 +334,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
struct user_struct *new;
new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
- if (!new)
+ if (!new) {
+ uids_mutex_unlock();
return NULL;
+ }
+
new->uid = uid;
atomic_set(&new->__count, 1);
atomic_set(&new->processes, 0);
@@ -355,6 +355,7 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
if (alloc_uid_keyring(new, current) < 0) {
kmem_cache_free(uid_cachep, new);
+ uids_mutex_unlock();
return NULL;
}
@@ -362,10 +363,11 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
+ uids_mutex_unlock();
return NULL;
}
- if (user_kobject_create(new)) {
+ if (uids_user_create(new)) {
sched_destroy_user(new);
key_put(new->uid_keyring);
key_put(new->session_keyring);
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index c76c06466bfd..fe3a56c2256d 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -18,6 +18,10 @@
static void *get_uts(ctl_table *table, int write)
{
char *which = table->data;
+ struct uts_namespace *uts_ns;
+
+ uts_ns = current->nsproxy->uts_ns;
+ which = (which - (char *)&init_uts_ns) + (char *)uts_ns;
if (!write)
down_read(&uts_sem);
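The pointer arithmetic replays the field's byte offset within init_uts_ns onto the current task's namespace instance; isolated, the relocation is just (illustrative helper):

/* Translate a pointer to a field of init_uts_ns into the same field of uts_ns. */
static char *uts_field(char *field, char *init_base, char *ns_base)
{
	return ns_base + (field - init_base);
}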
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52d5e7c9a8e6..8db0b597509e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -722,7 +722,8 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
struct workqueue_struct *__create_workqueue_key(const char *name,
int singlethread,
int freezeable,
- struct lock_class_key *key)
+ struct lock_class_key *key,
+ const char *lock_name)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
@@ -739,7 +740,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
}
wq->name = name;
- lockdep_init_map(&wq->lockdep_map, name, key, 0);
+ lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
INIT_LIST_HEAD(&wq->list);
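With the extra lock_name parameter, the create_workqueue() wrappers can hand lockdep a readable class name instead of reusing the runtime name pointer; a sketch of such a wrapper, assuming the five-argument signature introduced above:

#define my_create_workqueue(name)					\
({									\
	static struct lock_class_key __key;				\
	__create_workqueue_key((name), 0, 0, &__key, (name));		\
})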