Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c          | 13
-rw-r--r--  kernel/futex.c         |  3
-rw-r--r--  kernel/kthread.c       |  2
-rw-r--r--  kernel/module.c        |  2
-rw-r--r--  kernel/power/swsusp.c  |  2
-rw-r--r--  kernel/printk.c        | 27
-rw-r--r--  kernel/profile.c       |  4
-rw-r--r--  kernel/ptrace.c        |  5
-rw-r--r--  kernel/rcupdate.c      | 22
-rw-r--r--  kernel/sched.c         | 28
-rw-r--r--  kernel/signal.c        |  9
-rw-r--r--  kernel/stop_machine.c  | 10
-rw-r--r--  kernel/sys.c           | 25
-rw-r--r--  kernel/sys_ni.c        |  1
-rw-r--r--  kernel/sysctl.c        |  2
-rw-r--r--  kernel/timer.c         |  2
16 files changed, 103 insertions(+), 54 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 39d35935b371..7be283d98983 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -27,6 +27,7 @@
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -38,6 +39,8 @@ extern struct task_struct *child_reaper;
int getrusage(struct task_struct *, int, struct rusage __user *);
+static void exit_mm(struct task_struct * tsk);
+
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
@@ -209,7 +212,7 @@ static inline int has_stopped_jobs(int pgrp)
}
/**
- * reparent_to_init() - Reparent the calling kernel thread to the init task.
+ * reparent_to_init - Reparent the calling kernel thread to the init task.
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to init so that
@@ -277,7 +280,7 @@ void set_special_pids(pid_t session, pid_t pgrp)
*/
int allow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
@@ -298,7 +301,7 @@ EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
- if (sig < 1 || sig > _NSIG)
+ if (!valid_signal(sig) || sig < 1)
return -EINVAL;
spin_lock_irq(&current->sighand->siglock);
@@ -473,7 +476,7 @@ EXPORT_SYMBOL_GPL(exit_fs);
* Turn us into a lazy TLB process if we
* aren't already..
*/
-void exit_mm(struct task_struct * tsk)
+static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
@@ -517,8 +520,6 @@ static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_re
*/
BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
p->real_parent = reaper;
- if (p->parent == p->real_parent)
- BUG();
}
static inline void reparent_thread(task_t *p, task_t *father, int traced)
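The exit.c hunks above (like the later futex.c, ptrace.c, signal.c and sys.c ones) replace open-coded "sig < 1 || sig > _NSIG" tests with valid_signal() from the newly included <linux/signal.h>. A minimal user-space sketch of that range-check idea follows; MY_NSIG and my_valid_signal() are stand-ins for illustration, not the kernel's definitions:

#include <stdio.h>

#define MY_NSIG 64                      /* stand-in for the kernel's _NSIG */

/* Taking the signal number as unsigned long makes negative values wrap
 * to huge numbers, so a single upper-bound test also rejects them. */
static inline int my_valid_signal(unsigned long sig)
{
        return sig <= MY_NSIG;
}

int main(void)
{
        int sig = -3;

        /* callers that also forbid signal 0 keep an explicit sig < 1 test */
        if (!my_valid_signal(sig) || sig < 1)
                printf("signal %d rejected\n", sig);
        else
                printf("signal %d accepted\n", sig);
        return 0;
}
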
diff --git a/kernel/futex.c b/kernel/futex.c
index 7b54a672d0ad..c7130f86106c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -39,6 +39,7 @@
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
+#include <linux/signal.h>
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
@@ -654,7 +655,7 @@ static int futex_fd(unsigned long uaddr, int signal)
int ret, err;
ret = -EINVAL;
- if (signal < 0 || signal > _NSIG)
+ if (!valid_signal(signal))
goto out;
ret = get_unused_fd();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e377e2244103..f50f174e92da 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -174,7 +174,7 @@ int kthread_stop(struct task_struct *k)
/* Must init completion *before* thread sees kthread_stop_info.k */
init_completion(&kthread_stop_info.done);
- wmb();
+ smp_wmb();
/* Now set kthread_should_stop() to true, and wake it up. */
kthread_stop_info.k = k;
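kthread.c downgrades wmb() to smp_wmb(): the barrier only has to order the completion initialisation against the store that publishes kthread_stop_info.k to another CPU, and smp_wmb() compiles away on uniprocessor builds where wmb() would still emit an I/O-strength barrier. A rough user-space analogue of that initialise-then-publish pattern, using C11 release/acquire in place of kernel barriers (all names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct stop_request {
        int done;                 /* plays the role of the completion */
        _Atomic int posted;       /* plays the role of kthread_stop_info.k */
};

static struct stop_request req;

static void request_stop(void)
{
        req.done = 0;                                     /* init first ...  */
        atomic_store_explicit(&req.posted, 1,
                              memory_order_release);      /* ... then publish */
}

static void kthread_side(void)
{
        if (atomic_load_explicit(&req.posted, memory_order_acquire))
                printf("done field seen as initialised: %d\n", req.done);
}

int main(void)
{
        request_stop();
        kthread_side();
        return 0;
}
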
diff --git a/kernel/module.c b/kernel/module.c
index 2dbfa0773faf..5734ab09d3f9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
/* Init routine failed: abort. Try to protect us from
buggy refcounters. */
mod->state = MODULE_STATE_GOING;
- synchronize_kernel();
+ synchronize_sched();
if (mod->unsafe)
printk(KERN_ERR "%s: module is now stuck!\n",
mod->name);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index ae5bebc3b18f..90b3b68dee3f 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -1099,7 +1099,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
return pblist;
}
-/**
+/*
* Using bio to read from swap.
* This code requires a bit more work than just using buffer heads
* but, it is the recommended way for 2.5/2.6.
diff --git a/kernel/printk.c b/kernel/printk.c
index 1498689548d1..290a07ce2c8a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -85,10 +85,6 @@ static int console_locked;
*/
static DEFINE_SPINLOCK(logbuf_lock);
-static char __log_buf[__LOG_BUF_LEN];
-static char *log_buf = __log_buf;
-static int log_buf_len = __LOG_BUF_LEN;
-
#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
@@ -99,7 +95,6 @@ static int log_buf_len = __LOG_BUF_LEN;
static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */
static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */
static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
/*
* Array of consoles built from command line options (console=)
@@ -120,6 +115,13 @@ static int preferred_console = -1;
/* Flag: console code may call schedule() */
static int console_may_schedule;
+#ifdef CONFIG_PRINTK
+
+static char __log_buf[__LOG_BUF_LEN];
+static char *log_buf = __log_buf;
+static int log_buf_len = __LOG_BUF_LEN;
+static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+
/*
* Setup a list of consoles. Called from init/main.c
*/
@@ -535,6 +537,7 @@ __setup("time", printk_time_setup);
* then changes console_loglevel may break. This is because console_loglevel
* is inspected when the actual printing occurs.
*/
+
asmlinkage int printk(const char *fmt, ...)
{
va_list args;
@@ -655,6 +658,18 @@ out:
EXPORT_SYMBOL(printk);
EXPORT_SYMBOL(vprintk);
+#else
+
+asmlinkage long sys_syslog(int type, char __user * buf, int len)
+{
+ return 0;
+}
+
+int do_syslog(int type, char __user * buf, int len) { return 0; }
+static void call_console_drivers(unsigned long start, unsigned long end) {}
+
+#endif
+
/**
* acquire_console_sem - lock the console system for exclusive use.
*
@@ -931,7 +946,7 @@ int unregister_console(struct console * console)
return res;
}
EXPORT_SYMBOL(unregister_console);
-
+
/**
* tty_write_message - write a message to a certain tty, not just the console.
*
diff --git a/kernel/profile.c b/kernel/profile.c
index a38fa70075fe..0221a50ca867 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
WARN_ON(hook != timer_hook);
timer_hook = NULL;
/* make sure all CPUs see the NULL hook */
- synchronize_kernel();
+ synchronize_sched(); /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(register_timer_hook);
@@ -522,7 +522,7 @@ static int __init create_hash_tables(void)
return 0;
out_cleanup:
prof_on = 0;
- mb();
+ smp_mb();
on_each_cpu(profile_nop, NULL, 0, 1);
for_each_online_cpu(cpu) {
struct page *page;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 88b306c4e841..8dcb8f6288bc 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -16,6 +16,7 @@
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
+#include <linux/signal.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -135,7 +136,7 @@ int ptrace_attach(struct task_struct *task)
(current->gid != task->sgid) ||
(current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
goto bad;
- rmb();
+ smp_rmb();
if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
goto bad;
/* the same process cannot be attached many times */
@@ -166,7 +167,7 @@ bad:
int ptrace_detach(struct task_struct *child, unsigned int data)
{
- if ((unsigned long) data > _NSIG)
+ if (!valid_signal(data))
return -EIO;
/* Architecture-specific hardware disable .. */
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index d00eded75d71..f436993bd590 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -444,15 +444,18 @@ static void wakeme_after_rcu(struct rcu_head *head)
}
/**
- * synchronize_kernel - wait until a grace period has elapsed.
+ * synchronize_rcu - wait until a grace period has elapsed.
*
* Control will return to the caller some time after a full grace
* period has elapsed, in other words after all currently executing RCU
* read-side critical sections have completed. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
+ *
+ * If your read-side code is not protected by rcu_read_lock(), do -not-
+ * use synchronize_rcu().
*/
-void synchronize_kernel(void)
+void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
@@ -464,7 +467,16 @@ void synchronize_kernel(void)
wait_for_completion(&rcu.completion);
}
+/*
+ * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
+ */
+void synchronize_kernel(void)
+{
+ synchronize_rcu();
+}
+
module_param(maxbatch, int, 0);
-EXPORT_SYMBOL_GPL(call_rcu);
-EXPORT_SYMBOL_GPL(call_rcu_bh);
-EXPORT_SYMBOL_GPL(synchronize_kernel);
+EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */
+EXPORT_SYMBOL_GPL(synchronize_rcu);
+EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */
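rcupdate.c renames synchronize_kernel() to synchronize_rcu(), keeps the old name as a thin forwarding wrapper so existing callers continue to build, and re-exports the legacy symbols with a comment warning that they become GPL-only later. A user-space sketch of that rename-plus-compatibility-shim pattern; the deprecated attribute and the function names below are illustrative, not what the kernel used:

#include <stdio.h>

/* New, preferred entry point. */
void wait_for_readers(void)
{
        puts("grace period elapsed");
}

/* Old name kept as a shim; GCC/Clang warn at each remaining call site. */
__attribute__((deprecated("use wait_for_readers() instead")))
void wait_for_kernel(void);

void wait_for_kernel(void)
{
        wait_for_readers();
}

int main(void)
{
        wait_for_readers();       /* converted callers */
        wait_for_kernel();        /* legacy callers still link and run */
        return 0;
}
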
diff --git a/kernel/sched.c b/kernel/sched.c
index 9bb7489ee645..0dc3158667a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2906,6 +2906,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
*/
void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, void *key)
@@ -2928,7 +2929,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
}
/**
- * __wake_up - sync- wake up threads blocked on a waitqueue.
+ * __wake_up_sync - wake up threads blocked on a waitqueue.
* @q: the waitqueue
* @mode: which threads
* @nr_exclusive: how many wake-one or wake-many threads to wake up
@@ -3223,6 +3224,19 @@ out_unlock:
EXPORT_SYMBOL(set_user_nice);
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const task_t *p, const int nice)
+{
+ /* convert nice value [19,-20] to rlimit style value [0,39] */
+ int nice_rlim = 19 - nice;
+ return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+ capable(CAP_SYS_NICE));
+}
+
#ifdef __ARCH_WANT_SYS_NICE
/*
@@ -3242,12 +3256,8 @@ asmlinkage long sys_nice(int increment)
* We don't have to worry. Conceptually one call occurs first
* and we have a single winner.
*/
- if (increment < 0) {
- if (!capable(CAP_SYS_NICE))
- return -EPERM;
- if (increment < -40)
- increment = -40;
- }
+ if (increment < -40)
+ increment = -40;
if (increment > 40)
increment = 40;
@@ -3257,6 +3267,9 @@ asmlinkage long sys_nice(int increment)
if (nice > 19)
nice = 19;
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
retval = security_task_setnice(current, nice);
if (retval)
return retval;
@@ -3372,6 +3385,7 @@ recheck:
return -EINVAL;
if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
+ param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur &&
!capable(CAP_SYS_NICE))
return -EPERM;
if ((current->euid != p->euid) && (current->euid != p->uid) &&
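The sched.c hunks add can_nice(), which folds the RLIMIT_NICE resource limit and the CAP_SYS_NICE escape hatch into one test: the nice value in [-20, 19] is mapped to a limit-style value in [0, 39] with 19 - nice and compared against the rlimit. sys_nice() then drops its open-coded capability branch, clamps the increment to [-40, 40] unconditionally, and defers the permission question to can_nice(); sys.c's set_one_prio() later switches to the same helper. A standalone arithmetic sketch of that conversion and clamping (NICE_RLIM_CUR is a made-up stand-in, and the capability escape hatch has no user-space analogue here):

#include <stdio.h>

#define NICE_RLIM_CUR 10        /* stand-in for rlim[RLIMIT_NICE].rlim_cur */

/* Mirrors the conversion in can_nice(): nice 19 -> 0, nice -20 -> 39. */
static int nice_allowed(int nice)
{
        int nice_rlim = 19 - nice;

        return nice_rlim <= NICE_RLIM_CUR;
}

int main(void)
{
        int increment = -55;
        int nice;

        /* sys_nice() now clamps the increment unconditionally ... */
        if (increment < -40)
                increment = -40;
        if (increment > 40)
                increment = 40;

        nice = 0 + increment;           /* current nice assumed to be 0 */
        if (nice < -20)
                nice = -20;
        if (nice > 19)
                nice = 19;

        /* ... and only then asks whether lowering it is permitted. */
        printf("nice %d: %s\n", nice,
               nice_allowed(nice) ? "allowed by rlimit" : "needs CAP_SYS_NICE");
        return 0;
}
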
diff --git a/kernel/signal.c b/kernel/signal.c
index e6567d7f2b62..8f3debc77c5b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -23,6 +23,7 @@
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/posix-timers.h>
+#include <linux/signal.h>
#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -646,7 +647,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
struct task_struct *t)
{
int error = -EINVAL;
- if (sig < 0 || sig > _NSIG)
+ if (!valid_signal(sig))
return error;
error = -EPERM;
if ((!info || ((unsigned long)info != 1 &&
@@ -1245,7 +1246,7 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
* Make sure legacy kernel users don't send in bad values
* (normal paths check this in check_kill_permission).
*/
- if (sig < 0 || sig > _NSIG)
+ if (!valid_signal(sig))
return -EINVAL;
/*
@@ -1520,7 +1521,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
sig = 0;
}
- if (sig > 0 && sig <= _NSIG)
+ if (valid_signal(sig) && sig > 0)
__group_send_sig_info(sig, &info, tsk->parent);
__wake_up_parent(tsk, tsk->parent);
spin_unlock_irqrestore(&psig->siglock, flags);
@@ -2364,7 +2365,7 @@ do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact)
{
struct k_sigaction *k;
- if (sig < 1 || sig > _NSIG || (act && sig_kernel_only(sig)))
+ if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
return -EINVAL;
k = &current->sighand->action[sig-1];
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index c39ed70af174..6116b25aa7cf 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -33,7 +33,7 @@ static int stopmachine(void *cpu)
set_cpus_allowed(current, cpumask_of_cpu((int)(long)cpu));
/* Ack: we are alive */
- mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
+ smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
atomic_inc(&stopmachine_thread_ack);
/* Simple state machine */
@@ -43,14 +43,14 @@ static int stopmachine(void *cpu)
local_irq_disable();
irqs_disabled = 1;
/* Ack: irqs disabled. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
} else if (stopmachine_state == STOPMACHINE_PREPARE
&& !prepared) {
/* Everyone is in place, hold CPU. */
preempt_disable();
prepared = 1;
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
}
/* Yield in first stage: migration threads need to
@@ -62,7 +62,7 @@ static int stopmachine(void *cpu)
}
/* Ack: we are exiting. */
- mb(); /* Must read state first. */
+ smp_mb(); /* Must read state first. */
atomic_inc(&stopmachine_thread_ack);
if (irqs_disabled)
@@ -77,7 +77,7 @@ static int stopmachine(void *cpu)
static void stopmachine_set_state(enum stopmachine_state state)
{
atomic_set(&stopmachine_thread_ack, 0);
- wmb();
+ smp_wmb();
stopmachine_state = state;
while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
cpu_relax();
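stop_machine.c only tightens its barriers to the smp_* forms; the handshake itself is unchanged: the coordinator zeroes an ack counter, publishes the next state, and spins until every stopmachine thread has bumped the counter. A compressed user-space model of that counted handshake with C11 atomics and pthreads (thread count, state names and function names are invented for the demo; build with cc -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

enum state { STATE_NONE, STATE_PREPARE, STATE_EXIT };

static _Atomic(enum state) machine_state = STATE_NONE;
static _Atomic(int) thread_ack;

static void *worker(void *arg)
{
        enum state seen = STATE_NONE;

        (void)arg;
        while (seen != STATE_EXIT) {
                enum state s = atomic_load_explicit(&machine_state,
                                                    memory_order_acquire);
                if (s != seen) {
                        seen = s;
                        /* ack each state exactly once, like stopmachine() */
                        atomic_fetch_add_explicit(&thread_ack, 1,
                                                  memory_order_release);
                }
        }
        return NULL;
}

static void set_state(enum state s)
{
        atomic_store_explicit(&thread_ack, 0, memory_order_relaxed);
        atomic_store_explicit(&machine_state, s, memory_order_release);
        while (atomic_load_explicit(&thread_ack,
                                    memory_order_acquire) != NTHREADS)
                ;       /* cpu_relax() in the kernel version */
}

int main(void)
{
        pthread_t tid[NTHREADS];
        int i;

        for (i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, NULL);
        set_state(STATE_PREPARE);
        set_state(STATE_EXIT);
        for (i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);
        printf("all %d workers acknowledged both states\n", NTHREADS);
        return 0;
}
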
diff --git a/kernel/sys.c b/kernel/sys.c
index 462d78d55895..f64e97cabe25 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -25,6 +25,7 @@
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
+#include <linux/signal.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
@@ -227,7 +228,7 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
error = -EPERM;
goto out;
}
- if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) {
+ if (niceval < task_nice(p) && !can_nice(p, niceval)) {
error = -EACCES;
goto out;
}
@@ -525,7 +526,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
if (new_egid != old_egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
if (rgid != (gid_t) -1 ||
(egid != (gid_t) -1 && egid != old_rgid))
@@ -556,7 +557,7 @@ asmlinkage long sys_setgid(gid_t gid)
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->gid = current->egid = current->sgid = current->fsgid = gid;
}
@@ -565,7 +566,7 @@ asmlinkage long sys_setgid(gid_t gid)
if(old_egid != gid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->egid = current->fsgid = gid;
}
@@ -596,7 +597,7 @@ static int set_user(uid_t new_ruid, int dumpclear)
if(dumpclear)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->uid = new_ruid;
return 0;
@@ -653,7 +654,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
if (new_euid != old_euid)
{
current->mm->dumpable=0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = new_euid;
if (ruid != (uid_t) -1 ||
@@ -703,7 +704,7 @@ asmlinkage long sys_setuid(uid_t uid)
if (old_euid != uid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = current->euid = uid;
current->suid = new_suid;
@@ -748,7 +749,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (euid != current->euid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->euid = euid;
}
@@ -798,7 +799,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
if (egid != current->egid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->egid = egid;
}
@@ -845,7 +846,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
if (uid != old_fsuid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsuid = uid;
}
@@ -875,7 +876,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
if (gid != old_fsgid)
{
current->mm->dumpable = 0;
- wmb();
+ smp_wmb();
}
current->fsgid = gid;
key_fsgid_changed(current);
@@ -1637,7 +1638,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
switch (option) {
case PR_SET_PDEATHSIG:
sig = arg2;
- if (sig < 0 || sig > _NSIG) {
+ if (!valid_signal(sig)) {
error = -EINVAL;
break;
}
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 1802a311dd3f..0dda70ed1f98 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -52,6 +52,7 @@ cond_syscall(sys_msgsnd);
cond_syscall(sys_msgrcv);
cond_syscall(sys_msgctl);
cond_syscall(sys_shmget);
+cond_syscall(sys_shmat);
cond_syscall(sys_shmdt);
cond_syscall(sys_shmctl);
cond_syscall(sys_mq_open);
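The one-line sys_ni.c change registers sys_shmat through cond_syscall(), whose job is to let the syscall table link even when SysV shared memory is configured out, by falling back to sys_ni_syscall(), which returns -ENOSYS. A user-space sketch of that weak-fallback idea for GCC on ELF targets; the demo names are not the kernel's:

#include <stdio.h>
#include <errno.h>

/* Fallback used when the optional implementation is not linked in. */
long sys_ni_demo(void)
{
        return -ENOSYS;
}

/* Weak alias: a strong sys_shmat_demo() in another object file would
 * override this; with none present, calls land in sys_ni_demo(). */
long sys_shmat_demo(void) __attribute__((weak, alias("sys_ni_demo")));

int main(void)
{
        printf("sys_shmat_demo() -> %ld\n", sys_shmat_demo());
        return 0;
}
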
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 79dbd93bd697..701d12c63068 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1991,6 +1991,8 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
* @filp: the file structure
* @buffer: the user buffer
* @lenp: the size of the user buffer
+ * @ppos: file position
+ * @ppos: the current position in the file
*
* Reads/writes up to table->maxlen/sizeof(unsigned int) integer
* values from/to the user buffer, treated as an ASCII string.
diff --git a/kernel/timer.c b/kernel/timer.c
index ecb3d67c0e14..207aa4f0aa10 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1007,7 +1007,7 @@ asmlinkage long sys_getppid(void)
* Make sure we read the pid before re-reading the
* parent pointer:
*/
- rmb();
+ smp_rmb();
parent = me->group_leader->real_parent;
if (old != parent)
continue;
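The final timer.c hunk is again a barrier downgrade: sys_getppid() samples the parent's pid, issues smp_rmb(), then re-reads the parent pointer and retries if it changed, giving a lock-free but consistent answer. A user-space model of that read-and-recheck loop, with C11 acquire loads standing in for smp_rmb() (the task structures and names here are invented):

#include <stdatomic.h>
#include <stdio.h>

struct task { int pid; };

static struct task init_task   = { .pid = 1 };
static struct task orig_parent = { .pid = 42 };

static _Atomic(struct task *) real_parent = &orig_parent;

/* Sample the pid, then confirm the parent pointer did not change under
 * us; retry if it did. Mirrors the loop in sys_getppid(). */
static int read_ppid(void)
{
        for (;;) {
                struct task *old = atomic_load_explicit(&real_parent,
                                                        memory_order_acquire);
                int pid = old->pid;
                struct task *now = atomic_load_explicit(&real_parent,
                                                        memory_order_acquire);
                if (old == now)
                        return pid;
        }
}

int main(void)
{
        printf("ppid = %d\n", read_ppid());
        atomic_store_explicit(&real_parent, &init_task, memory_order_release);
        printf("ppid = %d (after reparenting)\n", read_ppid());
        return 0;
}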