author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 00:20:36 +0200
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-17 00:20:36 +0200
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /kernel/itimer.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'kernel/itimer.c')
-rw-r--r--  kernel/itimer.c  241
1 file changed, 241 insertions(+), 0 deletions(-)
diff --git a/kernel/itimer.c b/kernel/itimer.c
new file mode 100644
index 000000000000..e9a40e947e07
--- /dev/null
+++ b/kernel/itimer.c
@@ -0,0 +1,241 @@
+/*
+ * linux/kernel/itimer.c
+ *
+ * Copyright (C) 1992 Darren Senn
+ */
+
+/* These are all the functions necessary to implement itimers */
+
+#include <linux/mm.h>
+#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
+#include <linux/syscalls.h>
+#include <linux/time.h>
+#include <linux/posix-timers.h>
+
+#include <asm/uaccess.h>
+
+static unsigned long it_real_value(struct signal_struct *sig)
+{
+ unsigned long val = 0;
+ if (timer_pending(&sig->real_timer)) {
+ val = sig->real_timer.expires - jiffies;
+
+ /* look out for negative/zero itimer.. */
+ if ((long) val <= 0)
+ val = 1;
+ }
+ return val;
+}
+
+int do_getitimer(int which, struct itimerval *value)
+{
+ struct task_struct *tsk = current;
+ unsigned long interval, val;
+ cputime_t cinterval, cval;
+
+ switch (which) {
+ case ITIMER_REAL:
+ spin_lock_irq(&tsk->sighand->siglock);
+ interval = tsk->signal->it_real_incr;
+ val = it_real_value(tsk->signal);
+ spin_unlock_irq(&tsk->sighand->siglock);
+ jiffies_to_timeval(val, &value->it_value);
+ jiffies_to_timeval(interval, &value->it_interval);
+ break;
+ case ITIMER_VIRTUAL:
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
+ cval = tsk->signal->it_virt_expires;
+ cinterval = tsk->signal->it_virt_incr;
+ if (!cputime_eq(cval, cputime_zero)) {
+ struct task_struct *t = tsk;
+ cputime_t utime = tsk->signal->utime;
+ do {
+ utime = cputime_add(utime, t->utime);
+ t = next_thread(t);
+ } while (t != tsk);
+ if (cputime_le(cval, utime)) { /* about to fire */
+ cval = jiffies_to_cputime(1);
+ } else {
+ cval = cputime_sub(cval, utime);
+ }
+ }
+ spin_unlock_irq(&tsk->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ cputime_to_timeval(cval, &value->it_value);
+ cputime_to_timeval(cinterval, &value->it_interval);
+ break;
+ case ITIMER_PROF:
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
+ cval = tsk->signal->it_prof_expires;
+ cinterval = tsk->signal->it_prof_incr;
+ if (!cputime_eq(cval, cputime_zero)) {
+ struct task_struct *t = tsk;
+ cputime_t ptime = cputime_add(tsk->signal->utime,
+ tsk->signal->stime);
+ do {
+ ptime = cputime_add(ptime,
+ cputime_add(t->utime,
+ t->stime));
+ t = next_thread(t);
+ } while (t != tsk);
+ if (cputime_le(cval, ptime)) { /* about to fire */
+ cval = jiffies_to_cputime(1);
+ } else {
+ cval = cputime_sub(cval, ptime);
+ }
+ }
+ spin_unlock_irq(&tsk->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ cputime_to_timeval(cval, &value->it_value);
+ cputime_to_timeval(cinterval, &value->it_interval);
+ break;
+ default:
+		return -EINVAL;
+ }
+ return 0;
+}
+
+asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+{
+ int error = -EFAULT;
+ struct itimerval get_buffer;
+
+ if (value) {
+ error = do_getitimer(which, &get_buffer);
+ if (!error &&
+ copy_to_user(value, &get_buffer, sizeof(get_buffer)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/*
+ * Called with P->sighand->siglock held and P->signal->real_timer inactive.
+ * If interval is nonzero, arm the timer for interval ticks from now.
+ */
+static inline void it_real_arm(struct task_struct *p, unsigned long interval)
+{
+ p->signal->it_real_value = interval; /* XXX unnecessary field?? */
+ if (interval == 0)
+ return;
+ if (interval > (unsigned long) LONG_MAX)
+ interval = LONG_MAX;
+ p->signal->real_timer.expires = jiffies + interval;
+ add_timer(&p->signal->real_timer);
+}
+
+void it_real_fn(unsigned long __data)
+{
+ struct task_struct * p = (struct task_struct *) __data;
+
+ send_group_sig_info(SIGALRM, SEND_SIG_PRIV, p);
+
+ /*
+ * Now restart the timer if necessary. We don't need any locking
+ * here because do_setitimer makes sure we have finished running
+ * before it touches anything.
+ */
+ it_real_arm(p, p->signal->it_real_incr);
+}
+
+int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue)
+{
+ struct task_struct *tsk = current;
+ unsigned long val, interval;
+ cputime_t cval, cinterval, nval, ninterval;
+
+ switch (which) {
+ case ITIMER_REAL:
+ spin_lock_irq(&tsk->sighand->siglock);
+ interval = tsk->signal->it_real_incr;
+ val = it_real_value(tsk->signal);
+ if (val)
+ del_timer_sync(&tsk->signal->real_timer);
+ tsk->signal->it_real_incr =
+ timeval_to_jiffies(&value->it_interval);
+ it_real_arm(tsk, timeval_to_jiffies(&value->it_value));
+ spin_unlock_irq(&tsk->sighand->siglock);
+ if (ovalue) {
+ jiffies_to_timeval(val, &ovalue->it_value);
+ jiffies_to_timeval(interval,
+ &ovalue->it_interval);
+ }
+ break;
+ case ITIMER_VIRTUAL:
+ nval = timeval_to_cputime(&value->it_value);
+ ninterval = timeval_to_cputime(&value->it_interval);
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
+ cval = tsk->signal->it_virt_expires;
+ cinterval = tsk->signal->it_virt_incr;
+ if (!cputime_eq(cval, cputime_zero) ||
+ !cputime_eq(nval, cputime_zero)) {
+ if (cputime_gt(nval, cputime_zero))
+ nval = cputime_add(nval,
+ jiffies_to_cputime(1));
+ set_process_cpu_timer(tsk, CPUCLOCK_VIRT,
+ &nval, &cval);
+ }
+ tsk->signal->it_virt_expires = nval;
+ tsk->signal->it_virt_incr = ninterval;
+ spin_unlock_irq(&tsk->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ if (ovalue) {
+ cputime_to_timeval(cval, &ovalue->it_value);
+ cputime_to_timeval(cinterval, &ovalue->it_interval);
+ }
+ break;
+ case ITIMER_PROF:
+ nval = timeval_to_cputime(&value->it_value);
+ ninterval = timeval_to_cputime(&value->it_interval);
+ read_lock(&tasklist_lock);
+ spin_lock_irq(&tsk->sighand->siglock);
+ cval = tsk->signal->it_prof_expires;
+ cinterval = tsk->signal->it_prof_incr;
+ if (!cputime_eq(cval, cputime_zero) ||
+ !cputime_eq(nval, cputime_zero)) {
+ if (cputime_gt(nval, cputime_zero))
+ nval = cputime_add(nval,
+ jiffies_to_cputime(1));
+ set_process_cpu_timer(tsk, CPUCLOCK_PROF,
+ &nval, &cval);
+ }
+ tsk->signal->it_prof_expires = nval;
+ tsk->signal->it_prof_incr = ninterval;
+ spin_unlock_irq(&tsk->sighand->siglock);
+ read_unlock(&tasklist_lock);
+ if (ovalue) {
+ cputime_to_timeval(cval, &ovalue->it_value);
+ cputime_to_timeval(cinterval, &ovalue->it_interval);
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+asmlinkage long sys_setitimer(int which,
+ struct itimerval __user *value,
+ struct itimerval __user *ovalue)
+{
+ struct itimerval set_buffer, get_buffer;
+ int error;
+
+ if (value) {
+		if (copy_from_user(&set_buffer, value, sizeof(set_buffer)))
+ return -EFAULT;
+ } else
+ memset((char *) &set_buffer, 0, sizeof(set_buffer));
+
+ error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
+ if (error || !ovalue)
+ return error;
+
+ if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
+ return -EFAULT;
+ return 0;
+}
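
For reference, a minimal userspace sketch of the interface this file implements (illustrative only, not part of the commit; it assumes a POSIX environment, and the handler name, tick count, and one-second period are arbitrary choices):

/*
 * Arms ITIMER_REAL so the kernel path above delivers SIGALRM once per
 * second, then reads the remaining time back with getitimer().
 */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t ticks;

static void on_alarm(int sig)
{
	(void)sig;
	ticks++;		/* async-signal-safe: only bump a counter */
}

int main(void)
{
	struct itimerval tv = {
		.it_value    = { .tv_sec = 1 },	/* first expiry after one second */
		.it_interval = { .tv_sec = 1 },	/* then re-arm every second */
	};
	struct itimerval left;

	signal(SIGALRM, on_alarm);
	if (setitimer(ITIMER_REAL, &tv, NULL) < 0)
		return 1;

	while (ticks < 3)
		pause();	/* each SIGALRM interrupts pause() */

	getitimer(ITIMER_REAL, &left);
	printf("fired %d times, %ld.%06ld s until the next expiry\n",
	       (int)ticks, (long)left.it_value.tv_sec,
	       (long)left.it_value.tv_usec);
	return 0;
}

The ITIMER_REAL case exercised here maps onto the real_timer/it_real_fn path in the code above; ITIMER_VIRTUAL and ITIMER_PROF instead go through the process CPU-clock machinery via set_process_cpu_timer().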