author		Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:19 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:19 +0200
commit		71e20f1873d46e138c26ce83f8fe54b7221f572f (patch)
tree		3b5c5083d3ffcf7c9d7ddad81acf30c09197ef99
parent		sched: guest CPU accounting: maintain guest state in KVM (diff)
download	linux-71e20f1873d46e138c26ce83f8fe54b7221f572f.tar.xz
		linux-71e20f1873d46e138c26ce83f8fe54b7221f572f.zip
sched: affine sync wakeups
make sync wakeups affine for cache-cold tasks: if a cache-cold task is
woken up by a sync wakeup then use the opportunity to migrate it straight
away. (the two tasks are 'related' because they communicate)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	fs/pipe.c		6
-rw-r--r--	kernel/sched.c		8
-rw-r--r--	net/unix/af_unix.c	4
3 files changed, 12 insertions, 6 deletions
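The policy described above is easiest to see in isolation: on a sync wakeup (where the waker has indicated it will give up the CPU shortly), a wakee that is no longer cache hot is pulled onto the waker's CPU so the two communicating tasks end up sharing a cache. The standalone C sketch below illustrates only that decision; struct task, struct runqueue, task_is_cache_hot() and pick_wake_cpu() are simplified stand-ins invented for this example, not the kernel's real types or the actual try_to_wake_up() logic.

/*
 * Sketch of the wakeup-affinity decision added by this patch.
 * All types and helpers here are simplified stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	unsigned long long last_ran;	/* when the task last ran (ns) */
};

struct runqueue {
	unsigned long long clock;	/* per-CPU clock (ns) */
};

/* Roughly mirrors the idea behind task_hot(): recently-run tasks are cache hot. */
static bool task_is_cache_hot(const struct task *p, const struct runqueue *rq,
			      unsigned long long migration_cost_ns)
{
	return rq->clock - p->last_ran < migration_cost_ns;
}

/*
 * On a sync wakeup, migrate a cache-cold wakee onto the waker's CPU;
 * otherwise leave it on the CPU it last ran on.
 */
static int pick_wake_cpu(const struct task *p, const struct runqueue *waker_rq,
			 int waker_cpu, int prev_cpu, int sync)
{
	if (sync && !task_is_cache_hot(p, waker_rq, 500000ULL))
		return waker_cpu;	/* affine wakeup: migrate straight away */
	return prev_cpu;		/* cache-hot or non-sync: stay put */
}

int main(void)
{
	struct runqueue rq = { .clock = 10000000ULL };
	struct task cold = { .last_ran = 1000000ULL };	/* ran long ago */
	struct task hot  = { .last_ran = 9900000ULL };	/* ran just now */

	printf("cold wakee -> CPU %d\n", pick_wake_cpu(&cold, &rq, 0, 3, 1));
	printf("hot wakee  -> CPU %d\n", pick_wake_cpu(&hot,  &rq, 0, 3, 1));
	return 0;
}

With the 500 us stand-in migration cost, the cold wakee is moved to the waker's CPU 0 while the hot one stays on CPU 3. The kernel's own check is task_hot(), which compares the time since the task last ran against a migration-cost threshold, as the kernel/sched.c hunk below shows.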
diff --git a/fs/pipe.c b/fs/pipe.c
index f1fa2b412f0e..e66ec48e95d8 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -382,7 +382,7 @@ redo:
 
 	/* Signal writers asynchronously that there is more room. */
 	if (do_wakeup) {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
 	if (ret > 0)
@@ -555,7 +555,7 @@ redo2:
 out:
 	mutex_unlock(&inode->i_mutex);
 	if (do_wakeup) {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 	}
 	if (ret > 0)
@@ -649,7 +649,7 @@ pipe_release(struct inode *inode, int decr, int decw)
 	if (!pipe->readers && !pipe->writers) {
 		free_pipe_info(inode);
 	} else {
-		wake_up_interruptible(&pipe->wait);
+		wake_up_interruptible_sync(&pipe->wait);
 		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
 		kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 	}
diff --git a/kernel/sched.c b/kernel/sched.c
index 5a91fe0b5de6..7fd343462597 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1521,6 +1521,12 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 			unsigned long tl = this_load;
 			unsigned long tl_per_task;
 
+			/*
+			 * Attract cache-cold tasks on sync wakeups:
+			 */
+			if (sync && !task_hot(p, rq->clock, this_sd))
+				goto out_set_cpu;
+
 			schedstat_inc(p, se.nr_wakeups_affine_attempts);
 			tl_per_task = cpu_avg_load_per_task(this_cpu);
@@ -1598,7 +1604,7 @@ out_activate:
 	 * the waker guarantees that the freshly woken up task is going
 	 * to be considered on this CPU.)
 	 */
-	if (!sync || cpu != this_cpu)
+	if (!sync || rq->curr == rq->idle)
 		check_preempt_curr(rq, p);
 
 	success = 1;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2b57eaf66abc..6996cba5aa96 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -334,7 +334,7 @@ static void unix_write_space(struct sock *sk)
 	read_lock(&sk->sk_callback_lock);
 	if (unix_writable(sk)) {
 		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-			wake_up_interruptible(sk->sk_sleep);
+			wake_up_interruptible_sync(sk->sk_sleep);
 		sk_wake_async(sk, 2, POLL_OUT);
 	}
 	read_unlock(&sk->sk_callback_lock);
@@ -1639,7 +1639,7 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (!skb)
 		goto out_unlock;
 
-	wake_up_interruptible(&u->peer_wait);
+	wake_up_interruptible_sync(&u->peer_wait);
 
 	if (msg->msg_name)
 		unix_copy_addr(msg, skb->sk);