author     Anna-Maria Gleixner <anna-maria@linutronix.de>    2018-05-25 11:05:07 +0200
committer  Thomas Gleixner <tglx@linutronix.de>              2018-06-10 06:14:01 +0200
commit     59dc6f3c6d81c0c4379025c4eb56919391d62b67 (patch)
tree       e4a46207c2932db28237039791f11790f266a1d2 /kernel
parent     rcu: Update documentation of rcu_read_unlock() (diff)
download   linux-59dc6f3c6d81c0c4379025c4eb56919391d62b67.tar.xz
           linux-59dc6f3c6d81c0c4379025c4eb56919391d62b67.zip
signal: Remove no longer required irqsave/restore
Commit a841796f11c9 ("signal: align __lock_task_sighand() irq disabling and RCU") introduced an RCU read-side critical section with interrupts disabled. The changelog suggested that a better long-term fix would be "to make rt_mutex_unlock() disable irqs when acquiring the rt_mutex structure's ->wait_lock". This long-term fix has been made in commit b4abf91047cf ("rtmutex: Make wait_lock irq safe") for a different reason.

Therefore revert commit a841796f11c9 ("signal: align __lock_task_sighand() irq disabling and RCU") as the interrupt disable dance is no longer required.

The change was tested on the base of b4abf91047cf ("rtmutex: Make wait_lock irq safe") with a four hour run of rcutorture scenario TREE03 with lockdep enabled as suggested by Paul McKenney.

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: bigeasy@linutronix.de
Link: https://lkml.kernel.org/r/20180525090507.22248-3-anna-maria@linutronix.de
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/signal.c  24
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/kernel/signal.c b/kernel/signal.c
index 0f865d67415d..8d8a940422a8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1244,19 +1244,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
{
struct sighand_struct *sighand;
+ rcu_read_lock();
for (;;) {
- /*
- * Disable interrupts early to avoid deadlocks.
- * See rcu_read_unlock() comment header for details.
- */
- local_irq_save(*flags);
- rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
- if (unlikely(sighand == NULL)) {
- rcu_read_unlock();
- local_irq_restore(*flags);
+ if (unlikely(sighand == NULL))
break;
- }
+
/*
* This sighand can be already freed and even reused, but
* we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
@@ -1268,15 +1261,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
* __exit_signal(). In the latter case the next iteration
* must see ->sighand == NULL.
*/
- spin_lock(&sighand->siglock);
- if (likely(sighand == tsk->sighand)) {
- rcu_read_unlock();
+ spin_lock_irqsave(&sighand->siglock, *flags);
+ if (likely(sighand == tsk->sighand))
break;
- }
- spin_unlock(&sighand->siglock);
- rcu_read_unlock();
- local_irq_restore(*flags);
+ spin_unlock_irqrestore(&sighand->siglock, *flags);
}
+ rcu_read_unlock();
return sighand;
}
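
For reference, below is a sketch of how the locking loop in __lock_task_sighand() reads once this patch is applied, reconstructed from the hunks above. The comment text that falls between the two hunks is abbreviated, and the unsigned long *flags parameter is inferred from the spin_lock_irqsave() usage rather than shown in the hunk header; surrounding kernel declarations are assumed.

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	/* RCU read side now simply brackets the whole retry loop. */
	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() ...
		 * [comment body between the two hunks abbreviated] ...
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

The net effect is that interrupts are disabled only for the duration of the ->siglock critical section, via spin_lock_irqsave()/spin_unlock_irqrestore(), instead of being disabled manually before entering the RCU read-side critical section.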