author		Thomas Gleixner <tglx@linutronix.de>	2010-02-24 09:50:22 +0100
committer	Ingo Molnar <mingo@elte.hu>	2011-09-13 11:11:57 +0200
commit		8292c9e15c3b069459794a04f5e2cf0d5665ddc4 (patch)
tree		6dd98e626c2dc5fc38a7838295039226b301ee57 /kernel/semaphore.c
parent		locking, sched: Annotate thread_group_cputimer as raw (diff)
download	linux-8292c9e15c3b069459794a04f5e2cf0d5665ddc4.tar.xz
		linux-8292c9e15c3b069459794a04f5e2cf0d5665ddc4.zip
locking, semaphores: Annotate inner lock as raw
There is no reason to have the spin_lock protecting the semaphore preemptible on -rt. Annotate it as a raw_spinlock.

In mainline this change documents the low level nature of the lock - otherwise there's no functional difference. Lockdep and Sparse checking will work as usual.

( On rt this also solves lockdep complaining about the rt_mutex.wait_lock being not initialized. )

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
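The conversion below is mechanical: the semaphore's inner lock becomes a raw_spinlock_t and every spin_*() call on it becomes the matching raw_spin_*() call. A minimal sketch of that pattern follows; the struct and function names are illustrative only, not the real struct semaphore (whose definition lives in include/linux/semaphore.h and is outside this filtered view):

#include <linux/spinlock.h>

/* Illustrative structure; it only mirrors the pattern applied to struct semaphore. */
struct foo {
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	unsigned int	count;
};

static void foo_dec(struct foo *f)
{
	unsigned long flags;

	/*
	 * raw_spin_lock_irqsave() behaves like spin_lock_irqsave() in
	 * mainline; on -rt it remains a true, non-sleeping spinlock.
	 */
	raw_spin_lock_irqsave(&f->lock, flags);
	if (f->count > 0)
		f->count--;
	raw_spin_unlock_irqrestore(&f->lock, flags);
}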
Diffstat (limited to 'kernel/semaphore.c')
-rw-r--r--	kernel/semaphore.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 94a62c0d4ade..d831841e55a7 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -54,12 +54,12 @@ void down(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		__down(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(down);
 
@@ -77,12 +77,12 @@ int down_interruptible(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_interruptible(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -103,12 +103,12 @@ int down_killable(struct semaphore *sem)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_killable(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -132,11 +132,11 @@ int down_trylock(struct semaphore *sem)
 	unsigned long flags;
 	int count;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	count = sem->count - 1;
 	if (likely(count >= 0))
 		sem->count = count;
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return (count < 0);
 }
@@ -157,12 +157,12 @@ int down_timeout(struct semaphore *sem, long jiffies)
 	unsigned long flags;
 	int result = 0;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(sem->count > 0))
 		sem->count--;
 	else
 		result = __down_timeout(sem, jiffies);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 
 	return result;
 }
@@ -179,12 +179,12 @@ void up(struct semaphore *sem)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&sem->lock, flags);
+	raw_spin_lock_irqsave(&sem->lock, flags);
 	if (likely(list_empty(&sem->wait_list)))
 		sem->count++;
 	else
 		__up(sem);
-	spin_unlock_irqrestore(&sem->lock, flags);
+	raw_spin_unlock_irqrestore(&sem->lock, flags);
 }
 EXPORT_SYMBOL(up);
 
@@ -217,9 +217,9 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
 		if (timeout <= 0)
 			goto timed_out;
 		__set_task_state(task, state);
-		spin_unlock_irq(&sem->lock);
+		raw_spin_unlock_irq(&sem->lock);
 		timeout = schedule_timeout(timeout);
-		spin_lock_irq(&sem->lock);
+		raw_spin_lock_irq(&sem->lock);
 		if (waiter.up)
 			return 0;
 	}
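Callers of the semaphore API are unaffected: sem->lock is internal to kernel/semaphore.c, so the interface stays the same. A purely illustrative caller (my_sem and my_work are made-up names, not part of this patch):

#include <linux/semaphore.h>

static struct semaphore my_sem;	/* illustrative; initialized once with sema_init(&my_sem, 1) */

static int my_work(void)
{
	/* down_interruptible() ends up in the raw_spin_lock_irqsave() path above */
	if (down_interruptible(&my_sem))
		return -EINTR;
	/* ... section serialized by my_sem ... */
	up(&my_sem);
	return 0;
}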