author     Linus Torvalds <torvalds@linux-foundation.org>  2009-10-15 00:25:35 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-10-15 00:25:35 +0200
commit     ee67e6cbe1121da1ae4eceb7b2bcb535c5cbf65e
tree       8ceefe56b6f325a4b8dbf0ee2dcda0d9216a52a3
parent     Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/ker...
parent     Merge branch 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/o...
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
oprofile: warn on freeing event buffer too early
oprofile: fix race condition in event_buffer free
lockdep: Use cpu_clock() for lockstat
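
The two oprofile commits close a use-after-free window: free_event_buffer() now frees and NULLs the buffer under buffer_mutex, and every user of the buffer re-checks it for NULL under that same mutex. Below is a minimal userspace sketch of the same locking pattern using pthreads; it is not the kernel code, and while the names mirror the patch, the helpers (buffer_read, buffer_free) are illustrative only.

/*
 * Sketch: free-and-NULL under the same mutex the readers take, so a
 * reader racing with the free sees NULL rather than a dangling pointer.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long *event_buffer;

static int buffer_read(unsigned long *out)
{
	int ret = 0;

	pthread_mutex_lock(&buffer_mutex);
	if (!event_buffer) {		/* freed while we were waiting */
		ret = -1;
		goto out;
	}
	*out = event_buffer[0];
out:
	pthread_mutex_unlock(&buffer_mutex);
	return ret;
}

static void buffer_free(void)
{
	pthread_mutex_lock(&buffer_mutex);
	free(event_buffer);
	event_buffer = NULL;		/* readers now see NULL, not stale memory */
	pthread_mutex_unlock(&buffer_mutex);
}

int main(void)
{
	unsigned long v;

	event_buffer = calloc(16, sizeof(*event_buffer));
	printf("read before free: %d\n", buffer_read(&v));
	buffer_free();
	printf("read after free:  %d\n", buffer_read(&v));
	return 0;
}

The ordering matters: the pointer must be cleared before the mutex is dropped, otherwise a reader can acquire the mutex and dereference freed memory, which is exactly the race the second oprofile commit fixes.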
-rw-r--r--  drivers/oprofile/event_buffer.c  35
-rw-r--r--  kernel/lockdep.c                 20
2 files changed, 38 insertions(+), 17 deletions(-)
diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
index 2b7ae366ceb1..5df60a6b6776 100644
--- a/drivers/oprofile/event_buffer.c
+++ b/drivers/oprofile/event_buffer.c
@@ -35,12 +35,23 @@ static size_t buffer_pos;
 /* atomic_t because wait_event checks it outside of buffer_mutex */
 static atomic_t buffer_ready = ATOMIC_INIT(0);
 
-/* Add an entry to the event buffer. When we
- * get near to the end we wake up the process
- * sleeping on the read() of the file.
+/*
+ * Add an entry to the event buffer. When we get near to the end we
+ * wake up the process sleeping on the read() of the file. To protect
+ * the event_buffer this function may only be called when buffer_mutex
+ * is set.
  */
 void add_event_entry(unsigned long value)
 {
+	/*
+	 * This shouldn't happen since all workqueues or handlers are
+	 * canceled or flushed before the event buffer is freed.
+	 */
+	if (!event_buffer) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
 	if (buffer_pos == buffer_size) {
 		atomic_inc(&oprofile_stats.event_lost_overflow);
 		return;
@@ -69,7 +80,6 @@ void wake_up_buffer_waiter(void)
 
 int alloc_event_buffer(void)
 {
-	int err = -ENOMEM;
 	unsigned long flags;
 
 	spin_lock_irqsave(&oprofilefs_lock, flags);
@@ -80,21 +90,22 @@ int alloc_event_buffer(void)
 	if (buffer_watershed >= buffer_size)
 		return -EINVAL;
 
+	buffer_pos = 0;
 	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
 	if (!event_buffer)
-		goto out;
+		return -ENOMEM;
 
-	err = 0;
-out:
-	return err;
+	return 0;
 }
 
 void free_event_buffer(void)
 {
+	mutex_lock(&buffer_mutex);
 	vfree(event_buffer);
-
+	buffer_pos = 0;
 	event_buffer = NULL;
+	mutex_unlock(&buffer_mutex);
 }
@@ -167,6 +178,12 @@ static ssize_t event_buffer_read(struct file *file, char __user *buf,
 
 	mutex_lock(&buffer_mutex);
 
+	/* May happen if the buffer is freed during pending reads. */
+	if (!event_buffer) {
+		retval = -EINTR;
+		goto out;
+	}
+
 	atomic_set(&buffer_ready, 0);
 
 	retval = -EFAULT;
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -142,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
@@ -158,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -234,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
@@ -2792,7 +2797,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -3322,7 +3327,7 @@ found_it:
 	if (hlock->instance != lock)
 		return;
 
-	hlock->waittime_stamp = sched_clock();
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3345,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3374,7 +3378,7 @@ found_it:
 
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}
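
The lockdep change routes every lockstat timestamp through one helper, lockstat_clock(), and keeps the deltas unsigned (u64), so the clock source can be swapped in a single place (here cpu_clock() replacing sched_clock()). The sketch below reproduces that shape in userspace under the assumption that clock_gettime(CLOCK_MONOTONIC) stands in for the kernel's per-CPU clock; struct lock_time follows the kernel layout, everything else is illustrative.

/*
 * Sketch: a single clock helper feeding unsigned time deltas into
 * min/max/total statistics, mirroring lock_time_inc() in lockdep.c.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static inline uint64_t lockstat_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

struct lock_time {
	uint64_t min, max, total;
	unsigned long nr;
};

/* Accumulate one measured hold/wait time, as lock_time_inc() does. */
static void lock_time_inc(struct lock_time *lt, uint64_t time)
{
	if (time > lt->max)
		lt->max = time;
	if (time < lt->min || !lt->nr)
		lt->min = time;
	lt->total += time;
	lt->nr++;
}

int main(void)
{
	struct lock_time lt = { 0 };
	uint64_t stamp = lockstat_clock();

	/* pretend the lock was held for the duration of this printf */
	printf("timing a critical section...\n");
	lock_time_inc(&lt, lockstat_clock() - stamp);
	printf("samples=%lu total=%llu ns\n", lt.nr,
	       (unsigned long long)lt.total);
	return 0;
}

Keeping the deltas u64 also removes the signed/unsigned mix the old code had (s64 waittime against u64 now), which is why the patch folds the two declarations into a single "u64 now, waittime = 0;".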