author     Ingo Molnar <mingo@elte.hu>   2009-05-04 19:23:18 +0200
committer  Ingo Molnar <mingo@elte.hu>   2009-05-04 19:30:42 +0200
commit     1dce8d99b85aba6eddb8b8260baea944922e6fe7 (patch)
tree       77afc73743f8eb1c81b652418661b98b1f4b933d /kernel
parent     perf_counter: initialize the per-cpu context earlier (diff)
perf_counter: convert perf_resource_mutex to a spinlock
Now percpu counters can be initialized very early. But the init
sequence uses mutex_lock(). Fortunately, perf_resource_mutex should
be a spinlock anyway, so convert it.

[ Impact: fix crash due to early init mutex use ]

LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
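The crux of the change: mutex_lock() may sleep and depends on scheduler state that is not available when the per-CPU contexts are set up this early in boot, whereas spin_lock()/spin_unlock() never sleep and are safe there. Below is a minimal kernel-style sketch of the resulting pattern, not the actual file contents: the names (example_resource_lock, example_reserved_percpu, example_max_counters, example_max_pertask, example_init_cpu) are hypothetical stand-ins for the perf_counter fields, and it assumes a kernel build environment rather than a standalone program.

    /*
     * Minimal sketch of the locking pattern after the patch: a spinlock
     * guarding a sysadmin-configurable reservation that the early
     * per-CPU init path reads.  Hypothetical names; kernel build assumed.
     */
    #include <linux/spinlock.h>
    #include <linux/percpu.h>

    static int example_reserved_percpu;      /* reserved counters per CPU  */
    static int example_max_counters = 8;     /* stand-in for the total     */

    /* Spinlocks never sleep, so this lock is usable very early in boot. */
    static DEFINE_SPINLOCK(example_resource_lock);

    static DEFINE_PER_CPU(int, example_max_pertask);

    /* Called from the early per-CPU bring-up path. */
    static void example_init_cpu(int cpu)
    {
            spin_lock(&example_resource_lock);
            per_cpu(example_max_pertask, cpu) =
                    example_max_counters - example_reserved_percpu;
            spin_unlock(&example_resource_lock);
    }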
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/perf_counter.c | 16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index fcdafa234a5d..5f86a1156c94 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,9 +46,9 @@ static atomic_t nr_comm_tracking __read_mostly;
int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
/*
- * Mutex for (sysadmin-configurable) counter reservations:
+ * Lock for (sysadmin-configurable) counter reservations:
*/
-static DEFINE_MUTEX(perf_resource_mutex);
+static DEFINE_SPINLOCK(perf_resource_lock);
/*
* Architecture provided APIs - weak aliases:
@@ -3207,9 +3207,9 @@ static void __cpuinit perf_counter_init_cpu(int cpu)
cpuctx = &per_cpu(perf_cpu_context, cpu);
__perf_counter_init_context(&cpuctx->ctx, NULL);
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
hw_perf_counter_setup(cpu);
}
@@ -3292,7 +3292,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
if (val > perf_max_counters)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
@@ -3302,7 +3302,7 @@ perf_set_reserve_percpu(struct sysdev_class *class,
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
}
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}
@@ -3324,9 +3324,9 @@ perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
if (val > 1)
return -EINVAL;
- mutex_lock(&perf_resource_mutex);
+ spin_lock(&perf_resource_lock);
perf_overcommit = val;
- mutex_unlock(&perf_resource_mutex);
+ spin_unlock(&perf_resource_lock);
return count;
}