author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-18 23:29:37 +0100
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-18 23:29:37 +0100
commit | 335bc70b6b5fdc9b4e28cb56c27f10124064f9da (patch)
tree | 7f5a1594a09cdcb2ea9a5d768ea72c0256f6aa3c /kernel
parent | Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent | perf: Validate cpu early in perf_event_alloc() (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
perf: Validate cpu early in perf_event_alloc()
perf: Find_get_context: fix the per-cpu-counter check
perf: Fix contexted inheritance
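Of these, the cpu validation fix moves the range check out of find_get_context() and into perf_event_alloc(), so an out-of-range cpu is rejected with -EINVAL at allocation time for task events as well as CPU-wide events, while cpu == -1 stays valid only when a task is given. A minimal userspace sketch of the resulting perf_event_open() semantics; the software counter, the pid/cpu values and the deliberately bogus cpu number are illustrative choices, not taken from the commit:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* glibc provides no wrapper for perf_event_open(), so call it directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
        attr.exclude_kernel = 1;        /* keep it usable without privileges */

        /* Task event, cpu == -1: follow the calling task on any CPU. */
        fd = perf_event_open(&attr, 0, -1, -1, 0);
        printf("cpu=-1:      %s\n", fd < 0 ? strerror(errno) : "ok");
        if (fd >= 0)
                close(fd);

        /* Out-of-range cpu: rejected early by perf_event_alloc(). */
        fd = perf_event_open(&attr, 0, 1000000, -1, 0);
        printf("cpu=1000000: %s\n", fd < 0 ? strerror(errno) : "ok");

        return 0;
}

On a kernel with this merge the second call is expected to fail with EINVAL; before the fix, the range check lived in find_get_context() and only covered CPU-wide events.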
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_event.c | 23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 05ebe841270b..84522c796987 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2228,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
        unsigned long flags;
        int ctxn, err;
 
-       if (!task && cpu != -1) {
+       if (!task) {
                /* Must be root to operate on a CPU event: */
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
-
                /*
                 * We could be clever and allow to attach a event to an
                 * offline CPU and activate it when the CPU comes up, but
@@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        struct hw_perf_event *hwc;
        long err;
 
+       if ((unsigned)cpu >= nr_cpu_ids) {
+               if (!task || cpu != -1)
+                       return ERR_PTR(-EINVAL);
+       }
+
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
@@ -5589,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
-
+
        event->overflow_handler = overflow_handler;
 
        if (attr->disabled)
@@ -6494,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 0;
-       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
        child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6502,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
-                * Note that if the parent is a clone, it could get
-                * uncloned at any point, but that doesn't matter
-                * because the list of events and the generation
-                * count can't have changed since we took the mutex.
+                *
+                * Note that if the parent is a clone, the holding of
+                * parent_ctx->lock avoids it from being uncloned.
                 */
-               cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+               cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6518,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                get_ctx(child_ctx->parent_ctx);
        }
 
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
        mutex_unlock(&parent_ctx->mutex);
        perf_unpin_context(parent_ctx);