| | | |
|---|---|---|
| author | Peter Zijlstra <peterz@infradead.org> | 2020-08-29 15:03:56 +0200 |
| committer | Ingo Molnar <mingo@kernel.org> | 2020-10-12 18:27:28 +0200 |
| commit | 6e426e0fcd20ce144bb93e00b70df51e9f2e08c3 (patch) | |
| tree | f4e6ad94a04db211bfacc7cc6a931a96ecd897fd /kernel/kprobes.c | |
| parent | freelist: Implement lockless freelist (diff) | |
| download | linux-6e426e0fcd20ce144bb93e00b70df51e9f2e08c3.tar.xz, linux-6e426e0fcd20ce144bb93e00b70df51e9f2e08c3.zip | |
kprobes: Replace rp->free_instance with freelist
Gets rid of rp->lock, and as a result kretprobes are now fully
lockless.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/159870623583.1229682.17472357584134058687.stgit@devnote2
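
For readers skimming the change, here is a minimal, illustrative sketch (not part of the commit) of the pooling pattern the new code relies on. It assumes the freelist_head/freelist_node types and the freelist_add()/freelist_try_get() helpers introduced by the parent commit ("freelist: Implement lockless freelist"); the example_* names are hypothetical:

```c
#include <linux/freelist.h>	/* freelist_head, freelist_node (from the parent commit) */
#include <linux/kernel.h>	/* container_of() */

/* Hypothetical object pooled on a lockless freelist, like kretprobe_instance. */
struct example_instance {
	struct freelist_node freelist;	/* links the object while it sits in the pool */
	/* ... payload ... */
};

static struct freelist_head example_pool;	/* empty pool: .head == NULL */

/* Take an object from the pool without any spinlock. */
static struct example_instance *example_get(void)
{
	struct freelist_node *fn = freelist_try_get(&example_pool);

	return fn ? container_of(fn, struct example_instance, freelist) : NULL;
}

/* Return the object to the pool, again without locking. */
static void example_put(struct example_instance *ei)
{
	freelist_add(&ei->freelist, &example_pool);
}
```

This mirrors what the diff below does: pre_handler_kretprobe() pops an instance with freelist_try_get() (bumping rp->nmissed when the pool is empty) and pushes it back with freelist_add() if the entry handler declines it, so the rp->lock / free_instances pair is no longer needed.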
Diffstat (limited to 'kernel/kprobes.c')
| | | |
|---|---|---|
| -rw-r--r-- | kernel/kprobes.c | 56 |

1 file changed, 24 insertions, 32 deletions
```diff
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 850ee36a4051..30b8fe7d571d 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1228,11 +1228,8 @@ static void recycle_rp_inst(struct kretprobe_instance *ri)
 {
 	struct kretprobe *rp = get_kretprobe(ri);
 
-	INIT_HLIST_NODE(&ri->hlist);
 	if (likely(rp)) {
-		raw_spin_lock(&rp->lock);
-		hlist_add_head(&ri->hlist, &rp->free_instances);
-		raw_spin_unlock(&rp->lock);
+		freelist_add(&ri->freelist, &rp->freelist);
 	} else
 		call_rcu(&ri->rcu, free_rp_inst_rcu);
 }
@@ -1290,11 +1287,14 @@ NOKPROBE_SYMBOL(kprobe_flush_task);
 static inline void free_rp_inst(struct kretprobe *rp)
 {
 	struct kretprobe_instance *ri;
-	struct hlist_node *next;
+	struct freelist_node *node;
 	int count = 0;
 
-	hlist_for_each_entry_safe(ri, next, &rp->free_instances, hlist) {
-		hlist_del(&ri->hlist);
+	node = rp->freelist.head;
+	while (node) {
+		ri = container_of(node, struct kretprobe_instance, freelist);
+		node = node->next;
+
 		kfree(ri);
 		count++;
 	}
@@ -1925,32 +1925,26 @@ NOKPROBE_SYMBOL(__kretprobe_trampoline_handler)
 static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
-	unsigned long flags = 0;
 	struct kretprobe_instance *ri;
+	struct freelist_node *fn;
 
-	/* TODO: consider to only swap the RA after the last pre_handler fired */
-	raw_spin_lock_irqsave(&rp->lock, flags);
-	if (!hlist_empty(&rp->free_instances)) {
-		ri = hlist_entry(rp->free_instances.first,
-				struct kretprobe_instance, hlist);
-		hlist_del(&ri->hlist);
-		raw_spin_unlock_irqrestore(&rp->lock, flags);
-
-		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
-			raw_spin_lock_irqsave(&rp->lock, flags);
-			hlist_add_head(&ri->hlist, &rp->free_instances);
-			raw_spin_unlock_irqrestore(&rp->lock, flags);
-			return 0;
-		}
-
-		arch_prepare_kretprobe(ri, regs);
+	fn = freelist_try_get(&rp->freelist);
+	if (!fn) {
+		rp->nmissed++;
+		return 0;
+	}
 
-		__llist_add(&ri->llist, &current->kretprobe_instances);
+	ri = container_of(fn, struct kretprobe_instance, freelist);
 
-	} else {
-		rp->nmissed++;
-		raw_spin_unlock_irqrestore(&rp->lock, flags);
+	if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+		freelist_add(&ri->freelist, &rp->freelist);
+		return 0;
 	}
+
+	arch_prepare_kretprobe(ri, regs);
+
+	__llist_add(&ri->llist, &current->kretprobe_instances);
+
 	return 0;
 }
 NOKPROBE_SYMBOL(pre_handler_kretprobe);
@@ -2007,8 +2001,7 @@ int register_kretprobe(struct kretprobe *rp)
 		rp->maxactive = num_possible_cpus();
 #endif
 	}
-	raw_spin_lock_init(&rp->lock);
-	INIT_HLIST_HEAD(&rp->free_instances);
+	rp->freelist.head = NULL;
 	rp->rph = kzalloc(sizeof(struct kretprobe_holder), GFP_KERNEL);
 	if (!rp->rph)
 		return -ENOMEM;
@@ -2023,8 +2016,7 @@ int register_kretprobe(struct kretprobe *rp)
 			return -ENOMEM;
 		}
 		inst->rph = rp->rph;
-		INIT_HLIST_NODE(&inst->hlist);
-		hlist_add_head(&inst->hlist, &rp->free_instances);
+		freelist_add(&inst->freelist, &rp->freelist);
 	}
 
 	refcount_set(&rp->rph->ref, i);
```