author     David Lamparter <equinox@opensourcerouting.org>   2023-09-07 11:48:22 +0200
committer  David Lamparter <equinox@opensourcerouting.org>   2023-09-07 17:19:39 +0200
commit     3d1a678e249782e7c3e68158b0e13177907ade84 (patch)
tree       797485a051512b6f4f8abe4d48f0df3362ea7b9f /lib/event.c
parent     Merge pull request #14358 from donaldsharp/tc_possible_crash (diff)
download   frr-3d1a678e249782e7c3e68158b0e13177907ade84.tar.xz
           frr-3d1a678e249782e7c3e68158b0e13177907ade84.zip
lib: fix delete during hash_iterate() in event_*
... by converting the hash table to a typesafe hash.

Honestly I was just looking around for things to convert to the typesafe hash table code, but then I noticed that cpu_record_clear() deletes items from inside the hash_iterate() callback :(

Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
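The fix avoids the delete-during-iteration hazard by never removing items from the table while walking it: cpu_record_clear() swaps the whole container into a temporary head, pops entries one by one, and either frees them or re-adds the keepers. As a rough, self-contained illustration of that pattern (plain C, not FRR code; the struct rec list below is a hypothetical stand-in for the typesafe cpu_records hash):

/*
 * Minimal sketch (not FRR code) of the "swap out, then selectively
 * re-add" pattern this commit uses in cpu_record_clear().
 */
#include <stdlib.h>

struct rec {
	struct rec *next;
	unsigned types;
};

struct rec_head {
	struct rec *first;
};

static struct rec *rec_pop(struct rec_head *h)
{
	struct rec *r = h->first;

	if (r)
		h->first = r->next;
	return r;
}

static void rec_add(struct rec_head *h, struct rec *r)
{
	r->next = h->first;
	h->first = r;
}

/* Clear all records matching "filter" without ever freeing an item the
 * iteration is still holding: move everything to a local head first,
 * then put back only what should be kept. */
static void rec_clear(struct rec_head *live, unsigned filter)
{
	struct rec_head old = { .first = NULL };
	struct rec *item;

	/* swap: "old" takes over the current contents, "live" is emptied */
	old.first = live->first;
	live->first = NULL;

	while ((item = rec_pop(&old))) {
		if (item->types & filter)
			free(item);		/* drop matching records */
		else
			rec_add(live, item);	/* keep the rest */
	}
}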
Diffstat (limited to 'lib/event.c')
-rw-r--r--   lib/event.c   116
1 file changed, 54 insertions(+), 62 deletions(-)
diff --git a/lib/event.c b/lib/event.c
index 4b4ca9d7e..37a30d251 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -89,34 +89,41 @@ unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"
-static unsigned int cpu_record_hash_key(const struct cpu_event_history *a)
+static uint32_t cpu_record_hash_key(const struct cpu_event_history *a)
{
int size = sizeof(a->func);
return jhash(&a->func, size, 0);
}
-static bool cpu_record_hash_cmp(const struct cpu_event_history *a,
- const struct cpu_event_history *b)
+static int cpu_record_hash_cmp(const struct cpu_event_history *a,
+ const struct cpu_event_history *b)
{
- return a->func == b->func;
+ return numcmp((uintptr_t)a->func, (uintptr_t)b->func);
}
-static void *cpu_record_hash_alloc(struct cpu_event_history *a)
+DECLARE_HASH(cpu_records, struct cpu_event_history, item, cpu_record_hash_cmp,
+ cpu_record_hash_key);
+
+static struct cpu_event_history *cpu_records_get(struct event_loop *loop,
+ void (*func)(struct event *e),
+ const char *funcname)
{
- struct cpu_event_history *new;
+ struct cpu_event_history ref = { .func = func }, *res;
- new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_event_history));
- new->func = a->func;
- new->funcname = a->funcname;
- return new;
+ res = cpu_records_find(loop->cpu_records, &ref);
+ if (!res) {
+ res = XCALLOC(MTYPE_EVENT_STATS, sizeof(*res));
+ res->func = func;
+ res->funcname = funcname;
+ cpu_records_add(loop->cpu_records, res);
+ }
+ return res;
}
-static void cpu_record_hash_free(void *a)
+static void cpu_records_free(struct cpu_event_history **p)
{
- struct cpu_event_history *hist = a;
-
- XFREE(MTYPE_EVENT_STATS, hist);
+ XFREE(MTYPE_EVENT_STATS, *p);
}
static void vty_out_cpu_event_history(struct vty *vty,
@@ -136,14 +143,11 @@ static void vty_out_cpu_event_history(struct vty *vty,
a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
}
-static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
+static void cpu_record_print_one(struct vty *vty, uint8_t filter,
+ struct cpu_event_history *totals,
+ const struct cpu_event_history *a)
{
- struct cpu_event_history *totals = args[0];
struct cpu_event_history copy;
- struct vty *vty = args[1];
- uint8_t *filter = args[2];
-
- struct cpu_event_history *a = bucket->data;
copy.total_active =
atomic_load_explicit(&a->total_active, memory_order_seq_cst);
@@ -165,7 +169,7 @@ static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
copy.funcname = a->funcname;
- if (!(copy.types & *filter))
+ if (!(copy.types & filter))
return;
vty_out_cpu_event_history(vty, &copy);
@@ -185,7 +189,6 @@ static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
static void cpu_record_print(struct vty *vty, uint8_t filter)
{
struct cpu_event_history tmp;
- void *args[3] = {&tmp, vty, &filter};
struct event_loop *m;
struct listnode *ln;
@@ -222,13 +225,13 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out(vty,
" CPU_Warn Wall_Warn Starv_Warn Type Thread\n");
- if (m->cpu_record->count)
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_print,
- args);
- else
+ if (cpu_records_count(m->cpu_records)) {
+ struct cpu_event_history *rec;
+
+ frr_each (cpu_records, m->cpu_records, rec)
+ cpu_record_print_one(vty, filter, &tmp,
+ rec);
+ } else
vty_out(vty, "No data to display yet.\n");
vty_out(vty, "\n");
@@ -248,35 +251,29 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out_cpu_event_history(vty, &tmp);
}
-static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
-{
- uint8_t *filter = args[0];
- struct hash *cpu_record = args[1];
-
- struct cpu_event_history *a = bucket->data;
-
- if (!(a->types & *filter))
- return;
-
- hash_release(cpu_record, bucket->data);
-}
-
static void cpu_record_clear(uint8_t filter)
{
- uint8_t *tmp = &filter;
struct event_loop *m;
struct listnode *ln;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
frr_with_mutex (&m->mtx) {
- void *args[2] = {tmp, m->cpu_record};
+ struct cpu_event_history *item;
+ struct cpu_records_head old[1];
+
+ cpu_records_init(old);
+ cpu_records_swap_all(old, m->cpu_records);
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_clear,
- args);
+ while ((item = cpu_records_pop(old))) {
+ if (item->types & filter)
+ cpu_records_free(&item);
+ else
+ cpu_records_add(m->cpu_records,
+ item);
+ }
+
+ cpu_records_fini(old);
}
}
}
@@ -565,10 +562,7 @@ struct event_loop *event_master_create(const char *name)
snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
name);
- rv->cpu_record = hash_create_size(
- 8, (unsigned int (*)(const void *))cpu_record_hash_key,
- (bool (*)(const void *, const void *))cpu_record_hash_cmp,
- tmhashname);
+ cpu_records_init(rv->cpu_records);
event_list_init(&rv->event);
event_list_init(&rv->ready);
@@ -686,6 +680,7 @@ void event_master_free_unused(struct event_loop *m)
/* Stop thread scheduler. */
void event_master_free(struct event_loop *m)
{
+ struct cpu_event_history *record;
struct event *t;
frr_with_mutex (&masters_mtx) {
@@ -708,7 +703,9 @@ void event_master_free(struct event_loop *m)
list_delete(&m->cancel_req);
m->cancel_req = NULL;
- hash_clean_and_free(&m->cpu_record, cpu_record_hash_free);
+ while ((record = cpu_records_pop(m->cpu_records)))
+ cpu_records_free(&record);
+ cpu_records_fini(m->cpu_records);
XFREE(MTYPE_EVENT_MASTER, m->name);
XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
@@ -781,7 +778,6 @@ static struct event *thread_get(struct event_loop *m, uint8_t type,
const struct xref_eventsched *xref)
{
struct event *thread = event_list_pop(&m->unuse);
- struct cpu_event_history tmp;
if (!thread) {
thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
@@ -809,13 +805,9 @@ static struct event *thread_get(struct event_loop *m, uint8_t type,
* hash_get lookups.
*/
if ((thread->xref && thread->xref->funcname != xref->funcname)
- || thread->func != func) {
- tmp.func = func;
- tmp.funcname = xref->funcname;
- thread->hist =
- hash_get(m->cpu_record, &tmp,
- (void *(*)(void *))cpu_record_hash_alloc);
- }
+ || thread->func != func)
+ thread->hist = cpu_records_get(m, func, xref->funcname);
+
thread->hist->total_active++;
thread->func = func;
thread->xref = xref;
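Not visible in this diff (which is limited to lib/event.c) is the companion change on the header side. Assuming FRR's lib/typesafe.h conventions, the declarations backing the new cpu_records container presumably look roughly like the sketch below; everything other than the names that do appear in the diff (item, cpu_records, func, funcname) is a guess:

/* Hypothetical sketch of the lib/event.h side, assuming FRR's
 * lib/typesafe.h; not part of this commit's visible diff. */
#include "typesafe.h"

PREDECL_HASH(cpu_records);

struct cpu_event_history {
	struct cpu_records_item item;	/* linkage consumed by DECLARE_HASH(..., item, ...) */
	void (*func)(struct event *e);	/* hash key, per cpu_record_hash_key()/_cmp() */
	const char *funcname;
	/* ... the various atomic counters elided ... */
};

struct event_loop {
	/* ... */
	struct cpu_records_head cpu_records[1];	/* replaces the old "struct hash *cpu_record" */
	/* ... */
};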