author     Donald Sharp <donaldsharp72@gmail.com>  2023-09-11 17:02:01 +0200
committer  GitHub <noreply@github.com>             2023-09-11 17:02:01 +0200
commit     edd243280c56018e413a5773b2e8cb82d8be8421 (patch)
tree       e22fcdffd06b9430af2a011d7b2efc7c66c7c2d8
parent     Merge pull request #14373 from donaldsharp/evpn_bgp_bestpath_debug_issues (diff)
parent     lib: fix delete during hash_iterate() in event_*
Merge pull request #14364 from opensourcerouting/event-fix-delete-during-hash_iterate
lib: fix delete during hash_iterate() in event_*
-rw-r--r--   lib/event.c      | 116
-rw-r--r--   lib/frrevent.h   |   6
2 files changed, 59 insertions, 63 deletions
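
The patch below removes the delete-during-hash_iterate() pattern from cpu_record_clear(): rather than releasing buckets from inside the iteration callback, it swaps the whole container aside, pops every entry from the detached copy, and either frees it or re-adds it to the live hash. As a generic, self-contained illustration of that "detach, then filter" pattern (plain C with a simple linked list, not FRR code; the node/clear_unwanted names are invented for this sketch):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int keep;	/* survivor flag; in the patch this is !(item->types & filter) */
};

/* Detach the whole list first ("swap_all"), then consume the detached
 * copy ("pop"), freeing unwanted nodes and re-adding survivors.  No
 * deletion ever happens while an iterator walks the live container. */
static void clear_unwanted(struct node **head)
{
	struct node *old = *head, *n;

	*head = NULL;				/* detach everything at once */
	while ((n = old) != NULL) {
		old = n->next;
		if (n->keep) {
			n->next = *head;	/* re-add survivor */
			*head = n;
		} else {
			free(n);		/* safe: list is detached */
		}
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 6; i++) {
		struct node *n = calloc(1, sizeof(*n));
		n->keep = i % 2;
		n->next = head;
		head = n;
	}
	clear_unwanted(&head);
	for (struct node *n = head; n; n = n->next)
		printf("kept node, keep=%d\n", n->keep);
	return 0;
}
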
diff --git a/lib/event.c b/lib/event.c
index 4b4ca9d7e..37a30d251 100644
--- a/lib/event.c
+++ b/lib/event.c
@@ -89,34 +89,41 @@ unsigned long walltime_threshold = CONSUMED_TIME_CHECK;
/* CLI start ---------------------------------------------------------------- */
#include "lib/event_clippy.c"
-static unsigned int cpu_record_hash_key(const struct cpu_event_history *a)
+static uint32_t cpu_record_hash_key(const struct cpu_event_history *a)
{
int size = sizeof(a->func);
return jhash(&a->func, size, 0);
}
-static bool cpu_record_hash_cmp(const struct cpu_event_history *a,
- const struct cpu_event_history *b)
+static int cpu_record_hash_cmp(const struct cpu_event_history *a,
+ const struct cpu_event_history *b)
{
- return a->func == b->func;
+ return numcmp((uintptr_t)a->func, (uintptr_t)b->func);
}
-static void *cpu_record_hash_alloc(struct cpu_event_history *a)
+DECLARE_HASH(cpu_records, struct cpu_event_history, item, cpu_record_hash_cmp,
+ cpu_record_hash_key);
+
+static struct cpu_event_history *cpu_records_get(struct event_loop *loop,
+ void (*func)(struct event *e),
+ const char *funcname)
{
- struct cpu_event_history *new;
+ struct cpu_event_history ref = { .func = func }, *res;
- new = XCALLOC(MTYPE_EVENT_STATS, sizeof(struct cpu_event_history));
- new->func = a->func;
- new->funcname = a->funcname;
- return new;
+ res = cpu_records_find(loop->cpu_records, &ref);
+ if (!res) {
+ res = XCALLOC(MTYPE_EVENT_STATS, sizeof(*res));
+ res->func = func;
+ res->funcname = funcname;
+ cpu_records_add(loop->cpu_records, res);
+ }
+ return res;
}
-static void cpu_record_hash_free(void *a)
+static void cpu_records_free(struct cpu_event_history **p)
{
- struct cpu_event_history *hist = a;
-
- XFREE(MTYPE_EVENT_STATS, hist);
+ XFREE(MTYPE_EVENT_STATS, *p);
}
static void vty_out_cpu_event_history(struct vty *vty,
@@ -136,14 +143,11 @@ static void vty_out_cpu_event_history(struct vty *vty,
a->types & (1 << EVENT_EXECUTE) ? 'X' : ' ', a->funcname);
}
-static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
+static void cpu_record_print_one(struct vty *vty, uint8_t filter,
+ struct cpu_event_history *totals,
+ const struct cpu_event_history *a)
{
- struct cpu_event_history *totals = args[0];
struct cpu_event_history copy;
- struct vty *vty = args[1];
- uint8_t *filter = args[2];
-
- struct cpu_event_history *a = bucket->data;
copy.total_active =
atomic_load_explicit(&a->total_active, memory_order_seq_cst);
@@ -165,7 +169,7 @@ static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
copy.types = atomic_load_explicit(&a->types, memory_order_seq_cst);
copy.funcname = a->funcname;
- if (!(copy.types & *filter))
+ if (!(copy.types & filter))
return;
vty_out_cpu_event_history(vty, &copy);
@@ -185,7 +189,6 @@ static void cpu_record_hash_print(struct hash_bucket *bucket, void *args[])
static void cpu_record_print(struct vty *vty, uint8_t filter)
{
struct cpu_event_history tmp;
- void *args[3] = {&tmp, vty, &filter};
struct event_loop *m;
struct listnode *ln;
@@ -222,13 +225,13 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out(vty,
" CPU_Warn Wall_Warn Starv_Warn Type Thread\n");
- if (m->cpu_record->count)
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_print,
- args);
- else
+ if (cpu_records_count(m->cpu_records)) {
+ struct cpu_event_history *rec;
+
+ frr_each (cpu_records, m->cpu_records, rec)
+ cpu_record_print_one(vty, filter, &tmp,
+ rec);
+ } else
vty_out(vty, "No data to display yet.\n");
vty_out(vty, "\n");
@@ -248,35 +251,29 @@ static void cpu_record_print(struct vty *vty, uint8_t filter)
vty_out_cpu_event_history(vty, &tmp);
}
-static void cpu_record_hash_clear(struct hash_bucket *bucket, void *args[])
-{
- uint8_t *filter = args[0];
- struct hash *cpu_record = args[1];
-
- struct cpu_event_history *a = bucket->data;
-
- if (!(a->types & *filter))
- return;
-
- hash_release(cpu_record, bucket->data);
-}
-
static void cpu_record_clear(uint8_t filter)
{
- uint8_t *tmp = &filter;
struct event_loop *m;
struct listnode *ln;
frr_with_mutex (&masters_mtx) {
for (ALL_LIST_ELEMENTS_RO(masters, ln, m)) {
frr_with_mutex (&m->mtx) {
- void *args[2] = {tmp, m->cpu_record};
+ struct cpu_event_history *item;
+ struct cpu_records_head old[1];
+
+ cpu_records_init(old);
+ cpu_records_swap_all(old, m->cpu_records);
- hash_iterate(
- m->cpu_record,
- (void (*)(struct hash_bucket *,
- void *))cpu_record_hash_clear,
- args);
+ while ((item = cpu_records_pop(old))) {
+ if (item->types & filter)
+ cpu_records_free(&item);
+ else
+ cpu_records_add(m->cpu_records,
+ item);
+ }
+
+ cpu_records_fini(old);
}
}
}
@@ -565,10 +562,7 @@ struct event_loop *event_master_create(const char *name)
snprintf(tmhashname, sizeof(tmhashname), "%s - threadmaster event hash",
name);
- rv->cpu_record = hash_create_size(
- 8, (unsigned int (*)(const void *))cpu_record_hash_key,
- (bool (*)(const void *, const void *))cpu_record_hash_cmp,
- tmhashname);
+ cpu_records_init(rv->cpu_records);
event_list_init(&rv->event);
event_list_init(&rv->ready);
@@ -686,6 +680,7 @@ void event_master_free_unused(struct event_loop *m)
/* Stop thread scheduler. */
void event_master_free(struct event_loop *m)
{
+ struct cpu_event_history *record;
struct event *t;
frr_with_mutex (&masters_mtx) {
@@ -708,7 +703,9 @@ void event_master_free(struct event_loop *m)
list_delete(&m->cancel_req);
m->cancel_req = NULL;
- hash_clean_and_free(&m->cpu_record, cpu_record_hash_free);
+ while ((record = cpu_records_pop(m->cpu_records)))
+ cpu_records_free(&record);
+ cpu_records_fini(m->cpu_records);
XFREE(MTYPE_EVENT_MASTER, m->name);
XFREE(MTYPE_EVENT_MASTER, m->handler.pfds);
@@ -781,7 +778,6 @@ static struct event *thread_get(struct event_loop *m, uint8_t type,
const struct xref_eventsched *xref)
{
struct event *thread = event_list_pop(&m->unuse);
- struct cpu_event_history tmp;
if (!thread) {
thread = XCALLOC(MTYPE_THREAD, sizeof(struct event));
@@ -809,13 +805,9 @@ static struct event *thread_get(struct event_loop *m, uint8_t type,
* hash_get lookups.
*/
if ((thread->xref && thread->xref->funcname != xref->funcname)
- || thread->func != func) {
- tmp.func = func;
- tmp.funcname = xref->funcname;
- thread->hist =
- hash_get(m->cpu_record, &tmp,
- (void *(*)(void *))cpu_record_hash_alloc);
- }
+ || thread->func != func)
+ thread->hist = cpu_records_get(m, func, xref->funcname);
+
thread->hist->total_active++;
thread->func = func;
thread->xref = xref;
diff --git a/lib/frrevent.h b/lib/frrevent.h
index fe33ffd1f..ab779d908 100644
--- a/lib/frrevent.h
+++ b/lib/frrevent.h
@@ -65,6 +65,8 @@ struct xref_eventsched {
uint32_t event_type;
};
+PREDECL_HASH(cpu_records);
+
/* Master of the theads. */
struct event_loop {
char *name;
@@ -76,7 +78,7 @@ struct event_loop {
struct list *cancel_req;
bool canceled;
pthread_cond_t cancel_cond;
- struct hash *cpu_record;
+ struct cpu_records_head cpu_records[1];
int io_pipe[2];
int fd_limit;
struct fd_handler handler;
@@ -130,6 +132,8 @@ struct event {
#endif
struct cpu_event_history {
+ struct cpu_records_item item;
+
void (*func)(struct event *e);
atomic_size_t total_cpu_warn;
atomic_size_t total_wall_warn;
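
For readers unfamiliar with FRR's typesafe containers, here is a minimal sketch of the declare-and-use pattern the patch adopts. It assumes the FRR headers that provide PREDECL_HASH/DECLARE_HASH, jhash() and numcmp() are included; every myrec* name is hypothetical, chosen only to mirror the cpu_records usage above, and the MTYPE-tracked allocation (XCALLOC) is replaced with plain calloc() for brevity.

#include <stdint.h>
#include <stdlib.h>
/* FRR's typesafe container and jhash headers are assumed to be included. */

PREDECL_HASH(myrecs);

struct myrec {
	struct myrecs_item item;	/* embedded linkage, like cpu_records_item */
	uintptr_t key;
};

static int myrec_cmp(const struct myrec *a, const struct myrec *b)
{
	return numcmp(a->key, b->key);
}

static uint32_t myrec_hkey(const struct myrec *a)
{
	return jhash(&a->key, sizeof(a->key), 0);
}

DECLARE_HASH(myrecs, struct myrec, item, myrec_cmp, myrec_hkey);

/* Lookup-or-create, mirroring cpu_records_get() in the patch;
 * myrec_get is a hypothetical helper, not part of the change. */
static struct myrec *myrec_get(struct myrecs_head *head, uintptr_t key)
{
	struct myrec ref = { .key = key }, *res;

	res = myrecs_find(head, &ref);
	if (!res) {
		res = calloc(1, sizeof(*res));
		res->key = key;
		myrecs_add(head, res);
	}
	return res;
}

Because frr_each() walks such a container directly (as in the cpu_record_print() hunk above), the void-pointer callback casts needed for hash_iterate() disappear along with the unsafe in-iteration deletes.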