/*
* Copyright (c) 2017-19 David Lamparter, for NetDEF, Inc.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _FRRCU_H
#define _FRRCU_H
#include "memory.h"
#include "atomlist.h"
/* quick RCU primer:
 * There's a global sequence counter.  Whenever a thread does an
 * rcu_read_lock(), it is marked as holding the current sequence counter.
 * When something is cleaned up with RCU, the global sequence counter is
 * increased and the item is queued for cleanup - which only happens *after*
 * all threads have moved on to a more recent sequence counter (or hold no
 * sequence counter at all).
 *
 * So, by delaying resource cleanup, RCU ensures that things don't go away
 * while another thread may still hold a (stale) reference.
 *
 * Note that even if a thread is inside rcu_read_lock(), it is invalid for
 * that thread to access an object after calling rcu_free() & co. on it.
 * This is a design choice that allows no-op'ing out the entire RCU
 * mechanism when running single-threaded.  (It also allows some
 * optimization on the counter bumping.)
 *
 * differences from Linux Kernel RCU:
 * - there's no rcu_synchronize(); if you really need to defer something,
 *   use rcu_call() (and double-check that it's really necessary)
 * - rcu_dereference() and rcu_assign_pointer() don't exist; use atomic_*
 *   instead (the ATOM* list structures do the right thing)
 */
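/* Example: a minimal reader-side sketch.  "item_find", "itemlist", "use" and
 * "struct item" are hypothetical names used only for illustration; the rcu_*
 * calls are the ones declared below.
 *
 *   rcu_read_lock();
 *   struct item *it = item_find(&itemlist, key);  // e.g. an atomlist lookup
 *   if (it)
 *           use(it);         // can't be freed under us by another thread
 *   rcu_read_unlock();       // "it" must not be used past this point
 */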
/* opaque */
struct rcu_thread;
/* called before new thread creation; sets up RCU thread info for the new
 * thread before it actually exists.  This ensures possible RCU references
 * are held for thread startup.
 *
 * The return value must be passed into the new thread's call to
 * rcu_thread_start().
 */
extern struct rcu_thread *rcu_thread_prepare(void);
/* cleanup in case pthread_create() fails */
extern void rcu_thread_unprepare(struct rcu_thread *rcu_thread);
/* called early in the new thread, with the return value from the above.
* NB: new thread is initially in RCU-held state! (at depth 1)
*
* TBD: maybe inherit RCU state from rcu_thread_prepare()?
*/
extern void rcu_thread_start(struct rcu_thread *rcu_thread);
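/* Example: sketch of the thread startup protocol.  "worker_fn" and
 * "struct worker" are hypothetical; the rcu_* and pthread_* calls are the
 * real APIs.
 *
 *   static void *worker_fn(void *arg)
 *   {
 *           struct worker *w = arg;
 *
 *           rcu_thread_start(w->rcu_thr);
 *           rcu_read_unlock();      // drop the initial depth-1 hold
 *           ...
 *           return NULL;
 *   }
 *
 *   w->rcu_thr = rcu_thread_prepare();
 *   if (pthread_create(&w->tid, NULL, worker_fn, w))
 *           rcu_thread_unprepare(w->rcu_thr);   // creation failed
 */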
/* thread exit is handled through pthread_key_create's destructor function */
/* global RCU shutdown - must be called with only 1 active thread left.
 * Waits until all remaining RCU actions are done and the RCU thread has
 * exited.
 *
 * This is mostly here to get a clean exit without memory leaks.
 */
extern void rcu_shutdown(void);
/* enter / exit RCU-held state.  Counter-based, so calls can be nested. */
extern void rcu_read_lock(void);
extern void rcu_read_unlock(void);
/* for debugging / safety checks */
extern void rcu_assert_read_locked(void);
extern void rcu_assert_read_unlocked(void);
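/* Example: documenting a locking requirement (a sketch; "item_lookup" and
 * "struct item" are hypothetical):
 *
 *   struct item *item_lookup(uint32_t key)
 *   {
 *           // caller must be inside rcu_read_lock(), since the returned
 *           // pointer is only protected by RCU
 *           rcu_assert_read_locked();
 *           ...
 *   }
 */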
enum rcu_action_type {
	RCUA_INVALID = 0,
	/* used internally by the RCU code, shouldn't ever show up outside */
	RCUA_NEXT,
	RCUA_END,
	/* normal RCU actions, for outside use */
	RCUA_FREE,
	RCUA_CLOSE,
	RCUA_CALL,
};
/* Since rcu_head is intended to be embedded into structs which may exist
 * in many copies, rcu_head is shrunk down to its absolute minimum - the
 * atomlist pointer + a pointer to this action struct.
 */
struct rcu_action {
	enum rcu_action_type type;
	union {
		struct {
			struct memtype *mt;
			ptrdiff_t offset;
		} free;
		struct {
			void (*fptr)(void *arg);
			ptrdiff_t offset;
		} call;
	} u;
};
/* RCU cleanup function queue item */
PREDECL_ATOMLIST(rcu_heads)
struct rcu_head {
	struct rcu_heads_item head;
	const struct rcu_action *action;
};
/* special RCU head for delayed fd-close */
struct rcu_head_close {
	struct rcu_head rcu_head;
	int fd;
};
/* enqueue RCU action - use the macros below to get the rcu_action set up */
extern void rcu_enqueue(struct rcu_head *head, const struct rcu_action *action);
/* RCU free() and file close() operations.
*
* freed memory / closed fds become _immediately_ unavailable to the calling
* thread, but will remain available for other threads until they have passed
* into RCU-released state.
*/
/* may be called with NULL mt to do non-MTYPE free() */
#define rcu_free(mtype, ptr, field) \
	do { \
		typeof(ptr) _ptr = (ptr); \
		if (!_ptr) \
			break; \
		struct rcu_head *_rcu_head = &_ptr->field; \
		static const struct rcu_action _rcu_action = { \
			.type = RCUA_FREE, \
			.u.free = { \
				.mt = mtype, \
				.offset = offsetof(typeof(*_ptr), field), \
			}, \
		}; \
		rcu_enqueue(_rcu_head, &_rcu_action); \
	} while (0)
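/* Example: a minimal sketch of rcu_free() usage.  "struct item" and "it" are
 * hypothetical; NULL is passed as the memtype to get a plain free(), as noted
 * above (normally this would be an MTYPE_* from memory.h).
 *
 *   struct item {
 *           int value;
 *           struct rcu_head rcu_head;   // embedded cleanup handle
 *   };
 *
 *   // unlink "it" from all shared data structures first, then:
 *   rcu_free(NULL, it, rcu_head);
 *   // "it" is now off-limits for this thread; other threads currently
 *   // inside rcu_read_lock() may keep using it until they release.
 */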
/* use this sparingly; it runs on (and blocks) the RCU thread */
#define rcu_call(func, ptr, field) \
	do { \
		typeof(ptr) _ptr = (ptr); \
		void (*_fptype)(typeof(ptr)); \
		struct rcu_head *_rcu_head = &_ptr->field; \
		static const struct rcu_action _rcu_action = { \
			.type = RCUA_CALL, \
			.u.call = { \
				.fptr = (void *)func, \
				.offset = offsetof(typeof(*_ptr), field), \
			}, \
		}; \
		/* typecheck: func must take typeof(ptr) as its argument */ \
		(void)(_fptype = func); \
		rcu_enqueue(_rcu_head, &_rcu_action); \
	} while (0)
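/* Example: a minimal sketch of rcu_call() usage, reusing the hypothetical
 * "struct item" from the rcu_free() example above.  The callback must take
 * the same pointer type as "ptr" - that is what the typecheck enforces.
 *
 *   static void item_cleanup(struct item *it)
 *   {
 *           // runs on the RCU thread once all current readers are done
 *           ...
 *   }
 *
 *   rcu_call(item_cleanup, it, rcu_head);
 */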
extern void rcu_close(struct rcu_head_close *head, int fd);
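/* Example: a minimal sketch of delayed fd-close.  "struct conn" and "c" are
 * hypothetical:
 *
 *   struct conn {
 *           int fd;
 *           struct rcu_head_close rcu_close;   // embedded close handle
 *   };
 *
 *   // the fd becomes unusable for this thread immediately, but is only
 *   // actually close()d once all current RCU readers have released:
 *   rcu_close(&c->rcu_close, c->fd);
 */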
#endif /* _FRRCU_H */