path: root/server/mpm_unix.c
author     Yann Ylavic <ylavic@apache.org>    2018-01-18 19:35:20 +0100
committer  Yann Ylavic <ylavic@apache.org>    2018-01-18 19:35:20 +0100
commit     d35f2f7675d84991168bd1276cef44a88defc63e (patch)
tree       f348736002dde94f3e6d63d3a0ac5d744f3724f3  /server/mpm_unix.c
parent     Follow up to r1821526: opacify fdqueue types. (diff)
Follow up to r1821526: style, no functional change.
[Reverted by r1821619]
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1821539 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'server/mpm_unix.c')
-rw-r--r--  server/mpm_unix.c  |  56
1 file changed, 26 insertions(+), 30 deletions(-)
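
The change is purely cosmetic: the patch binds the '*' to the declarator rather than the type name, drops the cast on apr_palloc()'s void * return value, and removes the redundant parentheses around the sizeof operand. An illustrative sketch of the before-and-after style, using hypothetical names rather than an excerpt from mpm_unix.c:

/* Illustrative only; old style shown in the comments, new style in code. */
#include <apr_pools.h>

struct node_sketch {
    apr_pool_t *pool;
    struct node_sketch *next;
};

/* Old: static struct node_sketch *make_node(apr_pool_t * pool) */
static struct node_sketch *make_node(apr_pool_t *pool)
{
    struct node_sketch *n;

    /* Old: n = (struct node_sketch *) apr_palloc(pool, sizeof (*n)); */
    n = apr_palloc(pool, sizeof *n);
    n->pool = pool;
    n->next = NULL;
    return n;
}
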
diff --git a/server/mpm_unix.c b/server/mpm_unix.c
index bc35f76359..0f87fc5a8a 100644
--- a/server/mpm_unix.c
+++ b/server/mpm_unix.c
@@ -1155,9 +1155,8 @@ static apr_status_t queue_info_cleanup(void *data_)
if (first_pool == NULL) {
break;
}
- if (apr_atomic_casptr
- ((void*) &(qi->recycled_pools), first_pool->next,
- first_pool) == first_pool) {
+ if (apr_atomic_casptr((void *)&qi->recycled_pools, first_pool->next,
+ first_pool) == first_pool) {
apr_pool_destroy(first_pool->pool);
}
}
@@ -1165,8 +1164,8 @@ static apr_status_t queue_info_cleanup(void *data_)
return APR_SUCCESS;
}
-apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
- apr_pool_t * pool, int max_idlers,
+apr_status_t ap_queue_info_create(fd_queue_info_t **queue_info,
+ apr_pool_t *pool, int max_idlers,
int max_recycled_pools)
{
apr_status_t rv;
@@ -1195,8 +1194,8 @@ apr_status_t ap_queue_info_create(fd_queue_info_t ** queue_info,
return APR_SUCCESS;
}
-apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
- apr_pool_t * pool_to_recycle)
+apr_status_t ap_queue_info_set_idle(fd_queue_info_t *queue_info,
+ apr_pool_t *pool_to_recycle)
{
apr_status_t rv;
@@ -1223,7 +1222,7 @@ apr_status_t ap_queue_info_set_idle(fd_queue_info_t * queue_info,
return APR_SUCCESS;
}
-apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
+apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t *queue_info)
{
/* Don't block if there isn't any idle worker. */
for (;;) {
@@ -1238,7 +1237,7 @@ apr_status_t ap_queue_info_try_get_idler(fd_queue_info_t * queue_info)
}
}
-apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
+apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t *queue_info,
int *had_to_block)
{
apr_status_t rv;
@@ -1302,7 +1301,7 @@ apr_status_t ap_queue_info_wait_for_idler(fd_queue_info_t * queue_info,
}
}
-apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
+apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t *queue_info)
{
apr_uint32_t val;
val = apr_atomic_read32(&queue_info->idlers);
@@ -1311,8 +1310,7 @@ apr_uint32_t ap_queue_info_get_idlers(fd_queue_info_t * queue_info)
return val - zero_pt;
}
-void ap_push_pool(fd_queue_info_t * queue_info,
- apr_pool_t * pool_to_recycle)
+void ap_push_pool(fd_queue_info_t *queue_info, apr_pool_t *pool_to_recycle)
{
struct recycled_pool *new_recycle;
/* If we have been given a pool to recycle, atomically link
@@ -1331,8 +1329,7 @@ void ap_push_pool(fd_queue_info_t * queue_info,
}
apr_pool_clear(pool_to_recycle);
- new_recycle = (struct recycled_pool *) apr_palloc(pool_to_recycle,
- sizeof (*new_recycle));
+ new_recycle = apr_palloc(pool_to_recycle, sizeof *new_recycle);
new_recycle->pool = pool_to_recycle;
for (;;) {
/*
@@ -1348,7 +1345,7 @@ void ap_push_pool(fd_queue_info_t * queue_info,
}
}
-void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
+void ap_pop_pool(apr_pool_t **recycled_pool, fd_queue_info_t *queue_info)
{
/* Atomically pop a pool from the recycled list */
@@ -1368,9 +1365,8 @@ void ap_pop_pool(apr_pool_t ** recycled_pool, fd_queue_info_t * queue_info)
if (first_pool == NULL) {
break;
}
- if (apr_atomic_casptr
- ((void*) &(queue_info->recycled_pools),
- first_pool->next, first_pool) == first_pool) {
+ if (apr_atomic_casptr((void *)&queue_info->recycled_pools,
+ first_pool->next, first_pool) == first_pool) {
*recycled_pool = first_pool->pool;
if (queue_info->max_recycled_pools >= 0)
apr_atomic_dec32(&queue_info->recycled_pools_count);
@@ -1392,7 +1388,7 @@ void ap_free_idle_pools(fd_queue_info_t *queue_info)
}
-apr_status_t ap_queue_info_term(fd_queue_info_t * queue_info)
+apr_status_t ap_queue_info_term(fd_queue_info_t *queue_info)
{
apr_status_t rv;
rv = apr_thread_mutex_lock(queue_info->idlers_mutex);
@@ -1436,8 +1432,8 @@ static apr_status_t ap_queue_destroy(void *data)
/**
* Initialize the fd_queue_t.
*/
-apr_status_t ap_queue_init(fd_queue_t * queue, int queue_capacity,
- apr_pool_t * a)
+apr_status_t ap_queue_init(fd_queue_t *queue, int queue_capacity,
+ apr_pool_t *a)
{
int i;
apr_status_t rv;
@@ -1475,8 +1471,8 @@ apr_status_t ap_queue_init(fd_queue_t * queue, int queue_capacity,
* precondition: ap_queue_info_wait_for_idler has already been called
* to reserve an idle worker thread
*/
-apr_status_t ap_queue_push(fd_queue_t * queue, apr_socket_t * sd,
- void * baton, apr_pool_t * p)
+apr_status_t ap_queue_push(fd_queue_t *queue, apr_socket_t *sd,
+ void *baton, apr_pool_t *p)
{
fd_queue_elem_t *elem;
apr_status_t rv;
@@ -1506,7 +1502,7 @@ apr_status_t ap_queue_push(fd_queue_t * queue, apr_socket_t * sd,
return APR_SUCCESS;
}
-apr_status_t ap_queue_push_timer(fd_queue_t * queue, timer_event_t *te)
+apr_status_t ap_queue_push_timer(fd_queue_t *queue, timer_event_t *te)
{
apr_status_t rv;
@@ -1533,9 +1529,9 @@ apr_status_t ap_queue_push_timer(fd_queue_t * queue, timer_event_t *te)
* Once retrieved, the socket is placed into the address specified by
* 'sd'.
*/
-apr_status_t ap_queue_pop_something(fd_queue_t * queue, apr_socket_t ** sd,
- void ** baton, apr_pool_t ** p,
- timer_event_t ** te_out)
+apr_status_t ap_queue_pop_something(fd_queue_t *queue, apr_socket_t **sd,
+ void **baton, apr_pool_t **p,
+ timer_event_t **te_out)
{
fd_queue_elem_t *elem;
apr_status_t rv;
@@ -1610,17 +1606,17 @@ static apr_status_t queue_interrupt(fd_queue_t *queue, int all, int term)
return apr_thread_mutex_unlock(queue->one_big_mutex);
}
-apr_status_t ap_queue_interrupt_all(fd_queue_t * queue)
+apr_status_t ap_queue_interrupt_all(fd_queue_t *queue)
{
return queue_interrupt(queue, 1, 0);
}
-apr_status_t ap_queue_interrupt_one(fd_queue_t * queue)
+apr_status_t ap_queue_interrupt_one(fd_queue_t *queue)
{
return queue_interrupt(queue, 0, 0);
}
-apr_status_t ap_queue_term(fd_queue_t * queue)
+apr_status_t ap_queue_term(fd_queue_t *queue)
{
return queue_interrupt(queue, 1, 1);
}
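
Both compare-and-swap sites that this patch reformats, in queue_info_cleanup() and ap_pop_pool(), together with the retry loop in ap_push_pool(), operate on the recycled-pools list as a lock-free stack. A minimal, self-contained sketch of that push/pop pattern, with hypothetical names standing in for the mpm_unix.c structures, could look like this:

#include <apr_atomic.h>
#include <apr_pools.h>

/* Hypothetical stand-ins for the recycled-pools list in fd_queue_info_t. */
struct pool_node {
    apr_pool_t *pool;
    struct pool_node *next;
};

struct pool_stack {
    struct pool_node *volatile head;
};

/* Push: re-read the current head and retry the CAS until no other
 * thread has changed it between the read and the swap. */
static void push_pool(struct pool_stack *stack, apr_pool_t *pool)
{
    struct pool_node *node = apr_palloc(pool, sizeof *node);
    node->pool = pool;
    for (;;) {
        struct pool_node *head = stack->head;
        node->next = head;
        if (apr_atomic_casptr((void *)&stack->head, node, head) == head) {
            return;
        }
    }
}

/* Pop: succeed only if the head read is still the head at CAS time;
 * otherwise another thread won the race, so retry. */
static apr_pool_t *pop_pool(struct pool_stack *stack)
{
    for (;;) {
        struct pool_node *head = stack->head;
        if (head == NULL) {
            return NULL;
        }
        if (apr_atomic_casptr((void *)&stack->head, head->next,
                              head) == head) {
            return head->pool;
        }
    }
}

apr_atomic_casptr() stores its second argument into the location only when the location still holds the third argument, and returns the previous value, so comparing the return value against the head that was read tells each thread whether its update won the race.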