author    | Stefan Eissing <icing@apache.org> | 2016-11-01 20:40:37 +0100
committer | Stefan Eissing <icing@apache.org> | 2016-11-01 20:40:37 +0100
commit    | 29d168102556c119f62a3f28f5a3695fe93e9a96 (patch)
tree      | eb8b608b33c0e634f3229a426d4e142a37a2c28d
parent    | Rebuild. (diff)
mod_http2: proper parsing and forwarding of multiple or unannounced 1xx responses
mod_proxy_http2: improved robustness when main connection gets aborted
git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1767553 13f79535-47bb-0310-9956-ffa450edef68
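The first change is easiest to see on the wire: in serialized mode, and for requests handled via mod_proxy, the task output is an HTTP/1.1 byte stream that may now carry several interim responses (status < 200), announced or not, before the final one. The patch keeps the response parser in h2_from_h1.c running, resetting it to H2_RP_STATUS_LINE after each interim response, until a final status has been seen (task->output.sent_response). The standalone C sketch below is not the module's parser; it only illustrates that loop over a hard-coded sample stream (the 100/103 responses are made-up example data), assuming well-formed input.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse one "HTTP/1.x NNN ..." status line; return the status code or -1. */
static int parse_status_line(const char *p)
{
    int major, minor, status;
    if (sscanf(p, "HTTP/%d.%d %d", &major, &minor, &status) == 3) {
        return status;
    }
    return -1;
}

int main(void)
{
    /* Two unannounced interim responses followed by the final one. */
    const char *wire =
        "HTTP/1.1 100 Continue\r\n\r\n"
        "HTTP/1.1 103 Early Hints\r\nLink: </style.css>; rel=preload\r\n\r\n"
        "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n";
    const char *p = wire;
    int sent_final = 0;    /* plays the role of task->output.sent_response */

    while (!sent_final && *p) {
        int status = parse_status_line(p);
        if (status < 0) {
            break;                       /* not a status line: give up */
        }
        printf("forwarding response %d\n", status);   /* "pass to client" */
        if (status >= 200) {
            sent_final = 1;              /* final response reached */
        }
        p = strstr(p, "\r\n\r\n");       /* skip this response's header block */
        if (!p) {
            break;
        }
        p += 4;                          /* parser starts over on a status line */
    }
    return sent_final ? EXIT_SUCCESS : EXIT_FAILURE;
}
```

In the real filter the same idea appears as the loop condition change in h2_task.c: h2_filter_parse_h1 keeps calling h2_from_h1_parse_response `while (!task->output.sent_response)`.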
-rw-r--r-- | CHANGES                            |  8
-rw-r--r-- | docs/log-message-tags/next-number  |  2
-rw-r--r-- | modules/http2/h2.h                 |  4
-rw-r--r-- | modules/http2/h2_from_h1.c         | 10
-rw-r--r-- | modules/http2/h2_mplx.c            | 23
-rw-r--r-- | modules/http2/h2_mplx.h            |  4
-rw-r--r-- | modules/http2/h2_ngn_shed.c        |  6
-rw-r--r-- | modules/http2/h2_proxy_session.c   | 57
-rw-r--r-- | modules/http2/h2_proxy_session.h   |  4
-rw-r--r-- | modules/http2/h2_request.c         | 26
-rw-r--r-- | modules/http2/h2_task.c            | 18
-rw-r--r-- | modules/http2/h2_task.h            |  1
-rw-r--r-- | modules/http2/mod_http2.c          |  5
-rw-r--r-- | modules/http2/mod_http2.h          |  3
-rw-r--r-- | modules/http2/mod_proxy_http2.c    | 36
15 files changed, 118 insertions, 89 deletions
diff --git a/CHANGES b/CHANGES
@@ -1,6 +1,14 @@
  -*- coding: utf-8 -*-
 Changes with Apache 2.5.0
 
+  *) mod_http2: unannounced and multiple interim responses (status code < 200)
+     are parsed and forwarded to client until a final response arrives.
+     [Stefan Eissing]
+
+  *) mod_proxy_http2: improved robustness when main connection is closed early
+     by resetting all ongoing streams against the backend.
+     [Stefan Eissing]
+
   *) mod_http2: allocators from slave connections are released earlier,
      resulting in less overall memory use on busy, long lived connections.
      [Stefan Eissing]
diff --git a/docs/log-message-tags/next-number b/docs/log-message-tags/next-number
index aa9490b165..ccd40ea172 100644
--- a/docs/log-message-tags/next-number
+++ b/docs/log-message-tags/next-number
@@ -1 +1 @@
-3472
+3473
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index 62fec33626..59719ad8c7 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -122,15 +122,11 @@ struct h2_request {
     const char *scheme;
     const char *authority;
     const char *path;
-
     apr_table_t *headers;
     apr_time_t request_time;
-    unsigned int chunked : 1;   /* iff requst body needs to be forwarded as chunked */
     unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
-    unsigned int expect_100 : 1; /* iff we need a 100-continue response */
-    unsigned int expect_failed : 1; /* iff we are unable to fullfill expects */
 };
 
 typedef struct h2_headers h2_headers;
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_from_h1.c
index 2b4f79ac14..cdb444650b 100644
--- a/modules/http2/h2_from_h1.c
+++ b/modules/http2/h2_from_h1.c
@@ -424,8 +424,13 @@ static apr_status_t pass_response(h2_task *task, ap_filter_t *f,
     status = ap_pass_brigade(f->next, parser->tmp);
     apr_brigade_cleanup(parser->tmp);
-    parser->state = H2_RP_DONE;
-    task->output.parse_response = 0;
+    /* reset parser for possible next response */
+    parser->state = H2_RP_STATUS_LINE;
+    apr_array_clear(parser->hlines);
+
+    if (response->status >= 200) {
+        task->output.sent_response = 1;
+    }
 
     ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03197)
                   "h2_task(%s): passed response %d", task->id, response->status);
@@ -486,6 +491,7 @@ apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
         }
         else if (line[0] == '\0') {
             /* end of headers, pass response onward */
+            return pass_response(task, f, parser);
         }
         else {
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 8890aa388e..d5635dd112 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -308,8 +308,7 @@ h2_mplx *h2_mplx_create(conn_rec *c, apr_pool_t *parent,
         m->stream_timeout = stream_timeout;
         m->workers = workers;
         m->workers_max = workers->max_workers;
-        m->workers_def_limit = 4;
-        m->workers_limit = m->workers_def_limit;
+        m->workers_limit = 6; /* the original h1 max parallel connections */
         m->last_limit_change = m->last_idle_block = apr_time_now();
         m->limit_change_interval = apr_time_from_msec(200);
@@ -568,9 +567,8 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
     h2_iq_clear(m->q);
     purge_streams(m);
 
-    /* 3. mark all slave connections as aborted and wakeup all sleeping
-     * tasks. Mark all still active streams as 'done'. m->streams has to
-     * be empty afterwards with streams either in
+    /* 3. wakeup all sleeping tasks. Mark all still active streams as 'done'.
+     * m->streams has to be empty afterwards with streams either in
      * a) m->shold because a task is still active
      * b) m->spurge because task is done, or was not started */
     h2_ihash_iter(m->tasks, task_abort_connection, m);
@@ -612,8 +610,9 @@ apr_status_t h2_mplx_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
     if (!h2_ihash_empty(m->tasks)) {
         /* when we are here, we lost track of the tasks still present.
          * this currently happens with mod_proxy_http2 when we shut
-         * down a h2_req_engine with tasks assigned... */
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03056)
+         * down a h2_req_engine with tasks assigned. Since no parallel
+         * processing is going on any more, we just clean them up. */
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03056)
                       "h2_mplx(%ld): 3. release_join with %d tasks",
                       m->id, (int)h2_ihash_count(m->tasks));
         h2_ihash_iter(m->tasks, task_print, m);
@@ -977,7 +976,8 @@ static void task_done(h2_mplx *m, h2_task *task, h2_req_engine *ngn)
     }
 
     if (task->engine) {
-        if (!h2_req_engine_is_shutdown(task->engine)) {
+        if (!m->aborted && !task->c->aborted
+            && !h2_req_engine_is_shutdown(task->engine)) {
             ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c,
                           "h2_mplx(%ld): task(%s) has not-shutdown "
                           "engine(%s)", m->id, task->id,
@@ -1314,7 +1314,8 @@ apr_status_t h2_mplx_req_engine_pull(h2_req_engine *ngn,
     return status;
 }
 
-void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+                             apr_status_t status)
 {
     h2_task *task = h2_ctx_cget_task(r_conn);
@@ -1325,6 +1326,10 @@ void h2_mplx_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
     if (enter_mutex(m, &acquired) == APR_SUCCESS) {
         ngn_out_update_windows(m, ngn);
         h2_ngn_shed_done_task(m->ngn_shed, ngn, task);
+        if (status != APR_SUCCESS && h2_task_can_redo(task)
+            && !h2_ihash_get(m->redo_tasks, task->stream_id)) {
+            h2_ihash_add(m->redo_tasks, task);
+        }
         if (task->engine) {
             /* cannot report that as done until engine returns */
         }
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index cb48fd2eb0..25e07005e7 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -84,7 +84,6 @@ struct h2_mplx {
     int max_stream_started;      /* highest stream id that started processing */
     int workers_busy;            /* # of workers processing on this mplx */
     int workers_limit;           /* current # of workers limit, dynamic */
-    int workers_def_limit;       /* default # of workers limit */
     int workers_max;             /* max, hard limit # of workers in a process */
     apr_time_t last_idle_block;  /* last time, this mplx entered IDLE while
                                   * streams were ready */
@@ -351,6 +350,7 @@ apr_status_t h2_mplx_req_engine_pull(struct h2_req_engine *ngn,
                                      apr_read_type_e block,
                                      int capacity,
                                      request_rec **pr);
-void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn);
+void h2_mplx_req_engine_done(struct h2_req_engine *ngn, conn_rec *r_conn,
+                             apr_status_t status);
 
 #endif /* defined(__mod_h2__h2_mplx__) */
diff --git a/modules/http2/h2_ngn_shed.c b/modules/http2/h2_ngn_shed.c
index 45329102f7..e0c40cfb23 100644
--- a/modules/http2/h2_ngn_shed.c
+++ b/modules/http2/h2_ngn_shed.c
@@ -335,7 +335,7 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
     if (!shed->aborted && !H2_REQ_ENTRIES_EMPTY(&ngn->entries)) {
         h2_ngn_entry *entry;
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
                       "h2_ngn_shed(%ld): exit engine %s (%s), "
                       "has still requests queued, shutdown=%d,"
                       "assigned=%ld, live=%ld, finished=%ld",
@@ -347,7 +347,7 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
              entry != H2_REQ_ENTRIES_SENTINEL(&ngn->entries);
              entry = H2_NGN_ENTRY_NEXT(entry)) {
             h2_task *task = entry->task;
-            ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
                           "h2_ngn_shed(%ld): engine %s has queued task %s, "
                           "frozen=%d, aborting",
                           shed->c->id, ngn->id, task->id, task->frozen);
@@ -356,7 +356,7 @@ void h2_ngn_shed_done_ngn(h2_ngn_shed *shed, struct h2_req_engine *ngn)
         }
     }
     if (!shed->aborted && (ngn->no_assigned > 1 || ngn->no_live > 1)) {
-        ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, shed->c,
+        ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, shed->c,
                       "h2_ngn_shed(%ld): exit engine %s (%s), "
                       "assigned=%ld, live=%ld, finished=%ld",
                       shed->c->id, ngn->id, ngn->type,
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index a79c5da479..59ae9d48e2 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -1137,13 +1137,13 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
     if (stream) {
         int touched = (stream->data_sent ||
                        stream_id <= session->last_stream_id);
-        int complete = (stream->error_code == 0);
+        apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL;
         ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03364)
                       "h2_proxy_sesssion(%s): stream(%d) closed "
                       "(touched=%d, error=%d)",
                       session->id, stream_id, touched, stream->error_code);
-        if (!complete) {
+        if (status != APR_SUCCESS) {
             stream->r->status = 500;
         }
         else if (!stream->data_received) {
@@ -1164,7 +1164,7 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
     h2_proxy_ihash_remove(session->streams, stream_id);
     h2_proxy_iq_remove(session->suspended, stream_id);
     if (session->done) {
-        session->done(session, stream->r, complete, touched);
+        session->done(session, stream->r, status, touched);
     }
 }
@@ -1276,6 +1276,21 @@ static void dispatch_event(h2_proxy_session *session, h2_proxys_event_t ev,
     }
 }
 
+static int send_loop(h2_proxy_session *session)
+{
+    while (nghttp2_session_want_write(session->ngh2)) {
+        int rv = nghttp2_session_send(session->ngh2);
+        if (rv < 0 && nghttp2_is_fatal(rv)) {
+            ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+                          "h2_proxy_session(%s): write, rv=%d", session->id, rv);
+            dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
+            break;
+        }
+        return 1;
+    }
+    return 0;
+}
+
 apr_status_t h2_proxy_session_process(h2_proxy_session *session)
 {
     apr_status_t status;
@@ -1300,16 +1315,7 @@ run_loop:
         case H2_PROXYS_ST_BUSY:
         case H2_PROXYS_ST_LOCAL_SHUTDOWN:
        case H2_PROXYS_ST_REMOTE_SHUTDOWN:
-            while (nghttp2_session_want_write(session->ngh2)) {
-                int rv = nghttp2_session_send(session->ngh2);
-                if (rv < 0 && nghttp2_is_fatal(rv)) {
-                    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
-                                  "h2_proxy_session(%s): write, rv=%d", session->id, rv);
-                    dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, rv, NULL);
-                    break;
-                }
-                have_written = 1;
-            }
+            have_written = send_loop(session);
 
             if (nghttp2_session_want_read(session->ngh2)) {
                 status = h2_proxy_session_read(session, 0, 0);
@@ -1386,13 +1392,36 @@ typedef struct {
     h2_proxy_request_done *done;
 } cleanup_iter_ctx;
 
+static int cancel_iter(void *udata, void *val)
+{
+    cleanup_iter_ctx *ctx = udata;
+    h2_proxy_stream *stream = val;
+    nghttp2_submit_rst_stream(ctx->session->ngh2, NGHTTP2_FLAG_NONE,
+                              stream->id, 0);
+    return 1;
+}
+
+void h2_proxy_session_cancel_all(h2_proxy_session *session)
+{
+    if (!h2_proxy_ihash_empty(session->streams)) {
+        cleanup_iter_ctx ctx;
+        ctx.session = session;
+        ctx.done = session->done;
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03366)
+                      "h2_proxy_session(%s): cancel %d streams",
+                      session->id, (int)h2_proxy_ihash_count(session->streams));
+        h2_proxy_ihash_iter(session->streams, cancel_iter, &ctx);
+        session_shutdown(session, 0, NULL);
+    }
+}
+
 static int done_iter(void *udata, void *val)
 {
     cleanup_iter_ctx *ctx = udata;
     h2_proxy_stream *stream = val;
     int touched = (stream->data_sent ||
                    stream->id <= ctx->session->last_stream_id);
-    ctx->done(ctx->session, stream->r, 0, touched);
+    ctx->done(ctx->session, stream->r, APR_ECONNABORTED, touched);
     return 1;
 }
diff --git a/modules/http2/h2_proxy_session.h b/modules/http2/h2_proxy_session.h
index 4f8205027f..709fe4b0b7 100644
--- a/modules/http2/h2_proxy_session.h
+++ b/modules/http2/h2_proxy_session.h
@@ -52,7 +52,7 @@ typedef enum {
 typedef struct h2_proxy_session h2_proxy_session;
 
 typedef void h2_proxy_request_done(h2_proxy_session *s, request_rec *r,
-                                   int complete, int touched);
+                                   apr_status_t status, int touched);
 
 struct h2_proxy_session {
     const char *id;
@@ -103,6 +103,8 @@ apr_status_t h2_proxy_session_submit(h2_proxy_session *s, const char *url,
  */
 apr_status_t h2_proxy_session_process(h2_proxy_session *s);
 
+void h2_proxy_session_cancel_all(h2_proxy_session *s);
+
 void h2_proxy_session_cleanup(h2_proxy_session *s, h2_proxy_request_done *done);
 
 void h2_proxy_session_update_window(h2_proxy_session *s,
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index 0a6f6b58fc..6ba8108449 100644
--- a/modules/http2/h2_request.c
+++ b/modules/http2/h2_request.c
@@ -187,16 +187,6 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos)
         }
     }
 
-    s = apr_table_get(req->headers, "Expect");
-    if (s && s[0]) {
-        if (ap_cstr_casecmp(s, "100-continue") == 0) {
-            req->expect_100 = 1;
-        }
-        else {
-            req->expect_failed = 1;
-        }
-    }
-
     return APR_SUCCESS;
 }
@@ -215,6 +205,7 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
 {
     int access_status = HTTP_OK;
     const char *rpath;
+    const char *s;
     request_rec *r = ap_create_request(c);
@@ -250,12 +241,15 @@ request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
     /* we may have switched to another server */
     r->per_dir_config = r->server->lookup_defaults;
 
-    if (req->expect_100) {
-        r->expecting_100 = 1;
-    }
-    else if (req->expect_failed) {
-        r->status = HTTP_EXPECTATION_FAILED;
-        ap_send_error_response(r, 0);
+    s = apr_table_get(r->headers_in, "Expect");
+    if (s && s[0]) {
+        if (ap_cstr_casecmp(s, "100-continue") == 0) {
+            r->expecting_100 = 1;
+        }
+        else {
+            r->status = HTTP_EXPECTATION_FAILED;
+            ap_send_error_response(r, 0);
+        }
     }
 
     /*
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
index a1f99bd51b..1419aab551 100644
--- a/modules/http2/h2_task.c
+++ b/modules/http2/h2_task.c
@@ -331,7 +331,7 @@ static apr_status_t h2_filter_slave_in(ap_filter_t* f,
             /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
              * to support it. Seems to work. */
             ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
-                          APLOGNO(02942)
+                          APLOGNO(03472)
                           "h2_slave_in(%s), unsupported READ mode %d",
                           task->id, mode);
             status = APR_ENOTIMPL;
@@ -368,7 +368,7 @@ static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
     /* There are cases where we need to parse a serialized http/1.1
      * response. One example is a 100-continue answer in serialized mode
      * or via a mod_proxy setup */
-    while (task->output.parse_response) {
+    while (!task->output.sent_response) {
         status = h2_from_h1_parse_response(task, f, bb);
         ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
                       "h2_task(%s): parsed response", task->id);
@@ -563,23 +563,13 @@ apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
     task->input.bb = apr_brigade_create(task->pool, task->c->bucket_alloc);
     if (task->request->serialize) {
         ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
-                      "h2_task(%s): serialize request %s %s, expect-100=%d",
-                      task->id, task->request->method, task->request->path,
-                      task->request->expect_100);
+                      "h2_task(%s): serialize request %s %s",
+                      task->id, task->request->method, task->request->path);
         apr_brigade_printf(task->input.bb, NULL, NULL, "%s %s HTTP/1.1\r\n",
                            task->request->method, task->request->path);
         apr_table_do(input_ser_header, task, task->request->headers, NULL);
         apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
-        if (task->request->expect_100) {
-            /* we are unable to suppress the serialization of the
-             * intermediate response and need to parse it */
-            task->output.parse_response = 1;
-        }
-    }
-
-    if (task->request->expect_100) {
-        task->output.parse_response = 1;
     }
 
     ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
index ad8f056596..a8f0f2c315 100644
--- a/modules/http2/h2_task.h
+++ b/modules/http2/h2_task.h
@@ -70,7 +70,6 @@ struct h2_task {
         unsigned int opened : 1;
         unsigned int sent_response : 1;
         unsigned int copy_files : 1;
-        unsigned int parse_response : 1;
         struct h2_response_parser *rparser;
         apr_bucket_brigade *bb;
     } output;
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
index 854e677a34..7452cd7c2b 100644
--- a/modules/http2/mod_http2.c
+++ b/modules/http2/mod_http2.c
@@ -166,9 +166,10 @@ static apr_status_t http2_req_engine_pull(h2_req_engine *ngn,
     return h2_mplx_req_engine_pull(ngn, block, capacity, pr);
 }
 
-static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn)
+static void http2_req_engine_done(h2_req_engine *ngn, conn_rec *r_conn,
+                                  apr_status_t status)
 {
-    h2_mplx_req_engine_done(ngn, r_conn);
+    h2_mplx_req_engine_done(ngn, r_conn, status);
 }
 
 /* Runs once per created child process. Perform any process
diff --git a/modules/http2/mod_http2.h b/modules/http2/mod_http2.h
index 15cf9d0677..f0cc9567ca 100644
--- a/modules/http2/mod_http2.h
+++ b/modules/http2/mod_http2.h
@@ -90,5 +90,6 @@ APR_DECLARE_OPTIONAL_FN(apr_status_t,
                         request_rec **pr));
 
 APR_DECLARE_OPTIONAL_FN(void,
                         http2_req_engine_done, (h2_req_engine *engine,
-                                                conn_rec *rconn));
+                                                conn_rec *rconn,
+                                                apr_status_t status));
 
 #endif
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
index b44672945a..503dd7dddf 100644
--- a/modules/http2/mod_proxy_http2.c
+++ b/modules/http2/mod_proxy_http2.c
@@ -46,7 +46,8 @@ static apr_status_t (*req_engine_pull)(h2_req_engine *engine,
                                        apr_read_type_e block,
                                        int capacity,
                                        request_rec **pr);
-static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn);
+static void (*req_engine_done)(h2_req_engine *engine, conn_rec *r_conn,
+                               apr_status_t status);
 
 typedef struct h2_proxy_ctx {
     conn_rec *owner;
@@ -270,12 +271,12 @@ static apr_status_t add_request(h2_proxy_session *session, request_rec *r)
 }
 
 static void request_done(h2_proxy_session *session, request_rec *r,
-                         int complete, int touched)
+                         apr_status_t status, int touched)
 {
     h2_proxy_ctx *ctx = session->user_data;
     const char *task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
 
-    if (!complete) {
+    if (status != APR_SUCCESS) {
         if (!touched) {
             /* untouched request, need rescheduling */
             if (req_engine_push && is_h2 && is_h2(ctx->owner)) {
@@ -292,7 +293,7 @@ static void request_done(h2_proxy_session *session, request_rec *r,
         else {
             const char *uri;
             uri = apr_uri_unparse(r->pool, &r->parsed_uri, 0);
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
+            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
                           APLOGNO(03471) "h2_proxy_session(%s): request %s -> %s "
                           "not complete, was touched",
                           ctx->engine_id, task_id, uri);
@@ -300,23 +301,15 @@ static void request_done(h2_proxy_session *session, request_rec *r,
     }
 
     if (r == ctx->rbase) {
-        ctx->r_status = complete? APR_SUCCESS : HTTP_GATEWAY_TIME_OUT;
+        ctx->r_status = (status == APR_SUCCESS)? APR_SUCCESS : HTTP_SERVICE_UNAVAILABLE;
     }
 
     if (req_engine_done && ctx->engine) {
-        if (complete) {
-            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, r->connection,
-                          APLOGNO(03370)
-                          "h2_proxy_session(%s): finished request %s",
-                          ctx->engine_id, task_id);
-        }
-        else {
-            ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, r->connection,
-                          APLOGNO(03371)
-                          "h2_proxy_session(%s): failed request %s",
-                          ctx->engine_id, task_id);
-        }
-        req_engine_done(ctx->engine, r->connection);
+        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, r->connection,
+                      APLOGNO(03370)
+                      "h2_proxy_session(%s): finished request %s",
+                      ctx->engine_id, task_id);
+        req_engine_done(ctx->engine, r->connection, status);
     }
 }
@@ -382,7 +375,12 @@ static apr_status_t proxy_engine_run(h2_proxy_ctx *ctx) {
                 ap_log_cerror(APLOG_MARK, APLOG_DEBUG, s2, ctx->owner,
                               APLOGNO(03374) "eng(%s): pull request",
                               ctx->engine_id);
-                status = s2;
+                /* give notice that we're leaving and cancel all ongoing
+                 * streams. */
+                next_request(ctx, 1);
+                h2_proxy_session_cancel_all(ctx->session);
+                h2_proxy_session_process(ctx->session);
+                status = ctx->r_status = APR_SUCCESS;
                 break;
            }
            if (!ctx->next && h2_proxy_ihash_empty(ctx->session->streams)) {
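On the mod_proxy_http2 side, the h2_proxy_request_done callback now receives an apr_status_t instead of an `int complete` flag. request_done uses it to decide between rescheduling and failing: a failed request that was never touched (no data sent, stream id not yet used) is pushed back to the request engine, while a touched failure maps to HTTP_SERVICE_UNAVAILABLE for the base request. The sketch below mirrors only that decision logic; the types and names (hypo_request, hypo_status_t) are stand-ins for the real APR/httpd structures, not part of the module.

```c
#include <stdio.h>

typedef int hypo_status_t;                 /* stand-in for apr_status_t   */
#define HYPO_SUCCESS             0         /* stand-in for APR_SUCCESS    */
#define HTTP_SERVICE_UNAVAILABLE 503       /* same numeric value as httpd */

typedef struct {
    const char *name;
    int touched;    /* data already sent, or the stream id was already used */
} hypo_request;

/*
 * Decide what to do with a request when the backend session reports it done.
 * Mirrors the shape of request_done() after the patch: on failure, untouched
 * requests are rescheduled, touched ones turn into a 503 for the client.
 */
static int request_done(const hypo_request *r, hypo_status_t status,
                        int *reschedule)
{
    *reschedule = 0;
    if (status != HYPO_SUCCESS) {
        if (!r->touched) {
            *reschedule = 1;               /* safe to retry elsewhere */
            return HYPO_SUCCESS;
        }
        return HTTP_SERVICE_UNAVAILABLE;   /* already touched: report failure */
    }
    return HYPO_SUCCESS;                   /* completed normally */
}

int main(void)
{
    hypo_request reqs[] = { { "GET /untouched", 0 }, { "GET /touched", 1 } };
    for (int i = 0; i < 2; i++) {
        int resched = 0;
        int result = request_done(&reqs[i], -1 /* backend went away */, &resched);
        printf("%-15s -> %s (r_status %d)\n", reqs[i].name,
               resched ? "reschedule" : "report", result);
    }
    return 0;
}
```

Together with the new h2_proxy_session_cancel_all(), which submits RST_STREAM for everything still open when the main connection goes away, this lets untouched requests be retried on a fresh backend connection instead of being reported as errors.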