summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--CMakeLists.txt2
-rw-r--r--modules/http2/config2.m41
-rw-r--r--modules/http2/h2.h10
-rw-r--r--modules/http2/h2_bucket_beam.c7
-rw-r--r--modules/http2/h2_c2.c384
-rw-r--r--modules/http2/h2_c2.h14
-rw-r--r--modules/http2/h2_c2_filter.c908
-rw-r--r--modules/http2/h2_c2_filter.h43
-rw-r--r--modules/http2/h2_conn_ctx.h7
-rw-r--r--modules/http2/h2_headers.c207
-rw-r--r--modules/http2/h2_headers.h107
-rw-r--r--modules/http2/h2_mplx.c11
-rw-r--r--modules/http2/h2_protocol.c1
-rw-r--r--modules/http2/h2_push.c19
-rw-r--r--modules/http2/h2_push.h25
-rw-r--r--modules/http2/h2_request.c57
-rw-r--r--modules/http2/h2_request.h3
-rw-r--r--modules/http2/h2_session.c1
-rw-r--r--modules/http2/h2_stream.c68
-rw-r--r--modules/http2/h2_stream.h51
-rw-r--r--modules/http2/h2_switch.c14
-rw-r--r--modules/http2/h2_util.c41
-rw-r--r--modules/http2/h2_util.h20
-rw-r--r--modules/http2/h2_version.h4
-rw-r--r--modules/http2/h2_workers.c4
-rw-r--r--modules/http2/mod_http2.dsp4
26 files changed, 1915 insertions, 98 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index cfbeb8ce03..2a16c71d1b 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -473,7 +473,7 @@ SET(mod_http2_extra_sources
modules/http2/h2_c1.c modules/http2/h2_c1_io.c
modules/http2/h2_c2.c modules/http2/h2_c2_filter.c
modules/http2/h2_config.c modules/http2/h2_conn_ctx.c
- modules/http2/h2_mplx.c
+ modules/http2/h2_mplx.c modules/http2/h2_headers.c
modules/http2/h2_protocol.c modules/http2/h2_push.c
modules/http2/h2_request.c modules/http2/h2_session.c
modules/http2/h2_stream.c modules/http2/h2_switch.c
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index 87d4cc2ae2..f89f5baa6c 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -27,6 +27,7 @@ h2_c2.lo dnl
h2_c2_filter.lo dnl
h2_config.lo dnl
h2_conn_ctx.lo dnl
+h2_headers.lo dnl
h2_mplx.lo dnl
h2_protocol.lo dnl
h2_push.lo dnl
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index f1017480ed..cff49e15f0 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -18,6 +18,7 @@
#define __mod_h2__h2__
#include <apr_version.h>
+#include <ap_mmn.h>
struct h2_session;
struct h2_stream;
@@ -180,4 +181,13 @@ typedef struct h2_stream *h2_stream_get_fn(struct h2_session *session, int strea
#define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
#define H2_PUSH_MODE_NOTE "http2-push-mode"
+
+#if AP_MODULE_MAGIC_AT_LEAST(20211221, 6)
+#define AP_HAS_RESPONSE_BUCKETS 1
+
+#else /* AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */
+#define AP_HAS_RESPONSE_BUCKETS 0
+
+#endif /* else AP_MODULE_MAGIC_AT_LEAST(20211221, 6) */
+
#endif /* defined(__mod_h2__h2__) */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index 524d93bc93..84b985b949 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -28,6 +28,7 @@
#include "h2_private.h"
#include "h2_conn_ctx.h"
+#include "h2_headers.h"
#include "h2_util.h"
#include "h2_bucket_beam.h"
@@ -612,6 +613,7 @@ transfer:
else if (APR_BUCKET_IS_FLUSH(bsender)) {
brecv = apr_bucket_flush_create(bb->bucket_alloc);
}
+#if AP_HAS_RESPONSE_BUCKETS
else if (AP_BUCKET_IS_RESPONSE(bsender)) {
brecv = ap_bucket_response_clone(bsender, bb->p, bb->bucket_alloc);
}
@@ -621,6 +623,11 @@ transfer:
else if (AP_BUCKET_IS_HEADERS(bsender)) {
brecv = ap_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
}
+#else
+ else if (H2_BUCKET_IS_HEADERS(bsender)) {
+ brecv = h2_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+#endif /* AP_HAS_RESPONSE_BUCKETS */
else if (AP_BUCKET_IS_ERROR(bsender)) {
ap_bucket_error *eb = bsender->data;
brecv = ap_bucket_error_create(eb->status, eb->data,
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
index e70f4c7092..53e511a33a 100644
--- a/modules/http2/h2_c2.c
+++ b/modules/http2/h2_c2.c
@@ -45,6 +45,7 @@
#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_c2.h"
@@ -55,11 +56,14 @@ static module *mpm_module;
static int mpm_supported = 1;
static apr_socket_t *dummy_socket;
+#if AP_HAS_RESPONSE_BUCKETS
+
static ap_filter_rec_t *c2_net_in_filter_handle;
static ap_filter_rec_t *c2_net_out_filter_handle;
static ap_filter_rec_t *c2_request_in_filter_handle;
static ap_filter_rec_t *c2_notes_out_filter_handle;
+#endif /* AP_HAS_RESPONSE_BUCKETS */
static void check_modules(int force)
{
@@ -335,12 +339,18 @@ static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_b
for (b = APR_BRIGADE_FIRST(bb);
b != APR_BRIGADE_SENTINEL(bb);
b = APR_BUCKET_NEXT(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
if (AP_BUCKET_IS_RESPONSE(b)) {
header_len += (apr_off_t)response_length_estimate(b->data);
}
if (AP_BUCKET_IS_HEADERS(b)) {
header_len += (apr_off_t)headers_length_estimate(b->data);
}
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
+ }
+#endif /* AP_HAS_RESPONSE_BUCKETS */
}
}
@@ -358,11 +368,13 @@ static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_b
static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
{
h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
- apr_bucket *e;
apr_status_t rv;
ap_assert(conn_ctx);
+#if AP_HAS_RESPONSE_BUCKETS
if (!conn_ctx->has_final_response) {
+ apr_bucket *e;
+
for (e = APR_BRIGADE_FIRST(bb);
e != APR_BRIGADE_SENTINEL(bb);
e = APR_BUCKET_NEXT(e))
@@ -379,6 +391,7 @@ static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
}
}
}
+#endif /* AP_HAS_RESPONSE_BUCKETS */
rv = beam_out(f->c, conn_ctx, bb);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
@@ -390,38 +403,6 @@ static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
return rv;
}
-static int c2_hook_pre_connection(conn_rec *c2, void *csd)
-{
- h2_conn_ctx_t *conn_ctx;
-
- if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
- return DECLINED;
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
- "h2_c2(%s-%d), adding filters",
- conn_ctx->id, conn_ctx->stream_id);
- ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2);
- ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2);
- if (c2->keepalives == 0) {
- /* Simulate that we had already a request on this connection. Some
- * hooks trigger special behaviour when keepalives is 0.
- * (Not necessarily in pre_connection, but later. Set it here, so it
- * is in place.) */
- c2->keepalives = 1;
- /* We signal that this connection will be closed after the request.
- * Which is true in that sense that we throw away all traffic data
- * on this c2 connection after each requests. Although we might
- * reuse internal structures like memory pools.
- * The wanted effect of this is that httpd does not try to clean up
- * any dangling data on this connection when a request is done. Which
- * is unnecessary on a h2 stream.
- */
- c2->keepalive = AP_CONN_CLOSE;
- }
- return OK;
-}
-
static void check_push(request_rec *r, const char *tag)
{
apr_array_header_t *push_list = h2_config_push_list(r);
@@ -449,6 +430,22 @@ static void check_push(request_rec *r, const char *tag)
}
}
+static int c2_hook_fixups(request_rec *r)
+{
+ conn_rec *c2 = r->connection;
+ h2_conn_ctx_t *conn_ctx;
+
+ if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
+ return DECLINED;
+ }
+
+ check_push(r, "late_fixup");
+
+ return DECLINED;
+}
+
+#if AP_HAS_RESPONSE_BUCKETS
+
static void c2_pre_read_request(request_rec *r, conn_rec *c2)
{
h2_conn_ctx_t *conn_ctx;
@@ -500,18 +497,36 @@ static int c2_post_read_request(request_rec *r)
return OK;
}
-static int c2_hook_fixups(request_rec *r)
+static int c2_hook_pre_connection(conn_rec *c2, void *csd)
{
- conn_rec *c2 = r->connection;
h2_conn_ctx_t *conn_ctx;
if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) {
return DECLINED;
}
- check_push(r, "late_fixup");
-
- return DECLINED;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d), adding filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2);
+ ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2);
+ if (c2->keepalives == 0) {
+ /* Simulate that we had already a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+ c2->keepalives = 1;
+ /* We signal that this connection will be closed after the request.
+ * Which is true in that sense that we throw away all traffic data
+ * on this c2 connection after each requests. Although we might
+ * reuse internal structures like memory pools.
+ * The wanted effect of this is that httpd does not try to clean up
+ * any dangling data on this connection when a request is done. Which
+ * is unnecessary on a h2 stream.
+ */
+ c2->keepalive = AP_CONN_CLOSE;
+ }
+ return OK;
}
void h2_c2_register_hooks(void)
@@ -542,3 +557,296 @@ void h2_c2_register_hooks(void)
NULL, AP_FTYPE_PROTOCOL);
}
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+static apr_status_t c2_run_pre_connection(conn_rec *c2, apr_socket_t *csd)
+{
+ if (c2->keepalives == 0) {
+ /* Simulate that we had already a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+ c2->keepalives = 1;
+ /* We signal that this connection will be closed after the request.
+ * Which is true in that sense that we throw away all traffic data
+ * on this c2 connection after each requests. Although we might
+ * reuse internal structures like memory pools.
+ * The wanted effect of this is that httpd does not try to clean up
+ * any dangling data on this connection when a request is done. Which
+ * is unnecessary on a h2 stream.
+ */
+ c2->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(c2, csd);
+ }
+ ap_assert(c2->output_filters);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_c2_process(conn_rec *c2, apr_thread_t *thread, int worker_id)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
+ ap_assert(conn_ctx);
+ ap_assert(conn_ctx->mplx);
+
+ /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
+ *
+ * Each conn_rec->id is supposed to be unique at a point in time. Since
+ * some modules (and maybe external code) uses this id as an identifier
+ * for the request_rec they handle, it needs to be unique for secondary
+ * connections also.
+ *
+ * The MPM module assigns the connection ids and mod_unique_id is using
+ * that one to generate identifier for requests. While the implementation
+ * works for HTTP/1.x, the parallel execution of several requests per
+ * connection will generate duplicate identifiers on load.
+ *
+ * The original implementation for secondary connection identifiers used
+ * to shift the master connection id up and assign the stream id to the
+ * lower bits. This was cramped on 32 bit systems, but on 64bit there was
+ * enough space.
+ *
+ * As issue 195 showed, mod_unique_id only uses the lower 32 bit of the
+ * connection id, even on 64bit systems. Therefore collisions in request ids.
+ *
+ * The way master connection ids are generated, there is some space "at the
+ * top" of the lower 32 bits on allmost all systems. If you have a setup
+ * with 64k threads per child and 255 child processes, you live on the edge.
+ *
+ * The new implementation shifts 8 bits and XORs in the worker
+ * id. This will experience collisions with > 256 h2 workers and heavy
+ * load still. There seems to be no way to solve this in all possible
+ * configurations by mod_h2 alone.
+ */
+ c2->id = (c2->master->id << 8)^worker_id;
+
+ if (!conn_ctx->pre_conn_done) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d), adding filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter("H2_C2_NET_IN", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_CATCH_H1", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2);
+
+ c2_run_pre_connection(c2, ap_get_conn_socket(c2));
+ conn_ctx->pre_conn_done = 1;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): process connection",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ c2->current_thread = thread;
+ ap_run_process_connection(c2);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): processing done",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
+{
+ const h2_request *req = conn_ctx->request;
+ conn_state_t *cs = c->cs;
+ request_rec *r;
+
+ r = h2_create_request_rec(conn_ctx->request, c);
+ if (!r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r=NULL",
+ conn_ctx->id, conn_ctx->stream_id);
+ goto cleanup;
+ }
+ if (r->status != HTTP_OK) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r->status=%d",
+ conn_ctx->id, conn_ctx->stream_id, r->status);
+ goto cleanup;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): created request_rec for %s",
+ conn_ctx->id, conn_ctx->stream_id, r->the_request);
+ conn_ctx->server = r->server;
+
+ /* the request_rec->server carries the timeout value that applies */
+ h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+
+ if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_mplx(%s-%d): copy_files in output",
+ conn_ctx->id, conn_ctx->stream_id);
+ h2_beam_set_copy_files(conn_ctx->beam_out, 1);
+ }
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ if (cs) {
+ cs->state = CONN_STATE_HANDLER;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): start process_request",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ /* Add the raw bytes of the request (e.g. header frame lengths to
+ * the logio for this request. */
+ if (req->raw_bytes && h2_c_logio_add_bytes_in) {
+ h2_c_logio_add_bytes_in(c, req->raw_bytes);
+ }
+
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. */
+ r = NULL;
+ if (conn_ctx->beam_out) {
+ h2_beam_close(conn_ctx->beam_out, c);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): process_request done",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (cs)
+ cs->state = CONN_STATE_WRITE_COMPLETION;
+
+cleanup:
+ return APR_SUCCESS;
+}
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent,
+ apr_bucket_alloc_t *buckt_alloc)
+{
+ apr_pool_t *pool;
+ conn_rec *c2;
+ void *cfg;
+
+ ap_assert(c1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c1,
+ "h2_c2: create for c1(%ld)", c1->id);
+
+ /* We create a pool with its own allocator to be used for
+ * processing a request. This is the only way to have the processing
+ * independent of its parent pool in the sense that it can work in
+ * another thread.
+ */
+ apr_pool_create(&pool, parent);
+ apr_pool_tag(pool, "h2_c2_conn");
+
+ c2 = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ memcpy(c2, c1, sizeof(conn_rec));
+
+ c2->master = c1;
+ c2->pool = pool;
+ c2->conn_config = ap_create_conn_config(pool);
+ c2->notes = apr_table_make(pool, 5);
+ c2->input_filters = NULL;
+ c2->output_filters = NULL;
+ c2->keepalives = 0;
+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
+ c2->filter_conn_ctx = NULL;
+#endif
+ c2->bucket_alloc = apr_bucket_alloc_create(pool);
+#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
+ c2->data_in_input_filters = 0;
+ c2->data_in_output_filters = 0;
+#endif
+ /* prevent mpm_event from making wrong assumptions about this connection,
+ * like e.g. using its socket for an async read check. */
+ c2->clogging_input_filters = 1;
+ c2->log = NULL;
+ c2->aborted = 0;
+ /* We cannot install the master connection socket on the secondary, as
+ * modules mess with timeouts/blocking of the socket, with
+ * unwanted side effects to the master connection processing.
+ * Fortunately, since we never use the secondary socket, we can just install
+ * a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c2->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c2->sbh = NULL; /*c1->sbh;*/
+ /* TODO: not all mpm modules have learned about secondary connections yet.
+ * copy their config from master to secondary.
+ */
+ if (mpm_module) {
+ cfg = ap_get_module_config(c1->conn_config, mpm_module);
+ ap_set_module_config(c2->conn_config, mpm_module, cfg);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
+ "h2_c2(%s): created", c2->log_id);
+ return c2;
+}
+
+static int h2_c2_hook_post_read_request(request_rec *r)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_c2(%s-%d): adding request filters",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ /* setup the correct filters to process the request for h2 */
+ ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection);
+
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ ap_add_output_filter("H2_C2_RESPONSE_OUT", NULL, r, r->connection);
+ ap_add_output_filter("H2_C2_TRAILERS_OUT", NULL, r, r->connection);
+ }
+ return DECLINED;
+}
+
+static int h2_c2_hook_process(conn_rec* c)
+{
+ h2_conn_ctx_t *ctx;
+
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_conn_ctx_get(c);
+ if (ctx->stream_id) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+ c2_process(ctx, c);
+ return DONE;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "secondary_conn(%ld): no h2 stream assing?", c->id);
+ }
+ return DECLINED;
+}
+
+void h2_c2_register_hooks(void)
+{
+ /* When the connection processing actually starts, we might
+ * take over, if the connection is for a h2 stream.
+ */
+ ap_hook_process_connection(h2_c2_hook_process,
+ NULL, NULL, APR_HOOK_FIRST);
+ /* We need to manipulate the standard HTTP/1.1 protocol filters and
+ * install our own. This needs to be done very early. */
+ ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_CATCH_H1", h2_c2_filter_catch_h1_out,
+ NULL, AP_FTYPE_NETWORK);
+
+ ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_RESPONSE_OUT", h2_c2_filter_response_out,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_TRAILERS_OUT", h2_c2_filter_trailers_out,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_c2.h b/modules/http2/h2_c2.h
index ac0da503a0..f278382125 100644
--- a/modules/http2/h2_c2.h
+++ b/modules/http2/h2_c2.h
@@ -19,6 +19,8 @@
#include <http_core.h>
+#include "h2.h"
+
const char *h2_conn_mpm_name(void);
int h2_mpm_supported(void);
@@ -28,6 +30,18 @@ int h2_mpm_supported(void);
*/
apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s);
+#if !AP_HAS_RESPONSE_BUCKETS
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent,
+ apr_bucket_alloc_t *buckt_alloc);
+
+/**
+ * Process a secondary connection for a HTTP/2 stream request.
+ */
+apr_status_t h2_c2_process(conn_rec *c, apr_thread_t *thread, int worker_id);
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+
void h2_c2_destroy(conn_rec *c2);
/**
diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c
index ed2bc3661d..728761212a 100644
--- a/modules/http2/h2_c2_filter.c
+++ b/modules/http2/h2_c2_filter.c
@@ -33,6 +33,7 @@
#include "h2.h"
#include "h2_config.h"
#include "h2_conn_ctx.h"
+#include "h2_headers.h"
#include "h2_c1.h"
#include "h2_c2_filter.h"
#include "h2_c2.h"
@@ -41,6 +42,8 @@
#include "h2_util.h"
+#if AP_HAS_RESPONSE_BUCKETS
+
apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb)
{
apr_bucket *b;
@@ -124,3 +127,908 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t *f,
return ap_get_brigade(f->next, bb, mode, block, readbytes);
}
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+#define H2_FILTER_LOG(name, c, level, rv, msg, bb) \
+ do { \
+ if (APLOG_C_IS_LEVEL((c),(level))) { \
+ char buffer[4 * 1024]; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, "", "", (bb)); \
+ ap_log_cerror(APLOG_MARK, (level), rv, (c), \
+ "FILTER[%s]: %s %s", \
+ (name), (msg), len? buffer : ""); \
+ } \
+ } while (0)
+
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ (void)key;
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by a ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fix_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
+
+static h2_headers *create_response(request_rec *r)
+{
+ const char *clheader;
+ const char *ctype;
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ apr_table_clear(r->err_headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fix_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ ap_set_keepalive(r);
+
+ if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ apr_table_unset(r->headers_out, "Transfer-Encoding");
+ apr_table_unset(r->headers_out, "Content-Length");
+ r->content_type = r->content_encoding = NULL;
+ r->content_languages = NULL;
+ r->clength = r->chunked = 0;
+ }
+ else if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ ctype = ap_make_content_type(r, r->content_type);
+ if (ctype) {
+ apr_table_setn(r->headers_out, "Content-Type", ctype);
+ }
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char *token;
+ char **languages = (char **)(r->content_languages->elts);
+ const char *field = apr_table_get(r->headers_out, "Content-Language");
+
+ while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ if (!apr_strnatcasecmp(token, languages[i]))
+ break;
+ }
+ if (i == r->content_languages->nelts) {
+ *((char **) apr_array_push(r->content_languages)) = token;
+ }
+ }
+
+ field = apr_array_pstrcat(r->pool, r->content_languages, ',');
+ apr_table_setn(r->headers_out, "Content-Language", field);
+ }
+
+ /*
+ * Control cachability for non-cachable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_add(r->headers_out, "Expires", date);
+ }
+
+ /* This is a hack, but I can't find anyway around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ /*
+ * keep the set-by-proxy server and date headers, otherwise
+ * generate a new server header / date header
+ */
+ if (r->proxyreq == PROXYREQ_NONE
+ || !apr_table_get(r->headers_out, "Date")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_setn(r->headers_out, "Date", date );
+ }
+ if (r->proxyreq == PROXYREQ_NONE
+ || !apr_table_get(r->headers_out, "Server")) {
+ const char *us = ap_get_server_banner();
+ if (us && *us) {
+ apr_table_setn(r->headers_out, "Server", us);
+ }
+ }
+
+ return h2_headers_rcreate(r, r->status, r->headers_out, r->pool);
+}
+
+typedef enum {
+ H2_RP_STATUS_LINE,
+ H2_RP_HEADER_LINE,
+ H2_RP_DONE
+} h2_rp_state_t;
+
+typedef struct h2_response_parser h2_response_parser;
+struct h2_response_parser {
+ const char *id;
+ h2_rp_state_t state;
+ conn_rec *c;
+ apr_pool_t *pool;
+ int http_status;
+ apr_array_header_t *hlines;
+ apr_bucket_brigade *tmp;
+ apr_bucket_brigade *saveto;
+};
+
+static apr_status_t parse_header(h2_response_parser *parser, char *line) {
+ const char *hline;
+ if (line[0] == ' ' || line[0] == '\t') {
+ char **plast;
+ /* continuation line from the header before this */
+ while (line[0] == ' ' || line[0] == '\t') {
+ ++line;
+ }
+
+ plast = apr_array_pop(parser->hlines);
+ if (plast == NULL) {
+ /* not well formed */
+ return APR_EINVAL;
+ }
+ hline = apr_psprintf(parser->pool, "%s %s", *plast, line);
+ }
+ else {
+ /* new header line */
+ hline = apr_pstrdup(parser->pool, line);
+ }
+ APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
+ return APR_SUCCESS;
+}
+
+static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
+ char *line, apr_size_t len)
+{
+ apr_status_t status;
+
+ if (!parser->tmp) {
+ parser->tmp = apr_brigade_create(parser->pool, parser->c->bucket_alloc);
+ }
+ status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
+ len);
+ if (status == APR_SUCCESS) {
+ --len;
+ status = apr_brigade_flatten(parser->tmp, line, &len);
+ if (status == APR_SUCCESS) {
+ /* we assume a non-0 containing line and remove trailing crlf. */
+ line[len] = '\0';
+ /*
+ * XXX: What to do if there is an LF but no CRLF?
+ * Should we error out?
+ */
+ if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
+ len -= 2;
+ line[len] = '\0';
+ apr_brigade_cleanup(parser->tmp);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response line: %s",
+ parser->id, line);
+ }
+ else {
+ apr_off_t brigade_length;
+
+ /*
+ * If the brigade parser->tmp becomes longer than our buffer
+ * for flattening we never have a chance to get a complete
+ * line. This can happen if we are called multiple times after
+ * previous calls did not find a H2_CRLF and we returned
+ * APR_EAGAIN. In this case parser->tmp (correctly) grows
+ * with each call to apr_brigade_split_line.
+ *
+ * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
+ * used. This means we cannot cope with lines larger than
+ * HUGE_STRING_LEN which might be an issue.
+ */
+ status = apr_brigade_length(parser->tmp, 0, &brigade_length);
+ if ((status != APR_SUCCESS) || (brigade_length > (apr_off_t)len)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, parser->c, APLOGNO(10257)
+ "h2_c2(%s): read response, line too long",
+ parser->id);
+ return APR_ENOSPC;
+ }
+ /* this does not look like a complete line yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response, incomplete line: %s",
+ parser->id, line);
+ if (!parser->saveto) {
+ parser->saveto = apr_brigade_create(parser->pool,
+ parser->c->bucket_alloc);
+ }
+ /*
+ * Be on the safe side and save the parser->tmp brigade
+ * as it could contain transient buckets which could be
+ * invalid next time we are here.
+ *
+ * NULL for the filter parameter is ok since we
+ * provide our own brigade as second parameter
+ * and ap_save_brigade does not need to create one.
+ */
+ ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
+ parser->tmp->p);
+ APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
+ return APR_EAGAIN;
+ }
+ }
+ }
+ apr_brigade_cleanup(parser->tmp);
+ return status;
+}
+
+static apr_table_t *make_table(h2_response_parser *parser)
+{
+ apr_array_header_t *hlines = parser->hlines;
+ if (hlines) {
+ apr_table_t *headers = apr_table_make(parser->pool, hlines->nelts);
+ int i;
+
+ for (i = 0; i < hlines->nelts; ++i) {
+ char *hline = ((char **)hlines->elts)[i];
+ char *sep = ap_strchr(hline, ':');
+ if (!sep) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, parser->c,
+ APLOGNO(02955) "h2_c2(%s): invalid header[%d] '%s'",
+ parser->id, i, (char*)hline);
+ /* not valid format, abort */
+ return NULL;
+ }
+ (*sep++) = '\0';
+ while (*sep == ' ' || *sep == '\t') {
+ ++sep;
+ }
+
+ if (!h2_util_ignore_header(hline)) {
+ apr_table_merge(headers, hline, sep);
+ }
+ }
+ return headers;
+ }
+ else {
+ return apr_table_make(parser->pool, 0);
+ }
+}
+
+static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
+ h2_response_parser *parser)
+{
+ apr_bucket *b;
+ apr_status_t status;
+
+ h2_headers *response = h2_headers_create(parser->http_status,
+ make_table(parser),
+ NULL, 0, parser->pool);
+ apr_brigade_cleanup(parser->tmp);
+ b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ b = apr_bucket_flush_create(parser->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
+ status = ap_pass_brigade(f->next, parser->tmp);
+ apr_brigade_cleanup(parser->tmp);
+
+ /* reset parser for possible next response */
+ parser->state = H2_RP_STATUS_LINE;
+ apr_array_clear(parser->hlines);
+
+ if (response->status >= 200) {
+ conn_ctx->has_final_response = 1;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ APLOGNO(03197) "h2_c2(%s): passed response %d",
+ parser->id, response->status);
+ return status;
+}
+
+static apr_status_t parse_status(h2_response_parser *parser, char *line)
+{
+ int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
+ (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
+ if (sindex > 0) {
+ int k = sindex + 3;
+ char keepchar = line[k];
+ line[k] = '\0';
+ parser->http_status = atoi(&line[sindex]);
+ line[k] = keepchar;
+ parser->state = H2_RP_HEADER_LINE;
+
+ return APR_SUCCESS;
+ }
+ /* Seems like there is garbage on the connection. May be a leftover
+ * from a previous proxy request.
+ * This should only happen if the H2_RESPONSE filter is not yet in
+ * place (post_read_request has not been reached and the handler wants
+ * to write something. Probably just the interim response we are
+ * waiting for. But if there is other data hanging around before
+ * that, this needs to fail. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, APLOGNO(03467)
+ "h2_c2(%s): unable to parse status line: %s",
+ parser->id, line);
+ return APR_EINVAL;
+}
+
+static apr_status_t parse_response(h2_response_parser *parser,
+ h2_conn_ctx_t *conn_ctx,
+ ap_filter_t* f, apr_bucket_brigade *bb)
+{
+ char line[HUGE_STRING_LEN];
+ apr_status_t status = APR_SUCCESS;
+
+ while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ switch (parser->state) {
+ case H2_RP_STATUS_LINE:
+ case H2_RP_HEADER_LINE:
+ status = get_line(parser, bb, line, sizeof(line));
+ if (status == APR_EAGAIN) {
+ /* need more data */
+ return APR_SUCCESS;
+ }
+ else if (status != APR_SUCCESS) {
+ return status;
+ }
+ if (parser->state == H2_RP_STATUS_LINE) {
+ /* instead of parsing, just take it directly */
+ status = parse_status(parser, line);
+ }
+ else if (line[0] == '\0') {
+ /* end of headers, pass response onward */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): end of response", parser->id);
+ return pass_response(conn_ctx, f, parser);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): response header %s", parser->id, line);
+ status = parse_header(parser, line);
+ }
+ break;
+
+ default:
+ return status;
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_response_parser *parser = f->ctx;
+ apr_status_t rv;
+
+ ap_assert(conn_ctx);
+ H2_FILTER_LOG("c2_catch_h1_out", f->c, APLOG_TRACE2, 0, "check", bb);
+
+ if (!conn_ctx->has_final_response) {
+ if (!parser) {
+ parser = apr_pcalloc(f->c->pool, sizeof(*parser));
+ parser->id = apr_psprintf(f->c->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ parser->pool = f->c->pool;
+ parser->c = f->c;
+ parser->state = H2_RP_STATUS_LINE;
+ parser->hlines = apr_array_make(parser->pool, 10, sizeof(char *));
+ f->ctx = parser;
+ }
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ /* TODO: Yikes, this happens when errors are encountered on input
+             * before anything from the response has been processed. The
+ * ap_die_r() call will do nothing in certain conditions.
+ */
+ int result = ap_map_http_request_error(conn_ctx->last_err,
+ HTTP_INTERNAL_SERVER_ERROR);
+ request_rec *r = h2_create_request_rec(conn_ctx->request, f->c);
+ ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
+ b = ap_bucket_eor_create(f->c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ }
+ /* There are cases where we need to parse a serialized http/1.1 response.
+ * One example is a 100-continue answer via a mod_proxy setup. */
+ while (bb && !f->c->aborted && !conn_ctx->has_final_response) {
+ rv = parse_response(parser, conn_ctx, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
+ "h2_c2(%s): parsed response", parser->id);
+ if (APR_BRIGADE_EMPTY(bb) || APR_SUCCESS != rv) {
+ return rv;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ request_rec *r = f->r;
+ apr_bucket *b, *bresp, *body_bucket = NULL, *next;
+ ap_bucket_error *eb = NULL;
+ h2_headers *response = NULL;
+ int headers_passing = 0;
+
+ H2_FILTER_LOG("c2_response_out", f->c, APLOG_TRACE1, 0, "called with", bb);
+
+ if (f->c->aborted || !conn_ctx || conn_ctx->has_final_response) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ if (!conn_ctx->has_final_response) {
+ /* check, if we need to send the response now. Until we actually
+ * see a DATA bucket or some EOS/EOR, we do not do so. */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_ERROR(b) && !eb) {
+ eb = b->data;
+ }
+ else if (AP_BUCKET_IS_EOC(b)) {
+ /* If we see an EOC bucket it is a signal that we should get out
+ * of the way doing nothing.
+ */
+ ap_remove_output_filter(f);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
+ "h2_c2(%s): eoc bucket passed", conn_ctx->id);
+ return ap_pass_brigade(f->next, bb);
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ headers_passing = 1;
+ }
+ else if (!APR_BUCKET_IS_FLUSH(b)) {
+ body_bucket = b;
+ break;
+ }
+ }
+
+ if (eb) {
+ int st = eb->status;
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
+ "h2_c2(%s): err bucket status=%d",
+ conn_ctx->id, st);
+ /* throw everything away and replace it with the error response
+ * generated by ap_die() */
+ apr_brigade_cleanup(bb);
+ ap_die(st, r);
+ return AP_FILTER_ERROR;
+ }
+
+ if (body_bucket || !headers_passing) {
+ /* time to insert the response bucket before the body or if
+ * no h2_headers is passed, e.g. the response is empty */
+ response = create_response(r);
+ if (response == NULL) {
+ ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
+ "h2_c2(%s): unable to create response", conn_ctx->id);
+ return APR_ENOMEM;
+ }
+
+ bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
+ if (body_bucket) {
+ APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
+ }
+ else {
+ APR_BRIGADE_INSERT_HEAD(bb, bresp);
+ }
+ conn_ctx->has_final_response = 1;
+ r->sent_bodyct = 1;
+ ap_remove_output_filter_byhandle(f->r->output_filters, "H2_C2_NET_CATCH_H1");
+ }
+ }
+
+ if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_c2(%s): headers only, cleanup output brigade", conn_ctx->id);
+ b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
+ while (b != APR_BRIGADE_SENTINEL(bb)) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
+ break;
+ }
+ if (!H2_BUCKET_IS_HEADERS(b)) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ b = next;
+ }
+ }
+ if (conn_ctx->has_final_response) {
+ /* lets get out of the way, our task is done */
+ ap_remove_output_filter(f);
+ }
+ return ap_pass_brigade(f->next, bb);
+}
+
+
+struct h2_chunk_filter_t {
+ const char *id;
+ int eos_chunk_added;
+ apr_bucket_brigade *bbchunk;
+ apr_off_t chunked_total;
+};
+typedef struct h2_chunk_filter_t h2_chunk_filter_t;
+
+
+static void make_chunk(conn_rec *c, h2_chunk_filter_t *fctx, apr_bucket_brigade *bb,
+ apr_bucket *first, apr_off_t chunk_len,
+ apr_bucket *tail)
+{
+ /* Surround the buckets [first, tail[ with new buckets carrying the
+ * HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
+ * to the end of the brigade. */
+ char buffer[128];
+ apr_bucket *b;
+ apr_size_t len;
+
+ len = (apr_size_t)apr_snprintf(buffer, H2_ALEN(buffer),
+ "%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
+ b = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, b);
+ b = apr_bucket_immortal_create("\r\n", 2, bb->bucket_alloc);
+ if (tail) {
+ APR_BUCKET_INSERT_BEFORE(tail, b);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ fctx->chunked_total += chunk_len;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_c2(%s): added chunk %ld, total %ld",
+ fctx->id, (long)chunk_len, (long)fctx->chunked_total);
+}
+
+static int ser_header(void *ctx, const char *name, const char *value)
+{
+ apr_bucket_brigade *bb = ctx;
+ apr_brigade_printf(bb, NULL, NULL, "%s: %s\r\n", name, value);
+ return 1;
+}
+
+static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx,
+ apr_read_type_e block) {
+ h2_chunk_filter_t *fctx = f->ctx;
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+
+ if (!fctx->bbchunk) {
+ fctx->bbchunk = apr_brigade_create(r->pool, f->c->bucket_alloc);
+ }
+
+ if (APR_BRIGADE_EMPTY(fctx->bbchunk)) {
+ apr_bucket *b, *next, *first_data = NULL;
+ apr_bucket_brigade *tmp;
+ apr_off_t bblen = 0;
+
+ /* get more data from the lower layer filters. Always do this
+ * in larger pieces, since we handle the read modes ourself. */
+ status = ap_get_brigade(f->next, fctx->bbchunk,
+ AP_MODE_READBYTES, block, conn_ctx->mplx->stream_max_mem);
+ if (status != APR_SUCCESS) {
+ return status;
+ }
+
+ for (b = APR_BRIGADE_FIRST(fctx->bbchunk);
+ b != APR_BRIGADE_SENTINEL(fctx->bbchunk);
+ b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (first_data) {
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, b);
+ first_data = NULL;
+ }
+
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
+ if (!apr_is_empty_table(headers->headers)) {
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n");
+ apr_table_do(ser_header, fctx->bbchunk, headers->headers, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "\r\n");
+ }
+ else {
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
+ }
+ r->trailers_in = apr_table_clone(r->pool, headers->headers);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
+ apr_brigade_destroy(tmp);
+ fctx->eos_chunk_added = 1;
+ }
+ else if (APR_BUCKET_IS_EOS(b)) {
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving eos",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (!fctx->eos_chunk_added) {
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
+ apr_brigade_destroy(tmp);
+ }
+ fctx->eos_chunk_added = 0;
+ }
+ }
+ else if (b->length == 0) {
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ }
+ else {
+ if (!first_data) {
+ first_data = b;
+ bblen = 0;
+ }
+ bblen += b->length;
+ }
+ }
+
+ if (first_data) {
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, NULL);
+ }
+ }
+ return status;
+}
+
+apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_chunk_filter_t *fctx = f->ctx;
+ request_rec *r = f->r;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ core_server_config *conf =
+ (core_server_config *) ap_get_module_config(r->server->module_config,
+ &core_module);
+ ap_assert(conn_ctx);
+
+ if (!fctx) {
+ fctx = apr_pcalloc(r->pool, sizeof(*fctx));
+ fctx->id = apr_psprintf(r->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ f->ctx = fctx;
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
+ "h2_c2(%s-%d): request input, mode=%d, block=%d, "
+ "readbytes=%ld, exp=%d",
+ conn_ctx->id, conn_ctx->stream_id, mode, block,
+ (long)readbytes, r->expecting_100);
+ if (!conn_ctx->request->chunked) {
+ status = ap_get_brigade(f->next, bb, mode, block, readbytes);
+ /* pipe data through, just take care of trailers */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ h2_headers *headers = h2_bucket_headers_get(b);
+ ap_assert(headers);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ r->trailers_in = headers->headers;
+ if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
+ r->headers_in = apr_table_overlay(r->pool, r->headers_in,
+ r->trailers_in);
+ }
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_remove_input_filter(f);
+
+ if (headers->raw_bytes && h2_c_logio_add_bytes_in) {
+ h2_c_logio_add_bytes_in(f->c, headers->raw_bytes);
+ }
+ break;
+ }
+ }
+ return status;
+ }
+
+ /* Things are more complicated. The standard HTTP input filter, which
+ * does a lot what we do not want to duplicate, also cares about chunked
+ * transfer encoding and trailers.
+ * We need to simulate chunked encoding for it to be happy.
+ */
+ if ((status = read_and_chunk(f, conn_ctx, block)) != APR_SUCCESS) {
+ return status;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, fctx->bbchunk);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, fctx->bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, fctx->bbchunk, readbytes);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+ /* we are reading a single LF line, e.g. the HTTP headers.
+ * this has the nasty side effect to split the bucket, even
+ * though it ends with CRLF and creates a 0 length bucket */
+ status = apr_brigade_split_line(bb, fctx->bbchunk, block, HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_c2(%s-%d): getline: %s",
+ conn_ctx->id, conn_ctx->stream_id, buffer);
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(02942)
+ "h2_c2, unsupported READ mode %d", mode);
+ status = APR_ENOTIMPL;
+ }
+
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2, "returning input", bb);
+ return status;
+}
+
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ request_rec *r = f->r;
+ apr_bucket *b, *e;
+
+ if (conn_ctx && r) {
+ /* Detect the EOS/EOR bucket and forward any trailers that may have
+ * been set to our h2_headers.
+ */
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b))
+ && r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
+ h2_headers *headers;
+ apr_table_t *trailers;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
+ "h2_c2(%s-%d): sending trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ trailers = apr_table_clone(r->pool, r->trailers_out);
+ headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
+ e = h2_bucket_headers_create(bb->bucket_alloc, headers);
+ APR_BUCKET_INSERT_BEFORE(b, e);
+ apr_table_clear(r->trailers_out);
+ ap_remove_output_filter(f);
+ break;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+#endif /* else #if AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_c2_filter.h b/modules/http2/h2_c2_filter.h
index c00fd2ae15..c6f50dd699 100644
--- a/modules/http2/h2_c2_filter.h
+++ b/modules/http2/h2_c2_filter.h
@@ -17,12 +17,7 @@
#ifndef __mod_h2__h2_c2_filter__
#define __mod_h2__h2_c2_filter__
-/**
- * Output filter that inspects the request_rec->notes of the request
- * itself and possible internal redirects to detect conditions that
- * merit specific HTTP/2 response codes, such as 421.
- */
-apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb);
+#include "h2.h"
/**
* Input filter on secondary connections that insert the REQUEST bucket
@@ -34,4 +29,40 @@ apr_status_t h2_c2_filter_request_in(ap_filter_t *f,
apr_read_type_e block,
apr_off_t readbytes);
+#if AP_HAS_RESPONSE_BUCKETS
+
+/**
+ * Output filter that inspects the request_rec->notes of the request
+ * itself and possible internal redirects to detect conditions that
+ * merit specific HTTP/2 response codes, such as 421.
+ */
+apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+/**
+ * h2_from_h1 parses a HTTP/1.1 response into
+ * - response status
+ * - a list of header values
+ * - a series of bytes that represent the response body alone, without
+ * any meta data, such as inserted by chunked transfer encoding.
+ *
+ * All data is allocated from the stream memory pool.
+ *
+ * Again, see comments in h2_request: ideally we would take the headers
+ * and status from the httpd structures instead of parsing them here, but
+ * we need to have all handlers and filters involved in request/response
+ * processing, so this seems to be the way for now.
+ */
+struct h2_headers;
+struct h2_response_parser;
+
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb);
+
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+
#endif /* defined(__mod_h2__h2_c2_filter__) */
diff --git a/modules/http2/h2_conn_ctx.h b/modules/http2/h2_conn_ctx.h
index 1508825276..dff627db2d 100644
--- a/modules/http2/h2_conn_ctx.h
+++ b/modules/http2/h2_conn_ctx.h
@@ -17,6 +17,8 @@
#ifndef __mod_h2__h2_conn_ctx__
#define __mod_h2__h2_conn_ctx__
+#include "h2.h"
+
struct h2_session;
struct h2_stream;
struct h2_mplx;
@@ -28,7 +30,7 @@ struct h2_c2_transit;
#define H2_PIPE_IN 1
/**
- * The h2 module context associated with a connection.
+ * The h2 module context associated with a connection.
*
* It keeps track of the different types of connections:
* - those from clients that use HTTP/2 protocol
@@ -43,6 +45,9 @@ struct h2_conn_ctx_t {
struct h2_mplx *mplx; /* c2: the multiplexer */
struct h2_c2_transit *transit; /* c2: transit pool and bucket_alloc */
+#if !AP_HAS_RESPONSE_BUCKETS
+ int pre_conn_done; /* has pre_connection setup run? */
+#endif
int stream_id; /* c1: 0, c2: stream id processed */
apr_pool_t *req_pool; /* c2: a c2 child pool for a request */
const struct h2_request *request; /* c2: the request to process */
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
new file mode 100644
index 0000000000..5356d21b4f
--- /dev/null
+++ b/modules/http2/h2_headers.c
@@ -0,0 +1,207 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_log.h>
+#include <util_time.h>
+
+#include <nghttp2/nghttp2.h>
+
+#include "h2_private.h"
+#include "h2_protocol.h"
+#include "h2_config.h"
+#include "h2_util.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+
+#if !AP_HAS_RESPONSE_BUCKETS
+
+static int is_unsafe(server_rec *s)
+{
+ core_server_config *conf = ap_get_core_module_config(s->module_config);
+ return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE);
+}
+
+typedef struct {
+ apr_bucket_refcount refcount;
+ h2_headers *headers;
+} h2_bucket_headers;
+
+static apr_status_t bucket_read(apr_bucket *b, const char **str,
+ apr_size_t *len, apr_read_type_e block)
+{
+ (void)b;
+ (void)block;
+ *str = NULL;
+ *len = 0;
+ return APR_SUCCESS;
+}
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r)
+{
+ h2_bucket_headers *br;
+
+ br = apr_bucket_alloc(sizeof(*br), b->list);
+ br->headers = r;
+
+ b = apr_bucket_shared_make(b, br, 0, 0);
+ b->type = &h2_bucket_type_headers;
+ b->length = 0;
+
+ return b;
+}
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r)
+{
+ apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
+
+ APR_BUCKET_INIT(b);
+ b->free = apr_bucket_free;
+ b->list = list;
+ b = h2_bucket_headers_make(b, r);
+ return b;
+}
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b)
+{
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return ((h2_bucket_headers *)b->data)->headers;
+ }
+ return NULL;
+}
+
+const apr_bucket_type_t h2_bucket_type_headers = {
+ "H2HEADERS", 5, APR_BUCKET_METADATA,
+ apr_bucket_destroy_noop,
+ bucket_read,
+ apr_bucket_setaside_noop,
+ apr_bucket_split_notimpl,
+ apr_bucket_shared_copy
+};
+
+apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool,
+ apr_bucket_alloc_t *list)
+{
+ h2_headers *hdrs = ((h2_bucket_headers *)b->data)->headers;
+ return h2_bucket_headers_create(list, h2_headers_clone(pool, hdrs));
+}
+
+
+h2_headers *h2_headers_create(int status, const apr_table_t *headers_in,
+ const apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool)
+{
+ h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = status;
+ headers->headers = (headers_in? apr_table_clone(pool, headers_in)
+ : apr_table_make(pool, 5));
+ headers->notes = (notes? apr_table_clone(pool, notes)
+ : apr_table_make(pool, 5));
+ return headers;
+}
+
+static int add_header_lengths(void *ctx, const char *name, const char *value)
+{
+ apr_size_t *plen = ctx;
+ *plen += strlen(name) + strlen(value);
+ return 1;
+}
+
+apr_size_t h2_headers_length(h2_headers *headers)
+{
+ apr_size_t len = 0;
+ apr_table_do(add_header_lengths, &len, headers->headers, NULL);
+ return len;
+}
+
+apr_size_t h2_bucket_headers_headers_length(apr_bucket *b)
+{
+ h2_headers *h = h2_bucket_headers_get(b);
+ return h? h2_headers_length(h) : 0;
+}
+
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ const apr_table_t *header, apr_pool_t *pool)
+{
+ h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
+ if (headers->status == HTTP_FORBIDDEN) {
+ request_rec *r_prev;
+ for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
+ const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+ /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
+ APLOGNO(03061)
+ "h2_headers(%ld): renegotiate forbidden, cause: %s",
+ (long)r->connection->id, cause);
+ headers->status = H2_ERR_HTTP_1_1_REQUIRED;
+ break;
+ }
+ }
+ }
+ if (is_unsafe(r->server)) {
+ apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
+ }
+ if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
+ apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
+ }
+ return headers;
+}
+
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
+{
+ return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
+}
+
+h2_headers *h2_headers_die(apr_status_t type,
+ const h2_request *req, apr_pool_t *pool)
+{
+ h2_headers *headers;
+ char *date;
+
+ headers = apr_pcalloc(pool, sizeof(h2_headers));
+ headers->status = (type >= 200 && type < 600)? type : 500;
+ headers->headers = apr_table_make(pool, 5);
+ headers->notes = apr_table_make(pool, 5);
+
+ date = apr_palloc(pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, req? req->request_time : apr_time_now());
+ apr_table_setn(headers->headers, "Date", date);
+ apr_table_setn(headers->headers, "Server", ap_get_server_banner());
+
+ return headers;
+}
+
+int h2_headers_are_final_response(h2_headers *headers)
+{
+ return headers->status >= 200;
+}
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h
new file mode 100644
index 0000000000..3d78dc357f
--- /dev/null
+++ b/modules/http2/h2_headers.h
@@ -0,0 +1,107 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_headers__
+#define __mod_h2__h2_headers__
+
+#include "h2.h"
+
+#if !AP_HAS_RESPONSE_BUCKETS
+
+struct h2_bucket_beam;
+
+typedef struct h2_headers h2_headers;
+struct h2_headers {
+ int status;
+ apr_table_t *headers;
+ apr_table_t *notes;
+ apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
+};
+
+
+extern const apr_bucket_type_t h2_bucket_type_headers;
+
+#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers)
+
+apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r);
+
+apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
+ h2_headers *r);
+
+h2_headers *h2_bucket_headers_get(apr_bucket *b);
+
+/**
+ * Create the headers from the given status and headers
+ * @param status the HTTP status code of the response
+ * @param header the header fields of the response
+ * @param notes the notes carried by the headers
+ * @param raw_bytes the raw network bytes (if known) used to transmit these
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_create(int status, const apr_table_t *header,
+ const apr_table_t *notes, apr_off_t raw_bytes,
+ apr_pool_t *pool);
+
+/**
+ * Create the headers from the given request_rec.
+ * @param r the request record which was processed
+ * @param status the HTTP status code of the response
+ * @param header the header fields of the response
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_rcreate(request_rec *r, int status,
+ const apr_table_t *header, apr_pool_t *pool);
+
+/**
+ * Copy the headers into another pool. This will not copy any
+ * header strings.
+ */
+h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Clone the headers into another pool. This will also clone any
+ * header strings.
+ */
+h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
+
+/**
+ * Create the headers for the given error.
+ * @param type the error code
+ * @param req the original h2_request
+ * @param pool the memory pool to use
+ */
+h2_headers *h2_headers_die(apr_status_t type,
+ const struct h2_request *req, apr_pool_t *pool);
+
+int h2_headers_are_final_response(h2_headers *headers);
+
+/**
+ * Give the number of bytes of all contained header strings.
+ */
+apr_size_t h2_headers_length(h2_headers *headers);
+
+/**
+ * For H2HEADERS buckets, return the length of all contained header strings.
+ * For all other buckets, return 0.
+ */
+apr_size_t h2_bucket_headers_headers_length(apr_bucket *b);
+
+apr_bucket *h2_bucket_headers_clone(apr_bucket *b, apr_pool_t *pool,
+ apr_bucket_alloc_t *list);
+
+#endif /* !AP_HAS_RESPONSE_BUCKETS */
+
+#endif /* defined(__mod_h2__h2_headers__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 3cbd3aca6c..0ce1d410ed 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -847,7 +847,11 @@ static conn_rec *s_next_c2(h2_mplx *m)
}
transit = c2_transit_get(m);
+#if AP_HAS_RESPONSE_BUCKETS
c2 = ap_create_secondary_connection(transit->pool, m->c1, transit->bucket_alloc);
+#else
+ c2 = h2_c2_create(m->c1, transit->pool, transit->bucket_alloc);
+#endif
if (!c2) goto cleanup;
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c1,
H2_STRM_MSG(stream, "created new c2"));
@@ -1128,7 +1132,12 @@ static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout,
H2_MPLX_LEAVE(m);
rv = apr_pollset_poll(m->pollset, timeout >= 0? timeout : -1, &nresults, &results);
H2_MPLX_ENTER_ALWAYS(m);
-
+ if (APR_STATUS_IS_EINTR(rv) && m->shutdown) {
+ if (!m->aborted) {
+ rv = APR_SUCCESS;
+ }
+ goto cleanup;
+ }
} while (APR_STATUS_IS_EINTR(rv));
if (APR_SUCCESS != rv) {
diff --git a/modules/http2/h2_protocol.c b/modules/http2/h2_protocol.c
index a2861cabd3..874753e498 100644
--- a/modules/http2/h2_protocol.c
+++ b/modules/http2/h2_protocol.c
@@ -39,6 +39,7 @@
#include "h2_conn_ctx.h"
#include "h2_c1.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_protocol.h"
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index c87dfa04db..7604df6678 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -432,8 +432,17 @@ static int head_iter(void *ctx, const char *key, const char *value)
return 1;
}
-apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
- apr_uint32_t push_policy, const ap_bucket_response *res)
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const ap_bucket_response *res)
+#else
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const struct h2_headers *res)
+#endif
{
if (req && push_policy != H2_PUSH_NONE) {
/* Collect push candidates from the request/response pair.
@@ -674,9 +683,15 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t
return npushes;
}
+#if AP_HAS_RESPONSE_BUCKETS
apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
const struct h2_request *req,
const ap_bucket_response *res)
+#else
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res)
+#endif
{
apr_array_header_t *pushes;
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
index d514bbffed..947b73bc85 100644
--- a/modules/http2/h2_push.h
+++ b/modules/http2/h2_push.h
@@ -20,6 +20,7 @@
#include <http_protocol.h>
#include "h2.h"
+#include "h2_headers.h"
struct h2_request;
struct h2_ngheader;
@@ -98,14 +99,21 @@ struct h2_push_diary {
* @param res the response from the server
* @return array of h2_push addresses or NULL
*/
-apr_array_header_t *h2_push_collect(apr_pool_t *p,
- const struct h2_request *req,
- apr_uint32_t push_policy,
+#if AP_HAS_RESPONSE_BUCKETS
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
const ap_bucket_response *res);
+#else
+apr_array_header_t *h2_push_collect(apr_pool_t *p,
+ const struct h2_request *req,
+ apr_uint32_t push_policy,
+ const struct h2_headers *res);
+#endif
/**
* Create a new push diary for the given maximum number of entries.
- *
+ *
* @param p the pool to use
* @param N the max number of entries, rounded up to 2^x
* @return the created diary, might be NULL of max_entries is 0
@@ -122,14 +130,21 @@ apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_h
* Collect pushes for the given request/response pair, enter them into the
* diary and return those pushes newly entered.
*/
+#if AP_HAS_RESPONSE_BUCKETS
apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
const struct h2_request *req,
const ap_bucket_response *res);
+#else
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const struct h2_headers *res);
+#endif
+
/**
* Get a cache digest as described in
* https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
* from the contents of the push diary.
- *
+ *
* @param diary the diary to calculdate the digest from
* @param p the pool to use
* @param authority the authority to get the data for, use NULL/"*" for all
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index 0a181b86a6..dec3338ee0 100644
--- a/modules/http2/h2_request.c
+++ b/modules/http2/h2_request.c
@@ -124,32 +124,32 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
x.headers = req->headers;
x.status = APR_SUCCESS;
apr_table_do(set_h1_header, &x, r->headers_in, NULL);
-
+
*preq = req;
return x.status;
}
-apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
+apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
const char *name, size_t nlen,
const char *value, size_t vlen,
size_t max_field_len, int *pwas_added)
{
apr_status_t status = APR_SUCCESS;
-
+
*pwas_added = 0;
if (nlen <= 0) {
return status;
}
-
+
if (name[0] == ':') {
/* pseudo header, see ch. 8.1.2.3, always should come first */
if (!apr_is_empty_table(req->headers)) {
ap_log_perror(APLOG_MARK, APLOG_ERR, 0, pool,
- APLOGNO(02917)
+ APLOGNO(02917)
"h2_request: pseudo header after request start");
return APR_EGENERAL;
}
-
+
if (H2_HEADER_METHOD_LEN == nlen
&& !strncmp(H2_HEADER_METHOD, name, nlen)) {
req->method = apr_pstrndup(pool, value, vlen);
@@ -171,17 +171,17 @@ apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
memset(buffer, 0, 32);
strncpy(buffer, name, (nlen > 31)? 31 : nlen);
ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, pool,
- APLOGNO(02954)
+ APLOGNO(02954)
"h2_request: ignoring unknown pseudo header %s",
buffer);
}
}
else {
/* non-pseudo header, add to table */
- status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen,
+ status = h2_req_add_header(req->headers, pool, name, nlen, value, vlen,
max_field_len, pwas_added);
}
-
+
return status;
}
@@ -190,7 +190,7 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
const char *s;
/* rfc7540, ch. 8.1.2.3:
- * - if we have :authority, it overrides any Host header
+ * - if we have :authority, it overrides any Host header
* - :authority MUST be omitted when converting h1->h2, so we
* might get a stream without, but then Host needs to be there */
if (!req->authority) {
@@ -204,6 +204,7 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
apr_table_setn(req->headers, "Host", req->authority);
}
+#if AP_HAS_RESPONSE_BUCKETS
if (eos) {
s = apr_table_get(req->headers, "Content-Length");
if (!s && apr_table_get(req->headers, "Content-Type")) {
@@ -213,6 +214,29 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
apr_table_setn(req->headers, "Content-Length", "0");
}
}
+#else /* AP_HAS_RESPONSE_BUCKETS */
+ s = apr_table_get(req->headers, "Content-Length");
+ if (!s) {
+ /* HTTP/2 does not need a Content-Length for framing, but our
+ * internal request processing is used to HTTP/1.1, so we
+ * need to either add a Content-Length or a Transfer-Encoding
+ * if any content can be expected. */
+ if (!eos) {
+ /* We have not seen a content-length and have no eos,
+ * simulate a chunked encoding for our HTTP/1.1 infrastructure,
+ * in case we have "H2SerializeHeaders on" here
+ */
+ req->chunked = 1;
+ apr_table_mergen(req->headers, "Transfer-Encoding", "chunked");
+ }
+ else if (apr_table_get(req->headers, "Content-Type")) {
+ /* If we have a content-type, but already seen eos, no more
+ * data will come. Signal a zero content length explicitly.
+ */
+ apr_table_setn(req->headers, "Content-Length", "0");
+ }
+ }
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
req->raw_bytes += raw_bytes;
return APR_SUCCESS;
@@ -286,6 +310,7 @@ static request_rec *my_ap_create_request(conn_rec *c)
}
#endif
+#if AP_HAS_RESPONSE_BUCKETS
apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
{
conn_rec *c = r->connection;
@@ -306,10 +331,11 @@ apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r)
return ap_bucket_request_create(req->method, uri, "HTTP/2.0", headers,
r->pool, c->bucket_alloc);
}
+#endif
request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
{
- int access_status = HTTP_OK;
+ int access_status = HTTP_OK;
#if AP_MODULE_MAGIC_AT_LEAST(20120211, 106)
request_rec *r = ap_create_request(c);
@@ -430,7 +456,7 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
*/
ap_add_input_filter_handle(ap_http_input_filter_handle,
NULL, r, r->connection);
-
+
if ((access_status = ap_post_read_request(r))) {
/* Request check post hooks failed. An example of this would be a
* request for a vhost where h2 is disabled --> 421.
@@ -441,8 +467,8 @@ request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
goto die;
}
- AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
- (char *)r->uri, (char *)r->server->defn_name,
+ AP_READ_REQUEST_SUCCESS((uintptr_t)r, (char *)r->method,
+ (char *)r->uri, (char *)r->server->defn_name,
r->status);
return r;
@@ -475,6 +501,3 @@ die:
AP_READ_REQUEST_FAILURE((uintptr_t)r);
return NULL;
}
-
-
-
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
index 6c0a468937..40ae1c5c69 100644
--- a/modules/http2/h2_request.h
+++ b/modules/http2/h2_request.h
@@ -49,7 +49,8 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
*/
request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn);
+#if AP_HAS_RESPONSE_BUCKETS
apr_bucket *h2_request_create_bucket(const h2_request *req, request_rec *r);
-
+#endif
#endif /* defined(__mod_h2__h2_request__) */
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 92d3f0a7e6..852168142a 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -46,6 +46,7 @@
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_stream.h"
#include "h2_c2.h"
#include "h2_session.h"
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 2fc9b70b0a..abcbce355c 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -40,6 +40,7 @@
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
+#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_c2.h"
@@ -147,7 +148,7 @@ static int on_frame(h2_stream_state_t state, int frame_type,
{
ap_assert(frame_type >= 0);
ap_assert(state >= 0);
- if (frame_type < 0 || (apr_size_t)frame_type >= maxlen) {
+ if ((apr_size_t)frame_type >= maxlen) {
return state; /* NOP, ignore unknown frame types */
}
return on_map(state, frame_map[frame_type]);
@@ -264,8 +265,14 @@ static apr_status_t close_input(h2_stream *stream)
&& !apr_is_empty_table(stream->trailers_in)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "adding trailers"));
+#if AP_HAS_RESPONSE_BUCKETS
b = ap_bucket_headers_create(stream->trailers_in,
stream->pool, c->bucket_alloc);
+#else
+ b = h2_bucket_headers_create(c->bucket_alloc,
+ h2_headers_create(HTTP_OK, stream->trailers_in, NULL,
+ stream->in_trailer_octets, stream->pool));
+#endif
input_append_bucket(stream, b);
stream->trailers_in = NULL;
}
@@ -881,9 +888,15 @@ static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb)
if (bb) {
apr_bucket *b = APR_BRIGADE_FIRST(bb);
while (b != APR_BRIGADE_SENTINEL(bb)) {
+#if AP_HAS_RESPONSE_BUCKETS
if (AP_BUCKET_IS_RESPONSE(b)) {
return b;
}
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ return b;
+ }
+#endif
b = APR_BUCKET_NEXT(b);
}
}
@@ -971,9 +984,13 @@ cleanup:
static int bucket_pass_to_c1(apr_bucket *b)
{
+#if AP_HAS_RESPONSE_BUCKETS
return !AP_BUCKET_IS_RESPONSE(b)
&& !AP_BUCKET_IS_HEADERS(b)
&& !APR_BUCKET_IS_EOS(b);
+#else
+ return !H2_BUCKET_IS_HEADERS(b) && !APR_BUCKET_IS_EOS(b);
+#endif
}
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
@@ -994,12 +1011,16 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
static apr_status_t buffer_output_process_headers(h2_stream *stream)
{
conn_rec *c1 = stream->session->c1;
- ap_bucket_response *resp = NULL;
- ap_bucket_headers *headers = NULL;
apr_status_t rv = APR_EAGAIN;
int ngrv = 0, is_empty;
h2_ngheader *nh = NULL;
apr_bucket *b, *e;
+#if AP_HAS_RESPONSE_BUCKETS
+ ap_bucket_response *resp = NULL;
+ ap_bucket_headers *headers = NULL;
+#else
+ h2_headers *headers = NULL, *resp = NULL;
+#endif
if (!stream->out_buffer) goto cleanup;
@@ -1007,6 +1028,7 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
e = APR_BUCKET_NEXT(b);
if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
if (AP_BUCKET_IS_RESPONSE(b)) {
resp = b->data;
APR_BUCKET_REMOVE(b);
@@ -1026,6 +1048,22 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
b = e;
break;
}
+#else /* AP_HAS_RESPONSE_BUCKETS */
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ headers = h2_bucket_headers_get(b);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "process headers, response %d"),
+ headers->status);
+ if (!stream->response) {
+ resp = headers;
+ headers = NULL;
+ }
+ b = e;
+ break;
+ }
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
}
else {
if (!stream->response) {
@@ -1115,9 +1153,15 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
is_empty = 0;
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
if (APR_BUCKET_IS_METADATA(b)) {
+#if AP_HAS_RESPONSE_BUCKETS
if (AP_BUCKET_IS_HEADERS(b)) {
break;
}
+#else
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ break;
+ }
+#endif
else if (APR_BUCKET_IS_EOS(b)) {
is_empty = 1;
break;
@@ -1184,7 +1228,11 @@ cleanup:
return rv;
}
+#if AP_HAS_RESPONSE_BUCKETS
apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response)
+#else
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
+#endif
{
apr_status_t status = APR_SUCCESS;
apr_array_header_t *pushes;
@@ -1212,8 +1260,13 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream)
return NULL;
}
-const h2_priority *h2_stream_get_priority(h2_stream *stream,
+#if AP_HAS_RESPONSE_BUCKETS
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
ap_bucket_response *response)
+#else
+const h2_priority *h2_stream_get_priority(h2_stream *stream,
+ h2_headers *response)
+#endif
{
if (response && stream->initiated_on) {
const char *ctype = apr_table_get(response->headers, "content-type");
@@ -1323,6 +1376,7 @@ static apr_off_t output_data_buffered(h2_stream *stream, int *peos, int *pheader
*peos = 1;
break;
}
+#if AP_HAS_RESPONSE_BUCKETS
else if (AP_BUCKET_IS_RESPONSE(b)) {
break;
}
@@ -1330,6 +1384,12 @@ static apr_off_t output_data_buffered(h2_stream *stream, int *peos, int *pheader
*pheader_blocked = 1;
break;
}
+#else
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ *pheader_blocked = 1;
+ break;
+ }
+#endif
}
else {
buf_len += b->length;
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index a782b3ae45..5b5ef35c51 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -20,6 +20,7 @@
#include <http_protocol.h>
#include "h2.h"
+#include "h2_headers.h"
/**
* A HTTP/2 stream, e.g. a client request+response in HTTP/1.1 terms.
@@ -28,7 +29,12 @@
* connection to the client. The h2_session writes to the h2_stream,
* adding HEADERS and DATA and finally an EOS. When headers are done,
* h2_stream is scheduled for handling, which is expected to produce
- * RESPONSE buckets.
+ * h2_headers/RESPONSE buckets.
+ *
+ * The h2_headers may be followed by more h2_headers (interim responses) and
+ * by DATA frames read from the h2_stream until EOS is reached. Trailers
+ * are send when a last h2_headers is received. This always closes the stream
+ * output.
*/
struct h2_mplx;
@@ -71,13 +77,17 @@ struct h2_stream {
apr_table_t *trailers_in; /* optional, incoming trailers */
int request_headers_added; /* number of request headers added */
+#if AP_HAS_RESPONSE_BUCKETS
ap_bucket_response *response; /* the final, non-interim response or NULL */
+#else
+ struct h2_headers *response; /* the final, non-interim response or NULL */
+#endif
struct h2_bucket_beam *input;
apr_bucket_brigade *in_buffer;
int in_window_size;
apr_time_t in_last_write;
-
+
struct h2_bucket_beam *output;
apr_bucket_brigade *out_buffer;
@@ -90,7 +100,7 @@ struct h2_stream {
unsigned int output_eos : 1; /* output EOS in buffer/sent */
conn_rec *c2; /* connection processing stream */
-
+
const h2_priority *pref_priority; /* preferred priority for this stream */
apr_off_t out_frames; /* # of frames sent out */
apr_off_t out_frame_octets; /* # of RAW frame octets sent out */
@@ -99,7 +109,7 @@ struct h2_stream {
apr_off_t in_data_frames; /* # of DATA frames received */
apr_off_t in_data_octets; /* # of DATA octets (payload) received */
apr_off_t in_trailer_octets; /* # of HEADER octets (payload) received in trailers */
-
+
h2_stream_monitor *monitor; /* optional monitor for stream states */
};
@@ -111,13 +121,13 @@ struct h2_stream {
* @param id the stream identifier
* @param pool the memory pool to use for this stream
* @param session the session this stream belongs to
- * @param monitor an optional monitor to be called for events and
+ * @param monitor an optional monitor to be called for events and
* state transisitions
* @param initiated_on the id of the stream this one was initiated on (PUSH)
*
* @return the newly opened stream
*/
-h2_stream *h2_stream_create(int id, apr_pool_t *pool,
+h2_stream *h2_stream_create(int id, apr_pool_t *pool,
struct h2_session *session,
h2_stream_monitor *monitor,
int initiated_on);
@@ -160,7 +170,7 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount);
/**
* Set complete stream headers from given h2_request.
- *
+ *
* @param stream stream to write request to
* @param r the request with all the meta data
* @param eos != 0 iff stream input is closed
@@ -169,16 +179,16 @@ void h2_stream_set_request(h2_stream *stream, const h2_request *r);
/**
* Set complete stream header from given request_rec.
- *
+ *
* @param stream stream to write request to
* @param r the request with all the meta data
* @param eos != 0 iff stream input is closed
*/
-apr_status_t h2_stream_set_request_rec(h2_stream *stream,
+apr_status_t h2_stream_set_request_rec(h2_stream *stream,
request_rec *r, int eos);
/*
- * Add a HTTP/2 header (including pseudo headers) or trailer
+ * Add a HTTP/2 header (including pseudo headers) or trailer
* to the given stream, depending on stream state.
*
* @param stream stream to write the header to
@@ -190,7 +200,7 @@ apr_status_t h2_stream_set_request_rec(h2_stream *stream,
apr_status_t h2_stream_add_header(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen);
-
+
/* End the construction of request headers */
apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes);
@@ -235,7 +245,7 @@ apr_status_t h2_stream_read_output(h2_stream *stream);
/**
* Read a maximum number of bytes into the bucket brigade.
- *
+ *
* @param stream the stream to read from
* @param bb the brigade to append output to
* @param plen (in-/out) max. number of bytes to append and on return actual
@@ -245,7 +255,7 @@ apr_status_t h2_stream_read_output(h2_stream *stream);
* APR_EAGAIN if not data is available and end of stream has not been
* reached yet.
*/
-apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos);
/**
@@ -264,13 +274,24 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream);
*
* @param stream the stream for which to submit
*/
-apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response);
+#if AP_HAS_RESPONSE_BUCKETS
+apr_status_t h2_stream_submit_pushes(h2_stream *stream,
+ ap_bucket_response *response);
+#else
+apr_status_t h2_stream_submit_pushes(h2_stream *stream,
+ struct h2_headers *response);
+#endif
/**
* Get priority information set for this stream.
*/
-const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+#if AP_HAS_RESPONSE_BUCKETS
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
ap_bucket_response *response);
+#else
+const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
+ struct h2_headers *response);
+#endif
/**
* Return a textual representation of the stream state as in RFC 7540
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
index 93327afd5b..a30f27ce9d 100644
--- a/modules/http2/h2_switch.c
+++ b/modules/http2/h2_switch.c
@@ -29,6 +29,7 @@
#include <http_log.h>
#include "h2_private.h"
+#include "h2.h"
#include "h2_config.h"
#include "h2_conn_ctx.h"
@@ -125,6 +126,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
return proposed? DECLINED : OK;
}
+#if AP_HAS_RESPONSE_BUCKETS
static void remove_output_filters_below(ap_filter_t *f, ap_filter_type ftype)
{
ap_filter_t *fnext;
@@ -146,6 +148,7 @@ static void remove_input_filters_below(ap_filter_t *f, ap_filter_type ftype)
f = fnext;
}
}
+#endif
static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
const char *protocol)
@@ -174,6 +177,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
if (r != NULL) {
apr_status_t status;
+#if AP_HAS_RESPONSE_BUCKETS
/* Switching in the middle of a request means that
* we have to send out the response to this one in h2
* format. So we need to take over the connection
@@ -182,7 +186,15 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
*/
remove_input_filters_below(r->input_filters, AP_FTYPE_CONNECTION);
remove_output_filters_below(r->output_filters, AP_FTYPE_CONNECTION);
-
+#else
+ /* Switching in the middle of a request means that
+ * we have to send out the response to this one in h2
+ * format. So we need to take over the connection
+ * right away.
+ */
+ ap_remove_input_filter_byhandle(r->input_filters, "http_in");
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+#endif
/* Ok, start an h2_conn on this one. */
status = h2_c1_setup(c, r, s);
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 1079d0b096..47b1309763 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -28,6 +28,7 @@
#include <nghttp2/nghttp2.h>
#include "h2.h"
+#include "h2_headers.h"
#include "h2_util.h"
/* h2_log2(n) iff n is a power of 2 */
@@ -1172,7 +1173,7 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
static void fit_bucket_into(apr_bucket *b, apr_off_t *plen)
{
/* signed apr_off_t is at least as large as unsigned apr_size_t.
- * Propblems may arise when they are both the same size. Then
+ * Problems may arise when they are both the same size. Then
* the bucket length *may* be larger than a value we can hold
* in apr_off_t. Before casting b->length to apr_off_t we must
* check the limitations.
@@ -1502,6 +1503,8 @@ static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
return ctx.status;
}
+#if AP_HAS_RESPONSE_BUCKETS
+
static int is_unsafe(ap_bucket_response *h)
{
const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL;
@@ -1528,6 +1531,36 @@ apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
H2_ALEN(keys), keys, values, response->headers);
}
+#else /* AP_HAS_RESPONSE_BUCKETS */
+
+static int is_unsafe(h2_headers *h)
+{
+ const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL;
+ return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
+}
+
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ return ngheader_create(ph, p, is_unsafe(headers),
+ 0, NULL, NULL, headers->headers);
+}
+
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ h2_headers *headers)
+{
+ const char *keys[] = {
+ ":status"
+ };
+ const char *values[] = {
+ apr_psprintf(p, "%d", headers->status)
+ };
+ return ngheader_create(ph, p, is_unsafe(headers),
+ H2_ALEN(keys), keys, values, headers->headers);
+}
+
+#endif /* else AP_HAS_RESPONSE_BUCKETS */
+
apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
const struct h2_request *req)
{
@@ -1825,6 +1858,8 @@ apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe)
return apr_file_read(pipe, rb, &nr);
}
+#if AP_HAS_RESPONSE_BUCKETS
+
static int add_header_lengths(void *ctx, const char *name, const char *value)
{
apr_size_t *plen = ctx;
@@ -1844,4 +1879,6 @@ apr_size_t response_length_estimate(ap_bucket_response *resp)
apr_size_t len = 3 + 1 + 8 + (resp->reason? strlen(resp->reason) : 10);
apr_table_do(add_header_lengths, &len, resp->headers, NULL);
return len;
-} \ No newline at end of file
+}
+
+#endif /* AP_HAS_RESPONSE_BUCKETS */ \ No newline at end of file
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
index 02e8178ebb..1582fca8e3 100644
--- a/modules/http2/h2_util.h
+++ b/modules/http2/h2_util.h
@@ -18,6 +18,10 @@
#define __mod_h2__h2_util__
#include <nghttp2/nghttp2.h>
+#include <http_protocol.h>
+
+#include "h2.h"
+#include "h2_headers.h"
/*******************************************************************************
* some debugging/format helpers
@@ -396,12 +400,21 @@ typedef struct h2_ngheader {
apr_size_t nvlen;
} h2_ngheader;
-apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+#if AP_HAS_RESPONSE_BUCKETS
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
ap_bucket_headers *headers);
apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
ap_bucket_response *response);
-apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ const struct h2_request *req);
+#else
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ struct h2_headers *headers);
+apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
const struct h2_request *req);
+#endif
/**
* Add a HTTP/2 header and return the table key if it really was added
@@ -507,6 +520,8 @@ void h2_util_drain_pipe(apr_file_t *pipe);
*/
apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe);
+
+#if AP_HAS_RESPONSE_BUCKETS
/**
* Give an estimate of the length of the header fields,
* without compression or other formatting decorations.
@@ -518,5 +533,6 @@ apr_size_t headers_length_estimate(ap_bucket_headers *hdrs);
* without compression or other formatting decorations.
*/
apr_size_t response_length_estimate(ap_bucket_response *resp);
+#endif /* AP_HAS_RESPONSE_BUCKETS */
#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
index c488758ce7..f0da61ed40 100644
--- a/modules/http2/h2_version.h
+++ b/modules/http2/h2_version.h
@@ -27,7 +27,7 @@
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "2.0.7"
+#define MOD_HTTP2_VERSION "2.0.8-dev"
/**
* @macro
@@ -35,7 +35,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x020007
+#define MOD_HTTP2_VERSION_NUM 0x020008
#endif /* mod_h2_h2_version_h */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index ac64f53b34..215d8fa22c 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -288,7 +288,11 @@ static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx)
c->current_thread = thread;
AP_DEBUG_ASSERT(slot->prod);
+#if AP_HAS_RESPONSE_BUCKETS
ap_process_connection(c, ap_get_conn_socket(c));
+#else
+ h2_c2_process(c, thread, slot->id);
+#endif
slot->prod->fn_done(slot->prod->baton, c);
apr_thread_mutex_lock(workers->lock);
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index 9fa2c7aa34..d9ff22203a 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -133,6 +133,10 @@ SOURCE=./h2_conn_ctx.c
# End Source File
# Begin Source File
+SOURCE=./h2_headers.c
+# End Source File
+# Begin Source File
+
SOURCE=./h2_mplx.c
# End Source File
# Begin Source File