-rw-r--r--  CMakeLists.txt                                 |    2
-rw-r--r--  modules/http2/NWGNUmod_http2                   |    1
-rw-r--r--  modules/http2/config2.m4                       |    1
-rw-r--r--  modules/http2/h2.h                             |    8
-rw-r--r--  modules/http2/h2_bucket_beam.c                 |   11
-rw-r--r--  modules/http2/h2_bucket_eos.c                  |    1
-rw-r--r--  modules/http2/h2_c1.c                          |    1
-rw-r--r--  modules/http2/h2_c2.c                          |   51
-rw-r--r--  modules/http2/h2_c2_filter.c                   |  735
-rw-r--r--  modules/http2/h2_c2_filter.h                   |   13
-rw-r--r--  modules/http2/h2_conn_ctx.c                    |    1
-rw-r--r--  modules/http2/h2_headers.c                     |  207
-rw-r--r--  modules/http2/h2_headers.h                     |   95
-rw-r--r--  modules/http2/h2_mplx.c                        |    1
-rw-r--r--  modules/http2/h2_protocol.c                    |    1
-rw-r--r--  modules/http2/h2_push.c                        |   10
-rw-r--r--  modules/http2/h2_push.h                        |    9
-rw-r--r--  modules/http2/h2_session.c                     |    2
-rw-r--r--  modules/http2/h2_stream.c                      |  127
-rw-r--r--  modules/http2/h2_stream.h                      |   16
-rw-r--r--  modules/http2/h2_util.c                        |   40
-rw-r--r--  modules/http2/h2_util.h                        |   20
-rw-r--r--  modules/http2/h2_workers.c                     |    1
-rw-r--r--  modules/http2/mod_http2.dsp                    |    4
-rw-r--r--  test/modules/http2/test_200_header_invalid.py  |   27
25 files changed, 226 insertions(+), 1159 deletions(-)
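
The hunks below drop mod_http2's private H2HEADERS meta bucket (h2_headers.c/h) in favour of httpd's generic response/headers/request buckets. As an illustrative sketch only — not code from this commit — the following shows how an output filter can inspect those generic meta buckets. It uses only names that appear in the hunks below (AP_BUCKET_IS_RESPONSE, AP_BUCKET_IS_HEADERS, ap_bucket_response, ap_bucket_headers and their fields); the filter function "sketch_out" itself is hypothetical.

/* Illustrative sketch, not part of this commit. */
#include <httpd.h>
#include <http_log.h>
#include <http_protocol.h>   /* ap_bucket_response, ap_bucket_headers */
#include <util_filter.h>

static apr_status_t sketch_out(ap_filter_t *f, apr_bucket_brigade *bb)
{
    apr_bucket *b;

    for (b = APR_BRIGADE_FIRST(bb);
         b != APR_BRIGADE_SENTINEL(bb);
         b = APR_BUCKET_NEXT(b)) {
        if (AP_BUCKET_IS_RESPONSE(b)) {
            /* response meta data: status, reason, headers, notes */
            ap_bucket_response *resp = b->data;
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                          "meta: response, status=%d", resp->status);
        }
        else if (AP_BUCKET_IS_HEADERS(b)) {
            /* trailing header fields, e.g. HTTP trailers */
            ap_bucket_headers *hdrs = b->data;
            ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
                          "meta: %d trailer fields",
                          apr_table_elts(hdrs->headers)->nelts);
        }
    }
    return ap_pass_brigade(f->next, bb);
}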
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8cc2eaa22c..526b502551 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -473,7 +473,7 @@ SET(mod_http2_extra_sources
modules/http2/h2_c1.c modules/http2/h2_c1_io.c
modules/http2/h2_c2.c modules/http2/h2_c2_filter.c
modules/http2/h2_config.c modules/http2/h2_conn_ctx.c
- modules/http2/h2_headers.c modules/http2/h2_mplx.c
+ modules/http2/h2_mplx.c
modules/http2/h2_protocol.c modules/http2/h2_push.c
modules/http2/h2_request.c modules/http2/h2_session.c
modules/http2/h2_stream.c modules/http2/h2_switch.c
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
index cdd6cba6a9..a0442de9dd 100644
--- a/modules/http2/NWGNUmod_http2
+++ b/modules/http2/NWGNUmod_http2
@@ -192,7 +192,6 @@ FILES_nlm_objs = \
$(OBJDIR)/h2_c2_filter.lo \
$(OBJDIR)/h2_config.lo \
$(OBJDIR)/h2_conn_ctx.lo \
- $(OBJDIR)/h2_headers.lo \
$(OBJDIR)/h2_mplx.lo \
$(OBJDIR)/h2_protocol.lo \
$(OBJDIR)/h2_push.lo \
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index 67065e01ac..bec019b77b 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -27,7 +27,6 @@ h2_c2.lo dnl
h2_c2_filter.lo dnl
h2_config.lo dnl
h2_conn_ctx.lo dnl
-h2_headers.lo dnl
h2_mplx.lo dnl
h2_protocol.lo dnl
h2_push.lo dnl
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index 821809bca1..f707da41c1 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -203,14 +203,6 @@ struct h2_request {
*/
#define H2_HTTP_STATUS_UNSET (0)
-typedef struct h2_headers h2_headers;
-struct h2_headers {
- int status;
- apr_table_t *headers;
- apr_table_t *notes;
- apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
-};
-
typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
typedef int h2_stream_pri_cmp_fn(int stream_id1, int stream_id2, void *session);
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index 6ac85a5e95..05ca623c58 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -28,7 +28,6 @@
#include "h2_private.h"
#include "h2_conn_ctx.h"
-#include "h2_headers.h"
#include "h2_util.h"
#include "h2_bucket_beam.h"
@@ -637,8 +636,14 @@ transfer:
else if (APR_BUCKET_IS_FLUSH(bsender)) {
brecv = apr_bucket_flush_create(bb->bucket_alloc);
}
- else if (H2_BUCKET_IS_HEADERS(bsender)) {
- brecv = h2_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
+ else if (AP_BUCKET_IS_RESPONSE(bsender)) {
+ brecv = ap_bucket_response_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+ else if (AP_BUCKET_IS_REQUEST(bsender)) {
+ brecv = ap_bucket_request_clone(bsender, bb->p, bb->bucket_alloc);
+ }
+ else if (AP_BUCKET_IS_HEADERS(bsender)) {
+ brecv = ap_bucket_headers_clone(bsender, bb->p, bb->bucket_alloc);
}
else if (AP_BUCKET_IS_ERROR(bsender)) {
ap_bucket_error *eb = bsender->data;
diff --git a/modules/http2/h2_bucket_eos.c b/modules/http2/h2_bucket_eos.c
index 4fe7ea725f..fa46a3096a 100644
--- a/modules/http2/h2_bucket_eos.c
+++ b/modules/http2/h2_bucket_eos.c
@@ -21,6 +21,7 @@
#include <http_core.h>
#include <http_connection.h>
#include <http_log.h>
+#include <http_protocol.h>
#include "h2_private.h"
#include "h2.h"
diff --git a/modules/http2/h2_c1.c b/modules/http2/h2_c1.c
index 6388985aa1..1f455c145c 100644
--- a/modules/http2/h2_c1.c
+++ b/modules/http2/h2_c1.c
@@ -36,7 +36,6 @@
#include "h2_bucket_beam.h"
#include "h2_config.h"
#include "h2_conn_ctx.h"
-#include "h2_headers.h"
#include "h2_mplx.h"
#include "h2_session.h"
#include "h2_stream.h"
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
index acb28ff3de..46d93e7ebe 100644
--- a/modules/http2/h2_c2.c
+++ b/modules/http2/h2_c2.c
@@ -45,7 +45,6 @@
#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_request.h"
-#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_c2.h"
@@ -56,6 +55,11 @@ static module *mpm_module;
static int mpm_supported = 1;
static apr_socket_t *dummy_socket;
+static ap_filter_rec_t *c2_net_in_filter_handle;
+static ap_filter_rec_t *c2_net_out_filter_handle;
+static ap_filter_rec_t *c2_notes_out_filter_handle;
+
+
static void check_modules(int force)
{
static int checked = 0;
@@ -330,8 +334,11 @@ static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_b
for (b = APR_BRIGADE_FIRST(bb);
b != APR_BRIGADE_SENTINEL(bb);
b = APR_BUCKET_NEXT(b)) {
- if (H2_BUCKET_IS_HEADERS(b)) {
- header_len += (apr_off_t)h2_bucket_headers_headers_length(b);
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ header_len += (apr_off_t)response_length_estimate(b->data);
+ }
+ if (AP_BUCKET_IS_HEADERS(b)) {
+ header_len += (apr_off_t)headers_length_estimate(b->data);
}
}
}
@@ -429,9 +436,8 @@ apr_status_t h2_c2_process(conn_rec *c2, apr_thread_t *thread, int worker_id)
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
"h2_c2(%s-%d), adding filters",
conn_ctx->id, conn_ctx->stream_id);
- ap_add_input_filter("H2_C2_NET_IN", NULL, NULL, c2);
- ap_add_output_filter("H2_C2_NET_CATCH_H1", NULL, NULL, c2);
- ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2);
+ ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2);
+ ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2);
c2_run_pre_connection(c2, ap_get_conn_socket(c2));
conn_ctx->pre_conn_done = 1;
@@ -583,16 +589,7 @@ static int h2_c2_hook_post_read_request(request_rec *r)
ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
"h2_c2(%s-%d): adding request filters",
conn_ctx->id, conn_ctx->stream_id);
-
- /* setup the correct filters to process the request for h2 */
- ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection);
-
- /* replace the core http filter that formats response headers
- * in HTTP/1 with our own that collects status and headers */
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
-
- ap_add_output_filter("H2_C2_RESPONSE_OUT", NULL, r, r->connection);
- ap_add_output_filter("H2_C2_TRAILERS_OUT", NULL, r, r->connection);
+ ap_add_output_filter_handle(c2_notes_out_filter_handle, NULL, r, r->connection);
}
return DECLINED;
}
@@ -623,18 +620,14 @@ void h2_c2_register_hooks(void)
ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
ap_hook_fixups(h2_c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
- ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
- NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
- NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_C2_NET_CATCH_H1", h2_c2_filter_catch_h1_out,
- NULL, AP_FTYPE_NETWORK);
-
- ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in,
- NULL, AP_FTYPE_PROTOCOL);
- ap_register_output_filter("H2_C2_RESPONSE_OUT", h2_c2_filter_response_out,
- NULL, AP_FTYPE_PROTOCOL);
- ap_register_output_filter("H2_C2_TRAILERS_OUT", h2_c2_filter_trailers_out,
- NULL, AP_FTYPE_PROTOCOL);
+ c2_net_in_filter_handle =
+ ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
+ NULL, AP_FTYPE_NETWORK);
+ c2_net_out_filter_handle =
+ ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+ c2_notes_out_filter_handle =
+ ap_register_output_filter("H2_C2_NOTES_OUT", h2_c2_filter_notes_out,
+ NULL, AP_FTYPE_PROTOCOL);
}
diff --git a/modules/http2/h2_c2_filter.c b/modules/http2/h2_c2_filter.c
index dabfd420ea..22299c4794 100644
--- a/modules/http2/h2_c2_filter.c
+++ b/modules/http2/h2_c2_filter.c
@@ -30,8 +30,9 @@
#include <util_time.h>
#include "h2_private.h"
+#include "h2.h"
+#include "h2_config.h"
#include "h2_conn_ctx.h"
-#include "h2_headers.h"
#include "h2_c1.h"
#include "h2_c2_filter.h"
#include "h2_c2.h"
@@ -40,715 +41,49 @@
#include "h2_util.h"
-#define H2_FILTER_LOG(name, c, level, rv, msg, bb) \
- do { \
- if (APLOG_C_IS_LEVEL((c),(level))) { \
- char buffer[4 * 1024]; \
- apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
- len = h2_util_bb_print(buffer, bmax, "", "", (bb)); \
- ap_log_cerror(APLOG_MARK, (level), rv, (c), \
- "FILTER[%s]: %s %s", \
- (name), (msg), len? buffer : ""); \
- } \
- } while (0)
-
-
-/* This routine is called by apr_table_do and merges all instances of
- * the passed field values into a single array that will be further
- * processed by some later routine. Originally intended to help split
- * and recombine multiple Vary fields, though it is generic to any field
- * consisting of comma/space-separated tokens.
- */
-static int uniq_field_values(void *d, const char *key, const char *val)
-{
- apr_array_header_t *values;
- char *start;
- char *e;
- char **strpp;
- int i;
-
- (void)key;
- values = (apr_array_header_t *)d;
-
- e = apr_pstrdup(values->pool, val);
-
- do {
- /* Find a non-empty fieldname */
-
- while (*e == ',' || apr_isspace(*e)) {
- ++e;
- }
- if (*e == '\0') {
- break;
- }
- start = e;
- while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
- ++e;
- }
- if (*e != '\0') {
- *e++ = '\0';
- }
-
- /* Now add it to values if it isn't already represented.
- * Could be replaced by a ap_array_strcasecmp() if we had one.
- */
- for (i = 0, strpp = (char **) values->elts; i < values->nelts;
- ++i, ++strpp) {
- if (*strpp && apr_strnatcasecmp(*strpp, start) == 0) {
- break;
- }
- }
- if (i == values->nelts) { /* if not found */
- *(char **)apr_array_push(values) = start;
- }
- } while (*e != '\0');
-
- return 1;
-}
-
-/*
- * Since some clients choke violently on multiple Vary fields, or
- * Vary fields with duplicate tokens, combine any multiples and remove
- * any duplicates.
- */
-static void fix_vary(request_rec *r)
-{
- apr_array_header_t *varies;
-
- varies = apr_array_make(r->pool, 5, sizeof(char *));
-
- /* Extract all Vary fields from the headers_out, separate each into
- * its comma-separated fieldname values, and then add them to varies
- * if not already present in the array.
- */
- apr_table_do(uniq_field_values, varies, r->headers_out, "Vary", NULL);
-
- /* If we found any, replace old Vary fields with unique-ified value */
-
- if (varies->nelts > 0) {
- apr_table_setn(r->headers_out, "Vary",
- apr_array_pstrcat(r->pool, varies, ','));
- }
-}
-
-static h2_headers *create_response(request_rec *r)
-{
- const char *clheader;
- const char *ctype;
-
- /*
- * Now that we are ready to send a response, we need to combine the two
- * header field tables into a single table. If we don't do this, our
- * later attempts to set or unset a given fieldname might be bypassed.
- */
- if (!apr_is_empty_table(r->err_headers_out)) {
- r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
- r->headers_out);
- apr_table_clear(r->err_headers_out);
- }
-
- /*
- * Remove the 'Vary' header field if the client can't handle it.
- * Since this will have nasty effects on HTTP/1.1 caches, force
- * the response into HTTP/1.0 mode.
- */
- if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
- apr_table_unset(r->headers_out, "Vary");
- r->proto_num = HTTP_VERSION(1,0);
- apr_table_setn(r->subprocess_env, "force-response-1.0", "1");
- }
- else {
- fix_vary(r);
- }
-
- /*
- * Now remove any ETag response header field if earlier processing
- * says so (such as a 'FileETag None' directive).
- */
- if (apr_table_get(r->notes, "no-etag") != NULL) {
- apr_table_unset(r->headers_out, "ETag");
- }
-
- /* determine the protocol and whether we should use keepalives. */
- ap_set_keepalive(r);
-
- if (AP_STATUS_IS_HEADER_ONLY(r->status)) {
- apr_table_unset(r->headers_out, "Transfer-Encoding");
- apr_table_unset(r->headers_out, "Content-Length");
- r->content_type = r->content_encoding = NULL;
- r->content_languages = NULL;
- r->clength = r->chunked = 0;
- }
- else if (r->chunked) {
- apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
- apr_table_unset(r->headers_out, "Content-Length");
- }
-
- ctype = ap_make_content_type(r, r->content_type);
- if (ctype) {
- apr_table_setn(r->headers_out, "Content-Type", ctype);
- }
-
- if (r->content_encoding) {
- apr_table_setn(r->headers_out, "Content-Encoding",
- r->content_encoding);
- }
-
- if (!apr_is_empty_array(r->content_languages)) {
- unsigned int i;
- char *token;
- char **languages = (char **)(r->content_languages->elts);
- const char *field = apr_table_get(r->headers_out, "Content-Language");
-
- while (field && (token = ap_get_list_item(r->pool, &field)) != NULL) {
- for (i = 0; i < r->content_languages->nelts; ++i) {
- if (!apr_strnatcasecmp(token, languages[i]))
- break;
- }
- if (i == r->content_languages->nelts) {
- *((char **) apr_array_push(r->content_languages)) = token;
- }
- }
-
- field = apr_array_pstrcat(r->pool, r->content_languages, ',');
- apr_table_setn(r->headers_out, "Content-Language", field);
- }
-
- /*
- * Control cachability for non-cachable responses if not already set by
- * some other part of the server configuration.
- */
- if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
- char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
- apr_table_add(r->headers_out, "Expires", date);
- }
-
- /* This is a hack, but I can't find anyway around it. The idea is that
- * we don't want to send out 0 Content-Lengths if it is a head request.
- * This happens when modules try to outsmart the server, and return
- * if they see a HEAD request. Apache 1.3 handlers were supposed to
- * just return in that situation, and the core handled the HEAD. In
- * 2.0, if a handler returns, then the core sends an EOS bucket down
- * the filter stack, and the content-length filter computes a C-L of
- * zero and that gets put in the headers, and we end up sending a
- * zero C-L to the client. We can't just remove the C-L filter,
- * because well behaved 2.0 handlers will send their data down the stack,
- * and we will compute a real C-L for the head request. RBB
- */
- if (r->header_only
- && (clheader = apr_table_get(r->headers_out, "Content-Length"))
- && !strcmp(clheader, "0")) {
- apr_table_unset(r->headers_out, "Content-Length");
- }
-
- /*
- * keep the set-by-proxy server and date headers, otherwise
- * generate a new server header / date header
- */
- if (r->proxyreq == PROXYREQ_NONE
- || !apr_table_get(r->headers_out, "Date")) {
- char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
- apr_table_setn(r->headers_out, "Date", date );
- }
- if (r->proxyreq == PROXYREQ_NONE
- || !apr_table_get(r->headers_out, "Server")) {
- const char *us = ap_get_server_banner();
- if (us && *us) {
- apr_table_setn(r->headers_out, "Server", us);
- }
- }
-
- return h2_headers_rcreate(r, r->status, r->headers_out, r->pool);
-}
-
-typedef enum {
- H2_RP_STATUS_LINE,
- H2_RP_HEADER_LINE,
- H2_RP_DONE
-} h2_rp_state_t;
-
-typedef struct h2_response_parser h2_response_parser;
-struct h2_response_parser {
- const char *id;
- h2_rp_state_t state;
- conn_rec *c;
- apr_pool_t *pool;
- int http_status;
- apr_array_header_t *hlines;
- apr_bucket_brigade *tmp;
- apr_bucket_brigade *saveto;
-};
-
-static apr_status_t parse_header(h2_response_parser *parser, char *line) {
- const char *hline;
- if (line[0] == ' ' || line[0] == '\t') {
- char **plast;
- /* continuation line from the header before this */
- while (line[0] == ' ' || line[0] == '\t') {
- ++line;
- }
-
- plast = apr_array_pop(parser->hlines);
- if (plast == NULL) {
- /* not well formed */
- return APR_EINVAL;
- }
- hline = apr_psprintf(parser->pool, "%s %s", *plast, line);
- }
- else {
- /* new header line */
- hline = apr_pstrdup(parser->pool, line);
- }
- APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
- return APR_SUCCESS;
-}
-
-static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
- char *line, apr_size_t len)
-{
- apr_status_t status;
-
- if (!parser->tmp) {
- parser->tmp = apr_brigade_create(parser->pool, parser->c->bucket_alloc);
- }
- status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
- len);
- if (status == APR_SUCCESS) {
- --len;
- status = apr_brigade_flatten(parser->tmp, line, &len);
- if (status == APR_SUCCESS) {
- /* we assume a non-0 containing line and remove trailing crlf. */
- line[len] = '\0';
- /*
- * XXX: What to do if there is an LF but no CRLF?
- * Should we error out?
- */
- if (len >= 2 && !strcmp(H2_CRLF, line + len - 2)) {
- len -= 2;
- line[len] = '\0';
- apr_brigade_cleanup(parser->tmp);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
- "h2_c2(%s): read response line: %s",
- parser->id, line);
- }
- else {
- apr_off_t brigade_length;
-
- /*
- * If the brigade parser->tmp becomes longer than our buffer
- * for flattening we never have a chance to get a complete
- * line. This can happen if we are called multiple times after
- * previous calls did not find a H2_CRLF and we returned
- * APR_EAGAIN. In this case parser->tmp (correctly) grows
- * with each call to apr_brigade_split_line.
- *
- * XXX: Currently a stack based buffer of HUGE_STRING_LEN is
- * used. This means we cannot cope with lines larger than
- * HUGE_STRING_LEN which might be an issue.
- */
- status = apr_brigade_length(parser->tmp, 0, &brigade_length);
- if ((status != APR_SUCCESS) || (brigade_length > len)) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, parser->c, APLOGNO(10257)
- "h2_c2(%s): read response, line too long",
- parser->id);
- return APR_ENOSPC;
- }
- /* this does not look like a complete line yet */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
- "h2_c2(%s): read response, incomplete line: %s",
- parser->id, line);
- if (!parser->saveto) {
- parser->saveto = apr_brigade_create(parser->pool,
- parser->c->bucket_alloc);
- }
- /*
- * Be on the save side and save the parser->tmp brigade
- * as it could contain transient buckets which could be
- * invalid next time we are here.
- *
- * NULL for the filter parameter is ok since we
- * provide our own brigade as second parameter
- * and ap_save_brigade does not need to create one.
- */
- ap_save_brigade(NULL, &(parser->saveto), &(parser->tmp),
- parser->tmp->p);
- APR_BRIGADE_CONCAT(parser->tmp, parser->saveto);
- return APR_EAGAIN;
- }
- }
- }
- apr_brigade_cleanup(parser->tmp);
- return status;
-}
-
-static apr_table_t *make_table(h2_response_parser *parser)
-{
- apr_array_header_t *hlines = parser->hlines;
- if (hlines) {
- apr_table_t *headers = apr_table_make(parser->pool, hlines->nelts);
- int i;
-
- for (i = 0; i < hlines->nelts; ++i) {
- char *hline = ((char **)hlines->elts)[i];
- char *sep = ap_strchr(hline, ':');
- if (!sep) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, parser->c,
- APLOGNO(02955) "h2_c2(%s): invalid header[%d] '%s'",
- parser->id, i, (char*)hline);
- /* not valid format, abort */
- return NULL;
- }
- (*sep++) = '\0';
- while (*sep == ' ' || *sep == '\t') {
- ++sep;
- }
-
- if (!h2_util_ignore_header(hline)) {
- apr_table_merge(headers, hline, sep);
- }
- }
- return headers;
- }
- else {
- return apr_table_make(parser->pool, 0);
- }
-}
-
-static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
- h2_response_parser *parser)
+apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb)
{
apr_bucket *b;
- apr_status_t status;
-
- h2_headers *response = h2_headers_create(parser->http_status,
- make_table(parser),
- NULL, 0, parser->pool);
- apr_brigade_cleanup(parser->tmp);
- b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
- APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
- b = apr_bucket_flush_create(parser->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
- status = ap_pass_brigade(f->next, parser->tmp);
- apr_brigade_cleanup(parser->tmp);
-
- /* reset parser for possible next response */
- parser->state = H2_RP_STATUS_LINE;
- apr_array_clear(parser->hlines);
-
- if (response->status >= 200) {
- conn_ctx->has_final_response = 1;
- }
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
- APLOGNO(03197) "h2_c2(%s): passed response %d",
- parser->id, response->status);
- return status;
-}
-
-static apr_status_t parse_status(h2_response_parser *parser, char *line)
-{
- int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
- (apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
- if (sindex > 0) {
- int k = sindex + 3;
- char keepchar = line[k];
- line[k] = '\0';
- parser->http_status = atoi(&line[sindex]);
- line[k] = keepchar;
- parser->state = H2_RP_HEADER_LINE;
-
- return APR_SUCCESS;
- }
- /* Seems like there is garbage on the connection. May be a leftover
- * from a previous proxy request.
- * This should only happen if the H2_RESPONSE filter is not yet in
- * place (post_read_request has not been reached and the handler wants
- * to write something. Probably just the interim response we are
- * waiting for. But if there is other data hanging around before
- * that, this needs to fail. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, APLOGNO(03467)
- "h2_c2(%s): unable to parse status line: %s",
- parser->id, line);
- return APR_EINVAL;
-}
+ request_rec *r_prev;
+ ap_bucket_response *resp;
+ const char *err;
-static apr_status_t parse_response(h2_response_parser *parser,
- h2_conn_ctx_t *conn_ctx,
- ap_filter_t* f, apr_bucket_brigade *bb)
-{
- char line[HUGE_STRING_LEN];
- apr_status_t status = APR_SUCCESS;
-
- while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
- switch (parser->state) {
- case H2_RP_STATUS_LINE:
- case H2_RP_HEADER_LINE:
- status = get_line(parser, bb, line, sizeof(line));
- if (status == APR_EAGAIN) {
- /* need more data */
- return APR_SUCCESS;
- }
- else if (status != APR_SUCCESS) {
- return status;
- }
- if (parser->state == H2_RP_STATUS_LINE) {
- /* instead of parsing, just take it directly */
- status = parse_status(parser, line);
- }
- else if (line[0] == '\0') {
- /* end of headers, pass response onward */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
- "h2_c2(%s): end of response", parser->id);
- return pass_response(conn_ctx, f, parser);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
- "h2_c2(%s): response header %s", parser->id, line);
- status = parse_header(parser, line);
- }
- break;
-
- default:
- return status;
- }
+ if (!f->r) {
+ goto pass;
}
- return status;
-}
-
-apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
-{
- h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
- h2_response_parser *parser = f->ctx;
- apr_status_t rv;
-
- ap_assert(conn_ctx);
- H2_FILTER_LOG("c2_catch_h1_out", f->c, APLOG_TRACE2, 0, "check", bb);
-
- if (!conn_ctx->has_final_response) {
- if (!parser) {
- parser = apr_pcalloc(f->c->pool, sizeof(*parser));
- parser->id = apr_psprintf(f->c->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
- parser->pool = f->c->pool;
- parser->c = f->c;
- parser->state = H2_RP_STATUS_LINE;
- parser->hlines = apr_array_make(parser->pool, 10, sizeof(char *));
- f->ctx = parser;
- }
- if (!APR_BRIGADE_EMPTY(bb)) {
- apr_bucket *b = APR_BRIGADE_FIRST(bb);
- if (AP_BUCKET_IS_EOR(b)) {
- /* TODO: Yikes, this happens when errors are encountered on input
- * before anything from the repsonse has been processed. The
- * ap_die_r() call will do nothing in certain conditions.
+ for (b = APR_BRIGADE_FIRST(bb);
+ b != APR_BRIGADE_SENTINEL(bb);
+ b = APR_BUCKET_NEXT(b))
+ {
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ resp = b->data;
+ if (resp->status >= 400 && f->r->prev) {
+ /* Error responses are commonly handled via internal
+ * redirects to error documents. That creates a new
+ * request_rec with 'prev' set to the original.
+ * Each of these has its own 'notes'.
+ * We'd like to copy interesting ones into the current 'r->notes'
+ * as we reset HTTP/2 stream with H2 specific error codes then.
*/
- int result = ap_map_http_request_error(conn_ctx->last_err,
- HTTP_INTERNAL_SERVER_ERROR);
- request_rec *r = h2_create_request_rec(conn_ctx->request, f->c);
- ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
- b = ap_bucket_eor_create(f->c->bucket_alloc, r);
- APR_BRIGADE_INSERT_TAIL(bb, b);
- }
- }
- /* There are cases where we need to parse a serialized http/1.1 response.
- * One example is a 100-continue answer via a mod_proxy setup. */
- while (bb && !f->c->aborted && !conn_ctx->has_final_response) {
- rv = parse_response(parser, conn_ctx, f, bb);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
- "h2_c2(%s): parsed response", parser->id);
- if (APR_BRIGADE_EMPTY(bb) || APR_SUCCESS != rv) {
- return rv;
- }
- }
- }
-
- return ap_pass_brigade(f->next, bb);
-}
-
-apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb)
-{
- h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
- request_rec *r = f->r;
- apr_bucket *b, *bresp, *body_bucket = NULL, *next;
- ap_bucket_error *eb = NULL;
- h2_headers *response = NULL;
- int headers_passing = 0;
-
- H2_FILTER_LOG("c2_response_out", f->c, APLOG_TRACE1, 0, "called with", bb);
-
- if (f->c->aborted || !conn_ctx || conn_ctx->has_final_response) {
- return ap_pass_brigade(f->next, bb);
- }
-
- if (!conn_ctx->has_final_response) {
- /* check, if we need to send the response now. Until we actually
- * see a DATA bucket or some EOS/EOR, we do not do so. */
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if (AP_BUCKET_IS_ERROR(b) && !eb) {
- eb = b->data;
+ for (r_prev = f->r; r_prev != NULL; r_prev = r_prev->prev) {
+ if ((err = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden"))) {
+ if (r_prev != f->r) {
+ apr_table_setn(resp->notes, "ssl-renegotiate-forbidden", err);
+ }
+ break;
+ }
+ }
}
- else if (AP_BUCKET_IS_EOC(b)) {
- /* If we see an EOC bucket it is a signal that we should get out
- * of the way doing nothing.
- */
- ap_remove_output_filter(f);
+ else if (h2_config_rgeti(f->r, H2_CONF_PUSH) == 0
+ && h2_config_sgeti(f->r->server, H2_CONF_PUSH) != 0) {
+ /* location configuration turns off H2 PUSH handling */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
- "h2_c2(%s): eoc bucket passed", conn_ctx->id);
- return ap_pass_brigade(f->next, bb);
+ "h2_c2_filter_notes_out, turning PUSH off");
+ apr_table_setn(resp->notes, H2_PUSH_MODE_NOTE, "0");
}
- else if (H2_BUCKET_IS_HEADERS(b)) {
- headers_passing = 1;
- }
- else if (!APR_BUCKET_IS_FLUSH(b)) {
- body_bucket = b;
- break;
- }
- }
-
- if (eb) {
- int st = eb->status;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
- "h2_c2(%s): err bucket status=%d",
- conn_ctx->id, st);
- /* throw everything away and replace it with the error response
- * generated by ap_die() */
- apr_brigade_cleanup(bb);
- ap_die(st, r);
- return AP_FILTER_ERROR;
- }
-
- if (body_bucket || !headers_passing) {
- /* time to insert the response bucket before the body or if
- * no h2_headers is passed, e.g. the response is empty */
- response = create_response(r);
- if (response == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
- "h2_c2(%s): unable to create response", conn_ctx->id);
- return APR_ENOMEM;
- }
-
- bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
- if (body_bucket) {
- APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
- }
- else {
- APR_BRIGADE_INSERT_HEAD(bb, bresp);
- }
- conn_ctx->has_final_response = 1;
- r->sent_bodyct = 1;
- ap_remove_output_filter_byhandle(f->r->output_filters, "H2_C2_NET_CATCH_H1");
- }
- }
-
- if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_c2(%s): headers only, cleanup output brigade", conn_ctx->id);
- b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
- while (b != APR_BRIGADE_SENTINEL(bb)) {
- next = APR_BUCKET_NEXT(b);
- if (APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
- break;
- }
- if (!H2_BUCKET_IS_HEADERS(b)) {
- APR_BUCKET_REMOVE(b);
- apr_bucket_destroy(b);
- }
- b = next;
- }
- }
- if (conn_ctx->has_final_response) {
- /* lets get out of the way, our task is done */
- ap_remove_output_filter(f);
- }
- return ap_pass_brigade(f->next, bb);
-}
-
-
-apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
- apr_bucket_brigade* bb,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
-{
- h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
- request_rec *r = f->r;
- apr_status_t status = APR_SUCCESS;
- apr_bucket *b, *next;
- core_server_config *conf =
- (core_server_config *) ap_get_module_config(r->server->module_config,
- &core_module);
- ap_assert(conn_ctx);
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
- "h2_c2(%s-%d): request input, exp=%d",
- conn_ctx->id, conn_ctx->stream_id, r->expecting_100);
-
- status = ap_get_brigade(f->next, bb, mode, block, readbytes);
- /* pipe data through, just take care of trailers */
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb); b = next) {
- next = APR_BUCKET_NEXT(b);
- if (H2_BUCKET_IS_HEADERS(b)) {
- h2_headers *headers = h2_bucket_headers_get(b);
- ap_assert(headers);
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "h2_c2(%s-%d): receiving trailers",
- conn_ctx->id, conn_ctx->stream_id);
- r->trailers_in = headers->headers;
- if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
- r->headers_in = apr_table_overlay(r->pool, r->headers_in,
- r->trailers_in);
- }
- APR_BUCKET_REMOVE(b);
- apr_bucket_destroy(b);
- ap_remove_input_filter(f);
-
- if (headers->raw_bytes && h2_c_logio_add_bytes_in) {
- h2_c_logio_add_bytes_in(f->c, headers->raw_bytes);
- }
- break;
}
}
- return status;
-}
-
-apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
-{
- h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
- request_rec *r = f->r;
- apr_bucket *b, *e;
-
- if (conn_ctx && r) {
- /* Detect the EOS/EOR bucket and forward any trailers that may have
- * been set to our h2_headers.
- */
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b))
- {
- if ((APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b))
- && r->trailers_out && !apr_is_empty_table(r->trailers_out)) {
- h2_headers *headers;
- apr_table_t *trailers;
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
- "h2_c2(%s-%d): sending trailers",
- conn_ctx->id, conn_ctx->stream_id);
- trailers = apr_table_clone(r->pool, r->trailers_out);
- headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
- e = h2_bucket_headers_create(bb->bucket_alloc, headers);
- APR_BUCKET_INSERT_BEFORE(b, e);
- apr_table_clear(r->trailers_out);
- ap_remove_output_filter(f);
- break;
- }
- }
- }
-
+pass:
return ap_pass_brigade(f->next, bb);
}
-
diff --git a/modules/http2/h2_c2_filter.h b/modules/http2/h2_c2_filter.h
index 4b00df71d5..13de7d6eca 100644
--- a/modules/http2/h2_c2_filter.h
+++ b/modules/http2/h2_c2_filter.h
@@ -31,19 +31,8 @@
* we need to have all handlers and filters involved in request/response
* processing, so this seems to be the way for now.
*/
-struct h2_headers;
struct h2_response_parser;
-apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb);
-
-apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb);
-
-apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
- apr_bucket_brigade* brigade,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes);
-
-apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+apr_status_t h2_c2_filter_notes_out(ap_filter_t *f, apr_bucket_brigade *bb);
#endif /* defined(__mod_h2__h2_c2_filter__) */
diff --git a/modules/http2/h2_conn_ctx.c b/modules/http2/h2_conn_ctx.c
index 0f8b874b7e..40a2701a50 100644
--- a/modules/http2/h2_conn_ctx.c
+++ b/modules/http2/h2_conn_ctx.c
@@ -22,6 +22,7 @@
#include <http_core.h>
#include <http_config.h>
#include <http_log.h>
+#include <http_protocol.h>
#include "h2_private.h"
#include "h2_session.h"
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
deleted file mode 100644
index fbeeba3901..0000000000
--- a/modules/http2/h2_headers.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stdio.h>
-
-#include <apr_strings.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_log.h>
-#include <util_time.h>
-
-#include <nghttp2/nghttp2.h>
-
-#include "h2_private.h"
-#include "h2_protocol.h"
-#include "h2_config.h"
-#include "h2_util.h"
-#include "h2_request.h"
-#include "h2_headers.h"
-
-
-static int is_unsafe(server_rec *s)
-{
- core_server_config *conf = ap_get_core_module_config(s->module_config);
- return (conf->http_conformance == AP_HTTP_CONFORMANCE_UNSAFE);
-}
-
-typedef struct {
- apr_bucket_refcount refcount;
- h2_headers *headers;
-} h2_bucket_headers;
-
-static apr_status_t bucket_read(apr_bucket *b, const char **str,
- apr_size_t *len, apr_read_type_e block)
-{
- (void)b;
- (void)block;
- *str = NULL;
- *len = 0;
- return APR_SUCCESS;
-}
-
-apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r)
-{
- h2_bucket_headers *br;
-
- br = apr_bucket_alloc(sizeof(*br), b->list);
- br->headers = r;
-
- b = apr_bucket_shared_make(b, br, 0, 0);
- b->type = &h2_bucket_type_headers;
- b->length = 0;
-
- return b;
-}
-
-apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
- h2_headers *r)
-{
- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
- APR_BUCKET_INIT(b);
- b->free = apr_bucket_free;
- b->list = list;
- b = h2_bucket_headers_make(b, r);
- return b;
-}
-
-h2_headers *h2_bucket_headers_get(apr_bucket *b)
-{
- if (H2_BUCKET_IS_HEADERS(b)) {
- return ((h2_bucket_headers *)b->data)->headers;
- }
- return NULL;
-}
-
-const apr_bucket_type_t h2_bucket_type_headers = {
- "H2HEADERS", 5, APR_BUCKET_METADATA,
- apr_bucket_destroy_noop,
- bucket_read,
- apr_bucket_setaside_noop,
- apr_bucket_split_notimpl,
- apr_bucket_shared_copy
-};
-
-apr_bucket *h2_bucket_headers_clone(apr_bucket *src, apr_pool_t *p, apr_bucket_alloc_t *list)
-{
- h2_headers *src_headers;
-
- AP_DEBUG_ASSERT(H2_BUCKET_IS_HEADERS(src));
- src_headers = ((h2_bucket_headers *)src->data)->headers;
- return h2_bucket_headers_create(list, h2_headers_clone(p, src_headers));
-}
-
-
-h2_headers *h2_headers_create(int status, const apr_table_t *headers_in,
- const apr_table_t *notes, apr_off_t raw_bytes,
- apr_pool_t *pool)
-{
- h2_headers *headers = apr_pcalloc(pool, sizeof(h2_headers));
- headers->status = status;
- headers->headers = (headers_in? apr_table_clone(pool, headers_in)
- : apr_table_make(pool, 5));
- headers->notes = (notes? apr_table_clone(pool, notes)
- : apr_table_make(pool, 5));
- return headers;
-}
-
-static int add_header_lengths(void *ctx, const char *name, const char *value)
-{
- apr_size_t *plen = ctx;
- *plen += strlen(name) + strlen(value);
- return 1;
-}
-
-apr_size_t h2_headers_length(h2_headers *headers)
-{
- apr_size_t len = 0;
- apr_table_do(add_header_lengths, &len, headers->headers, NULL);
- return len;
-}
-
-apr_size_t h2_bucket_headers_headers_length(apr_bucket *b)
-{
- h2_headers *h = h2_bucket_headers_get(b);
- return h? h2_headers_length(h) : 0;
-}
-
-h2_headers *h2_headers_rcreate(request_rec *r, int status,
- const apr_table_t *header, apr_pool_t *pool)
-{
- h2_headers *headers = h2_headers_create(status, header, r->notes, 0, pool);
- if (headers->status == HTTP_FORBIDDEN) {
- request_rec *r_prev;
- for (r_prev = r; r_prev != NULL; r_prev = r_prev->prev) {
- const char *cause = apr_table_get(r_prev->notes, "ssl-renegotiate-forbidden");
- if (cause) {
- /* This request triggered a TLS renegotiation that is not allowed
- * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
- */
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, headers->status, r,
- APLOGNO(03061)
- "h2_headers(%ld): renegotiate forbidden, cause: %s",
- (long)r->connection->id, cause);
- headers->status = H2_ERR_HTTP_1_1_REQUIRED;
- break;
- }
- }
- }
- if (is_unsafe(r->server)) {
- apr_table_setn(headers->notes, H2_HDR_CONFORMANCE, H2_HDR_CONFORMANCE_UNSAFE);
- }
- if (h2_config_rgeti(r, H2_CONF_PUSH) == 0 && h2_config_sgeti(r->server, H2_CONF_PUSH) != 0) {
- apr_table_setn(headers->notes, H2_PUSH_MODE_NOTE, "0");
- }
- return headers;
-}
-
-h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h)
-{
- return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
-}
-
-h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h)
-{
- return h2_headers_create(h->status, h->headers, h->notes, h->raw_bytes, pool);
-}
-
-h2_headers *h2_headers_die(apr_status_t type,
- const h2_request *req, apr_pool_t *pool)
-{
- h2_headers *headers;
- char *date;
-
- headers = apr_pcalloc(pool, sizeof(h2_headers));
- headers->status = (type >= 200 && type < 600)? type : 500;
- headers->headers = apr_table_make(pool, 5);
- headers->notes = apr_table_make(pool, 5);
-
- date = apr_palloc(pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, req? req->request_time : apr_time_now());
- apr_table_setn(headers->headers, "Date", date);
- apr_table_setn(headers->headers, "Server", ap_get_server_banner());
-
- return headers;
-}
-
-int h2_headers_are_final_response(h2_headers *headers)
-{
- return headers->status >= 200;
-}
-
diff --git a/modules/http2/h2_headers.h b/modules/http2/h2_headers.h
deleted file mode 100644
index 61ba17848d..0000000000
--- a/modules/http2/h2_headers.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_headers__
-#define __mod_h2__h2_headers__
-
-#include "h2.h"
-
-struct h2_bucket_beam;
-
-extern const apr_bucket_type_t h2_bucket_type_headers;
-
-#define H2_BUCKET_IS_HEADERS(e) (e->type == &h2_bucket_type_headers)
-
-apr_bucket * h2_bucket_headers_make(apr_bucket *b, h2_headers *r);
-
-apr_bucket * h2_bucket_headers_create(apr_bucket_alloc_t *list,
- h2_headers *r);
-
-h2_headers *h2_bucket_headers_get(apr_bucket *b);
-
-apr_bucket *h2_bucket_headers_clone(apr_bucket *src,
- apr_pool_t *p,
- apr_bucket_alloc_t *list);
-
-/**
- * Create the headers from the given status and headers
- * @param status the headers status
- * @param header the headers of the headers
- * @param notes the notes carried by the headers
- * @param raw_bytes the raw network bytes (if known) used to transmit these
- * @param pool the memory pool to use
- */
-h2_headers *h2_headers_create(int status, const apr_table_t *header,
- const apr_table_t *notes, apr_off_t raw_bytes,
- apr_pool_t *pool);
-
-/**
- * Create the headers from the given request_rec.
- * @param r the request record which was processed
- * @param status the headers status
- * @param header the headers of the headers
- * @param pool the memory pool to use
- */
-h2_headers *h2_headers_rcreate(request_rec *r, int status,
- const apr_table_t *header, apr_pool_t *pool);
-
-/**
- * Copy the headers into another pool. This will not copy any
- * header strings.
- */
-h2_headers *h2_headers_copy(apr_pool_t *pool, h2_headers *h);
-
-/**
- * Clone the headers into another pool. This will also clone any
- * header strings.
- */
-h2_headers *h2_headers_clone(apr_pool_t *pool, h2_headers *h);
-
-/**
- * Create the headers for the given error.
- * @param type the error code
- * @param req the original h2_request
- * @param pool the memory pool to use
- */
-h2_headers *h2_headers_die(apr_status_t type,
- const struct h2_request *req, apr_pool_t *pool);
-
-int h2_headers_are_final_response(h2_headers *headers);
-
-/**
- * Give the number of bytes of all contained header strings.
- */
-apr_size_t h2_headers_length(h2_headers *headers);
-
-/**
- * For H2HEADER buckets, return the length of all contained header strings.
- * For all other buckets, return 0.
- */
-apr_size_t h2_bucket_headers_headers_length(apr_bucket *b);
-
-#endif /* defined(__mod_h2__h2_headers__) */
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index 37a7a9339c..7684222a0c 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -28,6 +28,7 @@
#include <http_core.h>
#include <http_connection.h>
#include <http_log.h>
+#include <http_protocol.h>
#include <mpm_common.h>
diff --git a/modules/http2/h2_protocol.c b/modules/http2/h2_protocol.c
index 874753e498..a2861cabd3 100644
--- a/modules/http2/h2_protocol.c
+++ b/modules/http2/h2_protocol.c
@@ -39,7 +39,6 @@
#include "h2_conn_ctx.h"
#include "h2_c1.h"
#include "h2_request.h"
-#include "h2_headers.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_protocol.h"
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index 805a217d25..eaabffd3f1 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -29,13 +29,13 @@
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
+#include <http_protocol.h>
#include "h2_private.h"
#include "h2_protocol.h"
#include "h2_util.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
@@ -433,7 +433,7 @@ static int head_iter(void *ctx, const char *key, const char *value)
}
apr_array_header_t *h2_push_collect(apr_pool_t *p, const h2_request *req,
- apr_uint32_t push_policy, const h2_headers *res)
+ apr_uint32_t push_policy, const ap_bucket_response *res)
{
if (req && push_policy != H2_PUSH_NONE) {
/* Collect push candidates from the request/response pair.
@@ -675,9 +675,9 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t
return npushes;
}
-apr_array_header_t *h2_push_collect_update(h2_stream *stream,
- const struct h2_request *req,
- const struct h2_headers *res)
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const ap_bucket_response *res)
{
apr_array_header_t *pushes;
diff --git a/modules/http2/h2_push.h b/modules/http2/h2_push.h
index 5dc189f007..008a74191b 100644
--- a/modules/http2/h2_push.h
+++ b/modules/http2/h2_push.h
@@ -20,7 +20,6 @@
#include "h2.h"
struct h2_request;
-struct h2_headers;
struct h2_ngheader;
struct h2_session;
struct h2_stream;
@@ -100,7 +99,7 @@ struct h2_push_diary {
apr_array_header_t *h2_push_collect(apr_pool_t *p,
const struct h2_request *req,
apr_uint32_t push_policy,
- const struct h2_headers *res);
+ const ap_bucket_response *res);
/**
* Create a new push diary for the given maximum number of entries.
@@ -121,9 +120,9 @@ apr_array_header_t *h2_push_diary_update(struct h2_session *session, apr_array_h
* Collect pushes for the given request/response pair, enter them into the
* diary and return those pushes newly entered.
*/
-apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
- const struct h2_request *req,
- const struct h2_headers *res);
+apr_array_header_t *h2_push_collect_update(struct h2_stream *stream,
+ const struct h2_request *req,
+ const ap_bucket_response *res);
/**
* Get a cache digest as described in
* https://datatracker.ietf.org/doc/draft-kazuho-h2-cache-digest/
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index 12090c0e7e..273e13f708 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -26,6 +26,7 @@
#include <http_core.h>
#include <http_config.h>
#include <http_log.h>
+#include <http_protocol.h>
#include <scoreboard.h>
#include <mpm_common.h>
@@ -40,7 +41,6 @@
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_headers.h"
#include "h2_stream.h"
#include "h2_c2.h"
#include "h2_session.h"
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 71c005ff2f..9b879e923a 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -26,6 +26,7 @@
#include <http_core.h>
#include <http_connection.h>
#include <http_log.h>
+#include <http_protocol.h>
#include <http_ssl.h>
#include <nghttp2/nghttp2.h>
@@ -39,7 +40,6 @@
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
-#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_c2.h"
@@ -243,15 +243,12 @@ static apr_status_t close_input(h2_stream *stream)
if (!stream->rst_error
&& stream->trailers_in
&& !apr_is_empty_table(stream->trailers_in)) {
- h2_headers *r;
-
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "adding trailers"));
- r = h2_headers_create(HTTP_OK, stream->trailers_in, NULL,
- stream->in_trailer_octets, stream->pool);
- stream->trailers_in = NULL;
- b = h2_bucket_headers_create(c->bucket_alloc, r);
+ b = ap_bucket_headers_create(stream->trailers_in,
+ stream->pool, c->bucket_alloc);
input_append_bucket(stream, b);
+ stream->trailers_in = NULL;
}
stream->input_closed = 1;
@@ -865,12 +862,12 @@ cleanup:
return status;
}
-static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
+static apr_bucket *get_first_response_bucket(apr_bucket_brigade *bb)
{
if (bb) {
apr_bucket *b = APR_BRIGADE_FIRST(bb);
while (b != APR_BRIGADE_SENTINEL(bb)) {
- if (H2_BUCKET_IS_HEADERS(b)) {
+ if (AP_BUCKET_IS_RESPONSE(b)) {
return b;
}
b = APR_BUCKET_NEXT(b);
@@ -949,7 +946,9 @@ cleanup:
static int bucket_pass_to_c1(apr_bucket *b)
{
- return !H2_BUCKET_IS_HEADERS(b) && !APR_BUCKET_IS_EOS(b);
+ return !AP_BUCKET_IS_RESPONSE(b)
+ && !AP_BUCKET_IS_HEADERS(b)
+ && !APR_BUCKET_IS_EOS(b);
}
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
@@ -970,7 +969,8 @@ apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
static apr_status_t buffer_output_process_headers(h2_stream *stream)
{
conn_rec *c1 = stream->session->c1;
- h2_headers *headers = NULL;
+ ap_bucket_response *resp = NULL;
+ ap_bucket_headers *headers = NULL;
apr_status_t rv = APR_EAGAIN;
int ngrv = 0, is_empty;
h2_ngheader *nh = NULL;
@@ -982,13 +982,22 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
e = APR_BUCKET_NEXT(b);
if (APR_BUCKET_IS_METADATA(b)) {
- if (H2_BUCKET_IS_HEADERS(b)) {
- headers = h2_bucket_headers_get(b);
+ if (AP_BUCKET_IS_RESPONSE(b)) {
+ resp = b->data;
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "process response %d"),
+ resp->status);
+ b = e;
+ break;
+ }
+ else if (AP_BUCKET_IS_HEADERS(b)) {
+ headers = b->data;
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
- H2_STRM_MSG(stream, "process headers, response %d"),
- headers->status);
+ H2_STRM_MSG(stream, "process headers"));
b = e;
break;
}
@@ -1004,32 +1013,32 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
}
b = e;
}
- if (!headers) goto cleanup;
- if (stream->response) {
- rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
- H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
- (int)nh->nvlen);
- if (APR_SUCCESS != rv) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
- H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
- h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ if (resp) {
+ nghttp2_data_provider provider, *pprovider = NULL;
+
+ if (resp->status < 100) {
+ h2_stream_rst(stream, resp->status);
goto cleanup;
}
- ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
- }
- else if (headers->status < 100) {
- h2_stream_rst(stream, headers->status);
- goto cleanup;
- }
- else {
- nghttp2_data_provider provider, *pprovider = NULL;
+ if (resp->status == HTTP_FORBIDDEN && resp->notes) {
+ const char *cause = apr_table_get(resp->notes, "ssl-renegotiate-forbidden");
+ if (cause) {
+ /* This request triggered a TLS renegotiation that is not allowed
+ * in HTTP/2. Tell the client that it should use HTTP/1.1 for this.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, resp->status, c1,
+ H2_STRM_LOG(APLOGNO(03061), stream,
+ "renegotiate forbidden, cause: %s"), cause);
+ h2_stream_rst(stream, H2_ERR_HTTP_1_1_REQUIRED);
+ goto cleanup;
+ }
+ }
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
H2_STRM_LOG(APLOGNO(03073), stream,
- "submit response %d"), headers->status);
+ "submit response %d"), resp->status);
/* If this stream is not a pushed one itself,
* and HTTP/2 server push is enabled here,
@@ -1038,7 +1047,7 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
* -> find and perform any pushes on this stream
* *before* we submit the stream response itself.
* This helps clients avoid opening new streams on Link
- * headers that get pushed right afterwards.
+ * resources that get pushed right afterwards.
*
* *) the response code is relevant, as we do not want to
* make pushes on 401 or 403 codes and friends.
@@ -1050,31 +1059,31 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
&& !stream->response
&& stream->request && stream->request->method
&& !strcmp("GET", stream->request->method)
- && (headers->status < 400)
- && (headers->status != 304)
+ && (resp->status < 400)
+ && (resp->status != 304)
&& h2_session_push_enabled(stream->session)) {
/* PUSH is possible and enabled on server, unless the request
* denies it, submit resources to push */
- const char *s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
+ const char *s = apr_table_get(resp->notes, H2_PUSH_MODE_NOTE);
if (!s || strcmp(s, "0")) {
- h2_stream_submit_pushes(stream, headers);
+ h2_stream_submit_pushes(stream, resp);
}
}
if (!stream->pref_priority) {
- stream->pref_priority = h2_stream_get_priority(stream, headers);
+ stream->pref_priority = h2_stream_get_priority(stream, resp);
}
h2_session_set_prio(stream->session, stream, stream->pref_priority);
- if (headers->status == 103
+ if (resp->status == 103
&& !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
/* suppress sending this to the client, it might have triggered
* pushes and served its purpose nevertheless */
rv = APR_SUCCESS;
goto cleanup;
}
- if (h2_headers_are_final_response(headers)) {
- stream->response = headers;
+ if (resp->status >= 200) {
+ stream->response = resp;
}
/* Do we know if this stream has no response body? */
@@ -1099,7 +1108,7 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
pprovider = &provider;
}
- rv = h2_res_create_ngheader(&nh, stream->pool, headers);
+ rv = h2_res_create_ngheader(&nh, stream->pool, resp);
if (APR_SUCCESS != rv) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
@@ -1115,6 +1124,25 @@ static apr_status_t buffer_output_process_headers(h2_stream *stream)
++stream->session->responses_submitted;
}
}
+ else if (headers) {
+ if (!stream->response) {
+ h2_stream_rst(stream, HTTP_INTERNAL_SERVER_ERROR);
+ goto cleanup;
+ }
+ rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+ (int)nh->nvlen);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ goto cleanup;
+ }
+
+ ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
+ stream->sent_trailers = 1;
+ }
cleanup:
if (nghttp2_is_fatal(ngrv)) {
@@ -1128,7 +1156,7 @@ cleanup:
return rv;
}
-apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response)
{
apr_status_t status = APR_SUCCESS;
apr_array_header_t *pushes;
@@ -1157,7 +1185,7 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream)
}
const h2_priority *h2_stream_get_priority(h2_stream *stream,
- h2_headers *response)
+ ap_bucket_response *response)
{
if (response && stream->initiated_on) {
const char *ctype = apr_table_get(response->headers, "content-type");
@@ -1175,7 +1203,7 @@ int h2_stream_is_ready(h2_stream *stream)
if (stream->response) {
return 1;
}
- else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) {
+ else if (stream->out_buffer && get_first_response_bucket(stream->out_buffer)) {
return 1;
}
return 0;
@@ -1268,7 +1296,10 @@ static apr_off_t buffer_output_data_to_send(h2_stream *stream, int *peos)
*peos = 1;
break;
}
- else if (H2_BUCKET_IS_HEADERS(b)) {
+ else if (AP_BUCKET_IS_RESPONSE(b)) {
+ break;
+ }
+ else if (AP_BUCKET_IS_HEADERS(b)) {
break;
}
}
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index 0b1c9616b6..c9683c57b2 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -26,18 +26,12 @@
* connection to the client. The h2_session writes to the h2_stream,
* adding HEADERS and DATA and finally an EOS. When headers are done,
* h2_stream is scheduled for handling, which is expected to produce
- * a response h2_headers at least.
- *
- * The h2_headers may be followed by more h2_headers (interim responses) and
- * by DATA frames read from the h2_stream until EOS is reached. Trailers
- * are send when a last h2_headers is received. This always closes the stream
- * output.
+ * RESPONSE buckets.
*/
struct h2_mplx;
struct h2_priority;
struct h2_request;
-struct h2_headers;
struct h2_session;
struct h2_bucket_beam;
@@ -75,7 +69,7 @@ struct h2_stream {
apr_table_t *trailers_in; /* optional, incoming trailers */
int request_headers_added; /* number of request headers added */
- struct h2_headers *response; /* the final, non-interim response or NULL */
+ ap_bucket_response *response; /* the final, non-interim response or NULL */
struct h2_bucket_beam *input;
apr_bucket_brigade *in_buffer;
@@ -84,6 +78,8 @@ struct h2_stream {
struct h2_bucket_beam *output;
apr_bucket_brigade *out_buffer;
+ unsigned int output_eos : 1; /* output EOS in buffer/sent */
+ unsigned int sent_trailers : 1; /* trailers have been submitted */
int rst_error; /* stream error for RST_STREAM */
unsigned int aborted : 1; /* was aborted */
@@ -268,13 +264,13 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream);
*
* @param stream the stream for which to submit
*/
-apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response);
+apr_status_t h2_stream_submit_pushes(h2_stream *stream, ap_bucket_response *response);
/**
* Get priority information set for this stream.
*/
const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
- h2_headers *response);
+ ap_bucket_response *response);
/**
* Return a textual representation of the stream state as in RFC 7540
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index aa2b7cd6b4..ef0062885e 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -22,6 +22,7 @@
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
+#include <http_protocol.h>
#include <http_request.h>
#include <nghttp2/nghttp2.h>
@@ -1620,30 +1621,30 @@ static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
return ctx.status;
}
-static int is_unsafe(h2_headers *h)
+static int is_unsafe(ap_bucket_response *h)
{
- const char *v = apr_table_get(h->notes, H2_HDR_CONFORMANCE);
+ const char *v = h->notes? apr_table_get(h->notes, H2_HDR_CONFORMANCE) : NULL;
return (v && !strcmp(v, H2_HDR_CONFORMANCE_UNSAFE));
}
-apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
- h2_headers *headers)
+apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_headers *headers)
{
- return ngheader_create(ph, p, is_unsafe(headers),
+ return ngheader_create(ph, p, 0,
0, NULL, NULL, headers->headers);
}
apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
- h2_headers *headers)
+ ap_bucket_response *response)
{
const char *keys[] = {
":status"
};
const char *values[] = {
- apr_psprintf(p, "%d", headers->status)
+ apr_psprintf(p, "%d", response->status)
};
- return ngheader_create(ph, p, is_unsafe(headers),
- H2_ALEN(keys), keys, values, headers->headers);
+ return ngheader_create(ph, p, is_unsafe(response),
+ H2_ALEN(keys), keys, values, response->headers);
}
apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
@@ -1942,3 +1943,24 @@ apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe)
return apr_file_read(pipe, rb, &nr);
}
+
+static int add_header_lengths(void *ctx, const char *name, const char *value)
+{
+ apr_size_t *plen = ctx;
+ *plen += strlen(name) + strlen(value);
+ return 1;
+}
+
+apr_size_t headers_length_estimate(ap_bucket_headers *hdrs)
+{
+ apr_size_t len = 0;
+ apr_table_do(add_header_lengths, &len, hdrs->headers, NULL);
+ return len;
+}
+
+apr_size_t response_length_estimate(ap_bucket_response *resp)
+{
+ apr_size_t len = 3 + 1 + 8 + (resp->reason? strlen(resp->reason) : 10);
+ apr_table_do(add_header_lengths, &len, resp->headers, NULL);
+ return len;
+}
\ No newline at end of file
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
index 4b8264d4ee..8ef944c8e0 100644
--- a/modules/http2/h2_util.h
+++ b/modules/http2/h2_util.h
@@ -395,17 +395,15 @@ const char *h2_util_base64url_encode(const char *data,
int h2_util_ignore_header(const char *name);
-struct h2_headers;
-
typedef struct h2_ngheader {
nghttp2_nv *nv;
apr_size_t nvlen;
} h2_ngheader;
apr_status_t h2_res_create_ngtrailer(h2_ngheader **ph, apr_pool_t *p,
- struct h2_headers *headers);
-apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
- struct h2_headers *headers);
+ ap_bucket_headers *headers);
+apr_status_t h2_res_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
+ ap_bucket_response *response);
apr_status_t h2_req_create_ngheader(h2_ngheader **ph, apr_pool_t *p,
const struct h2_request *req);
@@ -530,4 +528,16 @@ void h2_util_drain_pipe(apr_file_t *pipe);
*/
apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe);
+/**
+ * Give an estimate of the length of the header fields,
+ * without compression or other formatting decorations.
+ */
+apr_size_t headers_length_estimate(ap_bucket_headers *hdrs);
+
+/**
+ * Give an estimate of the length of the response meta data size,
+ * without compression or other formatting decorations.
+ */
+apr_size_t response_length_estimate(ap_bucket_response *resp);
+
#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index 14590f4902..774122b9e4 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -23,6 +23,7 @@
#include <httpd.h>
#include <http_core.h>
#include <http_log.h>
+#include <http_protocol.h>
#include "h2.h"
#include "h2_private.h"
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index d9ff22203a..9fa2c7aa34 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -133,10 +133,6 @@ SOURCE=./h2_conn_ctx.c
# End Source File
# Begin Source File
-SOURCE=./h2_headers.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_mplx.c
# End Source File
# Begin Source File
diff --git a/test/modules/http2/test_200_header_invalid.py b/test/modules/http2/test_200_header_invalid.py
index b09a5a6ecb..5f8c976808 100644
--- a/test/modules/http2/test_200_header_invalid.py
+++ b/test/modules/http2/test_200_header_invalid.py
@@ -12,24 +12,22 @@ class TestInvalidHeaders:
assert env.apache_restart() == 0
# let the hecho.py CGI echo chars < 0x20 in field name
- # for almost all such characters, the stream gets aborted with a h2 error and
- # there will be no http status, cr and lf are handled special
+ # for almost all such characters, the stream returns a 500
+ # cr is handled special
def test_h2_200_01(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
r = env.curl_post_data(url, "name=x%%%02xx&value=yz" % x)
- if x in [10]:
- assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
- assert 500 == r.response["status"], "unexpected status for char 0x%02x" % x
- elif x in [13]:
+ if x in [13]:
assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
assert 200 == r.response["status"], "unexpected status for char 0x%02x" % x
else:
- assert 0 != r.exit_code, "unexpected exit code for char 0x%02x" % x
+ assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
+ assert 500 == r.response["status"], "unexpected status for char 0x%02x" % x
# let the hecho.py CGI echo chars < 0x20 in field value
- # for almost all such characters, the stream gets aborted with a h2 error and
- # there will be no http status, cr and lf are handled special
+ # for almost all such characters, the stream returns a 500
+ # cr and lf are handled special
def test_h2_200_02(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for x in range(1, 32):
@@ -39,17 +37,20 @@ class TestInvalidHeaders:
assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
assert 200 == r.response["status"], "unexpected status for char 0x%02x" % x
else:
- assert 0 != r.exit_code, "unexpected exit code for char 0x%02x" % x
+ assert 0 == r.exit_code, "unexpected exit code for char 0x%02x" % x
+ assert 500 == r.response["status"], "unexpected status for char 0x%02x" % x
# let the hecho.py CGI echo 0x10 and 0x7f in field name and value
def test_h2_200_03(self, env):
url = env.mkurl("https", "cgi", "/hecho.py")
for h in ["10", "7f"]:
r = env.curl_post_data(url, "name=x%%%s&value=yz" % h)
- assert 0 != r.exit_code
+ assert 0 == r.exit_code, "unexpected exit code for char 0x%s" % h
+ assert 500 == r.response["status"], "unexpected status for char 0x%s" % h
r = env.curl_post_data(url, "name=x&value=y%%%sz" % h)
- assert 0 != r.exit_code
-
+ assert 0 == r.exit_code, "unexpected exit code for char 0x%s" % h
+ assert 500 == r.response["status"], "unexpected status for char 0x%s" % h
+
# test header field lengths check, LimitRequestLine (default 8190)
def test_h2_200_10(self, env):
url = env.mkurl("https", "cgi", "/")