author     Stefan Eissing <icing@apache.org>    2021-10-12 15:34:01 +0200
committer  Stefan Eissing <icing@apache.org>    2021-10-12 15:34:01 +0200
commit     6a355db082d07a2b71a372e328cfda8fc7d27907
tree       a476e628e10d09f3c2edfdd96c423465f5f661fa
parent     taking numbers for modules/http2 changes
*) mod_http2:
  - Fixed an issue, introduced in 1.15.24, where "Server" headers in proxied
    requests were overwritten instead of preserved. [PR by @daum3ns]
  - Added directive 'H2StreamTimeout' to configure a separate value for HTTP/2
    streams, overriding the server's 'Timeout' configuration. [rpluem]
  - HTTP/2 connections now use pollsets to monitor the status of ongoing
    streams and their main connection, when the host OS allows this.
  - Removed work-arounds for older versions of libnghttp2; configure now
    checks that at least version 1.15.0 is present.
  - The HTTP/2 connection state handler, based on an experiment and draft
    at the IETF http working group (abandoned for some time), has been removed.
  - H2SerializeHeaders no longer has an effect. A warning is logged when it is
    set to "on". The switch caused requests to be written internally in
    HTTP/1.1 format and parsed by the HTTP/1.1 protocol handler; it was
    introduced to avoid potential incompatibilities during the introduction
    of HTTP/2.
  - Removed the abort/redo of tasks when mood swings lower the active limit.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1894163 13f79535-47bb-0310-9956-ffa450edef68
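The pollset change above relies on APR being able to poll plain file descriptors
(pipes), which the new H2_POLL_STREAMS define in h2.h guards with
APR_FILES_AS_SOCKETS (see the h2.h hunk below). As a minimal, hypothetical
sketch of that underlying APR pattern (not mod_http2 code itself): the helper
below, with the invented name wait_for_pipe_data, waits for data on a pipe via
an apr_pollset_t.

    #include <string.h>
    #include <apr_pools.h>
    #include <apr_poll.h>
    #include <apr_file_io.h>

    /* Sketch: block until a pipe becomes readable, using an APR pollset.
     * Polling a file descriptor like this only works on platforms where
     * APR_FILES_AS_SOCKETS is non-zero, which is the condition mod_http2
     * tests via H2_POLL_STREAMS. */
    apr_status_t wait_for_pipe_data(apr_file_t *pipe_in,
                                    apr_interval_time_t timeout,
                                    apr_pool_t *pool)
    {
        apr_pollset_t *pollset;
        apr_pollfd_t pfd;
        const apr_pollfd_t *results;
        apr_int32_t nresults;
        apr_status_t rv;

        rv = apr_pollset_create(&pollset, 1, pool, 0);
        if (rv != APR_SUCCESS) return rv;

        memset(&pfd, 0, sizeof(pfd));
        pfd.p = pool;
        pfd.desc_type = APR_POLL_FILE;   /* a pipe, not a socket */
        pfd.reqevents = APR_POLLIN;
        pfd.desc.f = pipe_in;
        rv = apr_pollset_add(pollset, &pfd);
        if (rv != APR_SUCCESS) return rv;

        /* returns when the pipe has data or the timeout expires */
        return apr_pollset_poll(pollset, timeout, &nresults, &results);
    }

This is the facility the changelog entry refers to: the session can add the
pipes of its ongoing streams and its main connection to one pollset and wait
on all of them together.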
-rw-r--r--  CMakeLists.txt | 21
-rw-r--r--  changes-entries/http2_additions.txt | 17
-rw-r--r--  modules/http2/NWGNUmod_http2 | 40
-rw-r--r--  modules/http2/config2.m4 | 14
-rw-r--r--  modules/http2/h2.h | 30
-rw-r--r--  modules/http2/h2_alt_svc.c | 132
-rw-r--r--  modules/http2/h2_alt_svc.h | 40
-rw-r--r--  modules/http2/h2_bucket_beam.c | 1280
-rw-r--r--  modules/http2/h2_bucket_beam.h | 338
-rw-r--r--  modules/http2/h2_c1.c | 328
-rw-r--r--  modules/http2/h2_c1.h (renamed from modules/http2/h2_conn.h) | 70
-rw-r--r--  modules/http2/h2_c1_io.c (renamed from modules/http2/h2_conn_io.c) | 320
-rw-r--r--  modules/http2/h2_c1_io.h (renamed from modules/http2/h2_conn_io.h) | 46
-rw-r--r--  modules/http2/h2_c2.c | 729
-rw-r--r--  modules/http2/h2_c2.h | 61
-rw-r--r--  modules/http2/h2_c2_filter.c (renamed from modules/http2/h2_from_h1.c) | 379
-rw-r--r--  modules/http2/h2_c2_filter.h (renamed from modules/http2/h2_from_h1.h) | 17
-rw-r--r--  modules/http2/h2_config.c | 198
-rw-r--r--  modules/http2/h2_config.h | 5
-rw-r--r--  modules/http2/h2_conn.c | 402
-rw-r--r--  modules/http2/h2_conn_ctx.c | 147
-rw-r--r--  modules/http2/h2_conn_ctx.h | 97
-rw-r--r--  modules/http2/h2_ctx.c | 106
-rw-r--r--  modules/http2/h2_ctx.h | 75
-rw-r--r--  modules/http2/h2_filter.c | 613
-rw-r--r--  modules/http2/h2_filter.h | 73
-rw-r--r--  modules/http2/h2_headers.c | 2
-rw-r--r--  modules/http2/h2_mplx.c | 1551
-rw-r--r--  modules/http2/h2_mplx.h | 205
-rw-r--r--  modules/http2/h2_protocol.c (renamed from modules/http2/h2_h2.c) | 302
-rw-r--r--  modules/http2/h2_protocol.h (renamed from modules/http2/h2_h2.h) | 39
-rw-r--r--  modules/http2/h2_proxy_session.c | 15
-rw-r--r--  modules/http2/h2_proxy_util.c | 10
-rw-r--r--  modules/http2/h2_proxy_util.h | 5
-rw-r--r--  modules/http2/h2_push.c | 11
-rw-r--r--  modules/http2/h2_request.c | 23
-rw-r--r--  modules/http2/h2_request.h | 8
-rw-r--r--  modules/http2/h2_session.c | 1470
-rw-r--r--  modules/http2/h2_session.h | 66
-rw-r--r--  modules/http2/h2_stream.c | 848
-rw-r--r--  modules/http2/h2_stream.h | 44
-rw-r--r--  modules/http2/h2_switch.c | 38
-rw-r--r--  modules/http2/h2_task.c | 725
-rw-r--r--  modules/http2/h2_task.h | 122
-rw-r--r--  modules/http2/h2_util.c | 156
-rw-r--r--  modules/http2/h2_util.h | 66
-rw-r--r--  modules/http2/h2_version.h | 4
-rw-r--r--  modules/http2/h2_workers.c | 62
-rw-r--r--  modules/http2/h2_workers.h | 15
-rw-r--r--  modules/http2/mod_http2.c | 96
-rw-r--r--  modules/http2/mod_http2.dsp | 30
-rw-r--r--  modules/http2/mod_proxy_http2.c | 6
-rw-r--r--  test/modules/http2/test_105_timeout.py | 2
-rw-r--r--  test/modules/http2/test_712_buffering.py | 2
54 files changed, 4803 insertions, 6698 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b8259109fb..71f992cfbb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -469,18 +469,15 @@ SET(mod_http2_extra_defines ssize_t=long)
SET(mod_http2_extra_includes ${NGHTTP2_INCLUDE_DIR})
SET(mod_http2_extra_libs ${NGHTTP2_LIBRARIES})
SET(mod_http2_extra_sources
- modules/http2/h2_alt_svc.c
- modules/http2/h2_bucket_eos.c modules/http2/h2_config.c
- modules/http2/h2_conn.c modules/http2/h2_conn_io.c
- modules/http2/h2_ctx.c modules/http2/h2_filter.c
- modules/http2/h2_from_h1.c modules/http2/h2_h2.c
- modules/http2/h2_bucket_beam.c
- modules/http2/h2_mplx.c modules/http2/h2_push.c
- modules/http2/h2_request.c modules/http2/h2_headers.c
- modules/http2/h2_session.c modules/http2/h2_stream.c
- modules/http2/h2_switch.c
- modules/http2/h2_task.c modules/http2/h2_util.c
- modules/http2/h2_workers.c
+ modules/http2/h2_bucket_beam.c modules/http2/h2_bucket_eos.c
+ modules/http2/h2_c1.c modules/http2/h2_c1_io.c
+ modules/http2/h2_c2.c modules/http2/h2_c2_filter.c
+ modules/http2/h2_config.c modules/http2/h2_conn_ctx.c
+ modules/http2/h2_headers.c modules/http2/h2_mplx.c
+ modules/http2/h2_protocol.c modules/http2/h2_push.c
+ modules/http2/h2_request.c modules/http2/h2_session.c
+ modules/http2/h2_stream.c modules/http2/h2_switch.c
+ modules/http2/h2_util.c modules/http2/h2_workers.c
)
SET(mod_ldap_extra_defines LDAP_DECLARE_EXPORT)
SET(mod_ldap_extra_libs wldap32)
diff --git a/changes-entries/http2_additions.txt b/changes-entries/http2_additions.txt
new file mode 100644
index 0000000000..b9db02c0e8
--- /dev/null
+++ b/changes-entries/http2_additions.txt
@@ -0,0 +1,17 @@
+ *) mod_http2:
+ - Fixed an issue since 1.15.24 that "Server" headers in proxied requests
+ were overwritten instead of preserved. [PR by @daum3ns]
+ - Added directive 'H2StreamTimeout' to configure a separate value for HTTP/2
+ streams, overriding server's 'Timeout' configuration. [rpluem]
+ - HTTP/2 connections now use pollsets to monitor the status of the
+ ongoing streams and their main connection when host OS allows this.
+ - Removed work-arounds for older versions of libnghttp2 and checking
+ during configure that at least version 1.15.0 is present.
+ - The HTTP/2 connection state handler, based on an experiment and draft
+ at the IETF http working group (abandoned for some time), has been removed.
+ - H2SerializeHeaders no longer has an effect. A warning is logged when it is
+ set to "on". The switch enabled the internal writing of requests to be parsed
+ by the internal HTTP/1.1 protocol handler and was introduced to avoid
+ potential incompatibilities during the introduction of HTTP/2.
+ - Removed the abort/redo of tasks when mood swings lower the active limit.
+ [Ruediger Pluem, daum3ns, Stefan Eissing]
\ No newline at end of file
diff --git a/modules/http2/NWGNUmod_http2 b/modules/http2/NWGNUmod_http2
index b1dfde734e..cdd6cba6a9 100644
--- a/modules/http2/NWGNUmod_http2
+++ b/modules/http2/NWGNUmod_http2
@@ -184,27 +184,25 @@ TARGET_lib = \
# Paths must all use the '/' character
#
FILES_nlm_objs = \
- $(OBJDIR)/h2_alt_svc.o \
- $(OBJDIR)/h2_bucket_beam.o \
- $(OBJDIR)/h2_bucket_eos.o \
- $(OBJDIR)/h2_config.o \
- $(OBJDIR)/h2_conn.o \
- $(OBJDIR)/h2_conn_io.o \
- $(OBJDIR)/h2_ctx.o \
- $(OBJDIR)/h2_filter.o \
- $(OBJDIR)/h2_from_h1.o \
- $(OBJDIR)/h2_h2.o \
- $(OBJDIR)/h2_mplx.o \
- $(OBJDIR)/h2_push.o \
- $(OBJDIR)/h2_request.o \
- $(OBJDIR)/h2_headers.o \
- $(OBJDIR)/h2_session.o \
- $(OBJDIR)/h2_stream.o \
- $(OBJDIR)/h2_switch.o \
- $(OBJDIR)/h2_task.o \
- $(OBJDIR)/h2_util.o \
- $(OBJDIR)/h2_workers.o \
- $(OBJDIR)/mod_http2.o \
+ $(OBJDIR)/h2_bucket_beam.lo \
+ $(OBJDIR)/h2_bucket_eos.lo \
+ $(OBJDIR)/h2_c1.lo \
+ $(OBJDIR)/h2_c1_io.lo \
+ $(OBJDIR)/h2_c2.lo \
+ $(OBJDIR)/h2_c2_filter.lo \
+ $(OBJDIR)/h2_config.lo \
+ $(OBJDIR)/h2_conn_ctx.lo \
+ $(OBJDIR)/h2_headers.lo \
+ $(OBJDIR)/h2_mplx.lo \
+ $(OBJDIR)/h2_protocol.lo \
+ $(OBJDIR)/h2_push.lo \
+ $(OBJDIR)/h2_request.lo \
+ $(OBJDIR)/h2_session.lo \
+ $(OBJDIR)/h2_stream.lo \
+ $(OBJDIR)/h2_switch.lo \
+ $(OBJDIR)/h2_util.lo \
+ $(OBJDIR)/h2_workers.lo \
+ $(OBJDIR)/mod_http2.lo \
$(EOLIST)
#
diff --git a/modules/http2/config2.m4 b/modules/http2/config2.m4
index 5f49adf1cb..a82051d0a2 100644
--- a/modules/http2/config2.m4
+++ b/modules/http2/config2.m4
@@ -19,24 +19,22 @@ APACHE_MODPATH_INIT(http2)
dnl # list of module object files
http2_objs="dnl
mod_http2.lo dnl
-h2_alt_svc.lo dnl
h2_bucket_beam.lo dnl
h2_bucket_eos.lo dnl
+h2_c1.lo dnl
+h2_c1_io.lo dnl
+h2_c2.lo dnl
+h2_c2_filter.lo dnl
h2_config.lo dnl
-h2_conn.lo dnl
-h2_conn_io.lo dnl
-h2_ctx.lo dnl
-h2_filter.lo dnl
-h2_from_h1.lo dnl
-h2_h2.lo dnl
+h2_conn_ctx.lo dnl
h2_headers.lo dnl
h2_mplx.lo dnl
+h2_protocol.lo dnl
h2_push.lo dnl
h2_request.lo dnl
h2_session.lo dnl
h2_stream.lo dnl
h2_switch.lo dnl
-h2_task.lo dnl
h2_util.lo dnl
h2_workers.lo dnl
"
diff --git a/modules/http2/h2.h b/modules/http2/h2.h
index 08f59c44f9..59d6a46d8e 100644
--- a/modules/http2/h2.h
+++ b/modules/http2/h2.h
@@ -17,6 +17,19 @@
#ifndef __mod_h2__h2__
#define __mod_h2__h2__
+struct h2_session;
+struct h2_stream;
+
+/*
+ * When apr pollsets can poll file descriptors (e.g. pipes),
+ * we use it for polling stream input/output.
+ */
+#ifdef H2_NO_POLL_STREAMS
+#define H2_POLL_STREAMS 0
+#else
+#define H2_POLL_STREAMS APR_FILES_AS_SOCKETS
+#endif
+
/**
* The magic PRIamble of RFC 7540 that is always sent when starting
* a h2 communication.
@@ -89,7 +102,7 @@ typedef enum {
H2_SESSION_ST_DONE, /* finished, connection close */
H2_SESSION_ST_IDLE, /* nothing to write, expecting data inc */
H2_SESSION_ST_BUSY, /* read/write without stop */
- H2_SESSION_ST_WAIT, /* waiting for tasks reporting back */
+ H2_SESSION_ST_WAIT, /* waiting for c1 incoming + c2s output */
H2_SESSION_ST_CLEANUP, /* pool is being cleaned up */
} h2_session_state;
@@ -120,7 +133,9 @@ typedef enum {
H2_SEV_CLOSED_R,
H2_SEV_CANCELLED,
H2_SEV_EOS_SENT,
+ H2_SEV_IN_ERROR,
H2_SEV_IN_DATA_PENDING,
+ H2_SEV_OUT_C1_BLOCK,
} h2_stream_event_t;
@@ -129,7 +144,6 @@ typedef enum {
* become a request_rec to be handled by soemone.
*/
typedef struct h2_request h2_request;
-
struct h2_request {
const char *method; /* pseudo header values, see ch. 8.1.2.3 */
const char *scheme;
@@ -138,8 +152,7 @@ struct h2_request {
apr_table_t *headers;
apr_time_t request_time;
- unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
- unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ int chunked; /* iff request body needs to be forwarded as chunked */
apr_off_t raw_bytes; /* RAW network bytes that generated this request - if known. */
int http_status; /* Store a possible HTTP status code that gets
* defined before creating the dummy HTTP/1.1
@@ -155,7 +168,6 @@ struct h2_request {
#define H2_HTTP_STATUS_UNSET (0)
typedef struct h2_headers h2_headers;
-
struct h2_headers {
int status;
apr_table_t *headers;
@@ -165,12 +177,10 @@ struct h2_headers {
typedef apr_status_t h2_io_data_cb(void *ctx, const char *data, apr_off_t len);
-typedef int h2_stream_pri_cmp(int stream_id1, int stream_id2, void *ctx);
-
-/* Note key to attach connection task id to conn_rec/request_rec instances */
+typedef int h2_stream_pri_cmp_fn(int stream_id1, int stream_id2, void *session);
+typedef struct h2_stream *h2_stream_get_fn(struct h2_session *session, int stream_id);
-#define H2_TASK_ID_NOTE "http2-task-id"
-#define H2_FILTER_DEBUG_NOTE "http2-debug"
+/* Note key to attach stream id to conn_rec/request_rec instances */
#define H2_HDR_CONFORMANCE "http2-hdr-conformance"
#define H2_HDR_CONFORMANCE_UNSAFE "unsafe"
#define H2_PUSH_MODE_NOTE "http2-push-mode"
diff --git a/modules/http2/h2_alt_svc.c b/modules/http2/h2_alt_svc.c
deleted file mode 100644
index 383e266b9d..0000000000
--- a/modules/http2/h2_alt_svc.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <apr_strings.h>
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_protocol.h>
-#include <http_ssl.h>
-#include <http_log.h>
-
-#include "h2_private.h"
-#include "h2_alt_svc.h"
-#include "h2_ctx.h"
-#include "h2_config.h"
-#include "h2_h2.h"
-#include "h2_util.h"
-
-static int h2_alt_svc_handler(request_rec *r);
-
-void h2_alt_svc_register_hooks(void)
-{
- ap_hook_post_read_request(h2_alt_svc_handler, NULL, NULL, APR_HOOK_MIDDLE);
-}
-
-/**
- * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
- * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
- * with the following changes:
- * - do not percent encode token values
- * - do not use quotation marks
- */
-h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool)
-{
- const char *sep = ap_strchr_c(s, '=');
- if (sep) {
- const char *alpn = apr_pstrmemdup(pool, s, (apr_size_t)(sep - s));
- const char *host = NULL;
- int port = 0;
- s = sep + 1;
- sep = ap_strchr_c(s, ':'); /* mandatory : */
- if (sep) {
- if (sep != s) { /* optional host */
- host = apr_pstrmemdup(pool, s, (apr_size_t)(sep - s));
- }
- s = sep + 1;
- if (*s) { /* must be a port number */
- port = (int)apr_atoi64(s);
- if (port > 0 && port < (0x1 << 16)) {
- h2_alt_svc *as = apr_pcalloc(pool, sizeof(*as));
- as->alpn = alpn;
- as->host = host;
- as->port = port;
- return as;
- }
- }
- }
- }
- return NULL;
-}
-
-#define h2_alt_svc_IDX(list, i) ((h2_alt_svc**)(list)->elts)[i]
-
-static int h2_alt_svc_handler(request_rec *r)
-{
- apr_array_header_t *alt_svcs;
- int i;
-
- if (r->connection->keepalives > 0) {
- /* Only announce Alt-Svc on the first response */
- return DECLINED;
- }
-
- if (h2_ctx_rget(r)) {
- return DECLINED;
- }
-
- alt_svcs = h2_config_alt_svcs(r);
- if (r->hostname && alt_svcs && alt_svcs->nelts > 0) {
- const char *alt_svc_used = apr_table_get(r->headers_in, "Alt-Svc-Used");
- if (!alt_svc_used) {
- /* We have alt-svcs defined and client is not already using
- * one, announce the services that were configured and match.
- * The security of this connection determines if we allow
- * other host names or ports only.
- */
- const char *alt_svc = "";
- const char *svc_ma = "";
- int secure = ap_ssl_conn_is_ssl(r->connection);
- int ma = h2_config_rgeti(r, H2_CONF_ALT_SVC_MAX_AGE);
- if (ma >= 0) {
- svc_ma = apr_psprintf(r->pool, "; ma=%d", ma);
- }
- ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03043)
- "h2_alt_svc: announce %s for %s:%d",
- (secure? "secure" : "insecure"),
- r->hostname, (int)r->server->port);
- for (i = 0; i < alt_svcs->nelts; ++i) {
- h2_alt_svc *as = h2_alt_svc_IDX(alt_svcs, i);
- const char *ahost = as->host;
- if (ahost && !apr_strnatcasecmp(ahost, r->hostname)) {
- ahost = NULL;
- }
- if (secure || !ahost) {
- alt_svc = apr_psprintf(r->pool, "%s%s%s=\"%s:%d\"%s",
- alt_svc,
- (*alt_svc? ", " : ""), as->alpn,
- ahost? ahost : "", as->port,
- svc_ma);
- }
- }
- if (*alt_svc) {
- apr_table_setn(r->headers_out, "Alt-Svc", alt_svc);
- }
- }
- }
-
- return DECLINED;
-}
diff --git a/modules/http2/h2_alt_svc.h b/modules/http2/h2_alt_svc.h
deleted file mode 100644
index 479e4d119a..0000000000
--- a/modules/http2/h2_alt_svc.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_alt_svc__
-#define __mod_h2__h2_alt_svc__
-
-typedef struct h2_alt_svc h2_alt_svc;
-
-struct h2_alt_svc {
- const char *alpn;
- const char *host;
- int port;
-};
-
-void h2_alt_svc_register_hooks(void);
-
-/**
- * Parse an Alt-Svc specifier as described in "HTTP Alternative Services"
- * (https://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-04)
- * with the following changes:
- * - do not percent encode token values
- * - do not use quotation marks
- */
-h2_alt_svc *h2_alt_svc_parse(const char *s, apr_pool_t *pool);
-
-
-#endif /* defined(__mod_h2__h2_alt_svc__) */
diff --git a/modules/http2/h2_bucket_beam.c b/modules/http2/h2_bucket_beam.c
index 3e09fcd017..fd1d061f61 100644
--- a/modules/http2/h2_bucket_beam.c
+++ b/modules/http2/h2_bucket_beam.c
@@ -27,133 +27,33 @@
#include <http_log.h>
#include "h2_private.h"
+#include "h2_conn_ctx.h"
#include "h2_util.h"
#include "h2_bucket_beam.h"
-static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy);
-#define H2_BPROXY_NEXT(e) APR_RING_NEXT((e), link)
-#define H2_BPROXY_PREV(e) APR_RING_PREV((e), link)
-#define H2_BPROXY_REMOVE(e) APR_RING_REMOVE((e), link)
-
-#define H2_BPROXY_LIST_INIT(b) APR_RING_INIT(&(b)->list, h2_beam_proxy, link);
-#define H2_BPROXY_LIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, h2_beam_proxy, link)
-#define H2_BPROXY_LIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, h2_beam_proxy, link)
-#define H2_BPROXY_LIST_FIRST(b) APR_RING_FIRST(&(b)->list)
-#define H2_BPROXY_LIST_LAST(b) APR_RING_LAST(&(b)->list)
-#define H2_PROXY_BLIST_INSERT_HEAD(b, e) do { \
- h2_beam_proxy *ap__b = (e); \
- APR_RING_INSERT_HEAD(&(b)->list, ap__b, h2_beam_proxy, link); \
+#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
+#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
+#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
+#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
+#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
+#define H2_BLIST_INSERT_HEAD(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
} while (0)
-#define H2_BPROXY_LIST_INSERT_TAIL(b, e) do { \
- h2_beam_proxy *ap__b = (e); \
- APR_RING_INSERT_TAIL(&(b)->list, ap__b, h2_beam_proxy, link); \
+#define H2_BLIST_INSERT_TAIL(b, e) do { \
+ apr_bucket *ap__b = (e); \
+ APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
} while (0)
-#define H2_BPROXY_LIST_CONCAT(a, b) do { \
- APR_RING_CONCAT(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+#define H2_BLIST_CONCAT(a, b) do { \
+ APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
} while (0)
-#define H2_BPROXY_LIST_PREPEND(a, b) do { \
- APR_RING_PREPEND(&(a)->list, &(b)->list, h2_beam_proxy, link); \
+#define H2_BLIST_PREPEND(a, b) do { \
+ APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
} while (0)
-/*******************************************************************************
- * beam bucket with reference to beam and bucket it represents
- ******************************************************************************/
-
-const apr_bucket_type_t h2_bucket_type_beam;
-
-#define H2_BUCKET_IS_BEAM(e) (e->type == &h2_bucket_type_beam)
-
-struct h2_beam_proxy {
- apr_bucket_refcount refcount;
- APR_RING_ENTRY(h2_beam_proxy) link;
- h2_bucket_beam *beam;
- apr_bucket *bsender;
- apr_size_t n;
-};
-
-static const char Dummy = '\0';
-
-static apr_status_t beam_bucket_read(apr_bucket *b, const char **str,
- apr_size_t *len, apr_read_type_e block)
-{
- h2_beam_proxy *d = b->data;
- if (d->bsender) {
- const char *data;
- apr_status_t status = apr_bucket_read(d->bsender, &data, len, block);
- if (status == APR_SUCCESS) {
- *str = data + b->start;
- *len = b->length;
- }
- return status;
- }
- *str = &Dummy;
- *len = 0;
- return APR_ECONNRESET;
-}
-
-static void beam_bucket_destroy(void *data)
-{
- h2_beam_proxy *d = data;
-
- if (apr_bucket_shared_destroy(d)) {
- /* When the beam gets destroyed before this bucket, it will
- * NULLify its reference here. This is not protected by a mutex,
- * so it will not help with race conditions.
- * But it lets us shut down memory pool with circulare beam
- * references. */
- if (d->beam) {
- h2_beam_emitted(d->beam, d);
- }
- apr_bucket_free(d);
- }
-}
-
-static apr_bucket * h2_beam_bucket_make(apr_bucket *b,
- h2_bucket_beam *beam,
- apr_bucket *bsender, apr_size_t n)
-{
- h2_beam_proxy *d;
-
- d = apr_bucket_alloc(sizeof(*d), b->list);
- H2_BPROXY_LIST_INSERT_TAIL(&beam->proxies, d);
- d->beam = beam;
- d->bsender = bsender;
- d->n = n;
-
- b = apr_bucket_shared_make(b, d, 0, bsender? bsender->length : 0);
- b->type = &h2_bucket_type_beam;
-
- return b;
-}
-
-static apr_bucket *h2_beam_bucket_create(h2_bucket_beam *beam,
- apr_bucket *bsender,
- apr_bucket_alloc_t *list,
- apr_size_t n)
-{
- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
- APR_BUCKET_INIT(b);
- b->free = apr_bucket_free;
- b->list = list;
- return h2_beam_bucket_make(b, beam, bsender, n);
-}
-
-const apr_bucket_type_t h2_bucket_type_beam = {
- "BEAM", 5, APR_BUCKET_DATA,
- beam_bucket_destroy,
- beam_bucket_read,
- apr_bucket_setaside_noop,
- apr_bucket_shared_split,
- apr_bucket_shared_copy
-};
-
-/*******************************************************************************
- * h2_blist, a brigade without allocations
- ******************************************************************************/
-
+/* registry for bucket converting `h2_bucket_beamer` functions */
static apr_array_header_t *beamers;
static apr_status_t cleanup_beamers(void *dummy)
@@ -191,40 +91,53 @@ static apr_bucket *h2_beam_bucket(h2_bucket_beam *beam,
return b;
}
+static int is_empty(h2_bucket_beam *beam);
+static apr_off_t get_buffered_data_len(h2_bucket_beam *beam);
-/*******************************************************************************
- * bucket beam that can transport buckets across threads
- ******************************************************************************/
-
-static void mutex_leave(apr_thread_mutex_t *lock)
+static int h2_blist_count(h2_blist *blist)
{
- apr_thread_mutex_unlock(lock);
-}
+ apr_bucket *b;
+ int count = 0;
-static apr_status_t mutex_enter(void *ctx, h2_beam_lock *pbl)
-{
- h2_bucket_beam *beam = ctx;
- pbl->mutex = beam->lock;
- pbl->leave = mutex_leave;
- return apr_thread_mutex_lock(pbl->mutex);
-}
+ for (b = H2_BLIST_FIRST(blist); b != H2_BLIST_SENTINEL(blist);
+ b = APR_BUCKET_NEXT(b)) {
+ ++count;
+ }
+ return count;
+}
+
+#define H2_BEAM_LOG(beam, c, level, rv, msg, bb) \
+ do { \
+ if (APLOG_C_IS_LEVEL((c),(level))) { \
+ char buffer[4 * 1024]; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = bb? h2_util_bb_print(buffer, bmax, "", "", bb) : 0; \
+ ap_log_cerror(APLOG_MARK, (level), rv, (c), \
+ "BEAM[%s,%s%sdata=%ld,buckets(send/consumed)=%d/%d]: %s %s", \
+ (beam)->name, \
+ (beam)->aborted? "aborted," : "", \
+ is_empty(beam)? "empty," : "", \
+ (long)get_buffered_data_len(beam), \
+ h2_blist_count(&(beam)->buckets_to_send), \
+ h2_blist_count(&(beam)->buckets_consumed), \
+ (msg), len? buffer : ""); \
+ } \
+ } while (0)
-static apr_status_t enter_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
-{
- return mutex_enter(beam, pbl);
-}
-static void leave_yellow(h2_bucket_beam *beam, h2_beam_lock *pbl)
+static int bucket_is_mmap(apr_bucket *b)
{
- (void)beam;
- if (pbl->leave) {
- pbl->leave(pbl->mutex);
- }
+#if APR_HAS_MMAP
+ return APR_BUCKET_IS_MMAP(b);
+#else
+ /* if it is not defined as enabled, it should always be no */
+ return 0;
+#endif
}
static apr_off_t bucket_mem_used(apr_bucket *b)
{
- if (APR_BUCKET_IS_FILE(b)) {
+ if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) {
return 0;
}
else {
@@ -233,53 +146,37 @@ static apr_off_t bucket_mem_used(apr_bucket *b)
}
}
-static int report_consumption(h2_bucket_beam *beam, h2_beam_lock *pbl)
+static int report_consumption(h2_bucket_beam *beam, int locked)
{
int rv = 0;
- apr_off_t len = beam->received_bytes - beam->cons_bytes_reported;
+ apr_off_t len = beam->recv_bytes - beam->recv_bytes_reported;
h2_beam_io_callback *cb = beam->cons_io_cb;
if (len > 0) {
if (cb) {
void *ctx = beam->cons_ctx;
- if (pbl) leave_yellow(beam, pbl);
+ if (locked) apr_thread_mutex_unlock(beam->lock);
cb(ctx, beam, len);
- if (pbl) enter_yellow(beam, pbl);
+ if (locked) apr_thread_mutex_lock(beam->lock);
rv = 1;
}
- beam->cons_bytes_reported += len;
+ beam->recv_bytes_reported += len;
}
return rv;
}
-static void report_prod_io(h2_bucket_beam *beam, int force, h2_beam_lock *pbl)
-{
- apr_off_t len = beam->sent_bytes - beam->prod_bytes_reported;
- if (force || len > 0) {
- h2_beam_io_callback *cb = beam->prod_io_cb;
- if (cb) {
- void *ctx = beam->prod_ctx;
-
- leave_yellow(beam, pbl);
- cb(ctx, beam, len);
- enter_yellow(beam, pbl);
- }
- beam->prod_bytes_reported += len;
- }
-}
-
static apr_size_t calc_buffered(h2_bucket_beam *beam)
{
apr_size_t len = 0;
apr_bucket *b;
- for (b = H2_BLIST_FIRST(&beam->send_list);
- b != H2_BLIST_SENTINEL(&beam->send_list);
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
b = APR_BUCKET_NEXT(b)) {
if (b->length == ((apr_size_t)-1)) {
/* do not count */
}
- else if (APR_BUCKET_IS_FILE(b)) {
+ else if (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b)) {
/* if unread, has no real mem footprint. */
}
else {
@@ -289,13 +186,13 @@ static apr_size_t calc_buffered(h2_bucket_beam *beam)
return len;
}
-static void r_purge_sent(h2_bucket_beam *beam)
+static void purge_consumed_buckets(h2_bucket_beam *beam)
{
apr_bucket *b;
/* delete all sender buckets in purge brigade, needs to be called
* from sender thread only */
- while (!H2_BLIST_EMPTY(&beam->purge_list)) {
- b = H2_BLIST_FIRST(&beam->purge_list);
+ while (!H2_BLIST_EMPTY(&beam->buckets_consumed)) {
+ b = H2_BLIST_FIRST(&beam->buckets_consumed);
apr_bucket_delete(b);
}
}
@@ -312,30 +209,10 @@ static apr_size_t calc_space_left(h2_bucket_beam *beam)
static int buffer_is_empty(h2_bucket_beam *beam)
{
return ((!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer))
- && H2_BLIST_EMPTY(&beam->send_list));
-}
-
-static apr_status_t wait_empty(h2_bucket_beam *beam, apr_read_type_e block,
- apr_thread_mutex_t *lock)
-{
- apr_status_t rv = APR_SUCCESS;
-
- while (!buffer_is_empty(beam) && APR_SUCCESS == rv) {
- if (APR_BLOCK_READ != block || !lock) {
- rv = APR_EAGAIN;
- }
- else if (beam->timeout > 0) {
- rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
- }
- else {
- rv = apr_thread_cond_wait(beam->change, lock);
- }
- }
- return rv;
+ && H2_BLIST_EMPTY(&beam->buckets_to_send));
}
-static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block,
- apr_thread_mutex_t *lock)
+static apr_status_t wait_not_empty(h2_bucket_beam *beam, conn_rec *c, apr_read_type_e block)
{
apr_status_t rv = APR_SUCCESS;
@@ -343,24 +220,24 @@ static apr_status_t wait_not_empty(h2_bucket_beam *beam, apr_read_type_e block,
if (beam->aborted) {
rv = APR_ECONNABORTED;
}
- else if (beam->closed) {
- rv = APR_EOF;
- }
- else if (APR_BLOCK_READ != block || !lock) {
+ else if (APR_BLOCK_READ != block) {
rv = APR_EAGAIN;
}
else if (beam->timeout > 0) {
- rv = apr_thread_cond_timedwait(beam->change, lock, beam->timeout);
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, timeout", NULL);
+ rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout);
}
else {
- rv = apr_thread_cond_wait(beam->change, lock);
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_empty, forever", NULL);
+ rv = apr_thread_cond_wait(beam->change, beam->lock);
}
}
return rv;
}
-static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block,
- apr_size_t *pspace_left, h2_beam_lock *bl)
+static apr_status_t wait_not_full(h2_bucket_beam *beam, conn_rec *c,
+ apr_read_type_e block,
+ apr_size_t *pspace_left)
{
apr_status_t rv = APR_SUCCESS;
apr_size_t left;
@@ -369,15 +246,17 @@ static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block,
if (beam->aborted) {
rv = APR_ECONNABORTED;
}
- else if (block != APR_BLOCK_READ || !bl->mutex) {
+ else if (block != APR_BLOCK_READ) {
rv = APR_EAGAIN;
}
else {
if (beam->timeout > 0) {
- rv = apr_thread_cond_timedwait(beam->change, bl->mutex, beam->timeout);
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, timeout", NULL);
+ rv = apr_thread_cond_timedwait(beam->change, beam->lock, beam->timeout);
}
else {
- rv = apr_thread_cond_wait(beam->change, bl->mutex);
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, rv, "wait_not_full, forever", NULL);
+ rv = apr_thread_cond_wait(beam->change, beam->lock);
}
}
}
@@ -385,73 +264,6 @@ static apr_status_t wait_not_full(h2_bucket_beam *beam, apr_read_type_e block,
return rv;
}
-static void h2_beam_emitted(h2_bucket_beam *beam, h2_beam_proxy *proxy)
-{
- h2_beam_lock bl;
- apr_bucket *b, *next;
-
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- /* even when beam buckets are split, only the one where
- * refcount drops to 0 will call us */
- H2_BPROXY_REMOVE(proxy);
- /* invoked from receiver thread, the last beam bucket for the send
- * bucket is about to be destroyed.
- * remove it from the hold, where it should be now */
- if (proxy->bsender) {
- for (b = H2_BLIST_FIRST(&beam->hold_list);
- b != H2_BLIST_SENTINEL(&beam->hold_list);
- b = APR_BUCKET_NEXT(b)) {
- if (b == proxy->bsender) {
- break;
- }
- }
- if (b != H2_BLIST_SENTINEL(&beam->hold_list)) {
- /* bucket is in hold as it should be, mark this one
- * and all before it for purging. We might have placed meta
- * buckets without a receiver proxy into the hold before it
- * and schedule them for purging now */
- for (b = H2_BLIST_FIRST(&beam->hold_list);
- b != H2_BLIST_SENTINEL(&beam->hold_list);
- b = next) {
- next = APR_BUCKET_NEXT(b);
- if (b == proxy->bsender) {
- APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
- break;
- }
- else if (APR_BUCKET_IS_METADATA(b)) {
- APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->purge_list, b);
- }
- else {
- /* another data bucket before this one in hold. this
- * is normal since DATA buckets need not be destroyed
- * in order */
- }
- }
-
- proxy->bsender = NULL;
- }
- else {
- /* it should be there unless we screwed up */
- ap_log_perror(APLOG_MARK, APLOG_WARNING, 0, beam->send_pool,
- APLOGNO(03384) "h2_beam(%d-%s): emitted bucket not "
- "in hold, n=%d", beam->id, beam->tag,
- (int)proxy->n);
- ap_assert(!proxy->bsender);
- }
- }
- /* notify anyone waiting on space to become available */
- if (!bl.mutex) {
- r_purge_sent(beam);
- }
- else {
- apr_thread_cond_broadcast(beam->change);
- }
- leave_yellow(beam, &bl);
- }
-}
-
static void h2_blist_cleanup(h2_blist *bl)
{
apr_bucket *e;
@@ -462,80 +274,7 @@ static void h2_blist_cleanup(h2_blist *bl)
}
}
-static apr_status_t beam_close(h2_bucket_beam *beam)
-{
- if (!beam->closed) {
- beam->closed = 1;
- apr_thread_cond_broadcast(beam->change);
- }
- return APR_SUCCESS;
-}
-
-int h2_beam_is_closed(h2_bucket_beam *beam)
-{
- return beam->closed;
-}
-
-static int pool_register(h2_bucket_beam *beam, apr_pool_t *pool,
- apr_status_t (*cleanup)(void *))
-{
- if (pool && pool != beam->pool) {
- apr_pool_pre_cleanup_register(pool, beam, cleanup);
- return 1;
- }
- return 0;
-}
-
-static int pool_kill(h2_bucket_beam *beam, apr_pool_t *pool,
- apr_status_t (*cleanup)(void *)) {
- if (pool && pool != beam->pool) {
- apr_pool_cleanup_kill(pool, beam, cleanup);
- return 1;
- }
- return 0;
-}
-
-static apr_status_t beam_recv_cleanup(void *data)
-{
- h2_bucket_beam *beam = data;
- /* receiver pool has gone away, clear references */
- beam->recv_buffer = NULL;
- beam->recv_pool = NULL;
- return APR_SUCCESS;
-}
-
-static apr_status_t beam_send_cleanup(void *data)
-{
- h2_bucket_beam *beam = data;
- /* sender is going away, clear up all references to its memory */
- r_purge_sent(beam);
- h2_blist_cleanup(&beam->send_list);
- report_consumption(beam, NULL);
- while (!H2_BPROXY_LIST_EMPTY(&beam->proxies)) {
- h2_beam_proxy *proxy = H2_BPROXY_LIST_FIRST(&beam->proxies);
- H2_BPROXY_REMOVE(proxy);
- proxy->beam = NULL;
- proxy->bsender = NULL;
- }
- h2_blist_cleanup(&beam->purge_list);
- h2_blist_cleanup(&beam->hold_list);
- beam->send_pool = NULL;
- return APR_SUCCESS;
-}
-
-static void beam_set_send_pool(h2_bucket_beam *beam, apr_pool_t *pool)
-{
- if (beam->send_pool != pool) {
- if (beam->send_pool && beam->send_pool != beam->pool) {
- pool_kill(beam, beam->send_pool, beam_send_cleanup);
- beam_send_cleanup(beam);
- }
- beam->send_pool = pool;
- pool_register(beam, beam->send_pool, beam_send_cleanup);
- }
-}
-
-static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl)
+static void recv_buffer_cleanup(h2_bucket_beam *beam)
{
if (beam->recv_buffer && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
apr_bucket_brigade *bb = beam->recv_buffer;
@@ -543,73 +282,30 @@ static void recv_buffer_cleanup(h2_bucket_beam *beam, h2_beam_lock *bl)
beam->recv_buffer = NULL;
apr_brigade_length(bb, 0, &bblen);
- beam->received_bytes += bblen;
+ beam->recv_bytes += bblen;
/* need to do this unlocked since bucket destroy might
* call this beam again. */
- if (bl) leave_yellow(beam, bl);
+ apr_thread_mutex_unlock(beam->lock);
apr_brigade_destroy(bb);
- if (bl) enter_yellow(beam, bl);
-
+ apr_thread_mutex_lock(beam->lock);
+
apr_thread_cond_broadcast(beam->change);
- if (beam->cons_ev_cb) {
- beam->cons_ev_cb(beam->cons_ctx, beam);
+ if (beam->recv_cb) {
+ beam->recv_cb(beam->recv_ctx, beam);
}
}
}
static apr_status_t beam_cleanup(h2_bucket_beam *beam, int from_pool)
{
- apr_status_t status = APR_SUCCESS;
- int safe_send = (beam->owner == H2_BEAM_OWNER_SEND);
- int safe_recv = (beam->owner == H2_BEAM_OWNER_RECV);
-
- /*
- * Owner of the beam is going away, depending on which side it owns,
- * cleanup strategies will differ.
- *
- * In general, receiver holds references to memory from sender.
- * Clean up receiver first, if safe, then cleanup sender, if safe.
- */
-
- /* When called from pool destroy, io callbacks are disabled */
- if (from_pool) {
- beam->cons_io_cb = NULL;
- }
-
- /* When modify send is not safe, this means we still have multi-thread
- * protection and the owner is receiving the buckets. If the sending
- * side has not gone away, this means we could have dangling buckets
- * in our lists that never get destroyed. This should not happen. */
- ap_assert(safe_send || !beam->send_pool);
- if (!H2_BLIST_EMPTY(&beam->send_list)) {
- ap_assert(beam->send_pool);
- }
-
- if (safe_recv) {
- if (beam->recv_pool) {
- pool_kill(beam, beam->recv_pool, beam_recv_cleanup);
- beam->recv_pool = NULL;
- }
- recv_buffer_cleanup(beam, NULL);
- }
- else {
- beam->recv_buffer = NULL;
- beam->recv_pool = NULL;
- }
-
- if (safe_send && beam->send_pool) {
- pool_kill(beam, beam->send_pool, beam_send_cleanup);
- status = beam_send_cleanup(beam);
- }
-
- if (safe_recv) {
- ap_assert(H2_BPROXY_LIST_EMPTY(&beam->proxies));
- ap_assert(H2_BLIST_EMPTY(&beam->send_list));
- ap_assert(H2_BLIST_EMPTY(&beam->hold_list));
- ap_assert(H2_BLIST_EMPTY(&beam->purge_list));
- }
- return status;
+ beam->cons_io_cb = NULL;
+ beam->recv_cb = NULL;
+
+ h2_blist_cleanup(&beam->buckets_to_send);
+ recv_buffer_cleanup(beam);
+ purge_consumed_buckets(beam);
+ return APR_SUCCESS;
}
static apr_status_t beam_pool_cleanup(void *data)
@@ -617,179 +313,131 @@ static apr_status_t beam_pool_cleanup(void *data)
return beam_cleanup(data, 1);
}
-apr_status_t h2_beam_destroy(h2_bucket_beam *beam)
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c)
{
apr_pool_cleanup_kill(beam->pool, beam, beam_pool_cleanup);
+ H2_BEAM_LOG(beam, c, APLOG_TRACE2, 0, "destroy", NULL);
return beam_cleanup(beam, 0);
}
-apr_status_t h2_beam_create(h2_bucket_beam **pbeam, apr_pool_t *pool,
- int id, const char *tag,
- h2_beam_owner_t owner,
+apr_status_t h2_beam_create(h2_bucket_beam **pbeam, conn_rec *from,
+ apr_pool_t *pool, int id, const char *tag,
apr_size_t max_buf_size,
apr_interval_time_t timeout)
{
h2_bucket_beam *beam;
- apr_status_t rv = APR_SUCCESS;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(from);
+ apr_status_t rv;
beam = apr_pcalloc(pool, sizeof(*beam));
- if (!beam) {
- return APR_ENOMEM;
- }
-
- beam->id = id;
- beam->tag = tag;
beam->pool = pool;
- beam->owner = owner;
- H2_BLIST_INIT(&beam->send_list);
- H2_BLIST_INIT(&beam->hold_list);
- H2_BLIST_INIT(&beam->purge_list);
- H2_BPROXY_LIST_INIT(&beam->proxies);
+ beam->from = from;
+ beam->id = id;
+ beam->name = apr_psprintf(pool, "%s-%d-%s",
+ conn_ctx->id, id, tag);
+
+ H2_BLIST_INIT(&beam->buckets_to_send);
+ H2_BLIST_INIT(&beam->buckets_consumed);
beam->tx_mem_limits = 1;
beam->max_buf_size = max_buf_size;
beam->timeout = timeout;
rv = apr_thread_mutex_create(&beam->lock, APR_THREAD_MUTEX_DEFAULT, pool);
- if (APR_SUCCESS == rv) {
- rv = apr_thread_cond_create(&beam->change, pool);
- if (APR_SUCCESS == rv) {
- apr_pool_pre_cleanup_register(pool, beam, beam_pool_cleanup);
- *pbeam = beam;
- }
- }
+ if (APR_SUCCESS != rv) goto cleanup;
+ rv = apr_thread_cond_create(&beam->change, pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+ apr_pool_pre_cleanup_register(pool, beam, beam_pool_cleanup);
+
+cleanup:
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "created", NULL);
+ *pbeam = (APR_SUCCESS == rv)? beam : NULL;
return rv;
}
void h2_beam_buffer_size_set(h2_bucket_beam *beam, apr_size_t buffer_size)
{
- h2_beam_lock bl;
-
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->max_buf_size = buffer_size;
- leave_yellow(beam, &bl);
- }
+ apr_thread_mutex_lock(beam->lock);
+ beam->max_buf_size = buffer_size;
+ apr_thread_mutex_unlock(beam->lock);
}
-apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
+void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled)
{
- h2_beam_lock bl;
- apr_size_t buffer_size = 0;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- buffer_size = beam->max_buf_size;
- leave_yellow(beam, &bl);
- }
- return buffer_size;
+ apr_thread_mutex_lock(beam->lock);
+ beam->copy_files = enabled;
+ apr_thread_mutex_unlock(beam->lock);
}
-void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
+apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam)
{
- h2_beam_lock bl;
+ apr_size_t buffer_size = 0;
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->timeout = timeout;
- leave_yellow(beam, &bl);
- }
+ apr_thread_mutex_lock(beam->lock);
+ buffer_size = beam->max_buf_size;
+ apr_thread_mutex_unlock(beam->lock);
+ return buffer_size;
}
apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam)
{
- h2_beam_lock bl;
- apr_interval_time_t timeout = 0;
-
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- timeout = beam->timeout;
- leave_yellow(beam, &bl);
- }
- return timeout;
-}
+ apr_interval_time_t timeout;
-void h2_beam_abort(h2_bucket_beam *beam)
-{
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->aborted = 1;
- r_purge_sent(beam);
- h2_blist_cleanup(&beam->send_list);
- report_consumption(beam, &bl);
- apr_thread_cond_broadcast(beam->change);
- leave_yellow(beam, &bl);
- }
-}
-
-apr_status_t h2_beam_close(h2_bucket_beam *beam)
-{
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- r_purge_sent(beam);
- beam_close(beam);
- report_consumption(beam, &bl);
- leave_yellow(beam, &bl);
- }
- return beam->aborted? APR_ECONNABORTED : APR_SUCCESS;
+ apr_thread_mutex_lock(beam->lock);
+ timeout = beam->timeout;
+ apr_thread_mutex_unlock(beam->lock);
+ return timeout;
}
-apr_status_t h2_beam_leave(h2_bucket_beam *beam)
+void h2_beam_timeout_set(h2_bucket_beam *beam, apr_interval_time_t timeout)
{
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- recv_buffer_cleanup(beam, &bl);
- beam->aborted = 1;
- beam_close(beam);
- leave_yellow(beam, &bl);
- }
- return APR_SUCCESS;
+ apr_thread_mutex_lock(beam->lock);
+ beam->timeout = timeout;
+ apr_thread_mutex_unlock(beam->lock);
}
-apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block)
+void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c)
{
- apr_status_t status;
- h2_beam_lock bl;
-
- if ((status = enter_yellow(beam, &bl)) == APR_SUCCESS) {
- status = wait_empty(beam, block, bl.mutex);
- leave_yellow(beam, &bl);
+ apr_thread_mutex_lock(beam->lock);
+ beam->aborted = 1;
+ if (c == beam->from) {
+ /* sender aborts */
+ if (beam->was_empty_cb && buffer_is_empty(beam)) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ /* no more consumption reporting to sender */
+ beam->cons_io_cb = NULL;
+ beam->cons_ctx = NULL;
+ purge_consumed_buckets(beam);
+ h2_blist_cleanup(&beam->buckets_to_send);
+ report_consumption(beam, 1);
}
- return status;
-}
-
-static void move_to_hold(h2_bucket_beam *beam,
- apr_bucket_brigade *sender_bb)
-{
- apr_bucket *b;
- while (sender_bb && !APR_BRIGADE_EMPTY(sender_bb)) {
- b = APR_BRIGADE_FIRST(sender_bb);
- APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ else {
+ /* receiver aborts */
+ recv_buffer_cleanup(beam);
}
+ apr_thread_cond_broadcast(beam->change);
+ apr_thread_mutex_unlock(beam->lock);
}
-static apr_status_t append_bucket(h2_bucket_beam *beam,
+static apr_status_t append_bucket(h2_bucket_beam *beam,
apr_bucket *b,
apr_read_type_e block,
- apr_size_t *pspace_left,
- h2_beam_lock *pbl)
+ apr_size_t *pspace_left)
{
const char *data;
apr_size_t len;
- apr_status_t status;
+ apr_status_t status = APR_SUCCESS;
int can_beam = 0, check_len;
(void)block;
- (void)pbl;
if (beam->aborted) {
return APR_ECONNABORTED;
}
if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- beam->closed = 1;
- }
APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->send_list, b);
+ apr_bucket_setaside(b, beam->pool);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b);
return APR_SUCCESS;
}
else if (APR_BUCKET_IS_FILE(b)) {
@@ -809,11 +457,11 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
* of open file handles and rather use a less efficient beam
* transport. */
apr_bucket_file *bf = b->data;
- apr_file_t *fd = bf->fd;
- can_beam = (bf->refcount.refcount == 1);
- if (can_beam && beam->can_beam_fn) {
- can_beam = beam->can_beam_fn(beam->can_beam_ctx, beam, fd);
- }
+ can_beam = !beam->copy_files && (bf->refcount.refcount == 1);
+ check_len = !can_beam;
+ }
+ else if (bucket_is_mmap(b)) {
+ can_beam = !beam->copy_files;
check_len = !can_beam;
}
else {
@@ -838,453 +486,355 @@ static apr_status_t append_bucket(h2_bucket_beam *beam,
* a receiver thread is a total NO GO, because the bucket might use
* its pool/bucket_alloc from a foreign thread and that will
* corrupt. */
- status = APR_ENOTIMPL;
- if (APR_BUCKET_IS_TRANSIENT(b)) {
- /* this takes care of transient buckets and converts them
- * into heap ones. Other bucket types might or might not be
- * affected by this. */
- status = apr_bucket_setaside(b, beam->send_pool);
+ if (b->length == 0) {
+ apr_bucket_delete(b);
+ return APR_SUCCESS;
}
else if (APR_BUCKET_IS_HEAP(b)) {
/* For heap buckets read from a receiver thread is fine. The
* data will be there and live until the bucket itself is
* destroyed. */
- status = APR_SUCCESS;
- }
- else if (APR_BUCKET_IS_POOL(b)) {
- /* pool buckets are bastards that register at pool cleanup
- * to morph themselves into heap buckets. That may happen anytime,
- * even after the bucket data pointer has been read. So at
- * any time inside the receiver thread, the pool bucket memory
- * may disappear. yikes. */
- status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
- if (status == APR_SUCCESS) {
- apr_bucket_heap_make(b, data, len, NULL);
- }
+ status = apr_bucket_setaside(b, beam->pool);
+ if (status != APR_SUCCESS) goto cleanup;
}
- else if (APR_BUCKET_IS_FILE(b) && can_beam) {
- status = apr_bucket_setaside(b, beam->send_pool);
+ else if (can_beam && (APR_BUCKET_IS_FILE(b) || bucket_is_mmap(b))) {
+ status = apr_bucket_setaside(b, beam->pool);
+ if (status != APR_SUCCESS) goto cleanup;
}
-
- if (status == APR_ENOTIMPL) {
- /* we have no knowledge about the internals of this bucket,
- * but hope that after read, its data stays immutable for the
- * lifetime of the bucket. (see pool bucket handling above for
- * a counter example).
- * We do the read while in the sender thread, so that the bucket may
- * use pools/allocators safely. */
+ else {
+ /* we know of no special shortcut to transfer the bucket to
+ * another pool without copying. So we make it a heap bucket. */
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
- if (status == APR_SUCCESS) {
- status = apr_bucket_setaside(b, beam->send_pool);
- }
- }
-
- if (status != APR_SUCCESS && status != APR_ENOTIMPL) {
- return status;
+ if (status != APR_SUCCESS) goto cleanup;
+ /* this allocates and copies data */
+ apr_bucket_heap_make(b, data, len, NULL);
}
APR_BUCKET_REMOVE(b);
- H2_BLIST_INSERT_TAIL(&beam->send_list, b);
- beam->sent_bytes += b->length;
+ H2_BLIST_INSERT_TAIL(&beam->buckets_to_send, b);
- return APR_SUCCESS;
-}
-
-void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p)
-{
- h2_beam_lock bl;
- /* Called from the sender thread to add buckets to the beam */
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- r_purge_sent(beam);
- beam_set_send_pool(beam, p);
- leave_yellow(beam, &bl);
- }
+cleanup:
+ return status;
}
-apr_status_t h2_beam_send(h2_bucket_beam *beam,
+apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
apr_bucket_brigade *sender_bb,
apr_read_type_e block)
{
apr_bucket *b;
apr_status_t rv = APR_SUCCESS;
apr_size_t space_left = 0;
- h2_beam_lock bl;
+ int was_empty;
/* Called from the sender thread to add buckets to the beam */
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- ap_assert(beam->send_pool);
- r_purge_sent(beam);
-
- if (beam->aborted) {
- move_to_hold(beam, sender_bb);
- rv = APR_ECONNABORTED;
- }
- else if (sender_bb) {
- int force_report = !APR_BRIGADE_EMPTY(sender_bb);
-
- space_left = calc_space_left(beam);
- while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
- if (space_left <= 0) {
- report_prod_io(beam, force_report, &bl);
- r_purge_sent(beam);
- rv = wait_not_full(beam, block, &space_left, &bl);
- if (APR_SUCCESS != rv) {
- break;
- }
- }
- b = APR_BRIGADE_FIRST(sender_bb);
- rv = append_bucket(beam, b, block, &space_left, &bl);
+ apr_thread_mutex_lock(beam->lock);
+ ap_assert(beam->from == from);
+ ap_assert(sender_bb);
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "start send", sender_bb);
+ purge_consumed_buckets(beam);
+ was_empty = buffer_is_empty(beam);
+
+ space_left = calc_space_left(beam);
+ while (!APR_BRIGADE_EMPTY(sender_bb) && APR_SUCCESS == rv) {
+ if (!beam->aborted && space_left <= 0) {
+ purge_consumed_buckets(beam);
+ if (was_empty && beam->was_empty_cb) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
}
-
- report_prod_io(beam, force_report, &bl);
- apr_thread_cond_broadcast(beam->change);
+ rv = wait_not_full(beam, from, block, &space_left);
+ if (APR_SUCCESS != rv) {
+ break;
+ }
+ was_empty = buffer_is_empty(beam);
}
- report_consumption(beam, &bl);
- leave_yellow(beam, &bl);
+ b = APR_BRIGADE_FIRST(sender_bb);
+ rv = append_bucket(beam, b, block, &space_left);
+ }
+
+ if (was_empty && beam->was_empty_cb && !buffer_is_empty(beam)) {
+ beam->was_empty_cb(beam->was_empty_ctx, beam);
+ }
+ apr_thread_cond_broadcast(beam->change);
+
+ report_consumption(beam, 1);
+ if (beam->aborted) {
+ rv = APR_ECONNABORTED;
}
+ H2_BEAM_LOG(beam, from, APLOG_TRACE2, rv, "end send", sender_bb);
+ apr_thread_mutex_unlock(beam->lock);
return rv;
}
-apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+apr_status_t h2_beam_receive(h2_bucket_beam *beam,
+ conn_rec *to,
apr_bucket_brigade *bb,
apr_read_type_e block,
- apr_off_t readbytes,
- int *pclosed)
+ apr_off_t readbytes)
{
- h2_beam_lock bl;
apr_bucket *bsender, *brecv, *ng;
int transferred = 0;
- apr_status_t status = APR_SUCCESS;
+ apr_status_t rv = APR_SUCCESS;
apr_off_t remain;
- int transferred_buckets = 0;
+ int consumed_buckets = 0;
+
+ apr_thread_mutex_lock(beam->lock);
+ H2_BEAM_LOG(beam, to, APLOG_TRACE2, 0, "start receive", bb);
+ if (readbytes <= 0) {
+ readbytes = (apr_off_t)APR_SIZE_MAX;
+ }
+ remain = readbytes;
- /* Called from the receiver thread to take buckets from the beam */
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- if (readbytes <= 0) {
- readbytes = (apr_off_t)APR_SIZE_MAX;
- }
- remain = readbytes;
-
transfer:
- if (beam->aborted) {
- recv_buffer_cleanup(beam, &bl);
- status = APR_ECONNABORTED;
- goto leave;
+ if (beam->aborted) {
+ recv_buffer_cleanup(beam);
+ rv = APR_ECONNABORTED;
+ goto leave;
+ }
+
+ /* transfer enough buckets from our receiver brigade, if we have one */
+ while (remain >= 0
+ && beam->recv_buffer
+ && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
+
+ brecv = APR_BRIGADE_FIRST(beam->recv_buffer);
+ if (brecv->length > 0 && remain <= 0) {
+ break;
}
+ APR_BUCKET_REMOVE(brecv);
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
+ ++transferred;
+ }
- /* transfer enough buckets from our receiver brigade, if we have one */
- while (remain >= 0
- && beam->recv_buffer
- && !APR_BRIGADE_EMPTY(beam->recv_buffer)) {
-
- brecv = APR_BRIGADE_FIRST(beam->recv_buffer);
- if (brecv->length > 0 && remain <= 0) {
- break;
- }
- APR_BUCKET_REMOVE(brecv);
- APR_BRIGADE_INSERT_TAIL(bb, brecv);
- remain -= brecv->length;
- ++transferred;
+ /* transfer from our sender brigade, transforming sender buckets to
+ * receiver ones until we have enough */
+ while (remain >= 0 && !H2_BLIST_EMPTY(&beam->buckets_to_send)) {
+
+ brecv = NULL;
+ bsender = H2_BLIST_FIRST(&beam->buckets_to_send);
+ if (bsender->length > 0 && remain <= 0) {
+ break;
}
- /* transfer from our sender brigade, transforming sender buckets to
- * receiver ones until we have enough */
- while (remain >= 0 && !H2_BLIST_EMPTY(&beam->send_list)) {
-
- brecv = NULL;
- bsender = H2_BLIST_FIRST(&beam->send_list);
- if (bsender->length > 0 && remain <= 0) {
- break;
+ if (APR_BUCKET_IS_METADATA(bsender)) {
+ /* we need a real copy into the receivers bucket_alloc */
+ if (APR_BUCKET_IS_EOS(bsender)) {
+ brecv = apr_bucket_eos_create(bb->bucket_alloc);
}
-
- if (APR_BUCKET_IS_METADATA(bsender)) {
- if (APR_BUCKET_IS_EOS(bsender)) {
- brecv = apr_bucket_eos_create(bb->bucket_alloc);
- beam->close_sent = 1;
- }
- else if (APR_BUCKET_IS_FLUSH(bsender)) {
- brecv = apr_bucket_flush_create(bb->bucket_alloc);
- }
- else if (AP_BUCKET_IS_ERROR(bsender)) {
- ap_bucket_error *eb = (ap_bucket_error *)bsender;
- brecv = ap_bucket_error_create(eb->status, eb->data,
- bb->p, bb->bucket_alloc);
- }
+ else if (APR_BUCKET_IS_FLUSH(bsender)) {
+ brecv = apr_bucket_flush_create(bb->bucket_alloc);
}
- else if (bsender->length == 0) {
- APR_BUCKET_REMOVE(bsender);
- H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
- continue;
- }
- else if (APR_BUCKET_IS_FILE(bsender)) {
- /* This is set aside into the target brigade pool so that
- * any read operation messes with that pool and not
- * the sender one. */
- apr_bucket_file *f = (apr_bucket_file *)bsender->data;
- apr_file_t *fd = f->fd;
- int setaside = (f->readpool != bb->p);
-
- if (setaside) {
- status = apr_file_setaside(&fd, fd, bb->p);
- if (status != APR_SUCCESS) {
- goto leave;
- }
- ++beam->files_beamed;
- }
- ng = apr_brigade_insert_file(bb, fd, bsender->start, (apr_off_t)bsender->length,
- bb->p);
-#if APR_HAS_MMAP
- /* disable mmap handling as this leads to segfaults when
- * the underlying file is changed while memory pointer has
- * been handed out. See also PR 59348 */
- apr_bucket_file_enable_mmap(ng, 0);
-#endif
- APR_BUCKET_REMOVE(bsender);
- H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
-
- remain -= bsender->length;
- beam->received_bytes += bsender->length;
- ++transferred;
- ++transferred_buckets;
- continue;
- }
- else {
- /* create a "receiver" standin bucket. we took care about the
- * underlying sender bucket and its data when we placed it into
- * the sender brigade.
- * the beam bucket will notify us on destruction that bsender is
- * no longer needed. */
- brecv = h2_beam_bucket_create(beam, bsender, bb->bucket_alloc,
- beam->buckets_sent++);
- }
-
- /* Place the sender bucket into our hold, to be destroyed when no
- * receiver bucket references it any more. */
- APR_BUCKET_REMOVE(bsender);
- H2_BLIST_INSERT_TAIL(&beam->hold_list, bsender);
-
- beam->received_bytes += bsender->length;
- ++transferred_buckets;
-
- if (brecv) {
- APR_BRIGADE_INSERT_TAIL(bb, brecv);
- remain -= brecv->length;
- ++transferred;
+ else if (AP_BUCKET_IS_ERROR(bsender)) {
+ ap_bucket_error *eb = (ap_bucket_error *)bsender;
+ brecv = ap_bucket_error_create(eb->status, eb->data,
+ bb->p, bb->bucket_alloc);
}
else {
- /* let outside hook determine how bucket is beamed */
- leave_yellow(beam, &bl);
+ /* Does someone else know how to make a proxy for
+ * the bucket? Ask the callbacks registered for this. */
brecv = h2_beam_bucket(beam, bb, bsender);
- enter_yellow(beam, &bl);
-
while (brecv && brecv != APR_BRIGADE_SENTINEL(bb)) {
++transferred;
remain -= brecv->length;
brecv = APR_BUCKET_NEXT(brecv);
}
+ brecv = NULL;
}
}
-
- if (remain < 0) {
- /* too much, put some back into out recv_buffer */
- remain = readbytes;
- for (brecv = APR_BRIGADE_FIRST(bb);
- brecv != APR_BRIGADE_SENTINEL(bb);
- brecv = APR_BUCKET_NEXT(brecv)) {
- remain -= (beam->tx_mem_limits? bucket_mem_used(brecv)
- : (apr_off_t)brecv->length);
- if (remain < 0) {
- apr_bucket_split(brecv, (apr_size_t)((apr_off_t)brecv->length+remain));
- beam->recv_buffer = apr_brigade_split_ex(bb,
- APR_BUCKET_NEXT(brecv),
- beam->recv_buffer);
- break;
- }
- }
+ else if (bsender->length == 0) {
+ /* nop */
}
-
- if (beam->closed && buffer_is_empty(beam)) {
- /* beam is closed and we have nothing more to receive */
- if (!beam->close_sent) {
- apr_bucket *b = apr_bucket_eos_create(bb->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, b);
- beam->close_sent = 1;
- ++transferred;
- status = APR_SUCCESS;
- }
+#if APR_HAS_MMAP
+ else if (APR_BUCKET_IS_MMAP(bsender)) {
+ apr_bucket_mmap *bmmap = bsender->data;
+ apr_mmap_t *mmap;
+ rv = apr_mmap_dup(&mmap, bmmap->mmap, bb->p);
+ if (rv != APR_SUCCESS) goto leave;
+ brecv = apr_bucket_mmap_create(mmap, bsender->start, bsender->length, bb->bucket_alloc);
}
-
- if (transferred_buckets > 0) {
- if (beam->cons_ev_cb) {
- beam->cons_ev_cb(beam->cons_ctx, beam);
+#endif
+ else if (APR_BUCKET_IS_FILE(bsender)) {
+ /* This is setaside into the target brigade pool so that
+ * any read operation messes with that pool and not
+ * the sender one. */
+ apr_bucket_file *f = (apr_bucket_file *)bsender->data;
+ apr_file_t *fd = f->fd;
+ int setaside = (f->readpool != bb->p);
+
+ if (setaside) {
+ rv = apr_file_setaside(&fd, fd, bb->p);
+ if (rv != APR_SUCCESS) goto leave;
}
- }
-
- if (transferred) {
- apr_thread_cond_broadcast(beam->change);
- status = APR_SUCCESS;
+ ng = apr_brigade_insert_file(bb, fd, bsender->start, (apr_off_t)bsender->length,
+ bb->p);
+#if APR_HAS_MMAP
+ /* disable mmap handling as this leads to segfaults when
+ * the underlying file is changed while memory pointer has
+ * been handed out. See also PR 59348 */
+ apr_bucket_file_enable_mmap(ng, 0);
+#endif
+ remain -= bsender->length;
+ ++transferred;
}
else {
- status = wait_not_empty(beam, block, bl.mutex);
- if (status != APR_SUCCESS) {
- goto leave;
+ const char *data;
+ apr_size_t dlen;
+ /* we did that when the bucket was added, so this should
+ * give us the same data as before without changing the bucket
+ * or anything (pool) connected to it. */
+ rv = apr_bucket_read(bsender, &data, &dlen, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) goto leave;
+ rv = apr_brigade_write(bb, NULL, NULL, data, dlen);
+ if (rv != APR_SUCCESS) goto leave;
+
+ remain -= dlen;
+ ++transferred;
+ }
+
+ if (brecv) {
+ /* we have a proxy that we can give the receiver */
+ APR_BRIGADE_INSERT_TAIL(bb, brecv);
+ remain -= brecv->length;
+ ++transferred;
+ }
+ APR_BUCKET_REMOVE(bsender);
+ H2_BLIST_INSERT_TAIL(&beam->buckets_consumed, bsender);
+ beam->recv_bytes += bsender->length;
+ ++consumed_buckets;
+ }
+
+ if (remain < 0) {
+ /* too much, put some back into out recv_buffer */
+ remain = readbytes;
+ for (brecv = APR_BRIGADE_FIRST(bb);
+ brecv != APR_BRIGADE_SENTINEL(bb);
+ brecv = APR_BUCKET_NEXT(brecv)) {
+ remain -= (beam->tx_mem_limits? bucket_mem_used(brecv)
+ : (apr_off_t)brecv->length);
+ if (remain < 0) {
+ apr_bucket_split(brecv, (apr_size_t)((apr_off_t)brecv->length+remain));
+ beam->recv_buffer = apr_brigade_split_ex(bb,
+ APR_BUCKET_NEXT(brecv),
+ beam->recv_buffer);
+ break;
}
- goto transfer;
}
-leave:
- if (pclosed) *pclosed = beam->closed? 1 : 0;
- leave_yellow(beam, &bl);
}
- return status;
+
+ if (beam->recv_cb && consumed_buckets > 0) {
+ beam->recv_cb(beam->recv_ctx, beam);
+ }
+
+ if (transferred) {
+ apr_thread_cond_broadcast(beam->change);
+ rv = APR_SUCCESS;
+ }
+ else if (beam->aborted) {
+ rv = APR_ECONNABORTED;
+ }
+ else {
+ rv = wait_not_empty(beam, to, block);
+ if (rv != APR_SUCCESS) {
+ goto leave;
+ }
+ goto transfer;
+ }
+
+leave:
+ H2_BEAM_LOG(beam, to, APLOG_TRACE2, rv, "end receive", bb);
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
}
void h2_beam_on_consumed(h2_bucket_beam *beam,
- h2_beam_ev_callback *ev_cb,
h2_beam_io_callback *io_cb, void *ctx)
{
- h2_beam_lock bl;
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->cons_ev_cb = ev_cb;
- beam->cons_io_cb = io_cb;
- beam->cons_ctx = ctx;
- leave_yellow(beam, &bl);
- }
+ apr_thread_mutex_lock(beam->lock);
+ beam->cons_io_cb = io_cb;
+ beam->cons_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
}
-void h2_beam_on_produced(h2_bucket_beam *beam,
- h2_beam_io_callback *io_cb, void *ctx)
+void h2_beam_on_received(h2_bucket_beam *beam,
+ h2_beam_ev_callback *recv_cb, void *ctx)
{
- h2_beam_lock bl;
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->prod_io_cb = io_cb;
- beam->prod_ctx = ctx;
- leave_yellow(beam, &bl);
- }
+ apr_thread_mutex_lock(beam->lock);
+ beam->recv_cb = recv_cb;
+ beam->recv_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
}
-void h2_beam_on_file_beam(h2_bucket_beam *beam,
- h2_beam_can_beam_callback *cb, void *ctx)
+void h2_beam_on_was_empty(h2_bucket_beam *beam,
+ h2_beam_ev_callback *was_empty_cb, void *ctx)
{
- h2_beam_lock bl;
-
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- beam->can_beam_fn = cb;
- beam->can_beam_ctx = ctx;
- leave_yellow(beam, &bl);
- }
+ apr_thread_mutex_lock(beam->lock);
+ beam->was_empty_cb = was_empty_cb;
+ beam->was_empty_ctx = ctx;
+ apr_thread_mutex_unlock(beam->lock);
}
-apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
+static apr_off_t get_buffered_data_len(h2_bucket_beam *beam)
{
apr_bucket *b;
apr_off_t l = 0;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- for (b = H2_BLIST_FIRST(&beam->send_list);
- b != H2_BLIST_SENTINEL(&beam->send_list);
- b = APR_BUCKET_NEXT(b)) {
- /* should all have determinate length */
- l += b->length;
- }
- leave_yellow(beam, &bl);
+
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ /* should all have determinate length */
+ l += b->length;
}
return l;
}
-apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
+apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam)
{
- apr_bucket *b;
apr_off_t l = 0;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- for (b = H2_BLIST_FIRST(&beam->send_list);
- b != H2_BLIST_SENTINEL(&beam->send_list);
- b = APR_BUCKET_NEXT(b)) {
- l += bucket_mem_used(b);
- }
- leave_yellow(beam, &bl);
- }
+
+ apr_thread_mutex_lock(beam->lock);
+ l = get_buffered_data_len(beam);
+ apr_thread_mutex_unlock(beam->lock);
return l;
}
-int h2_beam_empty(h2_bucket_beam *beam)
+apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam)
{
- int empty = 1;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- empty = (H2_BLIST_EMPTY(&beam->send_list)
- && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)));
- leave_yellow(beam, &bl);
- }
- return empty;
-}
+ apr_bucket *b;
+ apr_off_t l = 0;
-int h2_beam_holds_proxies(h2_bucket_beam *beam)
-{
- int has_proxies = 1;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- has_proxies = !H2_BPROXY_LIST_EMPTY(&beam->proxies);
- leave_yellow(beam, &bl);
+ apr_thread_mutex_lock(beam->lock);
+ for (b = H2_BLIST_FIRST(&beam->buckets_to_send);
+ b != H2_BLIST_SENTINEL(&beam->buckets_to_send);
+ b = APR_BUCKET_NEXT(b)) {
+ l += bucket_mem_used(b);
}
- return has_proxies;
+ apr_thread_mutex_unlock(beam->lock);
+ return l;
}
-int h2_beam_was_received(h2_bucket_beam *beam)
+static int is_empty(h2_bucket_beam *beam)
{
- int happend = 0;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- happend = (beam->received_bytes > 0);
- leave_yellow(beam, &bl);
- }
- return happend;
+ return (H2_BLIST_EMPTY(&beam->buckets_to_send)
+ && (!beam->recv_buffer || APR_BRIGADE_EMPTY(beam->recv_buffer)));
}
-apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam)
+int h2_beam_empty(h2_bucket_beam *beam)
{
- apr_size_t n = 0;
- h2_beam_lock bl;
-
- if (beam && enter_yellow(beam, &bl) == APR_SUCCESS) {
- n = beam->files_beamed;
- leave_yellow(beam, &bl);
- }
- return n;
-}
+ int empty = 1;
-int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file)
-{
- (void)ctx; (void)beam; (void)file;
- return 0;
+ apr_thread_mutex_lock(beam->lock);
+ empty = is_empty(beam);
+ apr_thread_mutex_unlock(beam->lock);
+ return empty;
}
int h2_beam_report_consumption(h2_bucket_beam *beam)
{
- h2_beam_lock bl;
int rv = 0;
- if (enter_yellow(beam, &bl) == APR_SUCCESS) {
- rv = report_consumption(beam, &bl);
- leave_yellow(beam, &bl);
- }
- return rv;
-}
-void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg)
-{
- if (beam && APLOG_C_IS_LEVEL(c,level)) {
- ap_log_cerror(APLOG_MARK, level, 0, c,
- "beam(%ld-%d,%s,closed=%d,aborted=%d,empty=%d,buf=%ld): %s",
- (c->master? c->master->id : c->id), beam->id, beam->tag,
- beam->closed, beam->aborted, h2_beam_empty(beam),
- (long)h2_beam_get_buffered(beam), msg);
- }
+ apr_thread_mutex_lock(beam->lock);
+ rv = report_consumption(beam, 1);
+ apr_thread_mutex_unlock(beam->lock);
+ return rv;
}
-
-
diff --git a/modules/http2/h2_bucket_beam.h b/modules/http2/h2_bucket_beam.h
index 6cc1adecb8..02709439dc 100644
--- a/modules/http2/h2_bucket_beam.h
+++ b/modules/http2/h2_bucket_beam.h
@@ -17,113 +17,14 @@
#ifndef h2_bucket_beam_h
#define h2_bucket_beam_h
+#include "h2_conn_ctx.h"
+
struct apr_thread_mutex_t;
struct apr_thread_cond_t;
-/*******************************************************************************
- * apr_bucket list without bells and whistles
- ******************************************************************************/
-
-/**
- * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
- * does not to any allocations or related features.
- */
-typedef struct {
- APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
-} h2_blist;
-
-#define H2_BLIST_INIT(b) APR_RING_INIT(&(b)->list, apr_bucket, link);
-#define H2_BLIST_SENTINEL(b) APR_RING_SENTINEL(&(b)->list, apr_bucket, link)
-#define H2_BLIST_EMPTY(b) APR_RING_EMPTY(&(b)->list, apr_bucket, link)
-#define H2_BLIST_FIRST(b) APR_RING_FIRST(&(b)->list)
-#define H2_BLIST_LAST(b) APR_RING_LAST(&(b)->list)
-#define H2_BLIST_INSERT_HEAD(b, e) do { \
- apr_bucket *ap__b = (e); \
- APR_RING_INSERT_HEAD(&(b)->list, ap__b, apr_bucket, link); \
- } while (0)
-#define H2_BLIST_INSERT_TAIL(b, e) do { \
- apr_bucket *ap__b = (e); \
- APR_RING_INSERT_TAIL(&(b)->list, ap__b, apr_bucket, link); \
- } while (0)
-#define H2_BLIST_CONCAT(a, b) do { \
- APR_RING_CONCAT(&(a)->list, &(b)->list, apr_bucket, link); \
- } while (0)
-#define H2_BLIST_PREPEND(a, b) do { \
- APR_RING_PREPEND(&(a)->list, &(b)->list, apr_bucket, link); \
- } while (0)
-
-/*******************************************************************************
- * h2_bucket_beam
- ******************************************************************************/
-
/**
* A h2_bucket_beam solves the task of transferring buckets, esp. their data,
- * across threads with zero buffer copies.
- *
- * When a thread, let's call it the sender thread, wants to send buckets to
- * another, the green thread, it creates a h2_bucket_beam and adds buckets
- * via the h2_beam_send(). It gives the beam to the green thread which then
- * can receive buckets into its own brigade via h2_beam_receive().
- *
- * Sending and receiving can happen concurrently.
- *
- * The beam can limit the amount of data it accepts via the buffer_size. This
- * can also be adjusted during its lifetime. Sends and receives can be done blocking.
- * A timeout can be set for such blocks.
- *
- * Care needs to be taken when terminating the beam. The beam registers at
- * the pool it was created with and will cleanup after itself. However, if
- * received buckets do still exist, already freed memory might be accessed.
- * The beam does a assertion on this condition.
- *
- * The proper way of shutting down a beam is to first make sure there are no
- * more green buckets out there, then cleanup the beam to purge eventually
- * still existing sender buckets and then, possibly, terminate the beam itself
- * (or the pool it was created with).
- *
- * The following restrictions apply to bucket transport:
- * - only EOS and FLUSH meta buckets are copied through. All other meta buckets
- * are kept in the beams hold.
- * - all kind of data buckets are transported through:
- * - transient buckets are converted to heap ones on send
- * - heap and pool buckets require no extra handling
- * - buckets with indeterminate length are read on send
- * - file buckets will transfer the file itself into a new bucket, if allowed
- * - all other buckets are read on send to make sure data is present
- *
- * This assures that when the sender thread sends its sender buckets, the data
- * is made accessible while still on the sender side. The sender bucket then enters
- * the beams hold storage.
- * When the green thread calls receive, sender buckets in the hold are wrapped
- * into special beam buckets. Beam buckets on read present the data directly
- * from the internal sender one, but otherwise live on the green side. When a
- * beam bucket gets destroyed, it notifies its beam that the corresponding
- * sender bucket from the hold may be destroyed.
- * Since the destruction of green buckets happens in the green thread, any
- * corresponding sender bucket can not immediately be destroyed, as that would
- * result in race conditions.
- * Instead, the beam transfers such sender buckets from the hold to the purge
- * storage. Next time there is a call from the sender side, the buckets in
- * purge will be deleted.
- *
- * There are callbacks that can be registesender with a beam:
- * - a "consumed" callback that gets called on the sender side with the
- * amount of data that has been received by the green side. The amount
- * is a delta from the last callback invocation. The sender side can trigger
- * these callbacks by calling h2_beam_send() with a NULL brigade.
- * - a "can_beam_file" callback that can prohibit the transfer of file handles
- * through the beam. This will cause file buckets to be read on send and
- * its data buffer will then be transports just like a heap bucket would.
- * When no callback is registered, no restrictions apply and all files are
- * passed through.
- * File handles transfersender to the green side will stay there until the
- * receiving brigade's pool is destroyed/cleared. If the pool lives very
- * long or if many different files are beamed, the process might run out
- * of available file handles.
- *
- * The name "beam" of course is inspired by good old transporter
- * technology where humans are kept inside the transporter's memory
- * buffers until the transmission is complete. Star gates use a similar trick.
+ * across threads with as little copying as possible.
*/
typedef void h2_beam_mutex_leave(struct apr_thread_mutex_t *lock);
@@ -141,66 +42,43 @@ typedef void h2_beam_io_callback(void *ctx, h2_bucket_beam *beam,
apr_off_t bytes);
typedef void h2_beam_ev_callback(void *ctx, h2_bucket_beam *beam);
-typedef struct h2_beam_proxy h2_beam_proxy;
-typedef struct {
- APR_RING_HEAD(h2_beam_proxy_list, h2_beam_proxy) list;
-} h2_bproxy_list;
-
-typedef int h2_beam_can_beam_callback(void *ctx, h2_bucket_beam *beam,
- apr_file_t *file);
-
-typedef enum {
- H2_BEAM_OWNER_SEND,
- H2_BEAM_OWNER_RECV
-} h2_beam_owner_t;
-
/**
- * Will deny all transfer of apr_file_t across the beam and force
- * a data copy instead.
+ * h2_blist can hold a list of buckets just like apr_bucket_brigade, but
+ * does not do any allocations or provide related features.
*/
-int h2_beam_no_files(void *ctx, h2_bucket_beam *beam, apr_file_t *file);
+typedef struct {
+ APR_RING_HEAD(h2_bucket_list, apr_bucket) list;
+} h2_blist;
struct h2_bucket_beam {
int id;
- const char *tag;
+ const char *name;
+ conn_rec *from;
apr_pool_t *pool;
- h2_beam_owner_t owner;
- h2_blist send_list;
- h2_blist hold_list;
- h2_blist purge_list;
+ h2_blist buckets_to_send;
+ h2_blist buckets_consumed;
apr_bucket_brigade *recv_buffer;
- h2_bproxy_list proxies;
- apr_pool_t *send_pool;
apr_pool_t *recv_pool;
apr_size_t max_buf_size;
apr_interval_time_t timeout;
- apr_off_t sent_bytes; /* amount of bytes send */
- apr_off_t received_bytes; /* amount of bytes received */
-
- apr_size_t buckets_sent; /* # of beam buckets sent */
- apr_size_t files_beamed; /* how many file handles have been set aside */
-
- unsigned int aborted : 1;
- unsigned int closed : 1;
- unsigned int close_sent : 1;
- unsigned int tx_mem_limits : 1; /* only memory size counts on transfers */
+ int aborted;
+ int tx_mem_limits; /* only memory size counts on transfers */
+ int copy_files;
struct apr_thread_mutex_t *lock;
struct apr_thread_cond_t *change;
- apr_off_t cons_bytes_reported; /* amount of bytes reported as consumed */
- h2_beam_ev_callback *cons_ev_cb;
- h2_beam_io_callback *cons_io_cb;
+ h2_beam_ev_callback *was_empty_cb; /* event: beam changed to non-empty in h2_beam_send() */
+ void *was_empty_ctx;
+ h2_beam_ev_callback *recv_cb; /* event: buckets were transferred in h2_beam_receive() */
+ void *recv_ctx;
+
+ apr_off_t recv_bytes; /* amount of bytes transferred in h2_beam_receive() */
+ apr_off_t recv_bytes_reported; /* amount of bytes reported as received via callback */
+ h2_beam_io_callback *cons_io_cb; /* report: recv_bytes deltas for sender */
void *cons_ctx;
-
- apr_off_t prod_bytes_reported; /* amount of bytes reported as produced */
- h2_beam_io_callback *prod_io_cb;
- void *prod_ctx;
-
- h2_beam_can_beam_callback *can_beam_fn;
- void *can_beam_ctx;
};
/**
@@ -211,62 +89,66 @@ struct h2_bucket_beam {
* that is only used inside that same mutex.
*
* @param pbeam will hold the created beam on return
+ * @param from connection from which buckets are sent
* @param pool pool owning the beam, beam will cleanup when pool released
* @param id identifier of the beam
* @param tag tag identifying beam for logging
- * @param owner if the beam is owned by the sender or receiver, e.g. if
- * the pool owner is using this beam for sending or receiving
* @param buffer_size maximum memory footprint of buckets buffered in beam, or
* 0 for no limitation
* @param timeout timeout for blocking operations
*/
apr_status_t h2_beam_create(h2_bucket_beam **pbeam,
+ conn_rec *from,
apr_pool_t *pool,
int id, const char *tag,
- h2_beam_owner_t owner,
apr_size_t buffer_size,
apr_interval_time_t timeout);
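
For orientation, a minimal sketch of how the new create signature might be used; the names c1, stream_pool and stream_id are illustrative assumptions, not part of this patch:

    h2_bucket_beam *beam = NULL;
    apr_status_t rv;

    /* beam sending buckets on behalf of primary connection c1,
     * 64 KB buffer limit, 30 second timeout for blocking calls */
    rv = h2_beam_create(&beam, c1, stream_pool, stream_id, "output",
                        65536, apr_time_from_sec(30));
    if (rv == APR_SUCCESS) {
        /* allow file handles to pass through instead of copying data */
        h2_beam_set_copy_files(beam, 0);
    }
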
/**
* Destroys the beam immediately without cleanup.
*/
-apr_status_t h2_beam_destroy(h2_bucket_beam *beam);
+apr_status_t h2_beam_destroy(h2_bucket_beam *beam, conn_rec *c);
/**
- * Send buckets from the given brigade through the beam. Will hold buckets
- * internally as long as they have not been processed by the receiving side.
- * All accepted buckets are removed from the given brigade. Will return with
- * APR_EAGAIN on non-blocking sends when not all buckets could be accepted.
- *
- * Call from the sender side only.
+ * Switch copying of file buckets on/off.
*/
-apr_status_t h2_beam_send(h2_bucket_beam *beam,
- apr_bucket_brigade *bb,
- apr_read_type_e block);
+void h2_beam_set_copy_files(h2_bucket_beam * beam, int enabled);
/**
- * Register the pool from which future buckets are send. This defines
- * the lifetime of the buckets, e.g. the pool should not be cleared/destroyed
- * until the data is no longer needed (or has been received).
+ * Send buckets from the given brigade through the beam.
+ * This can block if the amount of bucket data is above the buffer limit.
+ * @param beam the beam to add buckets to
+ * @param from the connection the sender operates on, must be the same as
+ * used to create the beam
+ * @param bb the brigade to take buckets from
+ * @param block if the sending should block when the buffer is full
+ * @return APR_SUCCESS when buckets were added to the beam. This can be
+ * a partial transfer and other buckets may still remain in bb
+ * APR_EAGAIN on non-blocking send when the buffer is full
+ * APR_TIMEUP on blocking sends that time out
+ * APR_ECONNABORTED when beam has been aborted
*/
-void h2_beam_send_from(h2_bucket_beam *beam, apr_pool_t *p);
+apr_status_t h2_beam_send(h2_bucket_beam *beam, conn_rec *from,
+ apr_bucket_brigade *bb,
+ apr_read_type_e block);
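
A sketch of the sending side against the documented return codes; beam, c1 and bb are assumed to exist as in the creation sketch above:

    rv = h2_beam_send(beam, c1, bb, APR_NONBLOCK_READ);
    if (rv == APR_EAGAIN) {
        /* buffer full on a non-blocking send: unsent buckets remain in bb,
         * retry after the receiver has consumed some data */
    }
    else if (rv == APR_ECONNABORTED) {
        /* the other side aborted the beam, stop producing */
    }
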
/**
- * Receive buckets from the beam into the given brigade. Will return APR_EOF
- * when reading past an EOS bucket. Reads can be blocking until data is
- * available or the beam has been closed. Non-blocking calls return APR_EAGAIN
- * if no data is available.
- *
- * Call from the receiver side only.
- * @param pclosed on return != 0 iff the beam has been closed by the sender. It
- * may still hold untransfered data. Maybe NULL if the caller is
- * not interested in this.
- */
-apr_status_t h2_beam_receive(h2_bucket_beam *beam,
- apr_bucket_brigade *green_buckets,
+ * Receive buckets from the beam into the given brigade. The caller is
+ * operating on connection `to`.
+ * @param beam the beam to receive buckets from
+ * @param to the connection the receiver is working with
+ * @param bb the bucket brigade to append to
+ * @param block if the read should block when buckets are unavailable
+ * @param readbytes the amount of data the receiver wants
+ * @return APR_SUCCESS when buckets were appended
+ * APR_EAGAIN on non-blocking read when no buckets are available
+ * APR_TIMEUP on blocking reads that time out
+ * APR_ECONNABORTED when beam has been aborted
+ */
+apr_status_t h2_beam_receive(h2_bucket_beam *beam, conn_rec *to,
+ apr_bucket_brigade *bb,
apr_read_type_e block,
- apr_off_t readbytes,
- int *pclosed);
+ apr_off_t readbytes);
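
And the receiving side, again only as a sketch; c2 and bb_out are hypothetical names for the receiver's connection and brigade:

    rv = h2_beam_receive(beam, c2, bb_out, APR_NONBLOCK_READ, 16 * 1024);
    if (rv == APR_SUCCESS) {
        /* up to roughly 16 KB of buckets were appended to bb_out */
    }
    else if (rv == APR_EAGAIN) {
        /* nothing buffered right now, poll or wait and try again */
    }
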
/**
* Determine if beam is empty.
@@ -274,53 +156,19 @@ apr_status_t h2_beam_receive(h2_bucket_beam *beam,
int h2_beam_empty(h2_bucket_beam *beam);
/**
- * Determine if beam has handed out proxy buckets that are not destroyed.
- */
-int h2_beam_holds_proxies(h2_bucket_beam *beam);
-
-/**
- * Abort the beam. Will cleanup any buffered buckets and answer all send
- * and receives with APR_ECONNABORTED.
- *
- * Call from the sender side only.
- */
-void h2_beam_abort(h2_bucket_beam *beam);
-
-/**
- * Close the beam. Sending an EOS bucket serves the same purpose.
- *
- * Call from the sender side only.
- */
-apr_status_t h2_beam_close(h2_bucket_beam *beam);
-
-/**
- * Receives leaves the beam, e.g. will no longer read. This will
- * interrupt any sender blocked writing and fail future send.
- *
- * Call from the receiver side only.
- */
-apr_status_t h2_beam_leave(h2_bucket_beam *beam);
-
-int h2_beam_is_closed(h2_bucket_beam *beam);
-
-/**
- * Return APR_SUCCESS when all buckets in transit have been handled.
- * When called with APR_BLOCK_READ and a mutex set, will wait until the green
- * side has consumed all data. Otherwise APR_EAGAIN is returned.
- * With clear_buffers set, any queued data is discarded.
- * If a timeout is set on the beam, waiting might also time out and
- * return APR_ETIMEUP.
+ * Abort the beam, either from receiving or sending side.
*
- * Call from the sender side only.
+ * @param beam the beam to abort
+ * @param c the connection the caller is working with
*/
-apr_status_t h2_beam_wait_empty(h2_bucket_beam *beam, apr_read_type_e block);
+void h2_beam_abort(h2_bucket_beam *beam, conn_rec *c);
-/**
- * Set/get the timeout for blocking read/write operations. Only works
- * if a mutex has been set for the beam.
+/**
+ * Set/get the timeout for blocking send/receive operations.
*/
void h2_beam_timeout_set(h2_bucket_beam *beam,
apr_interval_time_t timeout);
+
apr_interval_time_t h2_beam_timeout_get(h2_bucket_beam *beam);
/**
@@ -335,7 +183,6 @@ apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
* amount of bytes that have been consumed by the receiver, since the
* last callback invocation or reset.
* @param beam the beam to set the callback on
- * @param ev_cb the callback or NULL, called when bytes are consumed
* @param io_cb the callback or NULL, called on sender with bytes consumed
* @param ctx the context to use in callback invocation
*
@@ -343,43 +190,37 @@ apr_size_t h2_beam_buffer_size_get(h2_bucket_beam *beam);
* from any side.
*/
void h2_beam_on_consumed(h2_bucket_beam *beam,
- h2_beam_ev_callback *ev_cb,
h2_beam_io_callback *io_cb, void *ctx);
/**
- * Call any registered consumed handler, if any changes have happened
- * since the last invocation.
- * @return !=0 iff a handler has been called
- *
- * Needs to be invoked from the sending side.
+ * Register a callback to be invoked on the receiver side whenever
+ * buckets have been transferred in an h2_beam_receive() call.
+ * @param beam the beam to set the callback on
+ * @param recv_cb the callback or NULL, called when buckets are received
+ * @param ctx the context to use in callback invocation
*/
-int h2_beam_report_consumption(h2_bucket_beam *beam);
+void h2_beam_on_received(h2_bucket_beam *beam,
+ h2_beam_ev_callback *recv_cb, void *ctx);
/**
- * Register a callback to be invoked on the receiver side with the
- * amount of bytes that have been produces by the sender, since the
- * last callback invocation or reset.
+ * Register a callback, invoked on the sender side, for when a send
+ * has added data to a previously empty beam.
+ * Unregister by passing a NULL was_empty_cb.
* @param beam the beam to set the callback on
- * @param io_cb the callback or NULL, called on receiver with bytes produced
+ * @param was_empty_cb the callback to invoke when a send makes the empty beam non-empty
* @param ctx the context to use in callback invocation
- *
- * Call from the receiver side, callbacks invoked on either side.
*/
-void h2_beam_on_produced(h2_bucket_beam *beam,
- h2_beam_io_callback *io_cb, void *ctx);
+void h2_beam_on_was_empty(h2_bucket_beam *beam,
+ h2_beam_ev_callback *was_empty_cb, void *ctx);
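
How the notification hooks might be wired up, as a sketch only; on_consumed, on_received and my_ctx are hypothetical, and h2_beam_on_was_empty() takes the same h2_beam_ev_callback shape as on_received:

    static void on_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
    {
        /* sender side: `bytes` have been consumed by the receiver
         * since the last report */
    }

    static void on_received(void *ctx, h2_bucket_beam *beam)
    {
        /* receiver side: buckets were transferred in h2_beam_receive() */
    }

    h2_beam_on_consumed(beam, on_consumed, my_ctx);
    h2_beam_on_received(beam, on_received, my_ctx);
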
/**
- * Register a callback that may prevent a file from being beam as
- * file handle, forcing the file content to be copied. Then no callback
- * is set (NULL), file handles are transferred directly.
- * @param beam the beam to set the callback on
- * @param io_cb the callback or NULL, called on receiver with bytes produced
- * @param ctx the context to use in callback invocation
- *
- * Call from the receiver side, callbacks invoked on either side.
+ * Call any registered consumed handler, if any changes have happened
+ * since the last invocation.
+ * @return !=0 iff a handler has been called
+ *
+ * Needs to be invoked from the sending side.
*/
-void h2_beam_on_file_beam(h2_bucket_beam *beam,
- h2_beam_can_beam_callback *cb, void *ctx);
+int h2_beam_report_consumption(h2_bucket_beam *beam);
/**
* Get the amount of bytes currently buffered in the beam (unread).
@@ -391,19 +232,10 @@ apr_off_t h2_beam_get_buffered(h2_bucket_beam *beam);
*/
apr_off_t h2_beam_get_mem_used(h2_bucket_beam *beam);
-/**
- * Return != 0 iff (some) data from the beam has been received.
- */
-int h2_beam_was_received(h2_bucket_beam *beam);
-
-apr_size_t h2_beam_get_files_beamed(h2_bucket_beam *beam);
-
-typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam,
+typedef apr_bucket *h2_bucket_beamer(h2_bucket_beam *beam,
apr_bucket_brigade *dest,
const apr_bucket *src);
void h2_register_bucket_beamer(h2_bucket_beamer *beamer);
-void h2_beam_log(h2_bucket_beam *beam, conn_rec *c, int level, const char *msg);
-
#endif /* h2_bucket_beam_h */
diff --git a/modules/http2/h2_c1.c b/modules/http2/h2_c1.c
new file mode 100644
index 0000000000..834b6a9745
--- /dev/null
+++ b/modules/http2/h2_c1.c
@@ -0,0 +1,328 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <ap_mpm.h>
+#include <ap_mmn.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_ssl.h>
+
+#include <mpm_common.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_headers.h"
+#include "h2_mplx.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_protocol.h"
+#include "h2_workers.h"
+#include "h2_c1.h"
+#include "h2_version.h"
+#include "h2_util.h"
+
+static struct h2_workers *workers;
+
+static int async_mpm;
+
+apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s)
+{
+ apr_status_t status = APR_SUCCESS;
+ int minw, maxw;
+ int max_threads_per_child = 0;
+ int idle_secs = 0;
+
+ ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
+
+ status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
+ if (status != APR_SUCCESS) {
+ /* some MPMs do not implement this */
+ async_mpm = 0;
+ status = APR_SUCCESS;
+ }
+
+ h2_config_init(pool);
+
+ h2_get_num_workers(s, &minw, &maxw);
+ idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS);
+ ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
+ "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
+ minw, maxw, max_threads_per_child, idle_secs);
+ workers = h2_workers_create(s, pool, minw, maxw, idle_secs);
+
+ return h2_mplx_c1_child_init(pool, s);
+}
+
+void h2_c1_child_stopping(apr_pool_t *pool, int graceful)
+{
+ if (workers && graceful) {
+ h2_workers_graceful_shutdown(workers);
+ }
+}
+
+
+apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s)
+{
+ h2_session *session;
+ h2_conn_ctx_t *ctx;
+ apr_status_t rv;
+
+ if (!workers) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
+ "workers not initialized");
+ rv = APR_EGENERAL;
+ goto cleanup;
+ }
+
+ rv = h2_session_create(&session, c, r, s, workers);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ ctx = h2_conn_ctx_get(c);
+ ap_assert(ctx);
+ ctx->session = session;
+ /* remove the input filter of mod_reqtimeout, now that the connection
+ * is established and we have switched to h2. reqtimeout has supervised
+ * possibly configured handshake timeouts and needs to get out of the way
+ * now since the rest of its state handling assumes http/1.x to take place. */
+ ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
+
+cleanup:
+ return rv;
+}
+
+apr_status_t h2_c1_run(conn_rec *c)
+{
+ apr_status_t status;
+ int mpm_state = 0;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ ap_assert(conn_ctx);
+ ap_assert(conn_ctx->session);
+ do {
+ if (c->cs) {
+ c->cs->sense = CONN_SENSE_DEFAULT;
+ c->cs->state = CONN_STATE_HANDLER;
+ }
+
+ status = h2_session_process(conn_ctx->session, async_mpm);
+
+ if (APR_STATUS_IS_EOF(status)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03045), conn_ctx->session,
+ "process, closing conn"));
+ c->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ c->keepalive = AP_CONN_KEEPALIVE;
+ }
+
+ if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
+ break;
+ }
+ } while (!async_mpm
+ && c->keepalive == AP_CONN_KEEPALIVE
+ && mpm_state != AP_MPMQ_STOPPING);
+
+ if (c->cs) {
+ switch (conn_ctx->session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ c->cs->state = CONN_STATE_WRITE_COMPLETION;
+ if (c->cs && !conn_ctx->session->remote.emitted_count) {
+ /* let the MPM know that we are not done and want
+ * the Timeout behaviour instead of a KeepAliveTimeout
+ * See PR 63534.
+ */
+ c->cs->sense = CONN_SENSE_WANT_READ;
+ }
+ break;
+ case H2_SESSION_ST_CLEANUP:
+ case H2_SESSION_ST_DONE:
+ default:
+ c->cs->state = CONN_STATE_LINGER;
+ break;
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->session) {
+ apr_status_t status = h2_session_pre_close(conn_ctx->session, async_mpm);
+ return (status == APR_SUCCESS)? DONE : status;
+ }
+ return DONE;
+}
+
+int h2_c1_allows_direct(conn_rec *c)
+{
+ if (!c->master) {
+ int is_tls = ap_ssl_conn_is_ssl(c);
+ const char *needed_protocol = is_tls? "h2" : "h2c";
+ int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
+
+ if (h2_direct < 0) {
+ h2_direct = is_tls? 0 : 1;
+ }
+ return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
+ }
+ return 0;
+}
+
+int h2_c1_can_upgrade(request_rec *r)
+{
+ if (!r->connection->master) {
+ int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
+ return h2_upgrade > 0 || (h2_upgrade < 0 && !ap_ssl_conn_is_ssl(r->connection));
+ }
+ return 0;
+}
+
+static int h2_c1_hook_process_connection(conn_rec* c)
+{
+ apr_status_t status;
+ h2_conn_ctx_t *ctx;
+
+ if (c->master) goto declined;
+ ctx = h2_conn_ctx_get(c);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
+ if (!ctx && c->keepalives == 0) {
+ const char *proto = ap_get_protocol(c);
+
+ if (APLOGctrace1(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
+ "new connection using protocol '%s', direct=%d, "
+ "tls acceptable=%d", proto, h2_c1_allows_direct(c),
+ h2_protocol_is_acceptable_c1(c, NULL, 1));
+ }
+
+ if (!strcmp(AP_PROTOCOL_HTTP1, proto)
+ && h2_c1_allows_direct(c)
+ && h2_protocol_is_acceptable_c1(c, NULL, 1)) {
+ /* The fresh connection is still on http/1.1, H2Direct is enabled
+ * and the connection is otherwise in an acceptable state.
+ * -> peek at the first 24 incoming bytes
+ */
+ apr_bucket_brigade *temp;
+ char *peek = NULL;
+ apr_size_t peeklen;
+
+ temp = apr_brigade_create(c->pool, c->bucket_alloc);
+ status = ap_get_brigade(c->input_filters, temp,
+ AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
+
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054)
+ "h2_h2, error reading 24 bytes speculative");
+ apr_brigade_destroy(temp);
+ return DECLINED;
+ }
+
+ apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
+ if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, direct mode detected");
+ ctx = h2_conn_ctx_create_for_c1(c, c->base_server,
+ ap_ssl_conn_is_ssl(c)? "h2" : "h2c");
+ }
+ else if (APLOGctrace2(c)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_h2, not detected in %d bytes(base64): %s",
+ (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
+ }
+ apr_brigade_destroy(temp);
+ }
+ }
+
+ if (!ctx) goto declined;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
+ if (!ctx->session) {
+ status = h2_c1_setup(c, NULL, ctx->server? ctx->server : c->base_server);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
+ if (status != APR_SUCCESS) {
+ h2_conn_ctx_detach(c);
+ return !OK;
+ }
+ }
+ h2_c1_run(c);
+ return OK;
+
+declined:
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
+ return DECLINED;
+}
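
For context, the 24 bytes compared against H2_MAGIC_TOKEN above are the HTTP/2 client connection preface from RFC 7540, section 3.5; the token presumably expands to:

    /* 24-octet client connection preface (RFC 7540, 3.5) */
    "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
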
+
+static int h2_c1_hook_pre_close(conn_rec *c)
+{
+ h2_conn_ctx_t *ctx;
+
+ /* secondary connection? */
+ if (c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_conn_ctx_get(c);
+ if (ctx) {
+ /* If the session has been closed correctly already, we will not
+ * find an h2_conn_ctx there. Its presence indicates that the session
+ * is still ongoing. */
+ return h2_c1_pre_close(ctx, c);
+ }
+ return DECLINED;
+}
+
+static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
+static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
+
+void h2_c1_register_hooks(void)
+{
+ /* Our main processing needs to run quite late. Definitely after mod_ssl,
+ * as we need its connection filters, but also before reqtimeout as its
+ * method of timeouts is specific to HTTP/1.1 (as of now).
+ * The core HTTP/1 processing runs as REALLY_LAST, so we will have
+ * a chance to take over before it.
+ */
+ ap_hook_process_connection(h2_c1_hook_process_connection,
+ mod_reqtimeout, NULL, APR_HOOK_LAST);
+
+ /* One last chance to properly say goodbye if we have not done so
+ * already. */
+ ap_hook_pre_close_connection(h2_c1_hook_pre_close, NULL, mod_ssl, APR_HOOK_LAST);
+
+ /* special bucket type transfer through a h2_bucket_beam */
+ h2_register_bucket_beamer(h2_bucket_headers_beam);
+}
+
diff --git a/modules/http2/h2_conn.h b/modules/http2/h2_c1.h
index de868cfa57..232c71c301 100644
--- a/modules/http2/h2_conn.h
+++ b/modules/http2/h2_c1.h
@@ -14,69 +14,65 @@
* limitations under the License.
*/
-#ifndef __mod_h2__h2_conn__
-#define __mod_h2__h2_conn__
+#ifndef __mod_h2__h2_c1__
+#define __mod_h2__h2_c1__
-struct h2_ctx;
-struct h2_task;
+struct h2_conn_ctx_t;
+
+/* Initialize this child process for h2 primary connection work,
+ * to be called once during child init before multi processing
+ * starts.
+ */
+apr_status_t h2_c1_child_init(apr_pool_t *pool, server_rec *s);
/**
- * Setup the connection and our context for HTTP/2 processing
+ * Setup the primary connection and our context for HTTP/2 processing
*
* @param c the connection HTTP/2 is starting on
* @param r the upgrade request that still awaits an answer, optional
* @param s the server selected for this connection (can be != c->base_server)
*/
-apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s);
+apr_status_t h2_c1_setup(conn_rec *c, request_rec *r, server_rec *s);
/**
- * Run the HTTP/2 connection in synchronous fashion.
+ * Run the HTTP/2 primary connection in synchronous fashion.
* Return when the HTTP/2 session is done
* and the connection will close or a fatal error occurred.
*
* @param c the http2 connection to run
* @return APR_SUCCESS when session is done.
*/
-apr_status_t h2_conn_run(conn_rec *c);
+apr_status_t h2_c1_run(conn_rec *c);
/**
- * The connection is about to close. If we have not send a GOAWAY
+ * The primary connection is about to close. If we have not sent a GOAWAY
* yet, this is the last chance.
*/
-apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c);
+apr_status_t h2_c1_pre_close(struct h2_conn_ctx_t *ctx, conn_rec *c);
/**
- * Initialize this child process for h2 connection work,
- * to be called once during child init before multi processing
- * starts.
+ * Check if the connection allows a direct detection of HTTP/2,
+ * as configurable by the H2Direct directive.
+ * @param c the connection to check on
+ * @return != 0 if direct detection is enabled
*/
-apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s);
+int h2_c1_allows_direct(conn_rec *c);
/**
- * Child is about to be stopped, release unused resources
+ * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
+ * for the given request.
+ * @param r the request to check
+ * @return != 0 iff Upgrade switching is enabled
*/
-void h2_conn_child_stopping(apr_pool_t *pool, int graceful);
+int h2_c1_can_upgrade(request_rec *r);
-typedef enum {
- H2_MPM_UNKNOWN,
- H2_MPM_WORKER,
- H2_MPM_EVENT,
- H2_MPM_PREFORK,
- H2_MPM_MOTORZ,
- H2_MPM_SIMPLE,
- H2_MPM_NETWARE,
- H2_MPM_WINNT,
-} h2_mpm_type_t;
-
-/* Returns the type of MPM module detected */
-h2_mpm_type_t h2_conn_mpm_type(void);
-const char *h2_conn_mpm_name(void);
-int h2_mpm_supported(void);
-
-conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent);
-void h2_secondary_destroy(conn_rec *secondary);
+/* Register hooks for h2 handling on primary connections.
+ */
+void h2_c1_register_hooks(void);
-apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd);
-void h2_secondary_run_connection(conn_rec *secondary);
+/**
+ * Child is about to be stopped, release unused resources
+ */
+void h2_c1_child_stopping(apr_pool_t *pool, int graceful);
-#endif /* defined(__mod_h2__h2_conn__) */
+#endif /* defined(__mod_h2__h2_c1__) */
diff --git a/modules/http2/h2_conn_io.c b/modules/http2/h2_c1_io.c
index f506c14e61..3cd6ae6204 100644
--- a/modules/http2/h2_conn_io.c
+++ b/modules/http2/h2_c1_io.c
@@ -17,6 +17,7 @@
#include <assert.h>
#include <apr_strings.h>
#include <ap_mpm.h>
+#include <mpm_common.h>
#include <httpd.h>
#include <http_core.h>
@@ -29,8 +30,8 @@
#include "h2_private.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
-#include "h2_conn_io.h"
-#include "h2_h2.h"
+#include "h2_c1_io.h"
+#include "h2_protocol.h"
#include "h2_session.h"
#include "h2_util.h"
@@ -56,8 +57,8 @@
#define BUF_REMAIN ((apr_size_t)(bmax-off))
-static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
- const char *tag, apr_bucket_brigade *bb)
+static void h2_c1_io_bb_log(conn_rec *c, int stream_id, int level,
+ const char *tag, apr_bucket_brigade *bb)
{
char buffer[16 * 1024];
const char *line = "(null)";
@@ -130,23 +131,29 @@ static void h2_conn_io_bb_log(conn_rec *c, int stream_id, int level,
c->id, tag, line);
}
+#define C1_IO_BB_LOG(c, stream_id, level, tag, bb) \
+ if (APLOG_C_IS_LEVEL(c, level)) { \
+ h2_c1_io_bb_log((c), (stream_id), (level), (tag), (bb)); \
+ }
+
-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
+apr_status_t h2_c1_io_init(h2_c1_io *io, conn_rec *c, server_rec *s)
{
io->c = c;
io->output = apr_brigade_create(c->pool, c->bucket_alloc);
io->is_tls = ap_ssl_conn_is_ssl(c);
io->buffer_output = io->is_tls;
- io->flush_threshold = (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM);
+ io->flush_threshold = 4 * (apr_size_t)h2_config_sgeti64(s, H2_CONF_STREAM_MAX_MEM);
- if (io->is_tls) {
+ if (io->buffer_output) {
/* This is what we start with,
* see https://issues.apache.org/jira/browse/TS-2503
*/
io->warmup_size = h2_config_sgeti64(s, H2_CONF_TLS_WARMUP_SIZE);
- io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS)
+ io->cooldown_usecs = (h2_config_sgeti(s, H2_CONF_TLS_COOLDOWN_SECS)
* APR_USEC_PER_SEC);
- io->write_size = (io->cooldown_usecs > 0?
+ io->cooldown_usecs = 0;
+ io->write_size = (io->cooldown_usecs > 0?
WRITE_SIZE_INITIAL : WRITE_SIZE_MAX);
}
else {
@@ -157,7 +164,7 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
if (APLOGctrace1(c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, io->c,
- "h2_conn_io(%ld): init, buffering=%d, warmup_size=%ld, "
+ "h2_c1_io(%ld): init, buffering=%d, warmup_size=%ld, "
"cd_secs=%f", io->c->id, io->buffer_output,
(long)io->warmup_size,
((double)io->cooldown_usecs/APR_USEC_PER_SEC));
@@ -166,19 +173,20 @@ apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s)
return APR_SUCCESS;
}
-static void append_scratch(h2_conn_io *io)
+static void append_scratch(h2_c1_io *io)
{
if (io->scratch && io->slen > 0) {
apr_bucket *b = apr_bucket_heap_create(io->scratch, io->slen,
apr_bucket_free,
io->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += io->slen;
io->scratch = NULL;
io->slen = io->ssize = 0;
}
}
-static apr_size_t assure_scratch_space(h2_conn_io *io) {
+static apr_size_t assure_scratch_space(h2_c1_io *io) {
apr_size_t remain = io->ssize - io->slen;
if (io->scratch && remain == 0) {
append_scratch(io);
@@ -194,7 +202,7 @@ static apr_size_t assure_scratch_space(h2_conn_io *io) {
return remain;
}
-static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
+static apr_status_t read_to_scratch(h2_c1_io *io, apr_bucket *b)
{
apr_status_t status;
const char *data;
@@ -211,9 +219,10 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
apr_off_t offset = b->start;
len = b->length;
- /* file buckets will either mmap (which we do not want) or
- * read 8000 byte chunks and split themself. However, we do
- * know *exactly* how many bytes we need where.
+ /* file buckets will read 8000 byte chunks and split
+ * themselves. However, we do know *exactly* how many
+ * bytes we need where. So we read the file directly to
+ * where we need it.
*/
status = apr_file_seek(fd, APR_SET, &offset);
if (status != APR_SUCCESS) {
@@ -225,6 +234,16 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
}
io->slen += len;
}
+ else if (APR_BUCKET_IS_MMAP(b)) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, io->c,
+ "h2_c1_io(%ld): seeing mmap bucket of size %ld, scratch remain=%ld",
+ io->c->id, (long)b->length, (long)(io->ssize - io->slen));
+ status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ if (status == APR_SUCCESS) {
+ memcpy(io->scratch+io->slen, data, len);
+ io->slen += len;
+ }
+ }
else {
status = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
if (status == APR_SUCCESS) {
@@ -235,94 +254,101 @@ static apr_status_t read_to_scratch(h2_conn_io *io, apr_bucket *b)
return status;
}
-static void check_write_size(h2_conn_io *io)
-{
- if (io->write_size > WRITE_SIZE_INITIAL
- && (io->cooldown_usecs > 0)
- && (apr_time_now() - io->last_write) >= io->cooldown_usecs) {
- /* long time not written, reset write size */
- io->write_size = WRITE_SIZE_INITIAL;
- io->bytes_written = 0;
- }
- else if (io->write_size < WRITE_SIZE_MAX
- && io->bytes_written >= io->warmup_size) {
- /* connection is hot, use max size */
- io->write_size = WRITE_SIZE_MAX;
- }
-}
-
-static apr_status_t pass_output(h2_conn_io *io, int flush)
+static apr_status_t pass_output(h2_c1_io *io, int flush)
{
conn_rec *c = io->c;
- apr_bucket_brigade *bb = io->output;
- apr_bucket *b;
apr_off_t bblen;
- apr_status_t status;
+ apr_status_t rv;
append_scratch(io);
- if (flush && !io->is_flushed) {
- b = apr_bucket_flush_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, b);
+ if (flush) {
+ if (!APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output))) {
+ apr_bucket *b = apr_bucket_flush_create(io->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(io->output, b);
+ }
}
-
- if (APR_BRIGADE_EMPTY(bb)) {
+ if (APR_BRIGADE_EMPTY(io->output)) {
return APR_SUCCESS;
}
ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, NULL);
- apr_brigade_length(bb, 0, &bblen);
- h2_conn_io_bb_log(c, 0, APLOG_TRACE2, "out", bb);
+ io->unflushed = !APR_BUCKET_IS_FLUSH(APR_BRIGADE_LAST(io->output));
+ apr_brigade_length(io->output, 0, &bblen);
+ C1_IO_BB_LOG(c, 0, APLOG_TRACE2, "out", io->output);
- status = ap_pass_brigade(c->output_filters, bb);
- if (status == APR_SUCCESS) {
- io->bytes_written += (apr_size_t)bblen;
- io->last_write = apr_time_now();
- if (flush) {
- io->is_flushed = 1;
+ rv = ap_pass_brigade(c->output_filters, io->output);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ io->buffered_len = 0;
+ io->bytes_written += (apr_size_t)bblen;
+ if (io->write_size < WRITE_SIZE_MAX
+ && io->bytes_written >= io->warmup_size) {
+ /* connection is hot, use max size */
+ io->write_size = WRITE_SIZE_MAX;
+ }
+ else if (io->cooldown_usecs > 0
+ && io->write_size > WRITE_SIZE_INITIAL) {
+ apr_time_t now = apr_time_now();
+ if ((now - io->last_write) >= io->cooldown_usecs) {
+ /* long time not written, reset write size */
+ io->write_size = WRITE_SIZE_INITIAL;
+ io->bytes_written = 0;
+ }
+ else {
+ io->last_write = now;
}
}
- apr_brigade_cleanup(bb);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03044)
- "h2_conn_io(%ld): pass_out brigade %ld bytes",
+cleanup:
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c, APLOGNO(03044)
+ "h2_c1_io(%ld): pass_out brigade %ld bytes",
c->id, (long)bblen);
}
- return status;
+ apr_brigade_cleanup(io->output);
+ return rv;
}
-int h2_conn_io_needs_flush(h2_conn_io *io)
+int h2_c1_io_needs_flush(h2_c1_io *io)
{
- if (!io->is_flushed) {
- apr_off_t len = h2_brigade_mem_size(io->output);
- if (len > (apr_off_t)io->flush_threshold) {
- return 1;
- }
- /* if we do not exceed flush length due to memory limits,
- * we want at least flush when we have that amount of data. */
- apr_brigade_length(io->output, 0, &len);
- return len > (apr_off_t)(4 * io->flush_threshold);
+ return io->buffered_len >= io->flush_threshold;
+}
+
+int h2_c1_io_pending(h2_c1_io *io)
+{
+ return !APR_BRIGADE_EMPTY(io->output) || (io->scratch && io->slen > 0);
+}
+
+apr_status_t h2_c1_io_pass(h2_c1_io *io)
+{
+ apr_status_t rv = APR_SUCCESS;
+
+ if (h2_c1_io_pending(io)) {
+ rv = pass_output(io, 0);
}
- return 0;
+ return rv;
}
-apr_status_t h2_conn_io_flush(h2_conn_io *io)
+apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io)
{
- apr_status_t status;
- status = pass_output(io, 1);
- check_write_size(io);
- return status;
+ apr_status_t rv = APR_SUCCESS;
+
+ if (h2_c1_io_pending(io) || io->unflushed) {
+ rv = pass_output(io, 1);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+cleanup:
+ return rv;
}
-apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
+apr_status_t h2_c1_io_add_data(h2_c1_io *io, const char *data, size_t length)
{
apr_status_t status = APR_SUCCESS;
apr_size_t remain;
- if (length > 0) {
- io->is_flushed = 0;
- }
-
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, io->c,
+ "h2_c1_io(%ld): adding %ld data bytes",
+ io->c->id, (long)length);
if (io->buffer_output) {
while (length > 0) {
remain = assure_scratch_space(io);
@@ -341,24 +367,20 @@ apr_status_t h2_conn_io_write(h2_conn_io *io, const char *data, size_t length)
}
else {
status = apr_brigade_write(io->output, NULL, NULL, data, length);
+ io->buffered_len += length;
}
return status;
}
-apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb)
+apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb)
{
apr_bucket *b;
- apr_status_t status = APR_SUCCESS;
-
- if (!APR_BRIGADE_EMPTY(bb)) {
- io->is_flushed = 0;
- }
+ apr_status_t rv = APR_SUCCESS;
- while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
+ while (!APR_BRIGADE_EMPTY(bb)) {
b = APR_BRIGADE_FIRST(bb);
-
- if (APR_BUCKET_IS_METADATA(b)) {
- /* need to finish any open scratch bucket, as meta data
+ if (APR_BUCKET_IS_METADATA(b) || APR_BUCKET_IS_MMAP(b)) {
+ /* need to finish any open scratch bucket, as meta data
 * needs to be forwarded "in order". */
append_scratch(io);
APR_BUCKET_REMOVE(b);
@@ -372,25 +394,143 @@ apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb)
/* complete write_size bucket, append unchanged */
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += b->length;
continue;
}
}
else {
/* bucket fits in remain, copy to scratch */
- status = read_to_scratch(io, b);
+ rv = read_to_scratch(io, b);
apr_bucket_delete(b);
+ if (APR_SUCCESS != rv) goto cleanup;
continue;
}
}
else {
/* no buffering, forward buckets setaside on flush */
- if (APR_BUCKET_IS_TRANSIENT(b)) {
- apr_bucket_setaside(b, io->c->pool);
- }
+ apr_bucket_setaside(b, io->c->pool);
APR_BUCKET_REMOVE(b);
APR_BRIGADE_INSERT_TAIL(io->output, b);
+ io->buffered_len += b->length;
}
}
- return status;
+cleanup:
+ return rv;
+}
+
+static apr_status_t c1_in_feed_bucket(h2_session *session,
+ apr_bucket *b, apr_ssize_t *inout_len)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t len;
+ const char *data;
+ ssize_t n;
+
+ rv = apr_bucket_read(b, &data, &len, APR_BLOCK_READ);
+ while (APR_SUCCESS == rv && len > 0) {
+ n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE4, 0, session->c1,
+ H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"),
+ (long)len, (long)n);
+ if (n < 0) {
+ if (nghttp2_is_fatal((int)n)) {
+ h2_session_event(session, H2_SESSION_EV_PROTO_ERROR,
+ (int)n, nghttp2_strerror((int)n));
+ rv = APR_EGENERAL;
+ }
+ }
+ else {
+ *inout_len += n;
+ if ((apr_ssize_t)len <= n) {
+ break;
+ }
+ len -= (apr_size_t)n;
+ data += n;
+ }
+ }
+
+ return rv;
+}
+
+static apr_status_t c1_in_feed_brigade(h2_session *session,
+ apr_bucket_brigade *bb,
+ apr_ssize_t *inout_len)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_bucket* b;
+
+ *inout_len = 0;
+ while (!APR_BRIGADE_EMPTY(bb)) {
+ b = APR_BRIGADE_FIRST(bb);
+ if (!APR_BUCKET_IS_METADATA(b)) {
+ rv = c1_in_feed_bucket(session, b, inout_len);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+ apr_bucket_delete(b);
+ }
+cleanup:
+ apr_brigade_cleanup(bb);
+ return rv;
+}
+
+static apr_status_t read_and_feed(h2_session *session)
+{
+ apr_ssize_t bytes_fed, bytes_requested;
+ apr_status_t rv;
+
+ bytes_requested = H2MAX(APR_BUCKET_BUFF_SIZE, session->max_stream_mem * 4);
+ rv = ap_get_brigade(session->c1->input_filters,
+ session->bbtmp, AP_MODE_READBYTES,
+ APR_NONBLOCK_READ, bytes_requested);
+
+ if (APR_SUCCESS == rv) {
+ h2_util_bb_log(session->c1, session->id, APLOG_TRACE2, "c1 in", session->bbtmp);
+ rv = c1_in_feed_brigade(session, session->bbtmp, &bytes_fed);
+ session->io.bytes_read += bytes_fed;
+ }
+ return rv;
}
+apr_status_t h2_c1_read(h2_session *session)
+{
+ apr_status_t rv;
+
+ /* H2_IN filter handles all incoming data against the session.
+ * We just pull at the filter chain to make it happen */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "session_read start"));
+ rv = read_and_feed(session);
+
+ if (APR_SUCCESS == rv) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_PENDING, 0, NULL);
+ }
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* Signal that we have exhausted the input momentarily.
+ * This might switch to polling the socket */
+ h2_session_dispatch_event(session, H2_SESSION_EV_INPUT_EXHAUSTED, 0, NULL);
+ }
+ else if (APR_SUCCESS != rv) {
+ if (APR_STATUS_IS_ETIMEDOUT(rv)
+ || APR_STATUS_IS_ECONNABORTED(rv)
+ || APR_STATUS_IS_ECONNRESET(rv)
+ || APR_STATUS_IS_EOF(rv)
+ || APR_STATUS_IS_EBADF(rv)) {
+ /* common status for a client that has left */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1,
+ H2_SSSN_MSG(session, "input gone"));
+ }
+ else {
+ /* uncommon status, log on INFO so that we see this */
+ ap_log_cerror( APLOG_MARK, APLOG_DEBUG, rv, session->c1,
+ H2_SSSN_LOG(APLOGNO(02950), session,
+ "error reading, terminating"));
+ }
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
+ apr_brigade_cleanup(session->bbtmp);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, session->c1,
+ H2_SSSN_MSG(session, "session_read done"));
+ return rv;
+}
diff --git a/modules/http2/h2_conn_io.h b/modules/http2/h2_c1_io.h
index e96203cac2..95088eac7e 100644
--- a/modules/http2/h2_conn_io.h
+++ b/modules/http2/h2_c1_io.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __mod_h2__h2_conn_io__
-#define __mod_h2__h2_conn_io__
+#ifndef __mod_h2__h2_c1_io__
+#define __mod_h2__h2_c1_io__
struct h2_config;
struct h2_session;
@@ -31,6 +31,7 @@ typedef struct {
apr_bucket_brigade *output;
int is_tls;
+ int unflushed;
apr_time_t cooldown_usecs;
apr_int64_t warmup_size;
@@ -40,37 +41,60 @@ typedef struct {
apr_int64_t bytes_written;
int buffer_output;
- apr_size_t flush_threshold;
+ apr_off_t buffered_len;
+ apr_off_t flush_threshold;
unsigned int is_flushed : 1;
char *scratch;
apr_size_t ssize;
apr_size_t slen;
-} h2_conn_io;
+} h2_c1_io;
-apr_status_t h2_conn_io_init(h2_conn_io *io, conn_rec *c, server_rec *s);
+apr_status_t h2_c1_io_init(h2_c1_io *io, conn_rec *c, server_rec *s);
/**
* Append data to the buffered output.
* @param buf the data to append
* @param length the length of the data to append
*/
-apr_status_t h2_conn_io_write(h2_conn_io *io,
+apr_status_t h2_c1_io_add_data(h2_c1_io *io,
const char *buf,
size_t length);
-apr_status_t h2_conn_io_pass(h2_conn_io *io, apr_bucket_brigade *bb);
+apr_status_t h2_c1_io_add(h2_c1_io *io, apr_bucket *b);
+
+apr_status_t h2_c1_io_append(h2_c1_io *io, apr_bucket_brigade *bb);
/**
* Pass any buffered data on to the connection output filters.
* @param io the connection io
- * @param flush if a flush bucket should be appended to any output
*/
-apr_status_t h2_conn_io_flush(h2_conn_io *io);
+apr_status_t h2_c1_io_pass(h2_c1_io *io);
+
+/**
+ * If there is any data pending, or any data was sent
+ * since the last FLUSH, send out a FLUSH now.
+ */
+apr_status_t h2_c1_io_assure_flushed(h2_c1_io *io);
/**
* Check if the buffered amount of data needs flushing.
*/
-int h2_conn_io_needs_flush(h2_conn_io *io);
+int h2_c1_io_needs_flush(h2_c1_io *io);
+
+/**
+ * Check if we have output pending.
+ */
+int h2_c1_io_pending(h2_c1_io *io);
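
A sketch of the intended call pattern on the c1 output side; io, frame_data and frame_len are placeholders for the session's h2_c1_io and the serialized frame bytes:

    rv = h2_c1_io_add_data(io, frame_data, frame_len);
    if (rv == APR_SUCCESS && h2_c1_io_needs_flush(io)) {
        /* buffered amount crossed the threshold: add a FLUSH and pass output */
        rv = h2_c1_io_assure_flushed(io);
    }
    /* later, before blocking for new events, drain anything still pending */
    if (rv == APR_SUCCESS && h2_c1_io_pending(io)) {
        rv = h2_c1_io_pass(io);
    }
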
+
+struct h2_session;
+
+/**
+ * Read c1 input and pass it on to nghttp2.
+ * @param session the session whose c1 input is read and fed to nghttp2
+ */
+apr_status_t h2_c1_read(struct h2_session *session);
-#endif /* defined(__mod_h2__h2_conn_io__) */
+#endif /* defined(__mod_h2__h2_c1_io__) */
diff --git a/modules/http2/h2_c2.c b/modules/http2/h2_c2.c
new file mode 100644
index 0000000000..8c195344a3
--- /dev/null
+++ b/modules/http2/h2_c2.c
@@ -0,0 +1,729 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <stddef.h>
+
+#include <apr_atomic.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_connection.h>
+#include <http_protocol.h>
+#include <http_request.h>
+#include <http_log.h>
+#include <http_vhost.h>
+#include <util_filter.h>
+#include <ap_mmn.h>
+#include <ap_mpm.h>
+#include <mpm_common.h>
+#include <mod_core.h>
+#include <scoreboard.h>
+
+#include "h2_private.h"
+#include "h2.h"
+#include "h2_bucket_beam.h"
+#include "h2_c1.h"
+#include "h2_config.h"
+#include "h2_conn_ctx.h"
+#include "h2_c2_filter.h"
+#include "h2_protocol.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
+#include "h2_headers.h"
+#include "h2_session.h"
+#include "h2_stream.h"
+#include "h2_c2.h"
+#include "h2_util.h"
+
+
+static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
+static module *mpm_module;
+static int mpm_supported = 1;
+static apr_socket_t *dummy_socket;
+
+static void check_modules(int force)
+{
+ static int checked = 0;
+ int i;
+
+ if (force || !checked) {
+ for (i = 0; ap_loaded_modules[i]; ++i) {
+ module *m = ap_loaded_modules[i];
+
+ if (!strcmp("event.c", m->name)) {
+ mpm_type = H2_MPM_EVENT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("motorz.c", m->name)) {
+ mpm_type = H2_MPM_MOTORZ;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("mpm_netware.c", m->name)) {
+ mpm_type = H2_MPM_NETWARE;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("prefork.c", m->name)) {
+ mpm_type = H2_MPM_PREFORK;
+ mpm_module = m;
+ /* While http2 can work really well on prefork, it collides
+ * today's use case for prefork: running single-thread app engines
+ * like php. If we restrict h2_workers to 1 per process, php will
+ * work fine, but browser will be limited to 1 active request at a
+ * time. */
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("simple_api.c", m->name)) {
+ mpm_type = H2_MPM_SIMPLE;
+ mpm_module = m;
+ mpm_supported = 0;
+ break;
+ }
+ else if (!strcmp("mpm_winnt.c", m->name)) {
+ mpm_type = H2_MPM_WINNT;
+ mpm_module = m;
+ break;
+ }
+ else if (!strcmp("worker.c", m->name)) {
+ mpm_type = H2_MPM_WORKER;
+ mpm_module = m;
+ break;
+ }
+ }
+ checked = 1;
+ }
+}
+
+h2_mpm_type_t h2_conn_mpm_type(void)
+{
+ check_modules(0);
+ return mpm_type;
+}
+
+const char *h2_conn_mpm_name(void)
+{
+ check_modules(0);
+ return mpm_module? mpm_module->name : "unknown";
+}
+
+int h2_mpm_supported(void)
+{
+ check_modules(0);
+ return mpm_supported;
+}
+
+static module *h2_conn_mpm_module(void)
+{
+ check_modules(0);
+ return mpm_module;
+}
+
+apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s)
+{
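+    /* Re-scan the loaded modules and create the process-wide dummy socket
+     * that h2_c2_create() installs on every secondary connection. */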
+ check_modules(1);
+ return apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
+ APR_PROTO_TCP, pool);
+}
+
+/* APR callback invoked if allocation fails. */
+static int abort_on_oom(int retcode)
+{
+ ap_abort_on_oom();
+ return retcode; /* unreachable, hopefully. */
+}
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent)
+{
+ apr_allocator_t *allocator;
+ apr_status_t status;
+ apr_pool_t *pool;
+ conn_rec *c2;
+ void *cfg;
+ module *mpm;
+
+ ap_assert(c1);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c1,
+ "h2_c2: create for c1(%ld)", c1->id);
+
+ /* We create a pool with its own allocator to be used for
+     * processing a request. This is the only way to make the processing
+     * independent of its parent pool, so that it can run in another
+     * thread.
+ */
+ apr_allocator_create(&allocator);
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ status = apr_pool_create_ex(&pool, parent, NULL, allocator);
+ if (status != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c1,
+ APLOGNO(10004) "h2_c2: create pool");
+ return NULL;
+ }
+ apr_allocator_owner_set(allocator, pool);
+ apr_pool_abort_set(abort_on_oom, pool);
+ apr_pool_tag(pool, "h2_c2_conn");
+
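+    /* The secondary connection starts out as a shallow copy of the master
+     * connection record and then gets its own pool, config, filter chains
+     * and bucket allocator below. */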
+ c2 = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
+ memcpy(c2, c1, sizeof(conn_rec));
+
+ c2->master = c1;
+ c2->pool = pool;
+ c2->conn_config = ap_create_conn_config(pool);
+ c2->notes = apr_table_make(pool, 5);
+ c2->input_filters = NULL;
+ c2->output_filters = NULL;
+ c2->keepalives = 0;
+#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
+ c2->filter_conn_ctx = NULL;
+#endif
+ c2->bucket_alloc = apr_bucket_alloc_create(pool);
+#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
+ c2->data_in_input_filters = 0;
+ c2->data_in_output_filters = 0;
+#endif
+ /* prevent mpm_event from making wrong assumptions about this connection,
+     * e.g. using its socket for an async read check. */
+ c2->clogging_input_filters = 1;
+ c2->log = NULL;
+ c2->aborted = 0;
+ /* We cannot install the master connection socket on the secondary, as
+     * modules mess with timeouts/blocking of the socket, with unwanted
+     * side effects on the master connection processing.
+     * Fortunately, since we never use the secondary socket, we can just
+     * install a single, process-wide dummy and everyone is happy.
+ */
+ ap_set_module_config(c2->conn_config, &core_module, dummy_socket);
+ /* TODO: these should be unique to this thread */
+ c2->sbh = NULL; /*c1->sbh;*/
+ /* TODO: not all mpm modules have learned about secondary connections yet.
+ * copy their config from master to secondary.
+ */
+ if ((mpm = h2_conn_mpm_module()) != NULL) {
+ cfg = ap_get_module_config(c1->conn_config, mpm);
+ ap_set_module_config(c2->conn_config, mpm, cfg);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
+ "h2_c2(%s): created", c2->log_id);
+ return c2;
+}
+
+void h2_c2_destroy(conn_rec *c2)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c2,
+ "h2_c2(%s): destroy", c2->log_id);
+ apr_pool_destroy(c2->pool);
+}
+
+typedef struct {
+ apr_bucket_brigade *bb; /* c2: data in holding area */
+} h2_c2_fctx_in_t;
+
+static apr_status_t h2_c2_filter_in(ap_filter_t* f,
+ apr_bucket_brigade* bb,
+ ap_input_mode_t mode,
+ apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ h2_conn_ctx_t *conn_ctx;
+ h2_c2_fctx_in_t *fctx = f->ctx;
+ apr_status_t status = APR_SUCCESS;
+ apr_bucket *b, *next;
+ apr_off_t bblen;
+ const int trace1 = APLOGctrace1(f->c);
+ apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
+ (apr_size_t)readbytes : APR_SIZE_MAX);
+
+ conn_ctx = h2_conn_ctx_get(f->c);
+ ap_assert(conn_ctx);
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_c2_in(%s-%d): read, mode=%d, block=%d, readbytes=%ld",
+ conn_ctx->id, conn_ctx->stream_id, mode, block, (long)readbytes);
+ }
+
+ if (mode == AP_MODE_INIT) {
+ return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
+ }
+
+ if (f->c->aborted) {
+ return APR_ECONNABORTED;
+ }
+
+ if (!conn_ctx->beam_in) {
+ return APR_EOF;
+ }
+
+ if (!fctx) {
+ fctx = apr_pcalloc(f->c->pool, sizeof(*fctx));
+ f->ctx = fctx;
+ fctx->bb = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
+ }
+
+    /* Clean the brigade of those nasty 0 length non-meta buckets
+     * that apr_brigade_split_line() sometimes produces. */
+ for (b = APR_BRIGADE_FIRST(fctx->bb);
+ b != APR_BRIGADE_SENTINEL(fctx->bb); b = next) {
+ next = APR_BUCKET_NEXT(b);
+ if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
+ apr_bucket_delete(b);
+ }
+ }
+
+ while (APR_BRIGADE_EMPTY(fctx->bb)) {
+ /* Get more input data for our request. */
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_c2_in(%s-%d): get more data from mplx, block=%d, "
+ "readbytes=%ld",
+ conn_ctx->id, conn_ctx->stream_id, block, (long)readbytes);
+ }
+ if (conn_ctx->beam_in) {
+ if (conn_ctx->pipe_in_prod[H2_PIPE_OUT]) {
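+                /* A producer pipe is available: receive non-blocking and,
+                 * when the caller asked for a blocking read, wait on the
+                 * pipe for the producer to signal more data, then retry. */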
+receive:
+ status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, APR_NONBLOCK_READ,
+ conn_ctx->mplx->stream_max_mem);
+ if (APR_STATUS_IS_EAGAIN(status) && APR_BLOCK_READ == block) {
+ status = h2_util_wait_on_pipe(conn_ctx->pipe_in_prod[H2_PIPE_OUT]);
+ if (APR_SUCCESS == status) {
+ goto receive;
+ }
+ }
+ }
+ else {
+ status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, block,
+ conn_ctx->mplx->stream_max_mem);
+ }
+ }
+ else {
+ status = APR_EOF;
+ }
+
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
+ "h2_c2_in(%s-%d): read returned",
+ conn_ctx->id, conn_ctx->stream_id);
+ }
+ if (APR_STATUS_IS_EAGAIN(status)
+ && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
+ /* chunked input handling does not seem to like it if we
+ * return with APR_EAGAIN from a GETLINE read...
+ * upload 100k test on test-ser.example.org hangs */
+ status = APR_SUCCESS;
+ }
+ else if (APR_STATUS_IS_EOF(status)) {
+ break;
+ }
+ else if (status != APR_SUCCESS) {
+ conn_ctx->last_err = status;
+ return status;
+ }
+
+ if (trace1) {
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2,
+ "c2 input recv raw", fctx->bb);
+ }
+ if (h2_c2_logio_add_bytes_in) {
+ apr_brigade_length(bb, 0, &bblen);
+ h2_c2_logio_add_bytes_in(f->c, bblen);
+ }
+ }
+
+ /* Nothing there, no more data to get. Return. */
+ if (status == APR_EOF && APR_BRIGADE_EMPTY(fctx->bb)) {
+ return status;
+ }
+
+ if (trace1) {
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2,
+ "c2 input.bb", fctx->bb);
+ }
+
+ if (APR_BRIGADE_EMPTY(fctx->bb)) {
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
+ "h2_c2_in(%s-%d): no data",
+ conn_ctx->id, conn_ctx->stream_id);
+ }
+ return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
+ }
+
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ /* return all we have */
+ APR_BRIGADE_CONCAT(bb, fctx->bb);
+ }
+ else if (mode == AP_MODE_READBYTES) {
+ status = h2_brigade_concat_length(bb, fctx->bb, rmax);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ status = h2_brigade_copy_length(bb, fctx->bb, rmax);
+ }
+ else if (mode == AP_MODE_GETLINE) {
+ /* we are reading a single LF line, e.g. the HTTP headers.
+         * this has the nasty side effect of splitting the bucket, even
+         * though it ends with CRLF, and creates a 0 length bucket */
+ status = apr_brigade_split_line(bb, fctx->bb, block,
+ HUGE_STRING_LEN);
+ if (APLOGctrace1(f->c)) {
+ char buffer[1024];
+ apr_size_t len = sizeof(buffer)-1;
+ apr_brigade_flatten(bb, buffer, &len);
+ buffer[len] = 0;
+ if (trace1) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_c2_in(%s-%d): getline: %s",
+ conn_ctx->id, conn_ctx->stream_id, buffer);
+ }
+ }
+ }
+ else {
+ /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
+ * to support it. Seems to work. */
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
+ APLOGNO(03472)
+ "h2_c2_in(%s-%d), unsupported READ mode %d",
+ conn_ctx->id, conn_ctx->stream_id, mode);
+ status = APR_ENOTIMPL;
+ }
+
+ if (trace1) {
+ apr_brigade_length(bb, 0, &bblen);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
+ "h2_c2_in(%s-%d): %ld data bytes",
+ conn_ctx->id, conn_ctx->stream_id, (long)bblen);
+ }
+ return status;
+}
+
+static apr_status_t beam_out(conn_rec *c2, h2_conn_ctx_t *conn_ctx, apr_bucket_brigade* bb)
+{
+ apr_off_t written, left;
+ apr_status_t rv;
+
+ apr_brigade_length(bb, 0, &written);
+ rv = h2_beam_send(conn_ctx->beam_out, c2, bb, APR_BLOCK_READ);
+
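+    /* On EAGAIN, h2_beam_send() may have consumed only part of the brigade;
+     * derive the bytes actually written from what is left over and treat
+     * the send as successful. */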
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ apr_brigade_length(bb, 0, &left);
+ written -= left;
+ rv = APR_SUCCESS;
+ }
+ if (written && h2_c2_logio_add_bytes_out) {
+ h2_c2_logio_add_bytes_out(c2, written);
+ }
+ return rv;
+}
+
+static apr_status_t h2_c2_filter_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ apr_status_t rv;
+
+ ap_assert(conn_ctx);
+ rv = beam_out(f->c, conn_ctx, bb);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
+ "h2_c2(%s-%d): output leave",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (APR_SUCCESS != rv) {
+ if (!conn_ctx->done) {
+ h2_beam_abort(conn_ctx->beam_out, f->c);
+ }
+ f->c->aborted = 1;
+ }
+ return rv;
+}
+
+/* post config init */
+apr_status_t h2_c2_init(apr_pool_t *pool, server_rec *s)
+{
+ h2_c2_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
+ h2_c2_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t c2_run_pre_connection(conn_rec *c2, apr_socket_t *csd)
+{
+ if (c2->keepalives == 0) {
+        /* Simulate that we already had a request on this connection. Some
+ * hooks trigger special behaviour when keepalives is 0.
+ * (Not necessarily in pre_connection, but later. Set it here, so it
+ * is in place.) */
+ c2->keepalives = 1;
+        /* We signal that this connection will be closed after the request,
+         * which is true in the sense that we throw away all traffic data
+         * on this c2 connection after each request, although we might
+         * reuse internal structures like memory pools.
+         * The desired effect is that httpd does not try to clean up
+         * any dangling data on this connection when a request is done,
+         * which is unnecessary on an h2 stream.
+         */
+ c2->keepalive = AP_CONN_CLOSE;
+ return ap_run_pre_connection(c2, csd);
+ }
+ ap_assert(c2->output_filters);
+ return APR_SUCCESS;
+}
+
+apr_status_t h2_c2_process(conn_rec *c2, apr_thread_t *thread, int worker_id)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+
+ ap_assert(conn_ctx);
+ ap_assert(conn_ctx->mplx);
+
+ /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
+ *
+ * Each conn_rec->id is supposed to be unique at a point in time. Since
+ * some modules (and maybe external code) uses this id as an identifier
+ * for the request_rec they handle, it needs to be unique for secondary
+ * connections also.
+ *
+     * The MPM module assigns the connection ids and mod_unique_id uses
+     * them to generate identifiers for requests. While this works for
+     * HTTP/1.x, the parallel execution of several requests per
+     * connection will generate duplicate identifiers under load.
+ *
+ * The original implementation for secondary connection identifiers used
+ * to shift the master connection id up and assign the stream id to the
+     * lower bits. This was cramped on 32-bit systems, but on 64-bit there
+     * was enough space.
+ *
+     * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
+     * connection id, even on 64-bit systems, which leads to collisions in
+     * request ids.
+ *
+ * The way master connection ids are generated, there is some space "at the
+     * top" of the lower 32 bits on almost all systems. If you have a setup
+ * with 64k threads per child and 255 child processes, you live on the edge.
+ *
+     * The new implementation shifts the master id by 8 bits and XORs in the
+     * worker id. This will still experience collisions with > 256 h2 workers
+     * under heavy load. There seems to be no way to solve this in all
+     * possible configurations by mod_h2 alone.
+ */
+ c2->id = (c2->master->id << 8)^worker_id;
+
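+    /* On first processing of this c2, install the network level filters
+     * that connect it to the stream's bucket beams and run the
+     * pre_connection hooks once. */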
+ if (!conn_ctx->pre_conn_done) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_c2(%s-%d), adding filters",
+ conn_ctx->id, conn_ctx->stream_id);
+ ap_add_input_filter("H2_C2_NET_IN", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_CATCH_H1", NULL, NULL, c2);
+ ap_add_output_filter("H2_C2_NET_OUT", NULL, NULL, c2);
+
+ c2_run_pre_connection(c2, ap_get_conn_socket(c2));
+ conn_ctx->pre_conn_done = 1;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): process connection",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ c2->current_thread = thread;
+ ap_run_process_connection(c2);
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_c2(%s-%d): processing done",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ return APR_SUCCESS;
+}
+
+static apr_status_t c2_process(h2_conn_ctx_t *conn_ctx, conn_rec *c)
+{
+ const h2_request *req = conn_ctx->request;
+ conn_state_t *cs = c->cs;
+ request_rec *r;
+
+ r = h2_create_request_rec(conn_ctx->request, c);
+ if (!r) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r=NULL",
+ conn_ctx->id, conn_ctx->stream_id);
+ goto cleanup;
+ }
+ if (r->status != HTTP_OK) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): create request_rec failed, r->status=%d",
+ conn_ctx->id, conn_ctx->stream_id, r->status);
+ goto cleanup;
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): created request_rec",
+ conn_ctx->id, conn_ctx->stream_id);
+ conn_ctx->server = r->server;
+
+ /* the request_rec->server carries the timeout value that applies */
+ h2_conn_ctx_set_timeout(conn_ctx, r->server->timeout);
+
+ if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_mplx(%s-%d): copy_files in output",
+ conn_ctx->id, conn_ctx->stream_id);
+ h2_beam_set_copy_files(conn_ctx->beam_out, 1);
+ }
+
+ ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
+ if (cs) {
+ cs->state = CONN_STATE_HANDLER;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): start process_request",
+ conn_ctx->id, conn_ctx->stream_id);
+
+    /* Add the raw bytes of the request (e.g. header frame lengths) to
+     * the logio for this request. */
+ if (req->raw_bytes && h2_c2_logio_add_bytes_in) {
+ h2_c2_logio_add_bytes_in(c, req->raw_bytes);
+ }
+
+ ap_process_request(r);
+ /* After the call to ap_process_request, the
+ * request pool may have been deleted. */
+ r = NULL;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_c2(%s-%d): process_request done",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (cs)
+ cs->state = CONN_STATE_WRITE_COMPLETION;
+
+cleanup:
+ return APR_SUCCESS;
+}
+
+static int h2_c2_hook_process(conn_rec* c)
+{
+ h2_conn_ctx_t *ctx;
+
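+    /* Only handle secondary (c2) connections, i.e. those with a master;
+     * anything else is left to the regular connection processing. */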
+ if (!c->master) {
+ return DECLINED;
+ }
+
+ ctx = h2_conn_ctx_get(c);
+ if (ctx->stream_id) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+ "h2_h2, processing request directly");
+ c2_process(ctx, c);
+ return DONE;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
+                      "secondary_conn(%ld): no h2 stream assigned?", c->id);
+ }
+ return DECLINED;
+}
+
+static void check_push(request_rec *r, const char *tag)
+{
+ apr_array_header_t *push_list = h2_config_push_list(r);
+
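+    /* If push resources are configured and the client does not expect a
+     * 100-continue, announce them via "Link: ...; rel=preload" headers in a
+     * "103 Early Hints" interim response, then restore the original status. */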
+ if (!r->expecting_100 && push_list && push_list->nelts > 0) {
+ int i, old_status;
+ const char *old_line;
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "%s, early announcing %d resources for push",
+ tag, push_list->nelts);
+ for (i = 0; i < push_list->nelts; ++i) {
+ h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
+ apr_table_add(r->headers_out, "Link",
+ apr_psprintf(r->pool, "<%s>; rel=preload%s",
+ push->uri_ref, push->critical? "; critical" : ""));
+ }
+ old_status = r->status;
+ old_line = r->status_line;
+ r->status = 103;
+ r->status_line = "103 Early Hints";
+ ap_send_interim_response(r, 1);
+ r->status = old_status;
+ r->status_line = old_line;
+ }
+}
+
+static int h2_c2_hook_post_read_request(request_rec *r)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
+ "h2_c2(%s-%d): adding request filters",
+ conn_ctx->id, conn_ctx->stream_id);
+
+ /* setup the correct filters to process the request for h2 */
+ ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection);
+
+ /* replace the core http filter that formats response headers
+ * in HTTP/1 with our own that collects status and headers */
+ ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
+
+ ap_add_output_filter("H2_C2_RESPONSE_OUT", NULL, r, r->connection);
+ ap_add_output_filter("H2_C2_TRAILERS_OUT", NULL, r, r->connection);
+ }
+ return DECLINED;
+}
+
+static int h2_c2_hook_fixups(request_rec *r)
+{
+ /* secondary connection? */
+ if (r->connection->master) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(r->connection);
+ if (conn_ctx) {
+ check_push(r, "late_fixup");
+ }
+ }
+ return DECLINED;
+}
+
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c2_logio_add_bytes_in;
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c2_logio_add_bytes_out;
+
+void h2_c2_register_hooks(void)
+{
+ /* When the connection processing actually starts, we might
+     * take over, if the connection is for an h2 stream.
+ */
+ ap_hook_process_connection(h2_c2_hook_process,
+ NULL, NULL, APR_HOOK_FIRST);
+ /* We need to manipulate the standard HTTP/1.1 protocol filters and
+ * install our own. This needs to be done very early. */
+ ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
+ ap_hook_fixups(h2_c2_hook_fixups, NULL, NULL, APR_HOOK_LAST);
+
+ ap_register_input_filter("H2_C2_NET_IN", h2_c2_filter_in,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_OUT", h2_c2_filter_out,
+ NULL, AP_FTYPE_NETWORK);
+ ap_register_output_filter("H2_C2_NET_CATCH_H1", h2_c2_filter_catch_h1_out,
+ NULL, AP_FTYPE_NETWORK);
+
+ ap_register_input_filter("H2_C2_REQUEST_IN", h2_c2_filter_request_in,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_RESPONSE_OUT", h2_c2_filter_response_out,
+ NULL, AP_FTYPE_PROTOCOL);
+ ap_register_output_filter("H2_C2_TRAILERS_OUT", h2_c2_filter_trailers_out,
+ NULL, AP_FTYPE_PROTOCOL);
+}
+
diff --git a/modules/http2/h2_c2.h b/modules/http2/h2_c2.h
new file mode 100644
index 0000000000..0dd78a5597
--- /dev/null
+++ b/modules/http2/h2_c2.h
@@ -0,0 +1,61 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_c2__
+#define __mod_h2__h2_c2__
+
+#include <http_core.h>
+
+typedef enum {
+ H2_MPM_UNKNOWN,
+ H2_MPM_WORKER,
+ H2_MPM_EVENT,
+ H2_MPM_PREFORK,
+ H2_MPM_MOTORZ,
+ H2_MPM_SIMPLE,
+ H2_MPM_NETWARE,
+ H2_MPM_WINNT,
+} h2_mpm_type_t;
+
+/* Returns the type of MPM module detected */
+h2_mpm_type_t h2_conn_mpm_type(void);
+const char *h2_conn_mpm_name(void);
+int h2_mpm_supported(void);
+
+/* Initialize this child process for h2 secondary connection work,
+ * to be called once during child init before multi-processing
+ * starts.
+ */
+apr_status_t h2_c2_child_init(apr_pool_t *pool, server_rec *s);
+
+conn_rec *h2_c2_create(conn_rec *c1, apr_pool_t *parent);
+void h2_c2_destroy(conn_rec *c2);
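+/*
+ * Typical life cycle of a secondary connection (the callers live outside
+ * this file): h2_c2_child_init() once per child process, then per stream
+ * h2_c2_create(), h2_c2_process() on a worker thread and finally
+ * h2_c2_destroy().
+ */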
+
+/**
+ * Process a secondary connection for an HTTP/2 stream request.
+ */
+apr_status_t h2_c2_process(conn_rec *c, apr_thread_t *thread, int worker_id);
+
+void h2_c2_register_hooks(void);
+/*
+ * One time, post config initialization.
+ */
+apr_status_t h2_c2_init(apr_pool_t *pool, server_rec *s);
+
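+/* Optional mod_logio hooks, looked up in h2_c2_init(); NULL when mod_logio
+ * is not loaded. */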
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_c2_logio_add_bytes_in;
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_c2_logio_add_bytes_out;
+
+#endif /* defined(__mod_h2__h2_c2__) */
diff --git a/modules/http2/h2_from_h1.c b/modules/http2/h2_c2_filter.c
index 86e6328617..ed34db681d 100644
--- a/modules/http2/h2_from_h1.c
+++ b/modules/http2/h2_c2_filter.c
@@ -30,12 +30,28 @@
#include <util_time.h>
#include "h2_private.h"
+#include "h2_conn_ctx.h"
#include "h2_headers.h"
-#include "h2_from_h1.h"
-#include "h2_task.h"
+#include "h2_c2_filter.h"
+#include "h2_c2.h"
+#include "h2_mplx.h"
+#include "h2_request.h"
#include "h2_util.h"
+#define H2_FILTER_LOG(name, c, level, rv, msg, bb) \
+ do { \
+ if (APLOG_C_IS_LEVEL((c),(level))) { \
+ char buffer[4 * 1024]; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, "", "", (bb)); \
+ ap_log_cerror(APLOG_MARK, (level), rv, (c), \
+ "FILTER[%s]: %s %s", \
+ (name), (msg), len? buffer : ""); \
+ } \
+ } while (0)
+
+
/* This routine is called by apr_table_do and merges all instances of
* the passed field values into a single array that will be further
* processed by some later routine. Originally intended to help split
@@ -114,7 +130,7 @@ static void fix_vary(request_rec *r)
}
}
-static h2_headers *create_response(h2_task *task, request_rec *r)
+static h2_headers *create_response(request_rec *r)
{
const char *clheader;
const char *ctype;
@@ -229,13 +245,14 @@ static h2_headers *create_response(h2_task *task, request_rec *r)
* keep the set-by-proxy server and date headers, otherwise
* generate a new server header / date header
*/
- if (r->proxyreq != PROXYREQ_RESPONSE
- || !apr_table_get(r->headers_out, "Date")) {
+ if (r->proxyreq != PROXYREQ_NONE
+ && !apr_table_get(r->headers_out, "Date")) {
char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
ap_recent_rfc822_date(date, r->request_time);
apr_table_setn(r->headers_out, "Date", date );
}
- if (r->proxyreq != PROXYREQ_RESPONSE) {
+ if (r->proxyreq != PROXYREQ_NONE
+ && !apr_table_get(r->headers_out, "Server")) {
const char *us = ap_get_server_banner();
if (us) {
apr_table_setn(r->headers_out, "Server", us);
@@ -251,14 +268,17 @@ typedef enum {
H2_RP_DONE
} h2_rp_state_t;
-typedef struct h2_response_parser {
+typedef struct h2_response_parser h2_response_parser;
+struct h2_response_parser {
+ const char *id;
h2_rp_state_t state;
- h2_task *task;
+ conn_rec *c;
+ apr_pool_t *pool;
int http_status;
apr_array_header_t *hlines;
apr_bucket_brigade *tmp;
apr_bucket_brigade *saveto;
-} h2_response_parser;
+};
static apr_status_t parse_header(h2_response_parser *parser, char *line) {
const char *hline;
@@ -274,11 +294,11 @@ static apr_status_t parse_header(h2_response_parser *parser, char *line) {
/* not well formed */
return APR_EINVAL;
}
- hline = apr_psprintf(parser->task->pool, "%s %s", *plast, line);
+ hline = apr_psprintf(parser->pool, "%s %s", *plast, line);
}
else {
/* new header line */
- hline = apr_pstrdup(parser->task->pool, line);
+ hline = apr_pstrdup(parser->pool, line);
}
APR_ARRAY_PUSH(parser->hlines, const char*) = hline;
return APR_SUCCESS;
@@ -287,11 +307,10 @@ static apr_status_t parse_header(h2_response_parser *parser, char *line) {
static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
char *line, apr_size_t len)
{
- h2_task *task = parser->task;
apr_status_t status;
if (!parser->tmp) {
- parser->tmp = apr_brigade_create(task->pool, task->c->bucket_alloc);
+ parser->tmp = apr_brigade_create(parser->pool, parser->c->bucket_alloc);
}
status = apr_brigade_split_line(parser->tmp, bb, APR_BLOCK_READ,
len);
@@ -309,9 +328,9 @@ static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
len -= 2;
line[len] = '\0';
apr_brigade_cleanup(parser->tmp);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
- "h2_task(%s): read response line: %s",
- task->id, line);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response line: %s",
+ parser->id, line);
}
else {
apr_off_t brigade_length;
@@ -330,18 +349,18 @@ static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
*/
status = apr_brigade_length(parser->tmp, 0, &brigade_length);
if ((status != APR_SUCCESS) || (brigade_length > len)) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, task->c, APLOGNO(10257)
- "h2_task(%s): read response, line too long",
- task->id);
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, parser->c, APLOGNO(10257)
+ "h2_c2(%s): read response, line too long",
+ parser->id);
return APR_ENOSPC;
}
/* this does not look like a complete line yet */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
- "h2_task(%s): read response, incomplete line: %s",
- task->id, line);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ "h2_c2(%s): read response, incomplete line: %s",
+ parser->id, line);
if (!parser->saveto) {
- parser->saveto = apr_brigade_create(task->pool,
- task->c->bucket_alloc);
+ parser->saveto = apr_brigade_create(parser->pool,
+ parser->c->bucket_alloc);
}
/*
* Be on the save side and save the parser->tmp brigade
@@ -365,19 +384,18 @@ static apr_status_t get_line(h2_response_parser *parser, apr_bucket_brigade *bb,
static apr_table_t *make_table(h2_response_parser *parser)
{
- h2_task *task = parser->task;
apr_array_header_t *hlines = parser->hlines;
if (hlines) {
- apr_table_t *headers = apr_table_make(task->pool, hlines->nelts);
+ apr_table_t *headers = apr_table_make(parser->pool, hlines->nelts);
int i;
for (i = 0; i < hlines->nelts; ++i) {
char *hline = ((char **)hlines->elts)[i];
char *sep = ap_strchr(hline, ':');
if (!sep) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, task->c,
- APLOGNO(02955) "h2_task(%s): invalid header[%d] '%s'",
- task->id, i, (char*)hline);
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, APR_EINVAL, parser->c,
+ APLOGNO(02955) "h2_c2(%s): invalid header[%d] '%s'",
+ parser->id, i, (char*)hline);
/* not valid format, abort */
return NULL;
}
@@ -393,23 +411,23 @@ static apr_table_t *make_table(h2_response_parser *parser)
return headers;
}
else {
- return apr_table_make(task->pool, 0);
+ return apr_table_make(parser->pool, 0);
}
}
-static apr_status_t pass_response(h2_task *task, ap_filter_t *f,
- h2_response_parser *parser)
+static apr_status_t pass_response(h2_conn_ctx_t *conn_ctx, ap_filter_t *f,
+ h2_response_parser *parser)
{
apr_bucket *b;
apr_status_t status;
h2_headers *response = h2_headers_create(parser->http_status,
make_table(parser),
- NULL, 0, task->pool);
+ NULL, 0, parser->pool);
apr_brigade_cleanup(parser->tmp);
- b = h2_bucket_headers_create(task->c->bucket_alloc, response);
+ b = h2_bucket_headers_create(parser->c->bucket_alloc, response);
APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
- b = apr_bucket_flush_create(task->c->bucket_alloc);
+ b = apr_bucket_flush_create(parser->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(parser->tmp, b);
status = ap_pass_brigade(f->next, parser->tmp);
apr_brigade_cleanup(parser->tmp);
@@ -419,18 +437,17 @@ static apr_status_t pass_response(h2_task *task, ap_filter_t *f,
apr_array_clear(parser->hlines);
if (response->status >= 200) {
- task->output.sent_response = 1;
+ conn_ctx->has_final_response = 1;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
- APLOGNO(03197) "h2_task(%s): passed response %d",
- task->id, response->status);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c,
+ APLOGNO(03197) "h2_c2(%s): passed response %d",
+ parser->id, response->status);
return status;
}
-static apr_status_t parse_status(h2_task *task, char *line)
+static apr_status_t parse_status(h2_response_parser *parser, char *line)
{
- h2_response_parser *parser = task->output.rparser;
- int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
+ int sindex = (apr_date_checkmask(line, "HTTP/#.# ###*")? 9 :
(apr_date_checkmask(line, "HTTP/# ###*")? 7 : 0));
if (sindex > 0) {
int k = sindex + 3;
@@ -449,27 +466,19 @@ static apr_status_t parse_status(h2_task *task, char *line)
* to write something. Probably just the interim response we are
* waiting for. But if there is other data hanging around before
* that, this needs to fail. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03467)
- "h2_task(%s): unable to parse status line: %s",
- task->id, line);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, parser->c, APLOGNO(03467)
+ "h2_c2(%s): unable to parse status line: %s",
+ parser->id, line);
return APR_EINVAL;
}
-apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
- apr_bucket_brigade *bb)
+static apr_status_t parse_response(h2_response_parser *parser,
+ h2_conn_ctx_t *conn_ctx,
+ ap_filter_t* f, apr_bucket_brigade *bb)
{
- h2_response_parser *parser = task->output.rparser;
char line[HUGE_STRING_LEN];
apr_status_t status = APR_SUCCESS;
- if (!parser) {
- parser = apr_pcalloc(task->pool, sizeof(*parser));
- parser->task = task;
- parser->state = H2_RP_STATUS_LINE;
- parser->hlines = apr_array_make(task->pool, 10, sizeof(char *));
- task->output.rparser = parser;
- }
-
while (!APR_BRIGADE_EMPTY(bb) && status == APR_SUCCESS) {
switch (parser->state) {
case H2_RP_STATUS_LINE:
@@ -484,17 +493,17 @@ apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
}
if (parser->state == H2_RP_STATUS_LINE) {
/* instead of parsing, just take it directly */
- status = parse_status(task, line);
+ status = parse_status(parser, line);
}
else if (line[0] == '\0') {
/* end of headers, pass response onward */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): end of response", task->id);
- return pass_response(task, f, parser);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): end of response", parser->id);
+ return pass_response(conn_ctx, f, parser);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): response header %s", task->id, line);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, parser->c,
+ "h2_c2(%s): response header %s", parser->id, line);
status = parse_header(parser, line);
}
break;
@@ -506,18 +515,72 @@ apr_status_t h2_from_h1_parse_response(h2_task *task, ap_filter_t *f,
return status;
}
-apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_response_parser *parser = f->ctx;
+ apr_status_t rv;
+
+ ap_assert(conn_ctx);
+ H2_FILTER_LOG("c2_catch_h1_out", f->c, APLOG_TRACE2, 0, "check", bb);
+
+ if (!conn_ctx->has_final_response) {
+ if (!parser) {
+ parser = apr_pcalloc(f->c->pool, sizeof(*parser));
+ parser->id = apr_psprintf(f->c->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ parser->pool = f->c->pool;
+ parser->c = f->c;
+ parser->state = H2_RP_STATUS_LINE;
+ parser->hlines = apr_array_make(parser->pool, 10, sizeof(char *));
+ f->ctx = parser;
+ }
+
+ if (!APR_BRIGADE_EMPTY(bb)) {
+ apr_bucket *b = APR_BRIGADE_FIRST(bb);
+ if (AP_BUCKET_IS_EOR(b)) {
+ /* TODO: Yikes, this happens when errors are encountered on input
+ * before anything from the repsonse has been processed. The
+                 * before anything from the response has been processed. The
+ */
+ int result = ap_map_http_request_error(conn_ctx->last_err,
+ HTTP_INTERNAL_SERVER_ERROR);
+ request_rec *r = h2_create_request_rec(conn_ctx->request, f->c);
+ ap_die((result >= 400)? result : HTTP_INTERNAL_SERVER_ERROR, r);
+ b = ap_bucket_eor_create(f->c->bucket_alloc, r);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ }
+ }
+ /* There are cases where we need to parse a serialized http/1.1 response.
+ * One example is a 100-continue answer via a mod_proxy setup. */
+ while (bb && !f->c->aborted && !conn_ctx->has_final_response) {
+ rv = parse_response(parser, conn_ctx, f, bb);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, f->c,
+ "h2_c2(%s): parsed response", parser->id);
+ if (APR_BRIGADE_EMPTY(bb) || APR_SUCCESS != rv) {
+ return rv;
+ }
+ }
+ }
+
+ return ap_pass_brigade(f->next, bb);
+}
+
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb)
{
- h2_task *task = f->ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
request_rec *r = f->r;
apr_bucket *b, *bresp, *body_bucket = NULL, *next;
ap_bucket_error *eb = NULL;
h2_headers *response = NULL;
int headers_passing = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): output_filter called", task->id);
-
- if (!task->output.sent_response && !f->c->aborted) {
+
+ H2_FILTER_LOG("c2_response_out", f->c, APLOG_TRACE1, 0, "called with", bb);
+
+ if (f->c->aborted || !conn_ctx || conn_ctx->has_final_response) {
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ if (!conn_ctx->has_final_response) {
/* check, if we need to send the response now. Until we actually
* see a DATA bucket or some EOS/EOR, we do not do so. */
for (b = APR_BRIGADE_FIRST(bb);
@@ -533,39 +596,40 @@ apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
*/
ap_remove_output_filter(f);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, f->c,
- "h2_task(%s): eoc bucket passed", task->id);
+ "h2_c2(%s): eoc bucket passed", conn_ctx->id);
return ap_pass_brigade(f->next, bb);
}
else if (H2_BUCKET_IS_HEADERS(b)) {
headers_passing = 1;
}
- else if (!APR_BUCKET_IS_FLUSH(b)) {
+ else if (!APR_BUCKET_IS_FLUSH(b)) {
body_bucket = b;
break;
}
}
-
+
if (eb) {
int st = eb->status;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03047)
- "h2_task(%s): err bucket status=%d", task->id, st);
+ "h2_c2(%s): err bucket status=%d",
+ conn_ctx->id, st);
/* throw everything away and replace it with the error response
* generated by ap_die() */
apr_brigade_cleanup(bb);
ap_die(st, r);
return AP_FILTER_ERROR;
}
-
+
if (body_bucket || !headers_passing) {
/* time to insert the response bucket before the body or if
* no h2_headers is passed, e.g. the response is empty */
- response = create_response(task, r);
+ response = create_response(r);
if (response == NULL) {
ap_log_cerror(APLOG_MARK, APLOG_NOTICE, 0, f->c, APLOGNO(03048)
- "h2_task(%s): unable to create response", task->id);
+ "h2_c2(%s): unable to create response", conn_ctx->id);
return APR_ENOMEM;
}
-
+
bresp = h2_bucket_headers_create(f->c->bucket_alloc, response);
if (body_bucket) {
APR_BUCKET_INSERT_BEFORE(body_bucket, bresp);
@@ -573,15 +637,15 @@ apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
else {
APR_BRIGADE_INSERT_HEAD(bb, bresp);
}
- task->output.sent_response = 1;
+ conn_ctx->has_final_response = 1;
r->sent_bodyct = 1;
+ ap_remove_output_filter_byhandle(f->r->output_filters, "H2_C2_NET_CATCH_H1");
}
}
-
+
if (r->header_only || AP_STATUS_IS_HEADER_ONLY(r->status)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_task(%s): headers only, cleanup output brigade",
- task->id);
+ "h2_c2(%s): headers only, cleanup output brigade", conn_ctx->id);
b = body_bucket? body_bucket : APR_BRIGADE_FIRST(bb);
while (b != APR_BRIGADE_SENTINEL(bb)) {
next = APR_BUCKET_NEXT(b);
@@ -595,14 +659,24 @@ apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb)
b = next;
}
}
- else if (task->output.sent_response) {
+ if (conn_ctx->has_final_response) {
/* lets get out of the way, our task is done */
ap_remove_output_filter(f);
}
return ap_pass_brigade(f->next, bb);
}
-static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
+
+struct h2_chunk_filter_t {
+ const char *id;
+ int eos_chunk_added;
+ apr_bucket_brigade *bbchunk;
+ apr_off_t chunked_total;
+};
+typedef struct h2_chunk_filter_t h2_chunk_filter_t;
+
+
+static void make_chunk(conn_rec *c, h2_chunk_filter_t *fctx, apr_bucket_brigade *bb,
apr_bucket *first, apr_off_t chunk_len,
apr_bucket *tail)
{
@@ -610,24 +684,24 @@ static void make_chunk(h2_task *task, apr_bucket_brigade *bb,
* HTTP/1.1 chunked encoding format. If tail is NULL, the chunk extends
* to the end of the brigade. */
char buffer[128];
- apr_bucket *c;
+ apr_bucket *b;
apr_size_t len;
len = (apr_size_t)apr_snprintf(buffer, H2_ALEN(buffer),
"%"APR_UINT64_T_HEX_FMT"\r\n", (apr_uint64_t)chunk_len);
- c = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
- APR_BUCKET_INSERT_BEFORE(first, c);
- c = apr_bucket_heap_create("\r\n", 2, NULL, bb->bucket_alloc);
+ b = apr_bucket_heap_create(buffer, len, NULL, bb->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(first, b);
+ b = apr_bucket_immortal_create("\r\n", 2, bb->bucket_alloc);
if (tail) {
- APR_BUCKET_INSERT_BEFORE(tail, c);
+ APR_BUCKET_INSERT_BEFORE(tail, b);
}
else {
- APR_BRIGADE_INSERT_TAIL(bb, c);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
}
- task->input.chunked_total += chunk_len;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- "h2_task(%s): added chunk %ld, total %ld",
- task->id, (long)chunk_len, (long)task->input.chunked_total);
+ fctx->chunked_total += chunk_len;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
+ "h2_c2(%s): added chunk %ld, total %ld",
+ fctx->id, (long)chunk_len, (long)fctx->chunked_total);
}
static int ser_header(void *ctx, const char *name, const char *value)
@@ -637,47 +711,36 @@ static int ser_header(void *ctx, const char *name, const char *value)
return 1;
}
-static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task,
+static apr_status_t read_and_chunk(ap_filter_t *f, h2_conn_ctx_t *conn_ctx,
apr_read_type_e block) {
+ h2_chunk_filter_t *fctx = f->ctx;
request_rec *r = f->r;
apr_status_t status = APR_SUCCESS;
- apr_bucket_brigade *bb = task->input.bbchunk;
-
- if (!bb) {
- bb = apr_brigade_create(r->pool, f->c->bucket_alloc);
- task->input.bbchunk = bb;
+
+ if (!fctx->bbchunk) {
+ fctx->bbchunk = apr_brigade_create(r->pool, f->c->bucket_alloc);
}
- if (APR_BRIGADE_EMPTY(bb)) {
+ if (APR_BRIGADE_EMPTY(fctx->bbchunk)) {
apr_bucket *b, *next, *first_data = NULL;
apr_bucket_brigade *tmp;
apr_off_t bblen = 0;
/* get more data from the lower layer filters. Always do this
* in larger pieces, since we handle the read modes ourself. */
- status = ap_get_brigade(f->next, bb,
- AP_MODE_READBYTES, block, 32*1024);
- if (status == APR_EOF) {
- if (!task->input.eos) {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
- task->input.eos = 1;
- return APR_SUCCESS;
- }
- ap_remove_input_filter(f);
- return status;
-
- }
- else if (status != APR_SUCCESS) {
+ status = ap_get_brigade(f->next, fctx->bbchunk,
+ AP_MODE_READBYTES, block, conn_ctx->mplx->stream_max_mem);
+ if (status != APR_SUCCESS) {
return status;
}
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb) && !task->input.eos;
+ for (b = APR_BRIGADE_FIRST(fctx->bbchunk);
+ b != APR_BRIGADE_SENTINEL(fctx->bbchunk);
b = next) {
next = APR_BUCKET_NEXT(b);
if (APR_BUCKET_IS_METADATA(b)) {
if (first_data) {
- make_chunk(task, bb, first_data, bblen, b);
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, b);
first_data = NULL;
}
@@ -686,29 +749,35 @@ static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task,
ap_assert(headers);
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "h2_task(%s): receiving trailers", task->id);
- tmp = apr_brigade_split_ex(bb, b, NULL);
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
if (!apr_is_empty_table(headers->headers)) {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n");
- apr_table_do(ser_header, bb, headers->headers, NULL);
- status = apr_brigade_puts(bb, NULL, NULL, "\r\n");
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n");
+ apr_table_do(ser_header, fctx->bbchunk, headers->headers, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "\r\n");
}
else {
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
}
r->trailers_in = apr_table_clone(r->pool, headers->headers);
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
- APR_BRIGADE_CONCAT(bb, tmp);
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
apr_brigade_destroy(tmp);
- task->input.eos = 1;
+ fctx->eos_chunk_added = 1;
}
else if (APR_BUCKET_IS_EOS(b)) {
- tmp = apr_brigade_split_ex(bb, b, NULL);
- status = apr_brigade_puts(bb, NULL, NULL, "0\r\n\r\n");
- APR_BRIGADE_CONCAT(bb, tmp);
- apr_brigade_destroy(tmp);
- task->input.eos = 1;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
+ "h2_c2(%s-%d): receiving eos",
+ conn_ctx->id, conn_ctx->stream_id);
+ if (!fctx->eos_chunk_added) {
+ tmp = apr_brigade_split_ex(fctx->bbchunk, b, NULL);
+ status = apr_brigade_puts(fctx->bbchunk, NULL, NULL, "0\r\n\r\n");
+ APR_BRIGADE_CONCAT(fctx->bbchunk, tmp);
+ apr_brigade_destroy(tmp);
+ }
+ fctx->eos_chunk_added = 0;
}
}
else if (b->length == 0) {
@@ -725,29 +794,38 @@ static apr_status_t read_and_chunk(ap_filter_t *f, h2_task *task,
}
if (first_data) {
- make_chunk(task, bb, first_data, bblen, NULL);
+ make_chunk(f->c, fctx, fctx->bbchunk, first_data, bblen, NULL);
}
}
return status;
}
-apr_status_t h2_filter_request_in(ap_filter_t* f,
+apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
apr_bucket_brigade* bb,
ap_input_mode_t mode,
apr_read_type_e block,
apr_off_t readbytes)
{
- h2_task *task = f->ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
+ h2_chunk_filter_t *fctx = f->ctx;
request_rec *r = f->r;
apr_status_t status = APR_SUCCESS;
apr_bucket *b, *next;
core_server_config *conf =
(core_server_config *) ap_get_module_config(r->server->module_config,
&core_module);
+ ap_assert(conn_ctx);
+
+ if (!fctx) {
+ fctx = apr_pcalloc(r->pool, sizeof(*fctx));
+ fctx->id = apr_psprintf(r->pool, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
+ f->ctx = fctx;
+ }
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, f->r,
- "h2_task(%s): request filter, exp=%d", task->id, r->expecting_100);
- if (!task->request->chunked) {
+ "h2_c2(%s-%d): request input, exp=%d",
+ conn_ctx->id, conn_ctx->stream_id, r->expecting_100);
+ if (!conn_ctx->request->chunked) {
status = ap_get_brigade(f->next, bb, mode, block, readbytes);
/* pipe data through, just take care of trailers */
for (b = APR_BRIGADE_FIRST(bb);
@@ -757,7 +835,8 @@ apr_status_t h2_filter_request_in(ap_filter_t* f,
h2_headers *headers = h2_bucket_headers_get(b);
ap_assert(headers);
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "h2_task(%s): receiving trailers", task->id);
+ "h2_c2(%s-%d): receiving trailers",
+ conn_ctx->id, conn_ctx->stream_id);
r->trailers_in = headers->headers;
if (conf && conf->merge_trailers == AP_MERGE_TRAILERS_ENABLE) {
r->headers_in = apr_table_overlay(r->pool, r->headers_in,
@@ -767,8 +846,8 @@ apr_status_t h2_filter_request_in(ap_filter_t* f,
apr_bucket_destroy(b);
ap_remove_input_filter(f);
- if (headers->raw_bytes && h2_task_logio_add_bytes_in) {
- h2_task_logio_add_bytes_in(task->c, headers->raw_bytes);
+ if (headers->raw_bytes && h2_c2_logio_add_bytes_in) {
+ h2_c2_logio_add_bytes_in(f->c, headers->raw_bytes);
}
break;
}
@@ -781,34 +860,33 @@ apr_status_t h2_filter_request_in(ap_filter_t* f,
* transfer encoding and trailers.
* We need to simulate chunked encoding for it to be happy.
*/
- if ((status = read_and_chunk(f, task, block)) != APR_SUCCESS) {
+ if ((status = read_and_chunk(f, conn_ctx, block)) != APR_SUCCESS) {
return status;
}
if (mode == AP_MODE_EXHAUSTIVE) {
/* return all we have */
- APR_BRIGADE_CONCAT(bb, task->input.bbchunk);
+ APR_BRIGADE_CONCAT(bb, fctx->bbchunk);
}
else if (mode == AP_MODE_READBYTES) {
- status = h2_brigade_concat_length(bb, task->input.bbchunk, readbytes);
+ status = h2_brigade_concat_length(bb, fctx->bbchunk, readbytes);
}
else if (mode == AP_MODE_SPECULATIVE) {
- status = h2_brigade_copy_length(bb, task->input.bbchunk, readbytes);
+ status = h2_brigade_copy_length(bb, fctx->bbchunk, readbytes);
}
else if (mode == AP_MODE_GETLINE) {
/* we are reading a single LF line, e.g. the HTTP headers.
* this has the nasty side effect to split the bucket, even
* though it ends with CRLF and creates a 0 length bucket */
- status = apr_brigade_split_line(bb, task->input.bbchunk, block,
- HUGE_STRING_LEN);
+ status = apr_brigade_split_line(bb, fctx->bbchunk, block, HUGE_STRING_LEN);
if (APLOGctrace1(f->c)) {
char buffer[1024];
apr_size_t len = sizeof(buffer)-1;
apr_brigade_flatten(bb, buffer, &len);
buffer[len] = 0;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_task(%s): getline: %s",
- task->id, buffer);
+ "h2_c2(%s-%d): getline: %s",
+ conn_ctx->id, conn_ctx->stream_id, buffer);
}
}
else {
@@ -816,21 +894,21 @@ apr_status_t h2_filter_request_in(ap_filter_t* f,
* to support it. Seems to work. */
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
APLOGNO(02942)
- "h2_task, unsupported READ mode %d", mode);
+ "h2_c2, unsupported READ mode %d", mode);
status = APR_ENOTIMPL;
}
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2, "forwarding input", bb);
+ h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE2, "returning input", bb);
return status;
}
-apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
{
- h2_task *task = f->ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(f->c);
request_rec *r = f->r;
apr_bucket *b, *e;
- if (task && r) {
+ if (conn_ctx && r) {
/* Detect the EOS/EOR bucket and forward any trailers that may have
* been set to our h2_headers.
*/
@@ -844,7 +922,8 @@ apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb)
apr_table_t *trailers;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, f->c, APLOGNO(03049)
- "h2_task(%s): sending trailers", task->id);
+ "h2_c2(%s-%d): sending trailers",
+ conn_ctx->id, conn_ctx->stream_id);
trailers = apr_table_clone(r->pool, r->trailers_out);
headers = h2_headers_rcreate(r, HTTP_OK, trailers, r->pool);
e = h2_bucket_headers_create(bb->bucket_alloc, headers);
diff --git a/modules/http2/h2_from_h1.h b/modules/http2/h2_c2_filter.h
index 68a24fd70e..4b00df71d5 100644
--- a/modules/http2/h2_from_h1.h
+++ b/modules/http2/h2_c2_filter.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef __mod_h2__h2_from_h1__
-#define __mod_h2__h2_from_h1__
+#ifndef __mod_h2__h2_c2_filter__
+#define __mod_h2__h2_c2_filter__
/**
* h2_from_h1 parses a HTTP/1.1 response into
@@ -32,19 +32,18 @@
* processing, so this seems to be the way for now.
*/
struct h2_headers;
-struct h2_task;
+struct h2_response_parser;
-apr_status_t h2_from_h1_parse_response(struct h2_task *task, ap_filter_t *f,
- apr_bucket_brigade *bb);
+apr_status_t h2_c2_filter_catch_h1_out(ap_filter_t* f, apr_bucket_brigade* bb);
-apr_status_t h2_filter_headers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+apr_status_t h2_c2_filter_response_out(ap_filter_t *f, apr_bucket_brigade *bb);
-apr_status_t h2_filter_request_in(ap_filter_t* f,
+apr_status_t h2_c2_filter_request_in(ap_filter_t* f,
apr_bucket_brigade* brigade,
ap_input_mode_t mode,
apr_read_type_e block,
apr_off_t readbytes);
-apr_status_t h2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
+apr_status_t h2_c2_filter_trailers_out(ap_filter_t *f, apr_bucket_brigade *bb);
-#endif /* defined(__mod_h2__h2_from_h1__) */
+#endif /* defined(__mod_h2__h2_c2_filter__) */
diff --git a/modules/http2/h2_config.c b/modules/http2/h2_config.c
index 06368fd53b..4df058d95d 100644
--- a/modules/http2/h2_config.c
+++ b/modules/http2/h2_config.c
@@ -30,11 +30,10 @@
#include <apr_strings.h>
#include "h2.h"
-#include "h2_alt_svc.h"
-#include "h2_ctx.h"
-#include "h2_conn.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
#include "h2_config.h"
-#include "h2_h2.h"
+#include "h2_protocol.h"
#include "h2_private.h"
#define DEF_VAL (-1)
@@ -54,41 +53,37 @@
/* Apache httpd module configuration for h2. */
typedef struct h2_config {
const char *name;
- int h2_max_streams; /* max concurrent # streams (http2) */
- int h2_window_size; /* stream window size (http2) */
- int min_workers; /* min # of worker threads/child */
- int max_workers; /* max # of worker threads/child */
- int max_worker_idle_secs; /* max # of idle seconds for worker */
- int stream_max_mem_size; /* max # bytes held in memory/stream */
- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
- int serialize_headers; /* Use serialized HTTP/1.1 headers for
- processing, better compatibility */
- int h2_direct; /* if mod_h2 is active directly */
- int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
- apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
- int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
- int h2_push; /* if HTTP/2 server push is enabled */
- struct apr_hash_t *priorities;/* map of content-type to h2_priority records */
+ int h2_max_streams; /* max concurrent # streams (http2) */
+ int h2_window_size; /* stream window size (http2) */
+ int min_workers; /* min # of worker threads/child */
+ int max_workers; /* max # of worker threads/child */
+ int max_worker_idle_secs; /* max # of idle seconds for worker */
+ int stream_max_mem_size; /* max # bytes held in memory/stream */
+ int h2_direct; /* if mod_h2 is active directly */
+ int modern_tls_only; /* Accept only modern TLS in HTTP/2 connections */
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ apr_int64_t tls_warmup_size; /* Amount of TLS data to send before going full write size */
+ int tls_cooldown_secs; /* Seconds of idle time before going back to small TLS records */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ struct apr_hash_t *priorities; /* map of content-type to h2_priority records */
- int push_diary_size; /* # of entries in push diary */
- int copy_files; /* if files shall be copied vs setaside on output */
- apr_array_header_t *push_list;/* list of h2_push_res configurations */
- int early_hints; /* support status code 103 */
+ int push_diary_size; /* # of entries in push diary */
+ int copy_files; /* if files shall be copied vs setaside on output */
+ apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
int padding_bits;
int padding_always;
int output_buffered;
+ apr_interval_time_t stream_timeout;/* beam timeout */
} h2_config;
typedef struct h2_dir_config {
const char *name;
- apr_array_header_t *alt_svcs; /* h2_alt_svc specs for this server */
- int alt_svc_max_age; /* seconds clients can rely on alt-svc info*/
- int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
- int h2_push; /* if HTTP/2 server push is enabled */
- apr_array_header_t *push_list;/* list of h2_push_res configurations */
- int early_hints; /* support status code 103 */
+ int h2_upgrade; /* Allow HTTP/1 upgrade to h2/h2c */
+ int h2_push; /* if HTTP/2 server push is enabled */
+ apr_array_header_t *push_list; /* list of h2_push_res configurations */
+ int early_hints; /* support status code 103 */
+ apr_interval_time_t stream_timeout;/* beam timeout */
} h2_dir_config;
@@ -100,9 +95,6 @@ static h2_config defconf = {
-1, /* max workers */
10 * 60, /* max workers idle secs */
32 * 1024, /* stream max mem size */
- NULL, /* no alt-svcs */
- -1, /* alt-svc max age */
- 0, /* serialize headers */
-1, /* h2 direct mode */
1, /* modern TLS only */
-1, /* HTTP/1 Upgrade support */
@@ -116,17 +108,17 @@ static h2_config defconf = {
0, /* early hints, http status 103 */
0, /* padding bits */
1, /* padding always */
- 1, /* strean output buffered */
+ 1, /* stream output buffered */
+ -1, /* beam timeout */
};
static h2_dir_config defdconf = {
"default",
- NULL, /* no alt-svcs */
- -1, /* alt-svc max age */
-1, /* HTTP/1 Upgrade support */
-1, /* HTTP/2 server push enabled */
NULL, /* push list */
-1, /* early hints, http status 103 */
+ -1, /* beam timeout */
};
void h2_config_init(apr_pool_t *pool)
@@ -146,8 +138,6 @@ void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
conf->max_workers = DEF_VAL;
conf->max_worker_idle_secs = DEF_VAL;
conf->stream_max_mem_size = DEF_VAL;
- conf->alt_svc_max_age = DEF_VAL;
- conf->serialize_headers = DEF_VAL;
conf->h2_direct = DEF_VAL;
conf->modern_tls_only = DEF_VAL;
conf->h2_upgrade = DEF_VAL;
@@ -162,6 +152,7 @@ void *h2_config_create_svr(apr_pool_t *pool, server_rec *s)
conf->padding_bits = DEF_VAL;
conf->padding_always = DEF_VAL;
conf->output_buffered = DEF_VAL;
+ conf->stream_timeout = DEF_VAL;
return conf;
}
@@ -179,9 +170,6 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
n->max_workers = H2_CONFIG_GET(add, base, max_workers);
n->max_worker_idle_secs = H2_CONFIG_GET(add, base, max_worker_idle_secs);
n->stream_max_mem_size = H2_CONFIG_GET(add, base, stream_max_mem_size);
- n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
- n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
- n->serialize_headers = H2_CONFIG_GET(add, base, serialize_headers);
n->h2_direct = H2_CONFIG_GET(add, base, h2_direct);
n->modern_tls_only = H2_CONFIG_GET(add, base, modern_tls_only);
n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
@@ -206,6 +194,7 @@ static void *h2_config_merge(apr_pool_t *pool, void *basev, void *addv)
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
n->padding_bits = H2_CONFIG_GET(add, base, padding_bits);
n->padding_always = H2_CONFIG_GET(add, base, padding_always);
+ n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
return n;
}
@@ -221,10 +210,10 @@ void *h2_config_create_dir(apr_pool_t *pool, char *x)
char *name = apr_pstrcat(pool, "dir[", s, "]", NULL);
conf->name = name;
- conf->alt_svc_max_age = DEF_VAL;
conf->h2_upgrade = DEF_VAL;
conf->h2_push = DEF_VAL;
conf->early_hints = DEF_VAL;
+ conf->stream_timeout = DEF_VAL;
return conf;
}
@@ -235,8 +224,6 @@ void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
h2_dir_config *n = (h2_dir_config *)apr_pcalloc(pool, sizeof(h2_dir_config));
n->name = apr_pstrcat(pool, "merged[", add->name, ", ", base->name, "]", NULL);
- n->alt_svcs = add->alt_svcs? add->alt_svcs : base->alt_svcs;
- n->alt_svc_max_age = H2_CONFIG_GET(add, base, alt_svc_max_age);
n->h2_upgrade = H2_CONFIG_GET(add, base, h2_upgrade);
n->h2_push = H2_CONFIG_GET(add, base, h2_push);
if (add->push_list && base->push_list) {
@@ -246,6 +233,7 @@ void *h2_config_merge_dir(apr_pool_t *pool, void *basev, void *addv)
n->push_list = add->push_list? add->push_list : base->push_list;
}
n->early_hints = H2_CONFIG_GET(add, base, early_hints);
+ n->stream_timeout = H2_CONFIG_GET(add, base, stream_timeout);
return n;
}
@@ -264,10 +252,6 @@ static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t v
return H2_CONFIG_GET(conf, &defconf, max_worker_idle_secs);
case H2_CONF_STREAM_MAX_MEM:
return H2_CONFIG_GET(conf, &defconf, stream_max_mem_size);
- case H2_CONF_ALT_SVC_MAX_AGE:
- return H2_CONFIG_GET(conf, &defconf, alt_svc_max_age);
- case H2_CONF_SER_HEADERS:
- return H2_CONFIG_GET(conf, &defconf, serialize_headers);
case H2_CONF_MODERN_TLS_ONLY:
return H2_CONFIG_GET(conf, &defconf, modern_tls_only);
case H2_CONF_UPGRADE:
@@ -292,6 +276,8 @@ static apr_int64_t h2_srv_config_geti64(const h2_config *conf, h2_config_var_t v
return H2_CONFIG_GET(conf, &defconf, padding_always);
case H2_CONF_OUTPUT_BUFFER:
return H2_CONFIG_GET(conf, &defconf, output_buffered);
+ case H2_CONF_STREAM_TIMEOUT:
+ return H2_CONFIG_GET(conf, &defconf, stream_timeout);
default:
return DEF_VAL;
}
@@ -318,12 +304,6 @@ static void h2_srv_config_seti(h2_config *conf, h2_config_var_t var, int val)
case H2_CONF_STREAM_MAX_MEM:
H2_CONFIG_SET(conf, stream_max_mem_size, val);
break;
- case H2_CONF_ALT_SVC_MAX_AGE:
- H2_CONFIG_SET(conf, alt_svc_max_age, val);
- break;
- case H2_CONF_SER_HEADERS:
- H2_CONFIG_SET(conf, serialize_headers, val);
- break;
case H2_CONF_MODERN_TLS_ONLY:
H2_CONFIG_SET(conf, modern_tls_only, val);
break;
@@ -371,6 +351,9 @@ static void h2_srv_config_seti64(h2_config *conf, h2_config_var_t var, apr_int64
case H2_CONF_TLS_WARMUP_SIZE:
H2_CONFIG_SET(conf, tls_warmup_size, val);
break;
+ case H2_CONF_STREAM_TIMEOUT:
+ H2_CONFIG_SET(conf, stream_timeout, val);
+ break;
default:
h2_srv_config_seti(conf, var, (int)val);
break;
@@ -396,14 +379,14 @@ static const h2_dir_config *h2_config_rget(request_rec *r)
static apr_int64_t h2_dir_config_geti64(const h2_dir_config *conf, h2_config_var_t var)
{
switch(var) {
- case H2_CONF_ALT_SVC_MAX_AGE:
- return H2_CONFIG_GET(conf, &defdconf, alt_svc_max_age);
case H2_CONF_UPGRADE:
return H2_CONFIG_GET(conf, &defdconf, h2_upgrade);
case H2_CONF_PUSH:
return H2_CONFIG_GET(conf, &defdconf, h2_push);
case H2_CONF_EARLY_HINTS:
return H2_CONFIG_GET(conf, &defdconf, early_hints);
+ case H2_CONF_STREAM_TIMEOUT:
+ return H2_CONFIG_GET(conf, &defdconf, stream_timeout);
default:
return DEF_VAL;
@@ -415,9 +398,6 @@ static void h2_config_seti(h2_dir_config *dconf, h2_config *conf, h2_config_var_
int set_srv = !dconf;
if (dconf) {
switch(var) {
- case H2_CONF_ALT_SVC_MAX_AGE:
- H2_CONFIG_SET(dconf, alt_svc_max_age, val);
- break;
case H2_CONF_UPGRADE:
H2_CONFIG_SET(dconf, h2_upgrade, val);
break;
@@ -444,6 +424,9 @@ static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_va
int set_srv = !dconf;
if (dconf) {
switch(var) {
+ case H2_CONF_STREAM_TIMEOUT:
+ H2_CONFIG_SET(dconf, stream_timeout, val);
+ break;
default:
/* not handled in dir_conf */
set_srv = 1;
@@ -458,18 +441,11 @@ static void h2_config_seti64(h2_dir_config *dconf, h2_config *conf, h2_config_va
static const h2_config *h2_config_get(conn_rec *c)
{
- h2_ctx *ctx = h2_ctx_get(c, 0);
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
- if (ctx) {
- if (ctx->config) {
- return ctx->config;
- }
- else if (ctx->server) {
- ctx->config = h2_config_sget(ctx->server);
- return ctx->config;
- }
+ if (conn_ctx && conn_ctx->server) {
+ return h2_config_sget(conn_ctx->server);
}
-
return h2_config_sget(c->base_server);
}
@@ -526,18 +502,6 @@ apr_array_header_t *h2_config_push_list(request_rec *r)
return sconf? sconf->push_list : NULL;
}
-apr_array_header_t *h2_config_alt_svcs(request_rec *r)
-{
- const h2_config *sconf;
- const h2_dir_config *conf = h2_config_rget(r);
-
- if (conf && conf->alt_svcs) {
- return conf->alt_svcs;
- }
- sconf = h2_config_sget(r->server);
- return sconf? sconf->alt_svcs : NULL;
-}
-
const struct h2_priority *h2_cconfig_get_priority(conn_rec *c, const char *content_type)
{
const h2_config *conf = h2_config_get(c);
@@ -615,41 +579,6 @@ static const char *h2_conf_set_stream_max_mem_size(cmd_parms *cmd,
return NULL;
}
-static const char *h2_add_alt_svc(cmd_parms *cmd,
- void *dirconf, const char *value)
-{
- if (value && *value) {
- h2_alt_svc *as = h2_alt_svc_parse(value, cmd->pool);
- if (!as) {
- return "unable to parse alt-svc specifier";
- }
-
- if (cmd->path) {
- h2_dir_config *dcfg = (h2_dir_config *)dirconf;
- if (!dcfg->alt_svcs) {
- dcfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
- }
- APR_ARRAY_PUSH(dcfg->alt_svcs, h2_alt_svc*) = as;
- }
- else {
- h2_config *cfg = (h2_config *)h2_config_sget(cmd->server);
- if (!cfg->alt_svcs) {
- cfg->alt_svcs = apr_array_make(cmd->pool, 5, sizeof(h2_alt_svc*));
- }
- APR_ARRAY_PUSH(cfg->alt_svcs, h2_alt_svc*) = as;
- }
- }
- return NULL;
-}
-
-static const char *h2_conf_set_alt_svc_max_age(cmd_parms *cmd,
- void *dirconf, const char *value)
-{
- int val = (int)apr_atoi64(value);
- CONFIG_CMD_SET(cmd, dirconf, H2_CONF_ALT_SVC_MAX_AGE, val);
- return NULL;
-}
-
static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
void *dirconf, const char *value)
{
@@ -661,18 +590,15 @@ static const char *h2_conf_set_session_extra_files(cmd_parms *cmd,
return NULL;
}
-static const char *h2_conf_set_serialize_headers(cmd_parms *cmd,
+static const char *h2_conf_set_serialize_headers(cmd_parms *parms,
void *dirconf, const char *value)
{
if (!strcasecmp(value, "On")) {
- CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 1);
- return NULL;
+ ap_log_error(APLOG_MARK, APLOG_WARNING, 0, parms->server, APLOGNO(10307)
+ "%s: this feature has been disabled and the directive "
+ "to enable it is ignored.", parms->cmd->name);
}
- else if (!strcasecmp(value, "Off")) {
- CONFIG_CMD_SET(cmd, dirconf, H2_CONF_SER_HEADERS, 0);
- return NULL;
- }
- return "value must be On or Off";
+ return NULL;
}
static const char *h2_conf_set_direct(cmd_parms *cmd,
@@ -928,6 +854,20 @@ static const char *h2_conf_set_output_buffer(cmd_parms *cmd,
return "value must be On or Off";
}
+static const char *h2_conf_set_stream_timeout(cmd_parms *cmd,
+ void *dirconf, const char *value)
+{
+ apr_status_t rv;
+ apr_interval_time_t timeout;
+
+ rv = ap_timeout_parameter_parse(value, &timeout, "s");
+ if (rv != APR_SUCCESS) {
+ return "Invalid timeout value";
+ }
+ CONFIG_CMD_SET64(cmd, dirconf, H2_CONF_STREAM_TIMEOUT, timeout);
+ return NULL;
+}
+
void h2_get_num_workers(server_rec *s, int *minw, int *maxw)
{
int threads_per_child = 0;
@@ -966,12 +906,8 @@ const command_rec h2_cmds[] = {
RSRC_CONF, "maximum number of idle seconds before a worker shuts down"),
AP_INIT_TAKE1("H2StreamMaxMemSize", h2_conf_set_stream_max_mem_size, NULL,
RSRC_CONF, "maximum number of bytes buffered in memory for a stream"),
- AP_INIT_TAKE1("H2AltSvc", h2_add_alt_svc, NULL,
- RSRC_CONF, "adds an Alt-Svc for this server"),
- AP_INIT_TAKE1("H2AltSvcMaxAge", h2_conf_set_alt_svc_max_age, NULL,
- RSRC_CONF, "set the maximum age (in seconds) that client can rely on alt-svc information"),
AP_INIT_TAKE1("H2SerializeHeaders", h2_conf_set_serialize_headers, NULL,
- RSRC_CONF, "on to enable header serialization for compatibility"),
+ RSRC_CONF, "disabled, this directive has no longer an effect."),
AP_INIT_TAKE1("H2ModernTLSOnly", h2_conf_set_modern_tls_only, NULL,
RSRC_CONF, "off to not impose RFC 7540 restrictions on TLS"),
AP_INIT_TAKE1("H2Upgrade", h2_conf_set_upgrade, NULL,
@@ -1000,6 +936,8 @@ const command_rec h2_cmds[] = {
RSRC_CONF, "set payload padding"),
AP_INIT_TAKE1("H2OutputBuffering", h2_conf_set_output_buffer, NULL,
RSRC_CONF, "set stream output buffer on/off"),
+ AP_INIT_TAKE1("H2StreamTimeout", h2_conf_set_stream_timeout, NULL,
+ RSRC_CONF, "set stream timeout"),
AP_END_CMD
};
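The new h2_conf_set_stream_timeout() above accepts the same time syntax as other httpd timeout directives: it delegates to ap_timeout_parameter_parse() with "s" as the default unit, so a bare number means seconds and suffixes such as "ms" are honored. The sketch below is illustrative only; the wrapper name and example values are not part of this patch.

    /* Illustrative sketch, not from this patch: convert a directive argument
     * the way h2_conf_set_stream_timeout() does, defaulting to seconds. */
    #include <httpd.h>

    static const char *parse_stream_timeout(const char *value,
                                            apr_interval_time_t *ptimeout)
    {
        apr_status_t rv = ap_timeout_parameter_parse(value, ptimeout, "s");
        return (rv == APR_SUCCESS)? NULL : "Invalid timeout value";
    }

With that parsing, both "H2StreamTimeout 5" and "H2StreamTimeout 1500ms" would be accepted, assuming the usual httpd timeout syntax.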
diff --git a/modules/http2/h2_config.h b/modules/http2/h2_config.h
index 7d7d8aa897..c150fe21d8 100644
--- a/modules/http2/h2_config.h
+++ b/modules/http2/h2_config.h
@@ -30,9 +30,6 @@ typedef enum {
H2_CONF_MAX_WORKERS,
H2_CONF_MAX_WORKER_IDLE_SECS,
H2_CONF_STREAM_MAX_MEM,
- H2_CONF_ALT_SVCS,
- H2_CONF_ALT_SVC_MAX_AGE,
- H2_CONF_SER_HEADERS,
H2_CONF_DIRECT,
H2_CONF_MODERN_TLS_ONLY,
H2_CONF_UPGRADE,
@@ -45,6 +42,7 @@ typedef enum {
H2_CONF_PADDING_BITS,
H2_CONF_PADDING_ALWAYS,
H2_CONF_OUTPUT_BUFFER,
+ H2_CONF_STREAM_TIMEOUT,
} h2_config_var_t;
struct apr_hash_t;
@@ -88,7 +86,6 @@ int h2_config_rgeti(request_rec *r, h2_config_var_t var);
apr_int64_t h2_config_rgeti64(request_rec *r, h2_config_var_t var);
apr_array_header_t *h2_config_push_list(request_rec *r);
-apr_array_header_t *h2_config_alt_svcs(request_rec *r);
void h2_get_num_workers(server_rec *s, int *minw, int *maxw);
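H2_CONF_STREAM_TIMEOUT joins the h2_config_var_t enum above and is resolved through the existing server/directory split, so a per-directory value overrides the server-wide one. A minimal, assumed call site could look like the following; the function name and the fallback to the server's Timeout are illustrative and not taken from this patch.

    /* Illustrative sketch, not from this patch: effective stream timeout for
     * a request, falling back to the server Timeout when unset (DEF_VAL). */
    static apr_interval_time_t stream_timeout_of(request_rec *r)
    {
        apr_int64_t v = h2_config_rgeti64(r, H2_CONF_STREAM_TIMEOUT);
        return (v > 0)? (apr_interval_time_t)v : r->server->timeout;
    }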
diff --git a/modules/http2/h2_conn.c b/modules/http2/h2_conn.c
deleted file mode 100644
index 018d5819a7..0000000000
--- a/modules/http2/h2_conn.c
+++ /dev/null
@@ -1,402 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <apr_strings.h>
-
-#include <ap_mpm.h>
-#include <ap_mmn.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_config.h>
-#include <http_log.h>
-#include <http_connection.h>
-#include <http_protocol.h>
-#include <http_request.h>
-
-#include <mpm_common.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_filter.h"
-#include "h2_mplx.h"
-#include "h2_session.h"
-#include "h2_stream.h"
-#include "h2_h2.h"
-#include "h2_task.h"
-#include "h2_workers.h"
-#include "h2_conn.h"
-#include "h2_version.h"
-
-static struct h2_workers *workers;
-
-static h2_mpm_type_t mpm_type = H2_MPM_UNKNOWN;
-static module *mpm_module;
-static int async_mpm;
-static int mpm_supported = 1;
-static apr_socket_t *dummy_socket;
-
-static void check_modules(int force)
-{
- static int checked = 0;
- int i;
-
- if (force || !checked) {
- for (i = 0; ap_loaded_modules[i]; ++i) {
- module *m = ap_loaded_modules[i];
-
- if (!strcmp("event.c", m->name)) {
- mpm_type = H2_MPM_EVENT;
- mpm_module = m;
- break;
- }
- else if (!strcmp("motorz.c", m->name)) {
- mpm_type = H2_MPM_MOTORZ;
- mpm_module = m;
- break;
- }
- else if (!strcmp("mpm_netware.c", m->name)) {
- mpm_type = H2_MPM_NETWARE;
- mpm_module = m;
- break;
- }
- else if (!strcmp("prefork.c", m->name)) {
- mpm_type = H2_MPM_PREFORK;
- mpm_module = m;
- /* While http2 can work really well on prefork, it collides with
- * today's use case for prefork: running single-threaded app engines
- * like php. If we restrict h2_workers to 1 per process, php will
- * work fine, but browsers will be limited to 1 active request at a
- * time. */
- mpm_supported = 0;
- break;
- }
- else if (!strcmp("simple_api.c", m->name)) {
- mpm_type = H2_MPM_SIMPLE;
- mpm_module = m;
- mpm_supported = 0;
- break;
- }
- else if (!strcmp("mpm_winnt.c", m->name)) {
- mpm_type = H2_MPM_WINNT;
- mpm_module = m;
- break;
- }
- else if (!strcmp("worker.c", m->name)) {
- mpm_type = H2_MPM_WORKER;
- mpm_module = m;
- break;
- }
- }
- checked = 1;
- }
-}
-
-apr_status_t h2_conn_child_init(apr_pool_t *pool, server_rec *s)
-{
- apr_status_t status = APR_SUCCESS;
- int minw, maxw;
- int max_threads_per_child = 0;
- int idle_secs = 0;
-
- check_modules(1);
- ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads_per_child);
-
- status = ap_mpm_query(AP_MPMQ_IS_ASYNC, &async_mpm);
- if (status != APR_SUCCESS) {
- /* some MPMs do not implement this */
- async_mpm = 0;
- status = APR_SUCCESS;
- }
-
- h2_config_init(pool);
-
- h2_get_num_workers(s, &minw, &maxw);
-
- idle_secs = h2_config_sgeti(s, H2_CONF_MAX_WORKER_IDLE_SECS);
- ap_log_error(APLOG_MARK, APLOG_TRACE3, 0, s,
- "h2_workers: min=%d max=%d, mthrpchild=%d, idle_secs=%d",
- minw, maxw, max_threads_per_child, idle_secs);
- workers = h2_workers_create(s, pool, minw, maxw, idle_secs);
-
- ap_register_input_filter("H2_IN", h2_filter_core_input,
- NULL, AP_FTYPE_CONNECTION);
-
- status = h2_mplx_m_child_init(pool, s);
-
- if (status == APR_SUCCESS) {
- status = apr_socket_create(&dummy_socket, APR_INET, SOCK_STREAM,
- APR_PROTO_TCP, pool);
- }
-
- return status;
-}
-
-void h2_conn_child_stopping(apr_pool_t *pool, int graceful)
-{
- if (workers && graceful) {
- h2_workers_graceful_shutdown(workers);
- }
-}
-
-h2_mpm_type_t h2_conn_mpm_type(void)
-{
- check_modules(0);
- return mpm_type;
-}
-
-const char *h2_conn_mpm_name(void)
-{
- check_modules(0);
- return mpm_module? mpm_module->name : "unknown";
-}
-
-int h2_mpm_supported(void)
-{
- check_modules(0);
- return mpm_supported;
-}
-
-static module *h2_conn_mpm_module(void)
-{
- check_modules(0);
- return mpm_module;
-}
-
-apr_status_t h2_conn_setup(conn_rec *c, request_rec *r, server_rec *s)
-{
- h2_session *session;
- h2_ctx *ctx;
- apr_status_t status;
-
- if (!workers) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02911)
- "workers not initialized");
- return APR_EGENERAL;
- }
-
- if (APR_SUCCESS == (status = h2_session_create(&session, c, r, s, workers))) {
- ctx = h2_ctx_get(c, 1);
- h2_ctx_session_set(ctx, session);
-
- /* remove the input filter of mod_reqtimeout, now that the connection
- * is established and we have switched to h2. reqtimeout has supervised
- * possibly configured handshake timeouts and needs to get out of the way
- * now since the rest of its state handling assumes http/1.x to take place. */
- ap_remove_input_filter_byhandle(c->input_filters, "reqtimeout");
- }
-
- return status;
-}
-
-apr_status_t h2_conn_run(conn_rec *c)
-{
- apr_status_t status;
- int mpm_state = 0;
- h2_session *session = h2_ctx_get_session(c);
-
- ap_assert(session);
- do {
- if (c->cs) {
- c->cs->sense = CONN_SENSE_DEFAULT;
- c->cs->state = CONN_STATE_HANDLER;
- }
-
- status = h2_session_process(session, async_mpm);
-
- if (APR_STATUS_IS_EOF(status)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
- H2_SSSN_LOG(APLOGNO(03045), session,
- "process, closing conn"));
- c->keepalive = AP_CONN_CLOSE;
- }
- else {
- c->keepalive = AP_CONN_KEEPALIVE;
- }
-
- if (ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
- break;
- }
- } while (!async_mpm
- && c->keepalive == AP_CONN_KEEPALIVE
- && mpm_state != AP_MPMQ_STOPPING);
-
- if (c->cs) {
- switch (session->state) {
- case H2_SESSION_ST_INIT:
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_BUSY:
- case H2_SESSION_ST_WAIT:
- c->cs->state = CONN_STATE_WRITE_COMPLETION;
- if (c->cs && (session->open_streams || !session->remote.emitted_count)) {
- /* let the MPM know that we are not done and want
- * the Timeout behaviour instead of a KeepAliveTimeout
- * See PR 63534.
- */
- c->cs->sense = CONN_SENSE_WANT_READ;
- }
- break;
- case H2_SESSION_ST_CLEANUP:
- case H2_SESSION_ST_DONE:
- default:
- c->cs->state = CONN_STATE_LINGER;
- break;
- }
- }
-
- return APR_SUCCESS;
-}
-
-apr_status_t h2_conn_pre_close(struct h2_ctx *ctx, conn_rec *c)
-{
- h2_session *session = h2_ctx_get_session(c);
-
- (void)c;
- if (session) {
- apr_status_t status = h2_session_pre_close(session, async_mpm);
- return (status == APR_SUCCESS)? DONE : status;
- }
- return DONE;
-}
-
-/* APR callback invoked if allocation fails. */
-static int abort_on_oom(int retcode)
-{
- ap_abort_on_oom();
- return retcode; /* unreachable, hopefully. */
-}
-
-conn_rec *h2_secondary_create(conn_rec *master, int sec_id, apr_pool_t *parent)
-{
- apr_allocator_t *allocator;
- apr_status_t status;
- apr_pool_t *pool;
- conn_rec *c;
- void *cfg;
- module *mpm;
-
- ap_assert(master);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, master,
- "h2_stream(%ld-%d): create secondary", master->id, sec_id);
-
- /* We create a pool with its own allocator to be used for
- * processing a request. This is the only way to have the processing
- * independent of its parent pool in the sense that it can work in
- * another thread. Also, the new allocator needs its own mutex to
- * synchronize sub-pools.
- */
- apr_allocator_create(&allocator);
- apr_allocator_max_free_set(allocator, ap_max_mem_free);
- status = apr_pool_create_ex(&pool, parent, NULL, allocator);
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, master,
- APLOGNO(10004) "h2_session(%ld-%d): create secondary pool",
- master->id, sec_id);
- return NULL;
- }
- apr_allocator_owner_set(allocator, pool);
- apr_pool_abort_set(abort_on_oom, pool);
- apr_pool_tag(pool, "h2_secondary_conn");
-
- c = (conn_rec *) apr_palloc(pool, sizeof(conn_rec));
- if (c == NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, master,
- APLOGNO(02913) "h2_session(%ld-%d): create secondary",
- master->id, sec_id);
- apr_pool_destroy(pool);
- return NULL;
- }
-
- memcpy(c, master, sizeof(conn_rec));
-
- c->master = master;
- c->pool = pool;
- c->conn_config = ap_create_conn_config(pool);
- c->notes = apr_table_make(pool, 5);
- c->input_filters = NULL;
- c->output_filters = NULL;
- c->keepalives = 0;
-#if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
- c->filter_conn_ctx = NULL;
-#endif
- c->bucket_alloc = apr_bucket_alloc_create(pool);
-#if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
- c->data_in_input_filters = 0;
- c->data_in_output_filters = 0;
-#endif
- /* prevent mpm_event from making wrong assumptions about this connection,
- * like e.g. using its socket for an async read check. */
- c->clogging_input_filters = 1;
- c->log = NULL;
- c->log_id = apr_psprintf(pool, "%ld-%d",
- master->id, sec_id);
- c->aborted = 0;
- /* We cannot install the master connection socket on the secondary, as
- * modules mess with timeouts/blocking of the socket, with
- * unwanted side effects to the master connection processing.
- * Fortunately, since we never use the secondary socket, we can just install
- * a single, process-wide dummy and everyone is happy.
- */
- ap_set_module_config(c->conn_config, &core_module, dummy_socket);
- /* TODO: these should be unique to this thread */
- c->sbh = master->sbh;
- /* TODO: not all mpm modules have learned about secondary connections yet.
- * copy their config from master to secondary.
- */
- if ((mpm = h2_conn_mpm_module()) != NULL) {
- cfg = ap_get_module_config(master->conn_config, mpm);
- ap_set_module_config(c->conn_config, mpm, cfg);
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
- "h2_secondary(%s): created", c->log_id);
- return c;
-}
-
-void h2_secondary_destroy(conn_rec *secondary)
-{
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, secondary,
- "h2_secondary(%s): destroy", secondary->log_id);
- secondary->sbh = NULL;
- apr_pool_destroy(secondary->pool);
-}
-
-apr_status_t h2_secondary_run_pre_connection(conn_rec *secondary, apr_socket_t *csd)
-{
- if (secondary->keepalives == 0) {
- /* Simulate that we had already a request on this connection. Some
- * hooks trigger special behaviour when keepalives is 0.
- * (Not necessarily in pre_connection, but later. Set it here, so it
- * is in place.) */
- secondary->keepalives = 1;
- /* We signal that this connection will be closed after the request.
- * Which is true in that sense that we throw away all traffic data
- * on this secondary connection after each request. Although we might
- * reuse internal structures like memory pools.
- * The wanted effect of this is that httpd does not try to clean up
- * any dangling data on this connection when a request is done. Which
- * is unnecessary on a h2 stream.
- */
- secondary->keepalive = AP_CONN_CLOSE;
- return ap_run_pre_connection(secondary, csd);
- }
- ap_assert(secondary->output_filters);
- return APR_SUCCESS;
-}
-
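The comment in the removed h2_secondary_create() explains why each secondary connection got a pool with its own allocator: only then can the pool be used from a worker thread independently of its parent. Stripped of mod_http2 specifics, the underlying APR pattern looks roughly like the sketch below; the helper name and error handling are illustrative.

    /* Generic APR sketch of the removed pattern: a child pool owning a private
     * allocator, with a mutex so sub-pool creation is thread safe. */
    #include <apr_allocator.h>
    #include <apr_pools.h>
    #include <apr_thread_mutex.h>

    static apr_pool_t *create_thread_safe_child_pool(apr_pool_t *parent)
    {
        apr_allocator_t *allocator;
        apr_thread_mutex_t *mutex;
        apr_pool_t *pool = NULL;

        if (apr_allocator_create(&allocator) != APR_SUCCESS)
            return NULL;
        if (apr_pool_create_ex(&pool, parent, NULL, allocator) != APR_SUCCESS) {
            apr_allocator_destroy(allocator);
            return NULL;
        }
        apr_allocator_owner_set(allocator, pool);
        if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool) == APR_SUCCESS)
            apr_allocator_mutex_set(allocator, mutex);
        return pool;
    }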
diff --git a/modules/http2/h2_conn_ctx.c b/modules/http2/h2_conn_ctx.c
new file mode 100644
index 0000000000..ce5e9c1234
--- /dev/null
+++ b/modules/http2/h2_conn_ctx.c
@@ -0,0 +1,147 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <assert.h>
+#include <apr_strings.h>
+
+#include <httpd.h>
+#include <http_core.h>
+#include <http_config.h>
+#include <http_log.h>
+
+#include "h2_private.h"
+#include "h2_session.h"
+#include "h2_bucket_beam.h"
+#include "h2_c2.h"
+#include "h2_mplx.h"
+#include "h2_stream.h"
+#include "h2_util.h"
+#include "h2_conn_ctx.h"
+
+
+void h2_conn_ctx_detach(conn_rec *c)
+{
+ ap_set_module_config(c->conn_config, &http2_module, NULL);
+}
+
+static h2_conn_ctx_t *ctx_create(conn_rec *c, const char *id)
+{
+ h2_conn_ctx_t *conn_ctx = apr_pcalloc(c->pool, sizeof(*conn_ctx));
+ conn_ctx->id = id;
+ conn_ctx->server = c->base_server;
+ conn_ctx->started_at = apr_time_now();
+
+ ap_set_module_config(c->conn_config, &http2_module, conn_ctx);
+ return conn_ctx;
+}
+
+h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c1, server_rec *s, const char *protocol)
+{
+ h2_conn_ctx_t *ctx;
+
+ ctx = ctx_create(c1, apr_psprintf(c1->pool, "%ld", c1->id));
+ ctx->server = s;
+ ctx->protocol = apr_pstrdup(c1->pool, protocol);
+
+ ctx->pfd_out_prod.desc_type = APR_POLL_SOCKET;
+ ctx->pfd_out_prod.desc.s = ap_get_conn_socket(c1);
+ apr_socket_opt_set(ctx->pfd_out_prod.desc.s, APR_SO_NONBLOCK, 1);
+ ctx->pfd_out_prod.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
+ ctx->pfd_out_prod.client_data = ctx;
+
+ return ctx;
+}
+
+apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c2,
+ struct h2_mplx *mplx, struct h2_stream *stream)
+{
+ h2_conn_ctx_t *conn_ctx;
+ apr_status_t rv = APR_SUCCESS;
+
+ ap_assert(c2->master);
+ conn_ctx = h2_conn_ctx_get(c2);
+ if (!conn_ctx) {
+ h2_conn_ctx_t *c1_ctx;
+
+ c1_ctx = h2_conn_ctx_get(c2->master);
+ ap_assert(c1_ctx);
+ ap_assert(c1_ctx->session);
+
+ conn_ctx = ctx_create(c2, c1_ctx->id);
+ conn_ctx->server = c2->master->base_server;
+ }
+
+ conn_ctx->mplx = mplx;
+ conn_ctx->stream_id = stream->id;
+ apr_pool_create(&conn_ctx->req_pool, c2->pool);
+ apr_pool_tag(conn_ctx->req_pool, "H2_C2_REQ");
+ conn_ctx->request = stream->request;
+ conn_ctx->started_at = apr_time_now();
+ conn_ctx->done = 0;
+ conn_ctx->done_at = 0;
+
+ *pctx = conn_ctx;
+ return rv;
+}
+
+void h2_conn_ctx_clear_for_c2(conn_rec *c2)
+{
+ h2_conn_ctx_t *conn_ctx;
+
+ ap_assert(c2->master);
+ conn_ctx = h2_conn_ctx_get(c2);
+ conn_ctx->stream_id = -1;
+ conn_ctx->request = NULL;
+
+ if (conn_ctx->req_pool) {
+ apr_pool_destroy(conn_ctx->req_pool);
+ conn_ctx->req_pool = NULL;
+ conn_ctx->beam_out = NULL;
+ }
+ memset(&conn_ctx->pfd_in_drain, 0, sizeof(conn_ctx->pfd_in_drain));
+ memset(&conn_ctx->pfd_out_prod, 0, sizeof(conn_ctx->pfd_out_prod));
+ conn_ctx->beam_in = NULL;
+}
+
+void h2_conn_ctx_destroy(conn_rec *c)
+{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx) {
+ if (conn_ctx->mplx_pool) {
+ apr_pool_destroy(conn_ctx->mplx_pool);
+ conn_ctx->mplx_pool = NULL;
+ }
+ ap_set_module_config(c->conn_config, &http2_module, NULL);
+ }
+}
+
+void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout)
+{
+ if (conn_ctx->beam_out) {
+ h2_beam_timeout_set(conn_ctx->beam_out, timeout);
+ }
+ if (conn_ctx->pipe_out_prod[H2_PIPE_OUT]) {
+ apr_file_pipe_timeout_set(conn_ctx->pipe_out_prod[H2_PIPE_OUT], timeout);
+ }
+
+ if (conn_ctx->beam_in) {
+ h2_beam_timeout_set(conn_ctx->beam_in, timeout);
+ }
+ if (conn_ctx->pipe_in_prod[H2_PIPE_OUT]) {
+ apr_file_pipe_timeout_set(conn_ctx->pipe_in_prod[H2_PIPE_OUT], timeout);
+ }
+}
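h2_conn_ctx_create_for_c1() above switches the main-connection socket to non-blocking mode and prepares an apr_pollfd_t for it, which the multiplexer can later watch in a pollset. The fragment below is an assumed, simplified illustration of polling one such descriptor with APR; the function and variable names are not from this patch.

    /* Illustrative sketch, not from this patch: wait on a single prepared
     * apr_pollfd_t, returning APR_TIMEUP when nothing becomes ready in time. */
    #include <apr_poll.h>

    static apr_status_t wait_on_pollfd(apr_pool_t *p, const apr_pollfd_t *pfd,
                                       apr_interval_time_t timeout)
    {
        apr_pollset_t *pollset;
        const apr_pollfd_t *results;
        apr_int32_t nresults;
        apr_status_t rv;

        if ((rv = apr_pollset_create(&pollset, 1, p, 0)) != APR_SUCCESS)
            return rv;
        if ((rv = apr_pollset_add(pollset, pfd)) != APR_SUCCESS)
            return rv;
        return apr_pollset_poll(pollset, timeout, &nresults, &results);
    }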
diff --git a/modules/http2/h2_conn_ctx.h b/modules/http2/h2_conn_ctx.h
new file mode 100644
index 0000000000..b744e8dd29
--- /dev/null
+++ b/modules/http2/h2_conn_ctx.h
@@ -0,0 +1,97 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __mod_h2__h2_conn_ctx__
+#define __mod_h2__h2_conn_ctx__
+
+struct h2_session;
+struct h2_stream;
+struct h2_mplx;
+struct h2_bucket_beam;
+struct h2_response_parser;
+
+#define H2_PIPE_OUT 0
+#define H2_PIPE_IN 1
+
+/**
+ * The h2 module context associated with a connection.
+ *
+ * It keeps track of the different types of connections:
+ * - those from clients that use HTTP/2 protocol
+ * - those from clients that do not use HTTP/2
+ * - those created by ourselves to perform work on HTTP/2 streams
+ */
+struct h2_conn_ctx_t {
+ const char *id; /* c*: our identifier of this connection */
+ server_rec *server; /* c*: httpd server selected. */
+ const char *protocol; /* c1: the protocol negotiated */
+ struct h2_session *session; /* c1: the h2 session established */
+ struct h2_mplx *mplx; /* c2: the multiplexer */
+
+ int pre_conn_done; /* has pre_connection setup run? */
+ int stream_id; /* c1: 0, c2: stream id processed */
+ apr_pool_t *req_pool; /* c2: a c2 child pool for a request */
+ const struct h2_request *request; /* c2: the request to process */
+ struct h2_bucket_beam *beam_out; /* c2: data out, created from req_pool */
+ struct h2_bucket_beam *beam_in; /* c2: data in or NULL, borrowed from request stream */
+
+ apr_pool_t *mplx_pool; /* c2: an mplx child pool for safe use inside mplx lock */
+ apr_file_t *pipe_in_prod[2]; /* c2: input produced notification pipe */
+ apr_file_t *pipe_in_drain[2]; /* c2: input drained notification pipe */
+ apr_file_t *pipe_out_prod[2]; /* c2: output produced notification pipe */
+
+ apr_pollfd_t pfd_in_drain; /* c2: poll pipe_in_drain output */
+ apr_pollfd_t pfd_out_prod; /* c2: poll pipe_out_prod output */
+
+ int has_final_response; /* final HTTP response passed on out */
+ apr_status_t last_err; /* APR_SUCCESS or last error encountered in filters */
+ struct h2_response_parser *parser; /* optional parser to catch H1 responses */
+
+ volatile int done; /* c2: processing has finished */
+ apr_time_t started_at; /* c2: when processing started */
+ apr_time_t done_at; /* c2: when processing was done */
+};
+typedef struct h2_conn_ctx_t h2_conn_ctx_t;
+
+/**
+ * Get the h2 connection context.
+ * @param c the connection to look at
+ * @return h2 context of this connection
+ */
+#define h2_conn_ctx_get(c) \
+ ((c)? (h2_conn_ctx_t*)ap_get_module_config((c)->conn_config, &http2_module) : NULL)
+
+/**
+ * Create the h2 connection context.
+ * @param c the connection to create it at
+ * @param s the server in use
+ * @param protocol the protocol selected
+ * @return created h2 context of this connection
+ */
+h2_conn_ctx_t *h2_conn_ctx_create_for_c1(conn_rec *c, server_rec *s, const char *protocol);
+
+apr_status_t h2_conn_ctx_init_for_c2(h2_conn_ctx_t **pctx, conn_rec *c,
+ struct h2_mplx *mplx, struct h2_stream *stream);
+
+void h2_conn_ctx_clear_for_c2(conn_rec *c2);
+
+void h2_conn_ctx_detach(conn_rec *c);
+
+void h2_conn_ctx_destroy(conn_rec *c);
+
+void h2_conn_ctx_set_timeout(h2_conn_ctx_t *conn_ctx, apr_interval_time_t timeout);
+
+#endif /* defined(__mod_h2__h2_conn_ctx__) */
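h2_conn_ctx_get() is a plain ap_get_module_config() lookup on the connection, replacing the accessor functions of the old h2_ctx below. A hypothetical hook using it, assuming the usual mod_http2 headers are included, could look like this:

    /* Hypothetical hook, only to illustrate the lookup; not from this patch. */
    static int my_pre_close(conn_rec *c)
    {
        h2_conn_ctx_t *ctx = h2_conn_ctx_get(c);
        if (ctx && ctx->session) {
            /* c is an HTTP/2 main (c1) connection */
        }
        return OK;
    }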
diff --git a/modules/http2/h2_ctx.c b/modules/http2/h2_ctx.c
deleted file mode 100644
index 095f3554b6..0000000000
--- a/modules/http2/h2_ctx.c
+++ /dev/null
@@ -1,106 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_config.h>
-
-#include "h2_private.h"
-#include "h2_session.h"
-#include "h2_task.h"
-#include "h2_ctx.h"
-
-static h2_ctx *h2_ctx_create(const conn_rec *c)
-{
- h2_ctx *ctx = apr_pcalloc(c->pool, sizeof(h2_ctx));
- ap_assert(ctx);
- h2_ctx_server_update(ctx, c->base_server);
- ap_set_module_config(c->conn_config, &http2_module, ctx);
- return ctx;
-}
-
-void h2_ctx_clear(const conn_rec *c)
-{
- ap_assert(c);
- ap_set_module_config(c->conn_config, &http2_module, NULL);
-}
-
-h2_ctx *h2_ctx_create_for(const conn_rec *c, h2_task *task)
-{
- h2_ctx *ctx = h2_ctx_create(c);
- if (ctx) {
- ctx->task = task;
- }
- return ctx;
-}
-
-h2_ctx *h2_ctx_get(const conn_rec *c, int create)
-{
- h2_ctx *ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
- if (ctx == NULL && create) {
- ctx = h2_ctx_create(c);
- }
- return ctx;
-}
-
-h2_ctx *h2_ctx_rget(const request_rec *r)
-{
- return h2_ctx_get(r->connection, 0);
-}
-
-const char *h2_ctx_protocol_get(const conn_rec *c)
-{
- h2_ctx *ctx;
- if (c->master) {
- c = c->master;
- }
- ctx = (h2_ctx*)ap_get_module_config(c->conn_config, &http2_module);
- return ctx? ctx->protocol : NULL;
-}
-
-h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto)
-{
- ctx->protocol = proto;
- return ctx;
-}
-
-h2_session *h2_ctx_get_session(conn_rec *c)
-{
- h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->session : NULL;
-}
-
-void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session)
-{
- ctx->session = session;
-}
-
-h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s)
-{
- if (ctx->server != s) {
- ctx->server = s;
- }
- return ctx;
-}
-
-h2_task *h2_ctx_get_task(conn_rec *c)
-{
- h2_ctx *ctx = h2_ctx_get(c, 0);
- return ctx? ctx->task : NULL;
-}
-
diff --git a/modules/http2/h2_ctx.h b/modules/http2/h2_ctx.h
deleted file mode 100644
index 417ef36377..0000000000
--- a/modules/http2/h2_ctx.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_ctx__
-#define __mod_h2__h2_ctx__
-
-struct h2_session;
-struct h2_task;
-struct h2_config;
-
-/**
- * The h2 module context associated with a connection.
- *
- * It keeps track of the different types of connections:
- * - those from clients that use HTTP/2 protocol
- * - those from clients that do not use HTTP/2
- * - those created by ourself to perform work on HTTP/2 streams
- */
-typedef struct h2_ctx {
- const char *protocol; /* the protocol negotiated */
- struct h2_session *session; /* the session established */
- struct h2_task *task; /* the h2_task executing or NULL */
- const char *hostname; /* hostname negotiated via SNI, optional */
- server_rec *server; /* httpd server config selected. */
- const struct h2_config *config; /* effective config in this context */
-} h2_ctx;
-
-/**
- * Get (or create) a h2 context record for this connection.
- * @param c the connection to look at
- * @param create != 0 iff missing context shall be created
- * @return h2 context of this connection
- */
-h2_ctx *h2_ctx_get(const conn_rec *c, int create);
-void h2_ctx_clear(const conn_rec *c);
-
-h2_ctx *h2_ctx_rget(const request_rec *r);
-h2_ctx *h2_ctx_create_for(const conn_rec *c, struct h2_task *task);
-
-
-/* Set the h2 protocol established on this connection context or
- * NULL when other protocols are in place.
- */
-h2_ctx *h2_ctx_protocol_set(h2_ctx *ctx, const char *proto);
-
-/* Update the server_rec relevant for this context. A server for
- * a connection may change during SNI handling, for example.
- */
-h2_ctx *h2_ctx_server_update(h2_ctx *ctx, server_rec *s);
-
-void h2_ctx_session_set(h2_ctx *ctx, struct h2_session *session);
-
-/**
- * Get the h2 protocol negotiated for this connection, or NULL.
- */
-const char *h2_ctx_protocol_get(const conn_rec *c);
-
-struct h2_session *h2_ctx_get_session(conn_rec *c);
-struct h2_task *h2_ctx_get_task(conn_rec *c);
-
-
-#endif /* defined(__mod_h2__h2_ctx__) */
diff --git a/modules/http2/h2_filter.c b/modules/http2/h2_filter.c
deleted file mode 100644
index d9257fa3ec..0000000000
--- a/modules/http2/h2_filter.c
+++ /dev/null
@@ -1,613 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-
-#include <apr_strings.h>
-#include <httpd.h>
-#include <http_core.h>
-#include <http_protocol.h>
-#include <http_log.h>
-#include <http_connection.h>
-#include <scoreboard.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_config.h"
-#include "h2_conn_io.h"
-#include "h2_ctx.h"
-#include "h2_mplx.h"
-#include "h2_push.h"
-#include "h2_task.h"
-#include "h2_stream.h"
-#include "h2_request.h"
-#include "h2_headers.h"
-#include "h2_stream.h"
-#include "h2_session.h"
-#include "h2_util.h"
-#include "h2_version.h"
-
-#include "h2_filter.h"
-
-#define UNSET -1
-#define H2MIN(x,y) ((x) < (y) ? (x) : (y))
-
-static apr_status_t recv_RAW_DATA(conn_rec *c, h2_filter_cin *cin,
- apr_bucket *b, apr_read_type_e block)
-{
- h2_session *session = cin->session;
- apr_status_t status = APR_SUCCESS;
- apr_size_t len;
- const char *data;
- ssize_t n;
-
- (void)c;
- status = apr_bucket_read(b, &data, &len, block);
-
- while (status == APR_SUCCESS && len > 0) {
- n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_SSSN_MSG(session, "fed %ld bytes to nghttp2, %ld read"),
- (long)len, (long)n);
- if (n < 0) {
- if (nghttp2_is_fatal((int)n)) {
- h2_session_event(session, H2_SESSION_EV_PROTO_ERROR,
- (int)n, nghttp2_strerror((int)n));
- status = APR_EGENERAL;
- }
- }
- else {
- session->io.bytes_read += n;
- if ((apr_ssize_t)len <= n) {
- break;
- }
- len -= (apr_size_t)n;
- data += n;
- }
- }
-
- return status;
-}
-
-static apr_status_t recv_RAW_brigade(conn_rec *c, h2_filter_cin *cin,
- apr_bucket_brigade *bb,
- apr_read_type_e block)
-{
- apr_status_t status = APR_SUCCESS;
- apr_bucket* b;
- int consumed = 0;
-
- h2_util_bb_log(c, c->id, APLOG_TRACE2, "RAW_in", bb);
- while (status == APR_SUCCESS && !APR_BRIGADE_EMPTY(bb)) {
- b = APR_BRIGADE_FIRST(bb);
-
- if (APR_BUCKET_IS_METADATA(b)) {
- /* nop */
- }
- else {
- status = recv_RAW_DATA(c, cin, b, block);
- }
- consumed = 1;
- apr_bucket_delete(b);
- }
-
- if (!consumed && status == APR_SUCCESS && block == APR_NONBLOCK_READ) {
- return APR_EAGAIN;
- }
- return status;
-}
-
-h2_filter_cin *h2_filter_cin_create(h2_session *session)
-{
- h2_filter_cin *cin;
-
- cin = apr_pcalloc(session->pool, sizeof(*cin));
- if (!cin) {
- return NULL;
- }
- cin->session = session;
- return cin;
-}
-
-void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout)
-{
- cin->timeout = timeout;
-}
-
-apr_status_t h2_filter_core_input(ap_filter_t* f,
- apr_bucket_brigade* brigade,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
-{
- h2_filter_cin *cin = f->ctx;
- apr_status_t status = APR_SUCCESS;
- apr_interval_time_t saved_timeout = UNSET;
- const int trace1 = APLOGctrace1(f->c);
-
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_session(%ld): read, %s, mode=%d, readbytes=%ld",
- (long)f->c->id, (block == APR_BLOCK_READ)?
- "BLOCK_READ" : "NONBLOCK_READ", mode, (long)readbytes);
- }
-
- if (mode == AP_MODE_INIT || mode == AP_MODE_SPECULATIVE) {
- return ap_get_brigade(f->next, brigade, mode, block, readbytes);
- }
-
- if (mode != AP_MODE_READBYTES) {
- return (block == APR_BLOCK_READ)? APR_SUCCESS : APR_EAGAIN;
- }
-
- if (!cin->bb) {
- cin->bb = apr_brigade_create(cin->session->pool, f->c->bucket_alloc);
- }
-
- if (!cin->socket) {
- cin->socket = ap_get_conn_socket(f->c);
- }
-
- if (APR_BRIGADE_EMPTY(cin->bb)) {
- /* We only do a blocking read when we have no streams to process. So,
- * in httpd scoreboard lingo, we are in a KEEPALIVE connection state.
- */
- if (block == APR_BLOCK_READ) {
- if (cin->timeout > 0) {
- apr_socket_timeout_get(cin->socket, &saved_timeout);
- apr_socket_timeout_set(cin->socket, cin->timeout);
- }
- }
- status = ap_get_brigade(f->next, cin->bb, AP_MODE_READBYTES,
- block, readbytes);
- if (saved_timeout != UNSET) {
- apr_socket_timeout_set(cin->socket, saved_timeout);
- }
- }
-
- switch (status) {
- case APR_SUCCESS:
- status = recv_RAW_brigade(f->c, cin, cin->bb, block);
- break;
- case APR_EOF:
- case APR_EAGAIN:
- case APR_TIMEUP:
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_session(%ld): read", f->c->id);
- }
- break;
- default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, f->c, APLOGNO(03046)
- "h2_session(%ld): error reading", f->c->id);
- break;
- }
- return status;
-}
-
-/*******************************************************************************
- * http2 connection status handler + stream out source
- ******************************************************************************/
-
-typedef struct {
- apr_bucket_refcount refcount;
- h2_bucket_event_cb *cb;
- void *ctx;
-} h2_bucket_observer;
-
-static apr_status_t bucket_read(apr_bucket *b, const char **str,
- apr_size_t *len, apr_read_type_e block)
-{
- (void)b;
- (void)block;
- *str = NULL;
- *len = 0;
- return APR_SUCCESS;
-}
-
-static void bucket_destroy(void *data)
-{
- h2_bucket_observer *h = data;
- if (apr_bucket_shared_destroy(h)) {
- if (h->cb) {
- h->cb(h->ctx, H2_BUCKET_EV_BEFORE_DESTROY, NULL);
- }
- apr_bucket_free(h);
- }
-}
-
-apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
- void *ctx)
-{
- h2_bucket_observer *br;
-
- br = apr_bucket_alloc(sizeof(*br), b->list);
- br->cb = cb;
- br->ctx = ctx;
-
- b = apr_bucket_shared_make(b, br, 0, 0);
- b->type = &h2_bucket_type_observer;
- return b;
-}
-
-apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
- h2_bucket_event_cb *cb, void *ctx)
-{
- apr_bucket *b = apr_bucket_alloc(sizeof(*b), list);
-
- APR_BUCKET_INIT(b);
- b->free = apr_bucket_free;
- b->list = list;
- b = h2_bucket_observer_make(b, cb, ctx);
- return b;
-}
-
-apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event)
-{
- if (H2_BUCKET_IS_OBSERVER(b)) {
- h2_bucket_observer *l = (h2_bucket_observer *)b->data;
- return l->cb(l->ctx, event, b);
- }
- return APR_EINVAL;
-}
-
-const apr_bucket_type_t h2_bucket_type_observer = {
- "H2OBS", 5, APR_BUCKET_METADATA,
- bucket_destroy,
- bucket_read,
- apr_bucket_setaside_noop,
- apr_bucket_split_notimpl,
- apr_bucket_shared_copy
-};
-
-apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
- apr_bucket_brigade *dest,
- const apr_bucket *src)
-{
- (void)beam;
- if (H2_BUCKET_IS_OBSERVER(src)) {
- h2_bucket_observer *l = (h2_bucket_observer *)src->data;
- apr_bucket *b = h2_bucket_observer_create(dest->bucket_alloc,
- l->cb, l->ctx);
- APR_BRIGADE_INSERT_TAIL(dest, b);
- l->cb = NULL;
- l->ctx = NULL;
- h2_bucket_observer_fire(b, H2_BUCKET_EV_BEFORE_MASTER_SEND);
- return b;
- }
- return NULL;
-}
-
-static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
- __attribute__((format(printf,2,3)));
-static apr_status_t bbout(apr_bucket_brigade *bb, const char *fmt, ...)
-{
- va_list args;
- apr_status_t rv;
-
- va_start(args, fmt);
- rv = apr_brigade_vprintf(bb, NULL, NULL, fmt, args);
- va_end(args);
-
- return rv;
-}
-
-static void add_settings(apr_bucket_brigade *bb, h2_session *s, int last)
-{
- h2_mplx *m = s->mplx;
-
- bbout(bb, " \"settings\": {\n");
- bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n", m->max_streams);
- bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n", 16*1024);
- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n", h2_config_sgeti(s->s, H2_CONF_WIN_SIZE));
- bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d\n", h2_session_push_enabled(s));
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-static void add_peer_settings(apr_bucket_brigade *bb, h2_session *s, int last)
-{
- bbout(bb, " \"peerSettings\": {\n");
- bbout(bb, " \"SETTINGS_MAX_CONCURRENT_STREAMS\": %d,\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS));
- bbout(bb, " \"SETTINGS_MAX_FRAME_SIZE\": %d,\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_FRAME_SIZE));
- bbout(bb, " \"SETTINGS_INITIAL_WINDOW_SIZE\": %d,\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE));
- bbout(bb, " \"SETTINGS_ENABLE_PUSH\": %d,\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_ENABLE_PUSH));
- bbout(bb, " \"SETTINGS_HEADER_TABLE_SIZE\": %d,\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_HEADER_TABLE_SIZE));
- bbout(bb, " \"SETTINGS_MAX_HEADER_LIST_SIZE\": %d\n",
- nghttp2_session_get_remote_settings(s->ngh2, NGHTTP2_SETTINGS_MAX_HEADER_LIST_SIZE));
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-typedef struct {
- apr_bucket_brigade *bb;
- h2_session *s;
- int idx;
-} stream_ctx_t;
-
-static int add_stream(h2_stream *stream, void *ctx)
-{
- stream_ctx_t *x = ctx;
- int32_t flowIn, flowOut;
-
- flowIn = nghttp2_session_get_stream_effective_local_window_size(x->s->ngh2, stream->id);
- flowOut = nghttp2_session_get_stream_remote_window_size(x->s->ngh2, stream->id);
- bbout(x->bb, "%s\n \"%d\": {\n", (x->idx? "," : ""), stream->id);
- bbout(x->bb, " \"state\": \"%s\",\n", h2_stream_state_str(stream));
- bbout(x->bb, " \"created\": %f,\n", ((double)stream->created)/APR_USEC_PER_SEC);
- bbout(x->bb, " \"flowIn\": %d,\n", flowIn);
- bbout(x->bb, " \"flowOut\": %d,\n", flowOut);
- bbout(x->bb, " \"dataIn\": %"APR_OFF_T_FMT",\n", stream->in_data_octets);
- bbout(x->bb, " \"dataOut\": %"APR_OFF_T_FMT"\n", stream->out_data_octets);
- bbout(x->bb, " }");
-
- ++x->idx;
- return 1;
-}
-
-static void add_streams(apr_bucket_brigade *bb, h2_session *s, int last)
-{
- stream_ctx_t x;
-
- x.bb = bb;
- x.s = s;
- x.idx = 0;
- bbout(bb, " \"streams\": {");
- h2_mplx_m_stream_do(s->mplx, add_stream, &x);
- bbout(bb, "\n }%s\n", last? "" : ",");
-}
-
-static void add_push(apr_bucket_brigade *bb, h2_session *s,
- h2_stream *stream, int last)
-{
- h2_push_diary *diary;
- apr_status_t status;
-
- bbout(bb, " \"push\": {\n");
- diary = s->push_diary;
- if (diary) {
- const char *data;
- const char *base64_digest;
- apr_size_t len;
-
- status = h2_push_diary_digest_get(diary, bb->p, 256,
- stream->request->authority,
- &data, &len);
- if (status == APR_SUCCESS) {
- base64_digest = h2_util_base64url_encode(data, len, bb->p);
- bbout(bb, " \"cacheDigest\": \"%s\",\n", base64_digest);
- }
- }
- bbout(bb, " \"promises\": %d,\n", s->pushes_promised);
- bbout(bb, " \"submits\": %d,\n", s->pushes_submitted);
- bbout(bb, " \"resets\": %d\n", s->pushes_reset);
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-static void add_in(apr_bucket_brigade *bb, h2_session *s, int last)
-{
- bbout(bb, " \"in\": {\n");
- bbout(bb, " \"requests\": %d,\n", s->remote.emitted_count);
- bbout(bb, " \"resets\": %d, \n", s->streams_reset);
- bbout(bb, " \"frames\": %ld,\n", (long)s->frames_received);
- bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_read);
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-static void add_out(apr_bucket_brigade *bb, h2_session *s, int last)
-{
- bbout(bb, " \"out\": {\n");
- bbout(bb, " \"responses\": %d,\n", s->responses_submitted);
- bbout(bb, " \"frames\": %ld,\n", (long)s->frames_sent);
- bbout(bb, " \"octets\": %"APR_UINT64_T_FMT"\n", s->io.bytes_written);
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-static void add_stats(apr_bucket_brigade *bb, h2_session *s,
- h2_stream *stream, int last)
-{
- bbout(bb, " \"stats\": {\n");
- add_in(bb, s, 0);
- add_out(bb, s, 0);
- add_push(bb, s, stream, 1);
- bbout(bb, " }%s\n", last? "" : ",");
-}
-
-static apr_status_t h2_status_insert(h2_task *task, apr_bucket *b)
-{
- h2_mplx *m = task->mplx;
- h2_stream *stream = h2_mplx_t_stream_get(m, task);
- h2_session *s;
- conn_rec *c;
-
- apr_bucket_brigade *bb;
- apr_bucket *e;
- int32_t connFlowIn, connFlowOut;
-
- if (!stream) {
- /* stream already done */
- return APR_SUCCESS;
- }
- s = stream->session;
- c = s->c;
-
- bb = apr_brigade_create(stream->pool, c->bucket_alloc);
-
- connFlowIn = nghttp2_session_get_effective_local_window_size(s->ngh2);
- connFlowOut = nghttp2_session_get_remote_window_size(s->ngh2);
-
- bbout(bb, "{\n");
- bbout(bb, " \"version\": \"draft-01\",\n");
- add_settings(bb, s, 0);
- add_peer_settings(bb, s, 0);
- bbout(bb, " \"connFlowIn\": %d,\n", connFlowIn);
- bbout(bb, " \"connFlowOut\": %d,\n", connFlowOut);
- bbout(bb, " \"sentGoAway\": %d,\n", s->local.shutdown);
-
- add_streams(bb, s, 0);
-
- add_stats(bb, s, stream, 1);
- bbout(bb, "}\n");
-
- while ((e = APR_BRIGADE_FIRST(bb)) != APR_BRIGADE_SENTINEL(bb)) {
- APR_BUCKET_REMOVE(e);
- APR_BUCKET_INSERT_AFTER(b, e);
- b = e;
- }
- apr_brigade_destroy(bb);
-
- return APR_SUCCESS;
-}
-
-static apr_status_t status_event(void *ctx, h2_bucket_event event,
- apr_bucket *b)
-{
- h2_task *task = ctx;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, task->c->master,
- "status_event(%s): %d", task->id, event);
- switch (event) {
- case H2_BUCKET_EV_BEFORE_MASTER_SEND:
- h2_status_insert(task, b);
- break;
- default:
- break;
- }
- return APR_SUCCESS;
-}
-
-static apr_status_t discard_body(request_rec *r, apr_off_t maxlen)
-{
- apr_bucket_brigade *bb;
- int seen_eos;
- apr_status_t rv;
-
- bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
- seen_eos = 0;
- do {
- apr_bucket *bucket;
-
- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
- APR_BLOCK_READ, HUGE_STRING_LEN);
-
- if (rv != APR_SUCCESS) {
- apr_brigade_destroy(bb);
- return rv;
- }
-
- for (bucket = APR_BRIGADE_FIRST(bb);
- bucket != APR_BRIGADE_SENTINEL(bb);
- bucket = APR_BUCKET_NEXT(bucket))
- {
- const char *data;
- apr_size_t len;
-
- if (APR_BUCKET_IS_EOS(bucket)) {
- seen_eos = 1;
- break;
- }
- if (bucket->length == 0) {
- continue;
- }
- rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
- if (rv != APR_SUCCESS) {
- apr_brigade_destroy(bb);
- return rv;
- }
- maxlen -= bucket->length;
- }
- apr_brigade_cleanup(bb);
- } while (!seen_eos && maxlen >= 0);
-
- return APR_SUCCESS;
-}
-
-int h2_filter_h2_status_handler(request_rec *r)
-{
- conn_rec *c = r->connection;
- h2_task *task;
- apr_bucket_brigade *bb;
- apr_bucket *b;
- apr_status_t status;
-
- if (strcmp(r->handler, "http2-status")) {
- return DECLINED;
- }
- if (r->method_number != M_GET && r->method_number != M_POST) {
- return DECLINED;
- }
-
- task = h2_ctx_get_task(r->connection);
- if (task) {
- /* In this handler, we do some special sauce to send footers back,
- * IFF we received footers in the request. This is used in our test
- * cases, since CGI has no way of handling those. */
- if ((status = discard_body(r, 1024)) != OK) {
- return status;
- }
-
- /* We need to handle the actual output on the main thread, as
- * we need to access h2_session information. */
- r->status = 200;
- r->clength = -1;
- r->chunked = 1;
- apr_table_unset(r->headers_out, "Content-Length");
- /* Discourage content-encodings */
- apr_table_unset(r->headers_out, "Content-Encoding");
- apr_table_setn(r->subprocess_env, "no-brotli", "1");
- apr_table_setn(r->subprocess_env, "no-gzip", "1");
-
- ap_set_content_type(r, "application/json");
- apr_table_setn(r->notes, H2_FILTER_DEBUG_NOTE, "on");
-
- bb = apr_brigade_create(r->pool, c->bucket_alloc);
- b = h2_bucket_observer_create(c->bucket_alloc, status_event, task);
- APR_BRIGADE_INSERT_TAIL(bb, b);
- b = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, b);
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "status_handler(%s): checking for incoming trailers",
- task->id);
- if (r->trailers_in && !apr_is_empty_table(r->trailers_in)) {
- ap_log_rerror(APLOG_MARK, APLOG_TRACE2, 0, r,
- "status_handler(%s): seeing incoming trailers",
- task->id);
- apr_table_setn(r->trailers_out, "h2-trailers-in",
- apr_itoa(r->pool, 1));
- }
-
- status = ap_pass_brigade(r->output_filters, bb);
- if (status == APR_SUCCESS
- || r->status != HTTP_OK
- || c->aborted) {
- return OK;
- }
- else {
- /* no way to know what type of error occurred */
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, status, r,
- "status_handler(%s): ap_pass_brigade failed",
- task->id);
- return AP_FILTER_ERROR;
- }
- }
- return DECLINED;
-}
-
diff --git a/modules/http2/h2_filter.h b/modules/http2/h2_filter.h
deleted file mode 100644
index 12810d81b7..0000000000
--- a/modules/http2/h2_filter.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_filter__
-#define __mod_h2__h2_filter__
-
-struct h2_bucket_beam;
-struct h2_headers;
-struct h2_stream;
-struct h2_session;
-
-typedef struct h2_filter_cin {
- apr_pool_t *pool;
- apr_socket_t *socket;
- apr_interval_time_t timeout;
- apr_bucket_brigade *bb;
- struct h2_session *session;
- apr_bucket *cur;
-} h2_filter_cin;
-
-h2_filter_cin *h2_filter_cin_create(struct h2_session *session);
-
-void h2_filter_cin_timeout_set(h2_filter_cin *cin, apr_interval_time_t timeout);
-
-apr_status_t h2_filter_core_input(ap_filter_t* filter,
- apr_bucket_brigade* brigade,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes);
-
-/******* observer bucket ******************************************************/
-
-typedef enum {
- H2_BUCKET_EV_BEFORE_DESTROY,
- H2_BUCKET_EV_BEFORE_MASTER_SEND
-} h2_bucket_event;
-
-extern const apr_bucket_type_t h2_bucket_type_observer;
-
-typedef apr_status_t h2_bucket_event_cb(void *ctx, h2_bucket_event event, apr_bucket *b);
-
-#define H2_BUCKET_IS_OBSERVER(e) (e->type == &h2_bucket_type_observer)
-
-apr_bucket * h2_bucket_observer_make(apr_bucket *b, h2_bucket_event_cb *cb,
- void *ctx);
-
-apr_bucket * h2_bucket_observer_create(apr_bucket_alloc_t *list,
- h2_bucket_event_cb *cb, void *ctx);
-
-apr_status_t h2_bucket_observer_fire(apr_bucket *b, h2_bucket_event event);
-
-apr_bucket *h2_bucket_observer_beam(struct h2_bucket_beam *beam,
- apr_bucket_brigade *dest,
- const apr_bucket *src);
-
-/******* /.well-known/h2/state handler ****************************************/
-
-int h2_filter_h2_status_handler(request_rec *r);
-
-#endif /* __mod_h2__h2_filter__ */
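The removed observer bucket was a metadata bucket whose callback fires on the events declared above, notably right before the bucket is sent on the master connection and before it is destroyed. A hedged usage sketch follows; the handler and context names are made up.

    /* Illustrative only: attach an observer bucket to a brigade so content
     * can be inserted just before the bucket reaches the client connection. */
    static apr_status_t my_observer(void *ctx, h2_bucket_event event, apr_bucket *b)
    {
        if (event == H2_BUCKET_EV_BEFORE_MASTER_SEND) {
            /* e.g. generate buckets and insert them after b */
        }
        return APR_SUCCESS;
    }

    static void add_observer(apr_bucket_brigade *bb, apr_bucket_alloc_t *list,
                             void *my_ctx)
    {
        apr_bucket *b = h2_bucket_observer_create(list, my_observer, my_ctx);
        APR_BRIGADE_INSERT_TAIL(bb, b);
    }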
diff --git a/modules/http2/h2_headers.c b/modules/http2/h2_headers.c
index b4e18f2127..29d86fdff2 100644
--- a/modules/http2/h2_headers.c
+++ b/modules/http2/h2_headers.c
@@ -27,7 +27,7 @@
#include <nghttp2/nghttp2.h>
#include "h2_private.h"
-#include "h2_h2.h"
+#include "h2_protocol.h"
#include "h2_config.h"
#include "h2_util.h"
#include "h2_request.h"
diff --git a/modules/http2/h2_mplx.c b/modules/http2/h2_mplx.c
index e02ad4e58b..e56d9f5b3b 100644
--- a/modules/http2/h2_mplx.c
+++ b/modules/http2/h2_mplx.c
@@ -36,14 +36,14 @@
#include "h2_private.h"
#include "h2_bucket_beam.h"
#include "h2_config.h"
-#include "h2_conn.h"
-#include "h2_ctx.h"
-#include "h2_h2.h"
+#include "h2_c1.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_request.h"
#include "h2_stream.h"
#include "h2_session.h"
-#include "h2_task.h"
+#include "h2_c2.h"
#include "h2_workers.h"
#include "h2_util.h"
@@ -56,25 +56,28 @@ typedef struct {
apr_size_t count;
} stream_iter_ctx;
-/**
- * Naming convention for static functions:
- * - m_*: function only called from the master connection
- * - s_*: function only called from a secondary connection
- * - t_*: function only called from a h2_task holder
- * - mst_*: function called from everyone
- */
-
-static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task);
+static apr_status_t s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx);
static apr_status_t m_be_annoyed(h2_mplx *m);
-apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
+static apr_status_t mplx_pollset_create(h2_mplx *m);
+static apr_status_t mplx_pollset_add(h2_mplx *m, h2_conn_ctx_t *conn_ctx);
+static apr_status_t mplx_pollset_remove(h2_mplx *m, h2_conn_ctx_t *conn_ctx);
+static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx);
+
+static apr_pool_t *pchild;
+
+apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s)
{
+ pchild = pool;
return APR_SUCCESS;
}
#define H2_MPLX_ENTER(m) \
- do { apr_status_t rv; if ((rv = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\
- return rv;\
+ do { apr_status_t rv_lock; if ((rv_lock = apr_thread_mutex_lock(m->lock)) != APR_SUCCESS) {\
+ return rv_lock;\
} } while(0)
#define H2_MPLX_LEAVE(m) \
@@ -89,56 +92,84 @@ apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s)
#define H2_MPLX_LEAVE_MAYBE(m, dolock) \
if (dolock) apr_thread_mutex_unlock(m->lock)
-static void mst_check_data_for(h2_mplx *m, int stream_id, int mplx_is_locked);
+static void c1_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+{
+ h2_stream_in_consumed(ctx, length);
+}
-static void mst_stream_input_ev(void *ctx, h2_bucket_beam *beam)
+static int stream_is_running(h2_stream *stream)
{
- h2_stream *stream = ctx;
- h2_mplx *m = stream->session->mplx;
- apr_atomic_set32(&m->event_pending, 1);
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ return conn_ctx && conn_ctx->started_at != 0 && !conn_ctx->done;
}
-static void m_stream_input_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
+int h2_mplx_c1_stream_is_running(h2_mplx *m, h2_stream *stream)
{
- h2_stream_in_consumed(ctx, length);
+ int rv;
+
+ H2_MPLX_ENTER(m);
+ rv = stream_is_running(stream);
+ H2_MPLX_LEAVE(m);
+ return rv;
}
-static void ms_stream_joined(h2_mplx *m, h2_stream *stream)
+static void c1c2_stream_joined(h2_mplx *m, h2_stream *stream)
{
- ap_assert(!h2_task_has_started(stream->task) || stream->task->worker_done);
+ ap_assert(!stream_is_running(stream));
- h2_ififo_remove(m->readyq, stream->id);
h2_ihash_remove(m->shold, stream->id);
- h2_ihash_add(m->spurge, stream);
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
}
static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
{
- ap_assert(stream->state == H2_SS_CLEANUP);
+ h2_conn_ctx_t *c2_ctx = stream->c2? h2_conn_ctx_get(stream->c2) : NULL;
- if (stream->input) {
- h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
- h2_beam_abort(stream->input);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, unsubscribing from beam events"));
if (stream->output) {
- h2_beam_on_produced(stream->output, NULL, NULL);
- h2_beam_leave(stream->output);
+ h2_beam_on_was_empty(stream->output, NULL, NULL);
+ }
+ if (stream->input) {
+ h2_beam_on_received(stream->input, NULL, NULL);
+ h2_beam_on_consumed(stream->input, NULL, NULL);
}
-
- h2_stream_cleanup(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, removing from registries"));
+ ap_assert(stream->state == H2_SS_CLEANUP);
+ h2_stream_cleanup(stream);
h2_ihash_remove(m->streams, stream->id);
h2_iq_remove(m->q, stream->id);
-
- if (!h2_task_has_started(stream->task) || stream->task->done_done) {
- ms_stream_joined(m, stream);
+
+ if (c2_ctx) {
+ if (!stream_is_running(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is done, move to spurge"));
+ /* processing has finished */
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is running, abort"));
+ /* c2 is still running */
+ stream->c2->aborted = 1;
+ if (stream->input) {
+ h2_beam_abort(stream->input, m->c1);
+ }
+ if (stream->output) {
+ h2_beam_abort(stream->output, m->c1);
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, c2 is running, move to shold"));
+ h2_ihash_add(m->shold, stream);
+ }
}
else {
- h2_ififo_remove(m->readyq, stream->id);
- h2_ihash_add(m->shold, stream);
- if (stream->task) {
- stream->task->c->aborted = 1;
- }
+ /* never started */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ H2_STRM_MSG(stream, "cleanup, never started, move to spurge"));
+ APR_ARRAY_PUSH(m->spurge, h2_stream *) = stream;
}
}
@@ -153,179 +184,111 @@ static void m_stream_cleanup(h2_mplx *m, h2_stream *stream)
* their HTTP/1 cousins, the separate allocator seems to work better
* than protecting a shared h2_session one with an own lock.
*/
-h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *parent,
+h2_mplx *h2_mplx_c1_create(h2_stream *stream0, server_rec *s, apr_pool_t *parent,
h2_workers *workers)
{
+ h2_conn_ctx_t *conn_ctx;
apr_status_t status = APR_SUCCESS;
apr_allocator_t *allocator;
- apr_thread_mutex_t *mutex;
- h2_mplx *m;
+ apr_thread_mutex_t *mutex = NULL;
+ h2_mplx *m = NULL;
m = apr_pcalloc(parent, sizeof(h2_mplx));
- if (m) {
- m->id = c->id;
- m->c = c;
- m->s = s;
-
- /* We create a pool with its own allocator to be used for
- * processing secondary connections. This is the only way to have the
- * processing independent of its parent pool in the sense that it
- * can work in another thread. Also, the new allocator needs its own
- * mutex to synchronize sub-pools.
- */
- status = apr_allocator_create(&allocator);
- if (status != APR_SUCCESS) {
- return NULL;
- }
- apr_allocator_max_free_set(allocator, ap_max_mem_free);
- apr_pool_create_ex(&m->pool, parent, NULL, allocator);
- if (!m->pool) {
- apr_allocator_destroy(allocator);
- return NULL;
- }
- apr_pool_tag(m->pool, "h2_mplx");
- apr_allocator_owner_set(allocator, m->pool);
- status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
- m->pool);
- if (status != APR_SUCCESS) {
- apr_pool_destroy(m->pool);
- return NULL;
- }
- apr_allocator_mutex_set(allocator, mutex);
+ m->stream0 = stream0;
+ m->c1 = stream0->c2;
+ m->s = s;
+ m->id = m->c1->id;
+
+ /* We create a pool with its own allocator to be used for
+ * processing secondary connections. This is the only way to have the
+ * processing independent of its parent pool in the sense that it
+ * can work in another thread. Also, the new allocator needs its own
+ * mutex to synchronize sub-pools.
+ */
+ status = apr_allocator_create(&allocator);
+ if (status != APR_SUCCESS) {
+ allocator = NULL;
+ goto failure;
+ }
- status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
- m->pool);
- if (status != APR_SUCCESS) {
- apr_pool_destroy(m->pool);
- return NULL;
- }
-
- m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
- m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-
- m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->spurge = h2_ihash_create(m->pool, offsetof(h2_stream,id));
- m->q = h2_iq_create(m->pool, m->max_streams);
-
- status = h2_ififo_set_create(&m->readyq, m->pool, m->max_streams);
- if (status != APR_SUCCESS) {
- apr_pool_destroy(m->pool);
- return NULL;
- }
+ apr_allocator_max_free_set(allocator, ap_max_mem_free);
+ apr_pool_create_ex(&m->pool, parent, NULL, allocator);
+ if (!m->pool) goto failure;
- m->workers = workers;
- m->max_active = workers->max_workers;
- m->limit_active = 6; /* the original h1 max parallel connections */
- m->last_mood_change = apr_time_now();
- m->mood_update_interval = apr_time_from_msec(100);
-
- m->spare_secondary = apr_array_make(m->pool, 10, sizeof(conn_rec*));
- }
- return m;
-}
+ apr_pool_tag(m->pool, "h2_mplx");
+ apr_allocator_owner_set(allocator, m->pool);
-int h2_mplx_m_shutdown(h2_mplx *m)
-{
- int max_stream_started = 0;
-
- H2_MPLX_ENTER(m);
+ status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
+ apr_allocator_mutex_set(allocator, mutex);
- max_stream_started = m->max_stream_started;
- /* Clear schedule queue, disabling existing streams from starting */
- h2_iq_clear(m->q);
+ status = apr_thread_mutex_create(&m->lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
- H2_MPLX_LEAVE(m);
- return max_stream_started;
-}
+ status = apr_thread_cond_create(&m->join_wait, m->pool);
+ if (APR_SUCCESS != status) goto failure;
-static int m_input_consumed_signal(h2_mplx *m, h2_stream *stream)
-{
- if (stream->input) {
- return h2_beam_report_consumption(stream->input);
- }
- return 0;
-}
+ m->max_streams = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
+ m->stream_max_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
-static int m_report_consumption_iter(void *ctx, void *val)
-{
- h2_stream *stream = val;
- h2_mplx *m = ctx;
-
- m_input_consumed_signal(m, stream);
- if (stream->state == H2_SS_CLOSED_L
- && (!stream->task || stream->task->worker_done)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
- nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
- stream->id, NGHTTP2_NO_ERROR);
- }
- return 1;
-}
+ m->streams = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->shold = h2_ihash_create(m->pool, offsetof(h2_stream,id));
+ m->spurge = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+ m->q = h2_iq_create(m->pool, m->max_streams);
-static int s_output_consumed_signal(h2_mplx *m, h2_task *task)
-{
- if (task->output.beam) {
- return h2_beam_report_consumption(task->output.beam);
+ m->workers = workers;
+ m->processing_max = workers->max_workers;
+ m->processing_limit = 6; /* the original h1 max parallel connections */
+ m->last_mood_change = apr_time_now();
+ m->mood_update_interval = apr_time_from_msec(100);
+
+ status = mplx_pollset_create(m);
+ if (APR_SUCCESS != status) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c1, APLOGNO(10308)
+ "nghttp2: could not create pollset");
+ goto failure;
}
- return 0;
-}
+ m->streams_to_poll = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+ m->streams_ev_in = apr_array_make(m->pool, 10, sizeof(h2_stream*));
+ m->streams_ev_out = apr_array_make(m->pool, 10, sizeof(h2_stream*));
-static int m_stream_destroy_iter(void *ctx, void *val)
-{
- h2_mplx *m = ctx;
- h2_stream *stream = val;
+#if !H2_POLL_STREAMS
+ status = apr_thread_mutex_create(&m->poll_lock, APR_THREAD_MUTEX_DEFAULT,
+ m->pool);
+ if (APR_SUCCESS != status) goto failure;
+ m->streams_input_read = h2_iq_create(m->pool, 10);
+ m->streams_output_written = h2_iq_create(m->pool, 10);
+#endif
- h2_ihash_remove(m->spurge, stream->id);
- ap_assert(stream->state == H2_SS_CLEANUP);
-
- if (stream->input) {
- /* Process outstanding events before destruction */
- m_input_consumed_signal(m, stream);
- h2_beam_log(stream->input, m->c, APLOG_TRACE2, "stream_destroy");
- h2_beam_destroy(stream->input);
- stream->input = NULL;
- }
+ conn_ctx = h2_conn_ctx_get(m->c1);
+ mplx_pollset_add(m, conn_ctx);
- if (stream->task) {
- h2_task *task = stream->task;
- conn_rec *secondary;
- int reuse_secondary = 0;
-
- stream->task = NULL;
- secondary = task->c;
- if (secondary) {
- if (m->s->keep_alive_max == 0 || secondary->keepalives < m->s->keep_alive_max) {
- reuse_secondary = ((m->spare_secondary->nelts < (m->limit_active * 3 / 2))
- && !task->rst_error);
- }
-
- if (reuse_secondary) {
- h2_beam_log(task->output.beam, m->c, APLOG_DEBUG,
- APLOGNO(03385) "h2_task_destroy, reuse secondary");
- h2_task_destroy(task);
- APR_ARRAY_PUSH(m->spare_secondary, conn_rec*) = secondary;
- }
- else {
- h2_beam_log(task->output.beam, m->c, APLOG_TRACE1,
- "h2_task_destroy, destroy secondary");
- h2_secondary_destroy(secondary);
- }
- }
+ return m;
+
+failure:
+ if (m->pool) {
+ apr_pool_destroy(m->pool);
}
- h2_stream_destroy(stream);
- return 0;
+ else if (allocator) {
+ apr_allocator_destroy(allocator);
+ }
+ return NULL;
}
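The allocator/pool/mutex sequence that h2_mplx_c1_create goes through can be read in isolation: the mplx pool gets its own allocator, and the allocator its own mutex, so the pool can be used from a different thread than its parent. A self-contained sketch under those assumptions, with error handling trimmed and the 1 MB free-list cap chosen arbitrarily:

    #include <apr_pools.h>
    #include <apr_allocator.h>
    #include <apr_thread_mutex.h>

    static apr_pool_t *make_independent_pool(apr_pool_t *parent)
    {
        apr_allocator_t *allocator;
        apr_thread_mutex_t *mutex;
        apr_pool_t *pool = NULL;

        if (apr_allocator_create(&allocator) != APR_SUCCESS)
            return NULL;
        apr_allocator_max_free_set(allocator, 1024 * 1024);  /* cap retained memory */

        apr_pool_create_ex(&pool, parent, NULL, allocator);
        if (!pool) {
            apr_allocator_destroy(allocator);
            return NULL;
        }
        apr_pool_tag(pool, "independent");
        apr_allocator_owner_set(allocator, pool);  /* destroying the pool frees the allocator */

        /* the allocator needs its own mutex once sub-pools are created or
         * destroyed from other threads */
        if (apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool) != APR_SUCCESS) {
            apr_pool_destroy(pool);
            return NULL;
        }
        apr_allocator_mutex_set(allocator, mutex);
        return pool;
    }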
-static void m_purge_streams(h2_mplx *m, int lock)
+int h2_mplx_c1_shutdown(h2_mplx *m)
{
- if (!h2_ihash_empty(m->spurge)) {
- H2_MPLX_ENTER_MAYBE(m, lock);
- while (!h2_ihash_iter(m->spurge, m_stream_destroy_iter, m)) {
- /* repeat until empty */
- }
- H2_MPLX_LEAVE_MAYBE(m, lock);
- }
+ int max_stream_id_started = 0;
+
+ H2_MPLX_ENTER(m);
+
+ max_stream_id_started = m->max_stream_id_started;
+ /* Clear schedule queue, disabling existing streams from starting */
+ h2_iq_clear(m->q);
+
+ H2_MPLX_LEAVE(m);
+ return max_stream_id_started;
}
typedef struct {
@@ -339,7 +302,7 @@ static int m_stream_iter_wrap(void *ctx, void *stream)
return x->cb(stream, x->ctx);
}
-apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
+apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
{
stream_iter_ctx_t x;
@@ -356,24 +319,22 @@ apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx)
static int m_report_stream_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
- h2_task *task = stream->task;
- if (APLOGctrace1(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"),
- !!stream->task, stream->scheduled, h2_stream_is_ready(stream),
- (long)h2_beam_get_buffered(stream->output));
- }
- if (task) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(stream->c2);
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1,
+ H2_STRM_MSG(stream, "started=%d, scheduled=%d, ready=%d, out_buffer=%ld"),
+ !!stream->c2, stream->scheduled, h2_stream_is_ready(stream),
+ (long)(stream->output? h2_beam_get_buffered(stream->output) : -1));
+ if (conn_ctx) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
H2_STRM_MSG(stream, "->03198: %s %s %s"
"[started=%d/done=%d]"),
- task->request->method, task->request->authority,
- task->request->path, task->worker_started,
- task->worker_done);
+ conn_ctx->request->method, conn_ctx->request->authority,
+ conn_ctx->request->path, conn_ctx->started_at != 0,
+ conn_ctx->done);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, /* NO APLOGNO */
- H2_STRM_MSG(stream, "->03198: no task"));
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, /* NO APLOGNO */
+ H2_STRM_MSG(stream, "->03198: not started"));
}
return 1;
}
@@ -381,9 +342,9 @@ static int m_report_stream_iter(void *ctx, void *val) {
static int m_unexpected_stream_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, /* NO APLOGNO */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, /* NO APLOGNO */
H2_STRM_MSG(stream, "unexpected, started=%d, scheduled=%d, ready=%d"),
- !!stream->task, stream->scheduled, h2_stream_is_ready(stream));
+ !!stream->c2, stream->scheduled, h2_stream_is_ready(stream));
return 1;
}
@@ -391,9 +352,9 @@ static int m_stream_cancel_iter(void *ctx, void *val) {
h2_mplx *m = ctx;
h2_stream *stream = val;
- /* disabled input consumed reporting */
+ /* disable input consumed reporting */
if (stream->input) {
- h2_beam_on_consumed(stream->input, NULL, NULL, NULL);
+ h2_beam_abort(stream->input, m->c1);
}
/* take over event monitoring */
h2_stream_set_monitor(stream, NULL);
@@ -405,32 +366,32 @@ static int m_stream_cancel_iter(void *ctx, void *val) {
return 0;
}
-void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
+void h2_mplx_c1_destroy(h2_mplx *m)
{
apr_status_t status;
int i, wait_secs = 60, old_aborted;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
"h2_mplx(%ld): start release", m->id);
/* How to shut down a h2 connection:
- * 0. abort and tell the workers that no more tasks will come from us */
+ * 0. abort and tell the workers that no more work will come from us */
m->aborted = 1;
h2_workers_unregister(m->workers, m);
H2_MPLX_ENTER_ALWAYS(m);
- /* While really terminating any secondary connections, treat the master
+ /* While really terminating any c2 connections, treat the master
* connection as aborted. It's not as if we could send any more data
* at this point. */
- old_aborted = m->c->aborted;
- m->c->aborted = 1;
+ old_aborted = m->c1->aborted;
+ m->c1->aborted = 1;
/* How to shut down a h2 connection:
* 1. cancel all streams still active */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d active tasks",
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ "h2_mplx(%ld): release, %d/%d/%d streams (total/hold/purge), %d streams",
m->id, (int)h2_ihash_count(m->streams),
- (int)h2_ihash_count(m->shold), (int)h2_ihash_count(m->spurge), m->tasks_active);
+ (int)h2_ihash_count(m->shold), m->spurge->nelts, m->processing_count);
while (!h2_ihash_iter(m->streams, m_stream_cancel_iter, m)) {
/* until empty */
}
@@ -440,18 +401,17 @@ void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
ap_assert(h2_iq_empty(m->q));
/* 3. while workers are busy on this connection, meaning they
- * are processing tasks from this connection, wait on them finishing
+ * are processing streams from this connection, wait on them finishing
* in order to wake us and let us check again.
* Eventually, this has to succeed. */
- m->join_wait = wait;
- for (i = 0; h2_ihash_count(m->shold) > 0; ++i) {
- status = apr_thread_cond_timedwait(wait, m->lock, apr_time_from_sec(wait_secs));
+ for (i = 0; h2_ihash_count(m->shold) > 0; ++i) {
+ status = apr_thread_cond_timedwait(m->join_wait, m->lock, apr_time_from_sec(wait_secs));
if (APR_STATUS_IS_TIMEUP(status)) {
/* This can happen if we have very long running requests
* that do not time out on IO. */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c, APLOGNO(03198)
- "h2_mplx(%ld): waited %d sec for %d tasks",
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1, APLOGNO(03198)
+ "h2_mplx(%ld): waited %d sec for %d streams",
m->id, i*wait_secs, (int)h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_report_stream_iter, m);
}
@@ -459,83 +419,108 @@ void h2_mplx_m_release_and_join(h2_mplx *m, apr_thread_cond_t *wait)
m->join_wait = NULL;
/* 4. With all workers done, all streams should be in spurge */
- ap_assert(m->tasks_active == 0);
+ ap_assert(m->processing_count == 0);
if (!h2_ihash_empty(m->shold)) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c, APLOGNO(03516)
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, m->c1, APLOGNO(03516)
"h2_mplx(%ld): unexpected %d streams in hold",
m->id, (int)h2_ihash_count(m->shold));
h2_ihash_iter(m->shold, m_unexpected_stream_iter, m);
}
- m->c->aborted = old_aborted;
+ m->c1->aborted = old_aborted;
H2_MPLX_LEAVE(m);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c, "h2_mplx(%ld): released", m->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1, "h2_mplx(%ld): released", m->id);
}
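The join loop above is a plain condition-variable wait: while streams remain in shold, the c1 thread timed-waits on m->join_wait (signalled by h2_mplx_worker_c2_done) and emits a diagnostic whenever a full minute passes without progress. Roughly, and with hypothetical names:

    #include <apr_thread_cond.h>
    #include <apr_time.h>

    /* Sketch of the join pattern; 'lock' must be held on entry, and the worker
     * side signals 'join_wait' each time it finishes a unit of work. */
    static void join_all(apr_thread_mutex_t *lock, apr_thread_cond_t *join_wait,
                         int (*pending)(void *ctx), void *ctx)
    {
        int rounds = 0;

        while (pending(ctx) > 0) {
            apr_status_t rv = apr_thread_cond_timedwait(join_wait, lock,
                                                        apr_time_from_sec(60));
            if (APR_STATUS_IS_TIMEUP(rv)) {
                ++rounds;  /* nothing finished within a minute: report, keep waiting */
            }
        }
        (void)rounds;
    }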
-apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, h2_stream *stream)
+apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, h2_stream *stream,
+ int *pstream_count)
{
H2_MPLX_ENTER(m);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
H2_STRM_MSG(stream, "cleanup"));
- m_stream_cleanup(m, stream);
-
+ m_stream_cleanup(m, stream);
+ *pstream_count = (int)h2_ihash_count(m->streams);
H2_MPLX_LEAVE(m);
return APR_SUCCESS;
}
-h2_stream *h2_mplx_t_stream_get(h2_mplx *m, h2_task *task)
+const h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id)
{
h2_stream *s = NULL;
H2_MPLX_ENTER_ALWAYS(m);
-
- s = h2_ihash_get(m->streams, task->stream_id);
-
+ s = h2_ihash_get(m->streams, stream_id);
H2_MPLX_LEAVE(m);
+
return s;
}
-static void mst_output_produced(void *ctx, h2_bucket_beam *beam, apr_off_t bytes)
+static void c1_purge_streams(h2_mplx *m)
{
- h2_stream *stream = ctx;
- h2_mplx *m = stream->session->mplx;
-
- mst_check_data_for(m, stream->id, 0);
+ h2_stream *stream;
+ int i;
+
+ for (i = 0; i < m->spurge->nelts; ++i) {
+ stream = APR_ARRAY_IDX(m->spurge, i, h2_stream*);
+ ap_assert(stream->state == H2_SS_CLEANUP);
+ if (stream->input) {
+ h2_beam_destroy(stream->input, m->c1);
+ stream->input = NULL;
+ }
+ if (stream->c2) {
+ conn_rec *c2 = stream->c2;
+ h2_conn_ctx_t *c2_ctx = h2_conn_ctx_get(c2);
+ apr_status_t rv;
+
+ stream->c2 = NULL;
+ ap_assert(c2_ctx);
+ rv = mplx_pollset_remove(m, c2_ctx);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, m->c1,
+ "h2_mplx(%ld-%d): pollset_remove %d on purge",
+ m->id, stream->id, c2_ctx->stream_id);
+ }
+ h2_conn_ctx_destroy(c2);
+ h2_c2_destroy(c2);
+ }
+ h2_stream_destroy(stream);
+ }
+ apr_array_clear(m->spurge);
}
-static apr_status_t t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx)
{
- h2_stream *stream = h2_ihash_get(m->streams, stream_id);
-
- if (!stream || !stream->task || m->aborted) {
- return APR_ECONNABORTED;
- }
-
- ap_assert(stream->output == NULL);
- stream->output = beam;
-
- if (APLOGctrace2(m->c)) {
- h2_beam_log(beam, stream->task->c, APLOG_TRACE2, "out_open");
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->task->c,
- "h2_mplx(%s): out open", stream->task->id);
+ apr_status_t rv;
+
+ H2_MPLX_ENTER(m);
+
+ if (m->aborted) {
+ rv = APR_ECONNABORTED;
+ goto cleanup;
}
-
- h2_beam_on_produced(stream->output, mst_output_produced, stream);
- if (stream->task->output.copy_files) {
- h2_beam_on_file_beam(stream->output, h2_beam_no_files, NULL);
+ /* Purge (destroy) streams outside of pollset processing.
+ * Streams that are registered in the pollset will be removed
+ * when they are destroyed, but the pollset works on copies
+ * of these registrations. So, if we destroy streams while
+ * processing pollset events, we might access freed memory.
+ */
+ if (m->spurge->nelts) {
+ c1_purge_streams(m);
}
-
- /* we might see some file buckets in the output, see
- * if we have enough handles reserved. */
- mst_check_data_for(m, stream->id, 1);
- return APR_SUCCESS;
+ rv = mplx_pollset_poll(m, timeout, on_stream_input, on_stream_output, on_ctx);
+
+cleanup:
+ H2_MPLX_LEAVE(m);
+ return rv;
}
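The ordering enforced here is worth spelling out: anything whose pollfd client_data could still come back from apr_pollset_poll() must only be destroyed before the next poll call, never while walking poll results. A reduced sketch of that discipline, with purge_one() and on_poll_event() as hypothetical helpers:

    #include <apr_poll.h>
    #include <apr_tables.h>

    void purge_one(void *obj);                           /* hypothetical */
    void on_poll_event(void *obj, apr_int16_t revents);  /* hypothetical */

    static apr_status_t poll_once(apr_pollset_t *pollset,
                                  apr_array_header_t *to_purge,
                                  apr_interval_time_t timeout)
    {
        const apr_pollfd_t *results;
        apr_int32_t nresults, i;
        apr_status_t rv;

        /* destroy deferred objects first; no poll results are in flight here */
        for (i = 0; i < to_purge->nelts; ++i) {
            purge_one(APR_ARRAY_IDX(to_purge, i, void *));
        }
        apr_array_clear(to_purge);

        rv = apr_pollset_poll(pollset, timeout, &nresults, &results);
        for (i = 0; rv == APR_SUCCESS && i < nresults; ++i) {
            /* client_data is guaranteed to still be alive in this loop */
            on_poll_event(results[i].client_data, results[i].rtnevents);
        }
        return rv;
    }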
-apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
+apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
+ h2_session *session)
{
apr_status_t status;
@@ -545,212 +530,326 @@ apr_status_t h2_mplx_t_out_open(h2_mplx *m, int stream_id, h2_bucket_beam *beam)
status = APR_ECONNABORTED;
}
else {
- status = t_out_open(m, stream_id, beam);
+ h2_iq_sort(m->q, cmp, session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ "h2_mplx(%ld): reprioritize streams", m->id);
+ status = APR_SUCCESS;
}
H2_MPLX_LEAVE(m);
return status;
}
-static apr_status_t s_out_close(h2_mplx *m, h2_task *task)
+static void ms_register_if_needed(h2_mplx *m, int from_master)
{
- apr_status_t status = APR_SUCCESS;
+ if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
+ apr_status_t status = h2_workers_register(m->workers, m);
+ if (status == APR_SUCCESS) {
+ m->is_registered = 1;
+ }
+ else if (from_master) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c1, APLOGNO(10021)
+ "h2_mplx(%ld): register at workers", m->id);
+ }
+ }
+}
+
+static apr_status_t c1_process_stream(h2_mplx *m,
+ h2_stream *stream,
+ h2_stream_pri_cmp_fn *cmp,
+ h2_session *session)
+{
+ apr_status_t rv;
- if (!task) {
- return APR_ECONNABORTED;
+ if (m->aborted) {
+ rv = APR_ECONNABORTED;
+ goto cleanup;
}
- if (task->c) {
- ++task->c->keepalives;
+ if (!stream->request) {
+ rv = APR_EINVAL;
+ goto cleanup;
}
-
- if (!h2_ihash_get(m->streams, task->stream_id)) {
- return APR_ECONNABORTED;
+ if (APLOGctrace1(m->c1)) {
+ const h2_request *r = stream->request;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process %s %s://%s%s chunked=%d"),
+ r->method, r->scheme, r->authority, r->path, r->chunked);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, task->c,
- "h2_mplx(%s): close", task->id);
- status = h2_beam_close(task->output.beam);
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "out_close");
- s_output_consumed_signal(m, task);
- mst_check_data_for(m, task->stream_id, 1);
- return status;
+ rv = h2_stream_setup_input(stream);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ stream->scheduled = 1;
+ h2_ihash_add(m->streams, stream);
+ if (h2_stream_is_ready(stream)) {
+ /* already have a response */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process, ready already"));
+ }
+ else {
+ h2_iq_add(m->q, stream->id, cmp, session);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ H2_STRM_MSG(stream, "process, added to q"));
+ }
+
+cleanup:
+ return rv;
}
-apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
- apr_thread_cond_t *iowait)
+apr_status_t h2_mplx_c1_process(h2_mplx *m,
+ h2_iqueue *ready_to_process,
+ h2_stream_get_fn *get_stream,
+ h2_stream_pri_cmp_fn *stream_pri_cmp,
+ h2_session *session,
+ int *pstream_count)
{
- apr_status_t status;
-
+ apr_status_t rv;
+ int sid;
+
H2_MPLX_ENTER(m);
- if (m->aborted) {
- status = APR_ECONNABORTED;
- }
- else if (h2_mplx_m_has_master_events(m)) {
- status = APR_SUCCESS;
- }
- else {
- m_purge_streams(m, 0);
- h2_ihash_iter(m->streams, m_report_consumption_iter, m);
- m->added_output = iowait;
- status = apr_thread_cond_timedwait(m->added_output, m->lock, timeout);
- if (APLOGctrace2(m->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): trywait on data for %f ms)",
- m->id, timeout/1000.0);
+ while ((sid = h2_iq_shift(ready_to_process)) > 0) {
+ h2_stream *stream = get_stream(session, sid);
+ if (stream) {
+ ap_assert(!stream->scheduled);
+ rv = c1_process_stream(session->mplx, stream, stream_pri_cmp, session);
+ if (APR_SUCCESS != rv) {
+ h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
+ }
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
+ "h2_stream(%ld-%d): not found to process", m->id, sid);
}
- m->added_output = NULL;
}
+ ms_register_if_needed(m, 1);
+ *pstream_count = (int)h2_ihash_count(m->streams);
+#if APR_POOL_DEBUG
+ do {
+ apr_size_t mem_g, mem_m, mem_s, mem_w, mem_c1;
+
+ mem_g = pchild? apr_pool_num_bytes(pchild, 1) : 0;
+ mem_m = apr_pool_num_bytes(m->pool, 1);
+ mem_s = apr_pool_num_bytes(session->pool, 1);
+ mem_w = apr_pool_num_bytes(m->workers->pool, 1);
+ mem_c1 = apr_pool_num_bytes(m->c1->pool, 1);
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, m->c1,
+ "h2_mplx(%ld): child mem=%ld, mplx mem=%ld, session mem=%ld, workers=%ld, c1=%ld",
+ m->id, (long)mem_g, (long)mem_m, (long)mem_s, (long)mem_w, (long)mem_c1);
+
+ } while (0);
+#endif
H2_MPLX_LEAVE(m);
- return status;
+ return rv;
}
-static void mst_check_data_for(h2_mplx *m, int stream_id, int mplx_is_locked)
+apr_status_t h2_mplx_c1_fwd_input(h2_mplx *m, struct h2_iqueue *input_pending,
+ h2_stream_get_fn *get_stream,
+ struct h2_session *session)
{
- /* If m->lock is already held, we must release during h2_ififo_push()
- * which can wait on its not_full condition, causing a deadlock because
- * no one would then be able to acquire m->lock to empty the fifo.
- */
- H2_MPLX_LEAVE_MAYBE(m, mplx_is_locked);
- if (h2_ififo_push(m->readyq, stream_id) == APR_SUCCESS) {
- H2_MPLX_ENTER_ALWAYS(m);
- apr_atomic_set32(&m->event_pending, 1);
- if (m->added_output) {
- apr_thread_cond_signal(m->added_output);
+ int sid;
+
+ H2_MPLX_ENTER(m);
+
+ while ((sid = h2_iq_shift(input_pending)) > 0) {
+ h2_stream *stream = get_stream(session, sid);
+ if (stream) {
+ H2_MPLX_LEAVE(m);
+ h2_stream_flush_input(stream);
+ H2_MPLX_ENTER(m);
}
- H2_MPLX_LEAVE_MAYBE(m, !mplx_is_locked);
- }
- else {
- H2_MPLX_ENTER_MAYBE(m, mplx_is_locked);
}
+
+ H2_MPLX_LEAVE(m);
+ return APR_SUCCESS;
}
-apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx)
+static void c2_beam_input_write_notify(void *ctx, h2_bucket_beam *beam)
{
- apr_status_t status;
-
- H2_MPLX_ENTER(m);
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
- if (m->aborted) {
- status = APR_ECONNABORTED;
- }
- else {
- h2_iq_sort(m->q, cmp, ctx);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): reprioritize tasks", m->id);
- status = APR_SUCCESS;
+ (void)beam;
+ if (conn_ctx && conn_ctx->stream_id && conn_ctx->pipe_in_prod[H2_PIPE_IN]) {
+ apr_file_putc(1, conn_ctx->pipe_in_prod[H2_PIPE_IN]);
}
+}
- H2_MPLX_LEAVE(m);
- return status;
+static void c2_beam_input_read_notify(void *ctx, h2_bucket_beam *beam)
+{
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+ if (conn_ctx->pipe_in_drain[H2_PIPE_IN]) {
+ apr_file_putc(1, conn_ctx->pipe_in_drain[H2_PIPE_IN]);
+ }
+#if !H2_POLL_STREAMS
+ else {
+ apr_thread_mutex_lock(conn_ctx->mplx->poll_lock);
+ h2_iq_append(conn_ctx->mplx->streams_input_read, conn_ctx->stream_id);
+ apr_pollset_wakeup(conn_ctx->mplx->pollset);
+ apr_thread_mutex_unlock(conn_ctx->mplx->poll_lock);
+ }
+#endif
+ }
}
-static void ms_register_if_needed(h2_mplx *m, int from_master)
+static void c2_beam_output_write_notify(void *ctx, h2_bucket_beam *beam)
{
- if (!m->aborted && !m->is_registered && !h2_iq_empty(m->q)) {
- apr_status_t status = h2_workers_register(m->workers, m);
- if (status == APR_SUCCESS) {
- m->is_registered = 1;
+ conn_rec *c = ctx;
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+
+ if (conn_ctx && conn_ctx->stream_id) {
+ if (conn_ctx->pipe_out_prod[H2_PIPE_IN]) {
+ apr_file_putc(1, conn_ctx->pipe_out_prod[H2_PIPE_IN]);
}
- else if (from_master) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, m->c, APLOGNO(10021)
- "h2_mplx(%ld): register at workers", m->id);
+#if !H2_POLL_STREAMS
+ else {
+ apr_thread_mutex_lock(conn_ctx->mplx->poll_lock);
+ h2_iq_append(conn_ctx->mplx->streams_output_written, conn_ctx->stream_id);
+ apr_pollset_wakeup(conn_ctx->mplx->pollset);
+ apr_thread_mutex_unlock(conn_ctx->mplx->poll_lock);
}
+#endif
}
}
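All of these notify callbacks reduce to the same trick: the c2 side writes a single byte into a pipe whose read end is registered in the c1 pollset, which is enough to make apr_pollset_poll() return; c1 then drains the pipe and inspects the beam state. A stripped-down sketch of both halves (drain_pipe() here is a stand-in for the module's own h2_util_drain_pipe):

    #include <apr_file_io.h>

    /* producer side (c2 worker): the byte's value is irrelevant, it only
     * makes the read end readable */
    static void notify(apr_file_t *pipe_write_end)
    {
        apr_file_putc(1, pipe_write_end);
    }

    /* consumer side (c1 poll loop): pipes are level-triggered, so read until
     * the non-blocking pipe is empty */
    static void drain_pipe(apr_file_t *pipe_read_end)
    {
        char buf[64];
        apr_size_t len = sizeof(buf);

        while (apr_file_read(pipe_read_end, buf, &len) == APR_SUCCESS
               && len == sizeof(buf)) {
            len = sizeof(buf);
        }
    }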
-apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
- h2_stream_pri_cmp *cmp, void *ctx)
+static apr_status_t c2_setup_io(h2_mplx *m, conn_rec *c2, h2_stream *stream)
{
- apr_status_t status;
-
- H2_MPLX_ENTER(m);
+ h2_conn_ctx_t *conn_ctx;
+ apr_status_t rv = APR_SUCCESS;
+ const char *action = "init";
- if (m->aborted) {
- status = APR_ECONNABORTED;
+ rv = h2_conn_ctx_init_for_c2(&conn_ctx, c2, m, stream);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ if (!conn_ctx->beam_out) {
+ action = "create output beam";
+ rv = h2_beam_create(&conn_ctx->beam_out, c2, conn_ctx->req_pool,
+ stream->id, "output", 0, c2->base_server->timeout);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ h2_beam_buffer_size_set(conn_ctx->beam_out, m->stream_max_mem);
+ h2_beam_on_was_empty(conn_ctx->beam_out, c2_beam_output_write_notify, c2);
+ }
+
+ if (stream->input) {
+ conn_ctx->beam_in = stream->input;
+ h2_beam_on_was_empty(stream->input, c2_beam_input_write_notify, c2);
+ h2_beam_on_received(stream->input, c2_beam_input_read_notify, c2);
+ h2_beam_on_consumed(stream->input, c1_input_consumed, stream);
}
else {
- status = APR_SUCCESS;
- h2_ihash_add(m->streams, stream);
- if (h2_stream_is_ready(stream)) {
- /* already have a response */
- mst_check_data_for(m, stream->id, 1);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- H2_STRM_MSG(stream, "process, add to readyq"));
+ memset(&conn_ctx->pfd_in_drain, 0, sizeof(conn_ctx->pfd_in_drain));
+ }
+
+#if H2_POLL_STREAMS
+ if (!conn_ctx->mplx_pool) {
+ apr_pool_create(&conn_ctx->mplx_pool, m->pool);
+ apr_pool_tag(conn_ctx->mplx_pool, "H2_MPLX_C2");
+ }
+
+ if (!conn_ctx->pipe_out_prod[H2_PIPE_OUT]) {
+ action = "create output pipe";
+ rv = apr_file_pipe_create_pools(&conn_ctx->pipe_out_prod[H2_PIPE_OUT],
+ &conn_ctx->pipe_out_prod[H2_PIPE_IN],
+ APR_FULL_NONBLOCK,
+ conn_ctx->mplx_pool, c2->pool);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+ conn_ctx->pfd_out_prod.desc_type = APR_POLL_FILE;
+ conn_ctx->pfd_out_prod.desc.f = conn_ctx->pipe_out_prod[H2_PIPE_OUT];
+ conn_ctx->pfd_out_prod.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
+ conn_ctx->pfd_out_prod.client_data = conn_ctx;
+
+ if (stream->input) {
+ if (!conn_ctx->pipe_in_prod[H2_PIPE_OUT]) {
+ action = "create input write pipe";
+ rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in_prod[H2_PIPE_OUT],
+ &conn_ctx->pipe_in_prod[H2_PIPE_IN],
+ APR_READ_BLOCK,
+ c2->pool, conn_ctx->mplx_pool);
+ if (APR_SUCCESS != rv) goto cleanup;
}
- else {
- h2_iq_add(m->q, stream->id, cmp, ctx);
- ms_register_if_needed(m, 1);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- H2_STRM_MSG(stream, "process, added to q"));
+ if (!conn_ctx->pipe_in_drain[H2_PIPE_OUT]) {
+ action = "create input read pipe";
+ rv = apr_file_pipe_create_pools(&conn_ctx->pipe_in_drain[H2_PIPE_OUT],
+ &conn_ctx->pipe_in_drain[H2_PIPE_IN],
+ APR_FULL_NONBLOCK,
+ c2->pool, conn_ctx->mplx_pool);
+ if (APR_SUCCESS != rv) goto cleanup;
}
+ conn_ctx->pfd_in_drain.desc_type = APR_POLL_FILE;
+ conn_ctx->pfd_in_drain.desc.f = conn_ctx->pipe_in_drain[H2_PIPE_OUT];
+ conn_ctx->pfd_in_drain.reqevents = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
+ conn_ctx->pfd_in_drain.client_data = conn_ctx;
}
-
- H2_MPLX_LEAVE(m);
- return status;
+#else
+ memset(&conn_ctx->pfd_out_prod, 0, sizeof(conn_ctx->pfd_out_prod));
+ memset(&conn_ctx->pipe_in_prod, 0, sizeof(conn_ctx->pipe_in_prod));
+ memset(&conn_ctx->pipe_in_drain, 0, sizeof(conn_ctx->pipe_in_drain));
+#endif
+
+cleanup:
+ stream->output = (APR_SUCCESS == rv)? conn_ctx->beam_out : NULL;
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c2,
+ H2_STRM_LOG(APLOGNO(10309), stream,
+ "error %s"), action);
+ }
+ return rv;
}
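Note how each pipe is created with apr_file_pipe_create_pools() so its two ends live in different pools: the end the c1 poll loop uses stays in the mplx pool, the end the worker uses lives with the c2 connection. A condensed sketch of one such notification pipe plus its pollset registration (names hypothetical; the pollset keeps its own copy of the descriptor):

    #include <string.h>
    #include <apr_file_io.h>
    #include <apr_poll.h>

    static apr_status_t setup_notify_pipe(apr_pool_t *mplx_pool, apr_pool_t *c2_pool,
                                          apr_pollset_t *pollset, void *client_data,
                                          apr_file_t **pread, apr_file_t **pwrite)
    {
        apr_pollfd_t pfd;
        apr_status_t rv;

        /* read end allocated from mplx_pool (polled by c1), write end from
         * c2_pool (written by the worker), both non-blocking */
        rv = apr_file_pipe_create_pools(pread, pwrite, APR_FULL_NONBLOCK,
                                        mplx_pool, c2_pool);
        if (rv != APR_SUCCESS) return rv;

        memset(&pfd, 0, sizeof(pfd));
        pfd.desc_type   = APR_POLL_FILE;
        pfd.desc.f      = *pread;
        pfd.reqevents   = APR_POLLIN | APR_POLLERR | APR_POLLHUP;
        pfd.client_data = client_data;  /* how poll results are routed back */
        return apr_pollset_add(pollset, &pfd);
    }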
-static h2_task *s_next_stream_task(h2_mplx *m)
+static conn_rec *s_next_c2(h2_mplx *m)
{
- h2_stream *stream;
+ h2_stream *stream = NULL;
+ apr_status_t rv;
int sid;
- while (!m->aborted && (m->tasks_active < m->limit_active)
+ conn_rec *c2;
+
+ while (!m->aborted && !stream && (m->processing_count < m->processing_limit)
&& (sid = h2_iq_shift(m->q)) > 0) {
-
stream = h2_ihash_get(m->streams, sid);
- if (stream) {
- conn_rec *secondary, **psecondary;
+ }
- psecondary = (conn_rec **)apr_array_pop(m->spare_secondary);
- if (psecondary) {
- secondary = *psecondary;
- secondary->aborted = 0;
- }
- else {
- secondary = h2_secondary_create(m->c, stream->id, m->pool);
- }
-
- if (!stream->task) {
- if (sid > m->max_stream_started) {
- m->max_stream_started = sid;
- }
- if (stream->input) {
- h2_beam_on_consumed(stream->input, mst_stream_input_ev,
- m_stream_input_consumed, stream);
- }
-
- stream->task = h2_task_create(secondary, stream->id,
- stream->request, m, stream->input,
- stream->session->s->timeout,
- m->stream_max_mem);
- if (!stream->task) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOMEM, secondary,
- H2_STRM_LOG(APLOGNO(02941), stream,
- "create task"));
- return NULL;
- }
- }
-
- stream->task->started_at = apr_time_now();
- ++m->tasks_active;
- return stream->task;
+ if (!stream) {
+ if (m->processing_count >= m->processing_limit && !h2_iq_empty(m->q)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c1,
+ "h2_session(%ld): delaying request processing. "
+ "Current limit is %d and %d workers are in use.",
+ m->id, m->processing_limit, m->processing_count);
}
+ return NULL;
}
- if (m->tasks_active >= m->limit_active && !h2_iq_empty(m->q)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, m->c,
- "h2_session(%ld): delaying request processing. "
- "Current limit is %d and %d workers are in use.",
- m->id, m->limit_active, m->tasks_active);
+
+ if (sid > m->max_stream_id_started) {
+ m->max_stream_id_started = sid;
}
- return NULL;
+
+ c2 = h2_c2_create(m->c1, m->pool);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, m->c1,
+ H2_STRM_MSG(stream, "created new c2"));
+
+ rv = c2_setup_io(m, c2, stream);
+ if (APR_SUCCESS != rv) {
+ return NULL;
+ }
+
+ stream->c2 = c2;
+ ++m->processing_count;
+ APR_ARRAY_PUSH(m->streams_to_poll, h2_stream *) = stream;
+ apr_pollset_wakeup(m->pollset);
+
+ return c2;
}
-apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
+apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c)
{
apr_status_t rv = APR_EOF;
- *ptask = NULL;
+ *out_c = NULL;
ap_assert(m);
ap_assert(m->lock);
@@ -762,8 +861,8 @@ apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
rv = APR_EOF;
}
else {
- *ptask = s_next_stream_task(m);
- rv = (*ptask != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
+ *out_c = s_next_c2(m);
+ rv = (*out_c != NULL && !h2_iq_empty(m->q))? APR_EAGAIN : APR_SUCCESS;
}
if (APR_EAGAIN != rv) {
m->is_registered = 0; /* h2_workers will discard this mplx */
@@ -772,85 +871,86 @@ apr_status_t h2_mplx_s_pop_task(h2_mplx *m, h2_task **ptask)
return rv;
}
-static void s_task_done(h2_mplx *m, h2_task *task)
+static void s_c2_done(h2_mplx *m, conn_rec *c2, h2_conn_ctx_t *conn_ctx)
{
h2_stream *stream;
+
+ ap_assert(conn_ctx);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c2,
+ "h2_mplx(%s-%d): c2 done", conn_ctx->id, conn_ctx->stream_id);
+
+ ap_assert(conn_ctx->done == 0);
+ conn_ctx->done = 1;
+ conn_ctx->done_at = apr_time_now();
+ ++c2->keepalives;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ "h2_mplx(%s-%d): request done, %f ms elapsed",
+ conn_ctx->id, conn_ctx->stream_id,
+ (conn_ctx->done_at - conn_ctx->started_at) / 1000.0);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_mplx(%ld): task(%s) done", m->id, task->id);
- s_out_close(m, task);
-
- task->worker_done = 1;
- task->done_at = apr_time_now();
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- "h2_mplx(%s): request done, %f ms elapsed", task->id,
- (task->done_at - task->started_at) / 1000.0);
-
- if (task->c && !task->c->aborted && task->started_at > m->last_mood_change) {
- s_mplx_be_happy(m, task);
+ if (!conn_ctx->has_final_response) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, conn_ctx->last_err, c2,
+ "h2_c2(%s-%d): processing finished without final response",
+ conn_ctx->id, conn_ctx->stream_id);
+ c2->aborted = 1;
+ }
+ else if (!c2->aborted && conn_ctx->started_at > m->last_mood_change) {
+ s_mplx_be_happy(m, c2, conn_ctx);
}
- ap_assert(task->done_done == 0);
-
- stream = h2_ihash_get(m->streams, task->stream_id);
+ stream = h2_ihash_get(m->streams, conn_ctx->stream_id);
if (stream) {
- /* stream not done yet. */
- if (!m->aborted && task->redo) {
- /* reset and schedule again */
- h2_task_redo(task);
- h2_iq_add(m->q, stream->id, NULL, NULL);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c,
- H2_STRM_MSG(stream, "redo, added to q"));
- }
- else {
- /* stream not cleaned up, stay around */
- task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- H2_STRM_MSG(stream, "task_done, stream open"));
- if (stream->input) {
- h2_beam_leave(stream->input);
- }
-
- /* more data will not arrive, resume the stream */
- mst_check_data_for(m, stream->id, 1);
- }
+ /* stream not done yet. trigger a potential poll event on the output
+ * since nothing more will happen here. */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ H2_STRM_MSG(stream, "c2_done, stream open"));
+ c2_beam_output_write_notify(c2, NULL);
}
- else if ((stream = h2_ihash_get(m->shold, task->stream_id)) != NULL) {
+ else if ((stream = h2_ihash_get(m->shold, conn_ctx->stream_id)) != NULL) {
/* stream is done, was just waiting for this. */
- task->done_done = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- H2_STRM_MSG(stream, "task_done, in hold"));
- if (stream->input) {
- h2_beam_leave(stream->input);
- }
- ms_stream_joined(m, stream);
- }
- else if ((stream = h2_ihash_get(m->spurge, task->stream_id)) != NULL) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c,
- H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
- ap_assert("stream should not be in spurge" == NULL);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2,
+ H2_STRM_MSG(stream, "c2_done, in hold"));
+ c1c2_stream_joined(m, stream);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, task->c, APLOGNO(03518)
- "h2_mplx(%s): task_done, stream not found",
- task->id);
+ int i;
+
+ for (i = 0; i < m->spurge->nelts; ++i) {
+ if (stream == APR_ARRAY_IDX(m->spurge, i, h2_stream*)) {
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2,
+ H2_STRM_LOG(APLOGNO(03517), stream, "already in spurge"));
+ ap_assert("stream should not be in spurge" == NULL);
+ return;
+ }
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c2, APLOGNO(03518)
+ "h2_mplx(%s-%d): c2_done, stream not found",
+ conn_ctx->id, conn_ctx->stream_id);
ap_assert("stream should still be available" == NULL);
}
}
-void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
+void h2_mplx_worker_c2_done(conn_rec *c2, conn_rec **out_c2)
{
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c2);
+ h2_mplx *m;
+
+ if (!conn_ctx || !conn_ctx->mplx) return;
+ m = conn_ctx->mplx;
+
H2_MPLX_ENTER_ALWAYS(m);
- --m->tasks_active;
- s_task_done(m, task);
+ --m->processing_count;
+ s_c2_done(m, c2, conn_ctx);
if (m->join_wait) {
apr_thread_cond_signal(m->join_wait);
}
- if (ptask) {
- /* caller wants another task */
- *ptask = s_next_stream_task(m);
+ if (out_c2) {
+ /* caller wants another connection to process */
+ *out_c2 = s_next_c2(m);
}
ms_register_if_needed(m, 0);
@@ -861,117 +961,21 @@ void h2_mplx_s_task_done(h2_mplx *m, h2_task *task, h2_task **ptask)
* h2_mplx DoS protection
******************************************************************************/
-static int m_timed_out_busy_iter(void *data, void *val)
-{
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
- if (h2_task_has_started(stream->task) && !stream->task->worker_done
- && (ctx->now - stream->task->started_at) > stream->task->timeout) {
- /* timed out stream occupying a worker, found */
- ctx->stream = stream;
- return 0;
- }
- return 1;
-}
-
-static h2_stream *m_get_timed_out_busy_stream(h2_mplx *m)
-{
- stream_iter_ctx ctx;
- ctx.m = m;
- ctx.stream = NULL;
- ctx.now = apr_time_now();
- h2_ihash_iter(m->streams, m_timed_out_busy_iter, &ctx);
- return ctx.stream;
-}
-
-static int m_latest_repeatable_unsubmitted_iter(void *data, void *val)
-{
- stream_iter_ctx *ctx = data;
- h2_stream *stream = val;
-
- if (!stream->task) goto leave;
- if (!h2_task_has_started(stream->task) || stream->task->worker_done) goto leave;
- if (h2_stream_is_ready(stream)) goto leave;
- if (stream->task->redo) {
- ++ctx->count;
- goto leave;
- }
- if (h2_task_can_redo(stream->task)) {
- /* this task occupies a worker, the response has not been submitted
- * yet, not been cancelled and it is a repeatable request
- * -> we could redo it later */
- if (!ctx->stream
- || (ctx->stream->task->started_at < stream->task->started_at)) {
- /* we did not have one or this one was started later */
- ctx->stream = stream;
- }
- }
-leave:
- return 1;
-}
-
-static apr_status_t m_assess_task_to_throttle(h2_task **ptask, h2_mplx *m)
-{
- stream_iter_ctx ctx;
-
- /* count the running tasks already marked for redo and get one that could
- * be throttled */
- *ptask = NULL;
- ctx.m = m;
- ctx.stream = NULL;
- ctx.count = 0;
- h2_ihash_iter(m->streams, m_latest_repeatable_unsubmitted_iter, &ctx);
- if (m->tasks_active - ctx.count > m->limit_active) {
- /* we are above the limit of running tasks, accounting for the ones
- * already throttled. */
- if (ctx.stream && ctx.stream->task) {
- *ptask = ctx.stream->task;
- return APR_EAGAIN;
- }
- /* above limit, be seeing no candidate for easy throttling */
- if (m_get_timed_out_busy_stream(m)) {
- /* Too many busy workers, unable to cancel enough streams
- * and with a busy, timed out stream, we tell the client
- * to go away... */
- return APR_TIMEUP;
- }
- }
- return APR_SUCCESS;
-}
-
-static apr_status_t m_unschedule_slow_tasks(h2_mplx *m)
-{
- h2_task *task;
- apr_status_t rv;
-
- /* Try to get rid of streams that occupy workers. Look for safe requests
- * that are repeatable. If none found, fail the connection.
- */
- while (APR_EAGAIN == (rv = m_assess_task_to_throttle(&task, m))) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%s): unschedule, resetting task for redo later",
- task->id);
- task->redo = 1;
- h2_task_rst(task, H2_ERR_CANCEL);
- }
- return rv;
-}
-
-static apr_status_t s_mplx_be_happy(h2_mplx *m, h2_task *task)
+static apr_status_t s_mplx_be_happy(h2_mplx *m, conn_rec *c, h2_conn_ctx_t *conn_ctx)
{
apr_time_t now;
--m->irritations_since;
now = apr_time_now();
- if (m->limit_active < m->max_active
+ if (m->processing_limit < m->processing_max
&& (now - m->last_mood_change >= m->mood_update_interval
- || m->irritations_since < -m->limit_active)) {
- m->limit_active = H2MIN(m->limit_active * 2, m->max_active);
+ || m->irritations_since < -m->processing_limit)) {
+ m->processing_limit = H2MIN(m->processing_limit * 2, m->processing_max);
m->last_mood_change = now;
m->irritations_since = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_mplx(%ld): mood update, increasing worker limit to %d",
- m->id, m->limit_active);
+ m->id, m->processing_limit);
}
return APR_SUCCESS;
}
@@ -983,159 +987,35 @@ static apr_status_t m_be_annoyed(h2_mplx *m)
++m->irritations_since;
now = apr_time_now();
- if (m->limit_active > 2 &&
+ if (m->processing_limit > 2 &&
((now - m->last_mood_change >= m->mood_update_interval)
- || (m->irritations_since >= m->limit_active))) {
+ || (m->irritations_since >= m->processing_limit))) {
- if (m->limit_active > 16) {
- m->limit_active = 16;
+ if (m->processing_limit > 16) {
+ m->processing_limit = 16;
}
- else if (m->limit_active > 8) {
- m->limit_active = 8;
+ else if (m->processing_limit > 8) {
+ m->processing_limit = 8;
}
- else if (m->limit_active > 4) {
- m->limit_active = 4;
+ else if (m->processing_limit > 4) {
+ m->processing_limit = 4;
}
- else if (m->limit_active > 2) {
- m->limit_active = 2;
+ else if (m->processing_limit > 2) {
+ m->processing_limit = 2;
}
m->last_mood_change = now;
m->irritations_since = 0;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c1,
"h2_mplx(%ld): mood update, decreasing worker limit to %d",
- m->id, m->limit_active);
- }
-
- if (m->tasks_active > m->limit_active) {
- status = m_unschedule_slow_tasks(m);
+ m->id, m->processing_limit);
}
return status;
}
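Taken together, s_mplx_be_happy and m_be_annoyed implement a small adaptive concurrency control for c2 processing: sustained good behaviour doubles processing_limit up to processing_max, while irritations step it back down through 16/8/4/2. Reduced to its core (a stand-in struct, not the real h2_mplx; H2MIN mirrors the helper from h2.h):

    #include <apr_time.h>

    #define H2MIN(a, b) ((a) < (b) ? (a) : (b))

    typedef struct {
        int processing_limit, processing_max;
        int irritations_since;
        apr_time_t last_mood_change;
        apr_interval_time_t mood_update_interval;
    } mood_t;

    static void be_happy(mood_t *m)
    {
        apr_time_t now = apr_time_now();

        --m->irritations_since;
        if (m->processing_limit < m->processing_max
            && (now - m->last_mood_change >= m->mood_update_interval
                || m->irritations_since < -m->processing_limit)) {
            m->processing_limit = H2MIN(m->processing_limit * 2, m->processing_max);
            m->last_mood_change = now;
            m->irritations_since = 0;
        }
    }

    static void be_annoyed(mood_t *m)
    {
        apr_time_t now = apr_time_now();

        ++m->irritations_since;
        if (m->processing_limit > 2
            && (now - m->last_mood_change >= m->mood_update_interval
                || m->irritations_since >= m->processing_limit)) {
            if      (m->processing_limit > 16) m->processing_limit = 16;
            else if (m->processing_limit > 8)  m->processing_limit = 8;
            else if (m->processing_limit > 4)  m->processing_limit = 4;
            else                               m->processing_limit = 2;
            m->last_mood_change = now;
            m->irritations_since = 0;
        }
    }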
-apr_status_t h2_mplx_m_idle(h2_mplx *m)
-{
- apr_status_t status = APR_SUCCESS;
- apr_size_t scount;
-
- H2_MPLX_ENTER(m);
-
- scount = h2_ihash_count(m->streams);
- if (scount > 0) {
- if (m->tasks_active) {
- /* If we have streams in connection state 'IDLE', meaning
- * all streams are ready to sent data out, but lack
- * WINDOW_UPDATEs.
- *
- * This is ok, unless we have streams that still occupy
- * h2 workers. As worker threads are a scarce resource,
- * we need to take measures that we do not get DoSed.
- *
- * This is what we call an 'idle block'. Limit the amount
- * of busy workers we allow for this connection until it
- * well behaves.
- */
- status = m_be_annoyed(m);
- }
- else if (!h2_iq_empty(m->q)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): idle, but %d streams to process",
- m->id, (int)h2_iq_count(m->q));
- status = APR_EAGAIN;
- }
- else {
- /* idle, have streams, but no tasks active. what are we waiting for?
- * WINDOW_UPDATEs from client? */
- h2_stream *stream = NULL;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, m->c,
- "h2_mplx(%ld): idle, no tasks ongoing, %d streams",
- m->id, (int)h2_ihash_count(m->streams));
- h2_ihash_shift(m->streams, (void**)&stream, 1);
- if (stream) {
- h2_ihash_add(m->streams, stream);
- if (stream->output && !stream->out_checked) {
- /* FIXME: this looks like a race between the session thinking
- * it is idle and the EOF on a stream not being sent.
- * Signal to caller to leave IDLE state.
- */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- H2_STRM_MSG(stream, "output closed=%d, mplx idle"
- ", out has %ld bytes buffered"),
- h2_beam_is_closed(stream->output),
- (long)h2_beam_get_buffered(stream->output));
- h2_ihash_add(m->streams, stream);
- mst_check_data_for(m, stream->id, 1);
- stream->out_checked = 1;
- status = APR_EAGAIN;
- }
- }
- }
- }
- ms_register_if_needed(m, 1);
-
- H2_MPLX_LEAVE(m);
- return status;
-}
-
/*******************************************************************************
* mplx master events dispatching
******************************************************************************/
-int h2_mplx_m_has_master_events(h2_mplx *m)
-{
- return apr_atomic_read32(&m->event_pending) > 0;
-}
-
-apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
- void *on_ctx)
-{
- h2_stream *stream;
- int n, id;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c,
- "h2_mplx(%ld): dispatch events", m->id);
- apr_atomic_set32(&m->event_pending, 0);
-
- /* update input windows for streams */
- h2_ihash_iter(m->streams, m_report_consumption_iter, m);
- m_purge_streams(m, 1);
-
- n = h2_ififo_count(m->readyq);
- while (n > 0
- && (h2_ififo_try_pull(m->readyq, &id) == APR_SUCCESS)) {
- --n;
- stream = h2_ihash_get(m->streams, id);
- if (stream) {
- on_resume(on_ctx, stream);
- }
- }
-
- return APR_SUCCESS;
-}
-
-apr_status_t h2_mplx_m_keep_active(h2_mplx *m, h2_stream *stream)
-{
- mst_check_data_for(m, stream->id, 0);
- return APR_SUCCESS;
-}
-
-int h2_mplx_m_awaits_data(h2_mplx *m)
-{
- int waiting = 1;
-
- H2_MPLX_ENTER_ALWAYS(m);
-
- if (h2_ihash_empty(m->streams)) {
- waiting = 0;
- }
- else if (!m->tasks_active && !h2_ififo_count(m->readyq) && h2_iq_empty(m->q)) {
- waiting = 0;
- }
-
- H2_MPLX_LEAVE(m);
- return waiting;
-}
-
static int reset_is_acceptable(h2_stream *stream)
{
/* client may terminate a stream via H2 RST_STREAM message at any time.
@@ -1151,14 +1031,14 @@ static int reset_is_acceptable(h2_stream *stream)
* The responses to such requests continue forever otherwise.
*
*/
- if (!stream->task) return 1; /* have not started or already ended for us. acceptable. */
+ if (!stream_is_running(stream)) return 1;
if (!(stream->id & 0x01)) return 1; /* stream initiated by us. acceptable. */
- if (!stream->has_response) return 0; /* no response headers produced yet. bad. */
+ if (!stream->response) return 0; /* no response headers produced yet. bad. */
if (!stream->out_data_frames) return 0; /* no response body data sent yet. bad. */
return 1; /* otherwise, be forgiving */
}
-apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id)
{
h2_stream *stream;
apr_status_t status = APR_SUCCESS;
@@ -1171,3 +1051,240 @@ apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id)
H2_MPLX_LEAVE(m);
return status;
}
+
+static apr_status_t mplx_pollset_create(h2_mplx *m)
+{
+ int max_pdfs;
+
+ /* stream0 output, pfd_out_prod + pfd_in_drain per active stream */
+ max_pdfs = 1 + 2 * H2MIN(m->processing_max, m->max_streams);
+ return apr_pollset_create(&m->pollset, max_pdfs, m->pool,
+ APR_POLLSET_WAKEABLE);
+}
+
+static apr_status_t mplx_pollset_add(h2_mplx *m, h2_conn_ctx_t *conn_ctx)
+{
+ apr_status_t rv = APR_SUCCESS;
+ const char *name = "";
+
+ if (conn_ctx->pfd_out_prod.reqevents) {
+ name = "adding out";
+ rv = apr_pollset_add(m->pollset, &conn_ctx->pfd_out_prod);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+
+ if (conn_ctx->pfd_in_drain.reqevents) {
+ name = "adding in_read";
+ rv = apr_pollset_add(m->pollset, &conn_ctx->pfd_in_drain);
+ }
+
+cleanup:
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1,
+ "h2_mplx(%ld-%d): error while adding to pollset %s",
+ m->id, conn_ctx->stream_id, name);
+ }
+ return rv;
+}
+
+static apr_status_t mplx_pollset_remove(h2_mplx *m, h2_conn_ctx_t *conn_ctx)
+{
+ apr_status_t rv = APR_SUCCESS;
+ const char *name = "";
+
+ if (conn_ctx->pfd_out_prod.reqevents) {
+ rv = apr_pollset_remove(m->pollset, &conn_ctx->pfd_out_prod);
+ conn_ctx->pfd_out_prod.reqevents = 0;
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+
+ if (conn_ctx->pfd_in_drain.reqevents) {
+ name = "in_read";
+ rv = apr_pollset_remove(m->pollset, &conn_ctx->pfd_in_drain);
+ conn_ctx->pfd_in_drain.reqevents = 0;
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+
+cleanup:
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, m->c1,
+ "h2_mplx(%ld-%d): error removing from pollset %s",
+ m->id, conn_ctx->stream_id, name);
+ }
+ return rv;
+}
+
+static apr_status_t mplx_pollset_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx)
+{
+ apr_status_t rv;
+ const apr_pollfd_t *results, *pfd;
+ apr_int32_t nresults, i;
+ h2_conn_ctx_t *conn_ctx;
+ h2_stream *stream;
+
+ /* Make sure we are not called recursively. */
+ ap_assert(!m->polling);
+ m->polling = 1;
+ do {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ "h2_mplx(%ld): enter polling timeout=%d",
+ m->id, (int)apr_time_sec(timeout));
+
+ apr_array_clear(m->streams_ev_in);
+ apr_array_clear(m->streams_ev_out);
+
+ do {
+ /* add streams we started processing in the meantime */
+ if (m->streams_to_poll->nelts) {
+ for (i = 0; i < m->streams_to_poll->nelts; ++i) {
+ stream = APR_ARRAY_IDX(m->streams_to_poll, i, h2_stream*);
+ if (stream && stream->c2 && (conn_ctx = h2_conn_ctx_get(stream->c2))) {
+ mplx_pollset_add(m, conn_ctx);
+ }
+ }
+ apr_array_clear(m->streams_to_poll);
+ }
+
+#if !H2_POLL_STREAMS
+ apr_thread_mutex_lock(m->poll_lock);
+ if (!h2_iq_empty(m->streams_input_read)
+ || !h2_iq_empty(m->streams_output_written)) {
+ while ((i = h2_iq_shift(m->streams_input_read))) {
+ stream = h2_ihash_get(m->streams, i);
+ if (stream) {
+ APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = stream;
+ }
+ }
+ while ((i = h2_iq_shift(m->streams_output_written))) {
+ stream = h2_ihash_get(m->streams, i);
+ if (stream) {
+ APR_ARRAY_PUSH(m->streams_ev_out, h2_stream*) = stream;
+ }
+ }
+ nresults = 0;
+ rv = APR_SUCCESS;
+ apr_thread_mutex_unlock(m->poll_lock);
+ break;
+ }
+ apr_thread_mutex_unlock(m->poll_lock);
+#endif
+ H2_MPLX_LEAVE(m);
+ rv = apr_pollset_poll(m->pollset, timeout >= 0? timeout : -1, &nresults, &results);
+ H2_MPLX_ENTER_ALWAYS(m);
+
+ } while (APR_STATUS_IS_EINTR(rv));
+
+ if (APR_SUCCESS != rv) {
+ if (APR_STATUS_IS_TIMEUP(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ "h2_mplx(%ld): polling timed out ",
+ m->id);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, m->c1, APLOGNO(10310)
+ "h2_mplx(%ld): polling failed", m->id);
+ }
+ goto cleanup;
+ }
+
+ for (i = 0; i < nresults; i++) {
+ pfd = &results[i];
+ conn_ctx = pfd->client_data;
+
+ ap_assert(conn_ctx);
+ if (conn_ctx->stream_id == 0) {
+ if (on_stream_input) {
+ APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = m->stream0;
+ }
+ continue;
+ }
+
+ h2_util_drain_pipe(pfd->desc.f);
+ stream = h2_ihash_get(m->streams, conn_ctx->stream_id);
+ if (!stream) {
+ stream = h2_ihash_get(m->shold, conn_ctx->stream_id);
+ if (stream) {
+ /* This is normal and means that stream processing on c1 has
+ * already reached CLEANUP while c2 is not done yet */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, m->c1,
+ "h2_mplx(%ld-%d): stream already in hold for poll event %hx",
+ m->id, conn_ctx->stream_id, pfd->rtnevents);
+ }
+ else {
+ h2_stream *sp = NULL;
+ int j;
+
+ for (j = 0; j < m->spurge->nelts; ++j) {
+ sp = APR_ARRAY_IDX(m->spurge, j, h2_stream*);
+ if (sp->id == conn_ctx->stream_id) {
+ stream = sp;
+ break;
+ }
+ }
+
+ if (stream) {
+ /* This is normal and means that stream processing on c1 has
+ * already reached CLEANUP while c2 is not done yet */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, m->c1, APLOGNO(10311)
+ "h2_mplx(%ld-%d): stream already in purge for poll event %hx",
+ m->id, conn_ctx->stream_id, pfd->rtnevents);
+ }
+ else {
+ /* This should not happen. When a stream has been purged,
+ * it MUST no longer appear in the pollset. Purging is done
+ * outside the poll result processing. */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, rv, m->c1, APLOGNO(10312)
+ "h2_mplx(%ld-%d): stream no longer known for poll event %hx"
+ ", m->streams=%d, conn_ctx=%lx, fd=%lx",
+ m->id, conn_ctx->stream_id, pfd->rtnevents,
+ (int)h2_ihash_count(m->streams),
+ (long)conn_ctx, (long)pfd->desc.f);
+ h2_ihash_iter(m->streams, m_report_stream_iter, m);
+ }
+ }
+ continue;
+ }
+
+ if (conn_ctx->pfd_out_prod.desc.f == pfd->desc.f) {
+ /* output is available */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ "[%s-%d] poll output event %hx",
+ conn_ctx->id, conn_ctx->stream_id,
+ pfd->rtnevents);
+ APR_ARRAY_PUSH(m->streams_ev_out, h2_stream*) = stream;
+ }
+ else if (conn_ctx->pfd_in_drain.desc.f == pfd->desc.f) {
+ /* input has been consumed */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, m->c1,
+ "[%s-%d] poll input event %hx",
+ conn_ctx->id, conn_ctx->stream_id,
+ pfd->rtnevents);
+ APR_ARRAY_PUSH(m->streams_ev_in, h2_stream*) = stream;
+ }
+ }
+
+ if (on_stream_input && m->streams_ev_in->nelts) {
+ H2_MPLX_LEAVE(m);
+ for (i = 0; i < m->streams_ev_in->nelts; ++i) {
+ on_stream_input(on_ctx, APR_ARRAY_IDX(m->streams_ev_in, i, h2_stream*));
+ }
+ H2_MPLX_ENTER_ALWAYS(m);
+ }
+ if (on_stream_output && m->streams_ev_out->nelts) {
+ H2_MPLX_LEAVE(m);
+ for (i = 0; i < m->streams_ev_out->nelts; ++i) {
+ on_stream_output(on_ctx, APR_ARRAY_IDX(m->streams_ev_out, i, h2_stream*));
+ }
+ H2_MPLX_ENTER_ALWAYS(m);
+ }
+ break;
+ } while(1);
+
+cleanup:
+ m->polling = 0;
+ return rv;
+}
+
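Condensed, the poll step in mplx_pollset_poll() is: drop the mplx lock, block in apr_pollset_poll() (retrying on EINTR), re-take the lock, and route every returned descriptor by its client_data. A standalone sketch of just that loop (dispatch() is hypothetical):

    #include <apr_poll.h>
    #include <apr_thread_mutex.h>

    void dispatch(void *client_data, apr_int16_t revents);  /* hypothetical */

    static apr_status_t run_poll_step(apr_thread_mutex_t *lock,
                                      apr_pollset_t *pollset,
                                      apr_interval_time_t timeout)
    {
        const apr_pollfd_t *results;
        apr_int32_t nresults, i;
        apr_status_t rv;

        do {
            apr_thread_mutex_unlock(lock);   /* never poll while holding the lock */
            rv = apr_pollset_poll(pollset, timeout, &nresults, &results);
            apr_thread_mutex_lock(lock);
        } while (APR_STATUS_IS_EINTR(rv));

        if (rv != APR_SUCCESS) {
            return rv;                       /* includes APR_TIMEUP */
        }
        for (i = 0; i < nresults; ++i) {
            dispatch(results[i].client_data, results[i].rtnevents);
        }
        return APR_SUCCESS;
    }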
diff --git a/modules/http2/h2_mplx.h b/modules/http2/h2_mplx.h
index c61629d570..cb0a85665e 100644
--- a/modules/http2/h2_mplx.h
+++ b/modules/http2/h2_mplx.h
@@ -18,23 +18,16 @@
#define __mod_h2__h2_mplx__
/**
- * The stream multiplexer. It pushes buckets from the connection
- * thread to the stream threads and vice versa. It's thread-safe
- * to use.
+ * The stream multiplexer. It handles communication between the
+ * primary HTTP/2 connection (c1) and the secondary connections (c2)
+ * that process the requests, a.k.a. the HTTP/2 streams.
*
- * There is one h2_mplx instance for each h2_session, which sits on top
- * of a particular httpd conn_rec. Input goes from the connection to
- * the stream tasks. Output goes from the stream tasks to the connection,
- * e.g. the client.
+ * There is one h2_mplx instance for each h2_session.
*
- * For each stream, there can be at most "H2StreamMaxMemSize" output bytes
- * queued in the multiplexer. If a task thread tries to write more
- * data, it is blocked until space becomes available.
- *
- * Naming Convention:
- * "h2_mplx_m_" are methods only to be called by the main connection
- * "h2_mplx_s_" are method only to be called by a secondary connection
- * "h2_mplx_t_" are method only to be called by a task handler (can be master or secondary)
+ * Naming Convention:
+ * "h2_mplx_c1_" are methods only to be called by the primary connection
+ * "h2_mplx_c2_" are methods only to be called by a secondary connection
+ * "h2_mplx_worker_" are methods only to be called by a h2 worker thread
*/
struct apr_pool_t;
@@ -43,7 +36,6 @@ struct apr_thread_cond_t;
struct h2_bucket_beam;
struct h2_config;
struct h2_ihash_t;
-struct h2_task;
struct h2_stream;
struct h2_request;
struct apr_thread_cond_t;
@@ -56,74 +48,71 @@ typedef struct h2_mplx h2_mplx;
struct h2_mplx {
long id;
- conn_rec *c;
+ conn_rec *c1; /* the main connection */
apr_pool_t *pool;
+ struct h2_stream *stream0; /* HTTP/2's stream 0 */
server_rec *s; /* server for master conn */
- unsigned int event_pending;
- unsigned int aborted;
- unsigned int is_registered; /* is registered at h2_workers */
+ int aborted;
+ int polling; /* is waiting/processing pollset events */
+ int is_registered; /* is registered at h2_workers */
- struct h2_ihash_t *streams; /* all streams currently processing */
- struct h2_ihash_t *shold; /* all streams done with task ongoing */
- struct h2_ihash_t *spurge; /* all streams done, ready for destroy */
+ struct h2_ihash_t *streams; /* all streams active */
+ struct h2_ihash_t *shold; /* all streams done on c1 with c2 processing still ongoing */
+ apr_array_header_t *spurge; /* all streams done, ready for destroy */
struct h2_iqueue *q; /* all stream ids that need to be started */
- struct h2_ififo *readyq; /* all stream ids ready for output */
-
- struct h2_ihash_t *redo_tasks; /* all tasks that need to be redone */
-
- int max_streams; /* max # of concurrent streams */
- int max_stream_started; /* highest stream id that started processing */
- int tasks_active; /* # of tasks being processed from this mplx */
- int limit_active; /* current limit on active tasks, dynamic */
- int max_active; /* max, hard limit # of active tasks in a process */
+
+ apr_size_t stream_max_mem; /* max memory to buffer for a stream */
+ int max_streams; /* max # of concurrent streams */
+ int max_stream_id_started; /* highest stream id that started processing */
+
+ int processing_count; /* # of c2 working for this mplx */
+ int processing_limit; /* current limit on processing c2s, dynamic */
+ int processing_max; /* max, hard limit of processing c2s */
- apr_time_t last_mood_change; /* last time, we worker limit changed */
+ apr_time_t last_mood_change; /* last time the processing limit changed */
apr_interval_time_t mood_update_interval; /* how frequent we update at most */
int irritations_since; /* irritations (>0) or happy events (<0) since last mood change */
apr_thread_mutex_t *lock;
- struct apr_thread_cond_t *added_output;
struct apr_thread_cond_t *join_wait;
- apr_size_t stream_max_mem;
-
- apr_pool_t *spare_io_pool;
- apr_array_header_t *spare_secondary; /* spare secondary connections */
-
- struct h2_workers *workers;
+ apr_pollset_t *pollset; /* pollset for c1/c2 IO events */
+ apr_array_header_t *streams_to_poll; /* streams to add to the pollset */
+ apr_array_header_t *streams_ev_in;
+ apr_array_header_t *streams_ev_out;
+
+#if !H2_POLL_STREAMS
+ apr_thread_mutex_t *poll_lock; /* not the painter */
+ struct h2_iqueue *streams_input_read; /* streams whose input has been read from */
+ struct h2_iqueue *streams_output_written; /* streams whose output has been written to */
+#endif
+ struct h2_workers *workers; /* the process-wide h2_workers instance */
};
-/*******************************************************************************
- * From the main connection processing: h2_mplx_m_*
- ******************************************************************************/
-
-apr_status_t h2_mplx_m_child_init(apr_pool_t *pool, server_rec *s);
+apr_status_t h2_mplx_c1_child_init(apr_pool_t *pool, server_rec *s);
/**
* Create the multiplexer for the given HTTP2 session.
* Implicitly has reference count 1.
*/
-h2_mplx *h2_mplx_m_create(conn_rec *c, server_rec *s, apr_pool_t *master,
- struct h2_workers *workers);
+h2_mplx *h2_mplx_c1_create(struct h2_stream *stream0, server_rec *s, apr_pool_t *master,
+ struct h2_workers *workers);
/**
- * Decreases the reference counter of this mplx and waits for it
- * to reached 0, destroy the mplx afterwards.
- * This is to be called from the thread that created the mplx in
- * the first place.
- * @param m the mplx to be released and destroyed
+ * Destroy the mplx, shutting down all ongoing processing.
+ * @param m the mplx to destroy
- * @param wait condition var to wait on for ref counter == 0
*/
-void h2_mplx_m_release_and_join(h2_mplx *m, struct apr_thread_cond_t *wait);
+void h2_mplx_c1_destroy(h2_mplx *m);
/**
* Shut down the multiplexer gracefully. Will no longer schedule new streams
* but let the ongoing ones finish normally.
* @return the highest stream id being/been processed
*/
-int h2_mplx_m_shutdown(h2_mplx *m);
+int h2_mplx_c1_shutdown(h2_mplx *m);
/**
* Notifies mplx that a stream has been completely handled on the main
@@ -131,29 +120,33 @@ int h2_mplx_m_shutdown(h2_mplx *m);
*
* @param m the mplx itself
* @param stream the stream ready for cleanup
+ * @param pstream_count on return, the number of streams still active
*/
-apr_status_t h2_mplx_m_stream_cleanup(h2_mplx *m, struct h2_stream *stream);
-
-/**
- * Waits on output data from any stream in this session to become available.
- * Returns APR_TIMEUP if no data arrived in the given time.
- */
-apr_status_t h2_mplx_m_out_trywait(h2_mplx *m, apr_interval_time_t timeout,
- struct apr_thread_cond_t *iowait);
+apr_status_t h2_mplx_c1_stream_cleanup(h2_mplx *m, struct h2_stream *stream,
+ int *pstream_count);
-apr_status_t h2_mplx_m_keep_active(h2_mplx *m, struct h2_stream *stream);
+int h2_mplx_c1_stream_is_running(h2_mplx *m, struct h2_stream *stream);
/**
* Process a stream request.
*
* @param m the multiplexer
- * @param stream the identifier of the stream
- * @param r the request to be processed
+ * @param read_to_process queue of stream ids ready to be processed
+ * @param get_stream callback to look up a stream by its id
* @param cmp the stream priority compare function
- * @param ctx context data for the compare function
+ * @param pstream_count on return the number of streams active in mplx
*/
-apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
- h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_c1_process(h2_mplx *m,
+ struct h2_iqueue *read_to_process,
+ h2_stream_get_fn *get_stream,
+ h2_stream_pri_cmp_fn *cmp,
+ struct h2_session *session,
+ int *pstream_count);
+
+apr_status_t h2_mplx_c1_fwd_input(h2_mplx *m, struct h2_iqueue *input_pending,
+ h2_stream_get_fn *get_stream,
+ struct h2_session *session);
+
/**
* Stream priorities have changed, reschedule pending requests.
@@ -162,62 +155,60 @@ apr_status_t h2_mplx_m_process(h2_mplx *m, struct h2_stream *stream,
* @param cmp the stream priority compare function
* @param ctx context data for the compare function
*/
-apr_status_t h2_mplx_m_reprioritize(h2_mplx *m, h2_stream_pri_cmp *cmp, void *ctx);
+apr_status_t h2_mplx_c1_reprioritize(h2_mplx *m, h2_stream_pri_cmp_fn *cmp,
+ struct h2_session *session);
typedef apr_status_t stream_ev_callback(void *ctx, struct h2_stream *stream);
/**
- * Check if the multiplexer has events for the master connection pending.
- * @return != 0 iff there are events pending
- */
-int h2_mplx_m_has_master_events(h2_mplx *m);
-
-/**
- * Dispatch events for the master connection, such as
- * @param m the multiplexer
- * @param on_resume new output data has arrived for a suspended stream
- * @param ctx user supplied argument to invocation.
+ * Poll the primary connection for input and the active streams for output.
+ * Invoke the callback for any stream where an event happened.
*/
-apr_status_t h2_mplx_m_dispatch_master_events(h2_mplx *m, stream_ev_callback *on_resume,
- void *ctx);
-
-int h2_mplx_m_awaits_data(h2_mplx *m);
-
-typedef int h2_mplx_stream_cb(struct h2_stream *s, void *ctx);
+apr_status_t h2_mplx_c1_poll(h2_mplx *m, apr_interval_time_t timeout,
+ stream_ev_callback *on_stream_input,
+ stream_ev_callback *on_stream_output,
+ void *on_ctx);
-apr_status_t h2_mplx_m_stream_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
+void h2_mplx_c2_input_read(h2_mplx *m, conn_rec *c2);
+void h2_mplx_c2_output_written(h2_mplx *m, conn_rec *c2);
-apr_status_t h2_mplx_m_client_rst(h2_mplx *m, int stream_id);
+typedef int h2_mplx_stream_cb(struct h2_stream *s, void *userdata);
/**
- * Master connection has entered idle mode.
- * @param m the mplx instance of the master connection
- * @return != SUCCESS iff connection should be terminated
+ * Iterate over all streams known to mplx from the primary connection.
+ * @param m the mplx
+ * @param cb the callback to invoke on each stream
+ * @param ctx userdata passed to the callback
*/
-apr_status_t h2_mplx_m_idle(h2_mplx *m);
+apr_status_t h2_mplx_c1_streams_do(h2_mplx *m, h2_mplx_stream_cb *cb, void *ctx);
-/*******************************************************************************
- * From a secondary connection processing: h2_mplx_s_*
- ******************************************************************************/
-apr_status_t h2_mplx_s_pop_task(h2_mplx *m, struct h2_task **ptask);
-void h2_mplx_s_task_done(h2_mplx *m, struct h2_task *task, struct h2_task **ptask);
-
-/*******************************************************************************
- * From a h2_task owner: h2_mplx_s_*
- * (a task is transfered from master to secondary connection and back in
- * its normal lifetime).
- ******************************************************************************/
+/**
+ * A stream has been reset (RST_STREAM) by the client. Abort
+ * any ongoing processing and remove it from the processing queue.
+ */
+apr_status_t h2_mplx_c1_client_rst(h2_mplx *m, int stream_id);
/**
- * Opens the output for the given stream with the specified response.
+ * Get read-only access to a stream for a secondary connection.
*/
-apr_status_t h2_mplx_t_out_open(h2_mplx *mplx, int stream_id,
- struct h2_bucket_beam *beam);
+const struct h2_stream *h2_mplx_c2_stream_get(h2_mplx *m, int stream_id);
/**
- * Get the stream that belongs to the given task.
+ * A h2 worker asks for a secondary connection to process.
+ * @param out_c2 non-NULL, a pointer to receive the next
+ * secondary connection to process.
*/
-struct h2_stream *h2_mplx_t_stream_get(h2_mplx *m, struct h2_task *task);
+apr_status_t h2_mplx_worker_pop_c2(h2_mplx *m, conn_rec **out_c2);
+/**
+ * A h2 worker reports a secondary connection as finished processing.
+ * If it is willing to do more work for this mplx (this c1 connection),
+ * it provides `out_c2`. Otherwise it passes NULL.
+ * @param c2 the secondary connection finished processing
+ * @param out_c2 NULL or a pointer to receive the next
+ * secondary connection to process.
+ */
+void h2_mplx_worker_c2_done(conn_rec *c2, conn_rec **out_c2);
#endif /* defined(__mod_h2__h2_mplx__) */
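The reworked h2_mplx.h groups the API by caller: h2_mplx_c1_* for the primary connection, h2_mplx_c2_* for secondary connections and h2_mplx_worker_* for h2 worker threads. As a rough illustration of the worker-side pair declared above, a hedged sketch of a pop/done loop; process_c2() is a hypothetical stand-in for whatever runs the secondary connection, and the real h2_workers code in this patch may look different:

    #include "h2_mplx.h"

    static void worker_run(h2_mplx *m)
    {
        conn_rec *c2 = NULL;

        /* fetch the first secondary connection to process, if any */
        if (h2_mplx_worker_pop_c2(m, &c2) != APR_SUCCESS || !c2) {
            return;
        }
        while (c2) {
            conn_rec *next = NULL;
            process_c2(c2);                    /* hypothetical: run the request */
            /* report completion; a non-NULL out_c2 signals willingness to
             * take the next secondary connection from this mplx, if one is due */
            h2_mplx_worker_c2_done(c2, &next);
            c2 = next;
        }
    }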
diff --git a/modules/http2/h2_h2.c b/modules/http2/h2_protocol.c
index 1f0a5df7d3..7bb3ea03ba 100644
--- a/modules/http2/h2_h2.c
+++ b/modules/http2/h2_protocol.c
@@ -34,23 +34,22 @@
#include "h2_bucket_beam.h"
#include "h2_stream.h"
-#include "h2_task.h"
+#include "h2_c2.h"
#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_conn.h"
-#include "h2_filter.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
#include "h2_request.h"
#include "h2_headers.h"
#include "h2_session.h"
#include "h2_util.h"
-#include "h2_h2.h"
+#include "h2_protocol.h"
#include "mod_http2.h"
-const char *h2_tls_protos[] = {
+const char *h2_protocol_ids_tls[] = {
"h2", NULL
};
-const char *h2_clear_protos[] = {
+const char *h2_protocol_ids_clear[] = {
"h2c", NULL
};
@@ -76,7 +75,7 @@ static const char *h2_err_descr[] = {
"http/1.1 required",
};
-const char *h2_h2_err_description(unsigned int h2_error)
+const char *h2_protocol_err_description(unsigned int h2_error)
{
if (h2_error < (sizeof(h2_err_descr)/sizeof(h2_err_descr[0]))) {
return h2_err_descr[h2_error];
@@ -421,19 +420,7 @@ static int cipher_is_blacklisted(const char *cipher, const char **psource)
return !!*psource;
}
-/*******************************************************************************
- * Hooks for processing incoming connections:
- * - process_conn take over connection in case of h2
- */
-static int h2_h2_process_conn(conn_rec* c);
-static int h2_h2_pre_close_conn(conn_rec* c);
-static int h2_h2_post_read_req(request_rec *r);
-static int h2_h2_late_fixups(request_rec *r);
-
-/*******************************************************************************
- * Once per lifetime init, retrieve optional functions
- */
-apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s)
+apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s)
{
(void)pool;
ap_log_error(APLOG_MARK, APLOG_TRACE1, 0, s, "h2_h2, child_init");
@@ -442,7 +429,7 @@ apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s)
return APR_SUCCESS;
}
-int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all)
+int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all)
{
int is_tls = ap_ssl_conn_is_ssl(c);
@@ -473,265 +460,26 @@ int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all)
return 0;
}
- /* Check TLS cipher blacklist
- */
- val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_CIPHER");
- if (val && *val) {
- const char *source;
- if (cipher_is_blacklisted(val, &source)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052)
- "h2_h2(%ld): tls cipher %s blacklisted by %s",
- (long)c->id, val, source);
- return 0;
- }
- }
- else if (require_all) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053)
- "h2_h2(%ld): tls cipher is indetermined", (long)c->id);
- return 0;
- }
- }
- return 1;
-}
-
-static int h2_allows_h2_direct(conn_rec *c)
-{
- int is_tls = ap_ssl_conn_is_ssl(c);
- const char *needed_protocol = is_tls? "h2" : "h2c";
- int h2_direct = h2_config_cgeti(c, H2_CONF_DIRECT);
-
- if (h2_direct < 0) {
- h2_direct = is_tls? 0 : 1;
- }
- return (h2_direct && ap_is_allowed_protocol(c, NULL, NULL, needed_protocol));
-}
-
-int h2_allows_h2_upgrade(request_rec *r)
-{
- int h2_upgrade = h2_config_rgeti(r, H2_CONF_UPGRADE);
- return h2_upgrade > 0 || (h2_upgrade < 0 && !ap_ssl_conn_is_ssl(r->connection));
-}
-
-/*******************************************************************************
- * Register various hooks
- */
-static const char* const mod_ssl[] = { "mod_ssl.c", NULL};
-static const char* const mod_reqtimeout[] = { "mod_ssl.c", "mod_reqtimeout.c", NULL};
-
-void h2_h2_register_hooks(void)
-{
- /* Our main processing needs to run quite late. Definitely after mod_ssl,
- * as we need its connection filters, but also before reqtimeout as its
- * method of timeouts is specific to HTTP/1.1 (as of now).
- * The core HTTP/1 processing run as REALLY_LAST, so we will have
- * a chance to take over before it.
- */
- ap_hook_process_connection(h2_h2_process_conn,
- mod_reqtimeout, NULL, APR_HOOK_LAST);
-
- /* One last chance to properly say goodbye if we have not done so
- * already. */
- ap_hook_pre_close_connection(h2_h2_pre_close_conn, NULL, mod_ssl, APR_HOOK_LAST);
-
- /* With "H2SerializeHeaders On", we install the filter in this hook
- * that parses the response. This needs to happen before any other post
- * read function terminates the request with an error. Otherwise we will
- * never see the response.
- */
- ap_hook_post_read_request(h2_h2_post_read_req, NULL, NULL, APR_HOOK_REALLY_FIRST);
- ap_hook_fixups(h2_h2_late_fixups, NULL, NULL, APR_HOOK_LAST);
-
- /* special bucket type transfer through a h2_bucket_beam */
- h2_register_bucket_beamer(h2_bucket_headers_beam);
- h2_register_bucket_beamer(h2_bucket_observer_beam);
-}
-
-int h2_h2_process_conn(conn_rec* c)
-{
- apr_status_t status;
- h2_ctx *ctx;
- server_rec *s;
-
- if (c->master) {
- return DECLINED;
- }
-
- ctx = h2_ctx_get(c, 0);
- s = ctx? ctx->server : c->base_server;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn");
- if (ctx && ctx->task) {
- /* our stream pseudo connection */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c, "h2_h2, task, declined");
- return DECLINED;
- }
-
- if (!ctx && c->keepalives == 0) {
- const char *proto = ap_get_protocol(c);
-
- if (APLOGctrace1(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, process_conn, "
- "new connection using protocol '%s', direct=%d, "
- "tls acceptable=%d", proto, h2_allows_h2_direct(c),
- h2_is_acceptable_connection(c, NULL, 1));
- }
-
- if (!strcmp(AP_PROTOCOL_HTTP1, proto)
- && h2_allows_h2_direct(c)
- && h2_is_acceptable_connection(c, NULL, 1)) {
- /* Fresh connection still is on http/1.1 and H2Direct is enabled.
- * Otherwise connection is in a fully acceptable state.
- * -> peek at the first 24 incoming bytes
- */
- apr_bucket_brigade *temp;
- char *peek = NULL;
- apr_size_t peeklen;
-
- temp = apr_brigade_create(c->pool, c->bucket_alloc);
- status = ap_get_brigade(c->input_filters, temp,
- AP_MODE_SPECULATIVE, APR_BLOCK_READ, 24);
-
- if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03054)
- "h2_h2, error reading 24 bytes speculative");
- apr_brigade_destroy(temp);
- return DECLINED;
- }
-
- apr_brigade_pflatten(temp, &peek, &peeklen, c->pool);
- if ((peeklen >= 24) && !memcmp(H2_MAGIC_TOKEN, peek, 24)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, direct mode detected");
- if (!ctx) {
- ctx = h2_ctx_get(c, 1);
- }
- h2_ctx_protocol_set(ctx, ap_ssl_conn_is_ssl(c)? "h2" : "h2c");
- }
- else if (APLOGctrace2(c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_h2, not detected in %d bytes(base64): %s",
- (int)peeklen, h2_util_base64url_encode(peek, peeklen, c->pool));
- }
-
- apr_brigade_destroy(temp);
- }
- }
-
- if (ctx) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "process_conn");
-
- if (!h2_ctx_get_session(c)) {
- status = h2_conn_setup(c, NULL, s);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c, "conn_setup");
- if (status != APR_SUCCESS) {
- h2_ctx_clear(c);
- return !OK;
- }
- }
- h2_conn_run(c);
- return OK;
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c, "h2_h2, declined");
- return DECLINED;
-}
-
-static int h2_h2_pre_close_conn(conn_rec *c)
-{
- h2_ctx *ctx;
-
- /* secondary connection? */
- if (c->master) {
- return DECLINED;
- }
-
- ctx = h2_ctx_get(c, 0);
- if (ctx) {
- /* If the session has been closed correctly already, we will not
- * find a h2_ctx here. The presence indicates that the session
- * is still ongoing. */
- return h2_conn_pre_close(ctx, c);
- }
- return DECLINED;
-}
-
-static void check_push(request_rec *r, const char *tag)
-{
- apr_array_header_t *push_list = h2_config_push_list(r);
-
- if (!r->expecting_100 && push_list && push_list->nelts > 0) {
- int i, old_status;
- const char *old_line;
-
- ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
- "%s, early announcing %d resources for push",
- tag, push_list->nelts);
- for (i = 0; i < push_list->nelts; ++i) {
- h2_push_res *push = &APR_ARRAY_IDX(push_list, i, h2_push_res);
- apr_table_add(r->headers_out, "Link",
- apr_psprintf(r->pool, "<%s>; rel=preload%s",
- push->uri_ref, push->critical? "; critical" : ""));
- }
- old_status = r->status;
- old_line = r->status_line;
- r->status = 103;
- r->status_line = "103 Early Hints";
- ap_send_interim_response(r, 1);
- r->status = old_status;
- r->status_line = old_line;
- }
-}
-
-static int h2_h2_post_read_req(request_rec *r)
-{
- /* secondary connection? */
- if (r->connection->master) {
- struct h2_task *task = h2_ctx_get_task(r->connection);
- /* This hook will get called twice on internal redirects. Take care
- * that we manipulate filters only once. */
- if (task && !task->filters_set) {
- ap_filter_t *f;
- ap_log_rerror(APLOG_MARK, APLOG_TRACE3, 0, r,
- "h2_task(%s): adding request filters", task->id);
-
- /* setup the correct filters to process the request for h2 */
- ap_add_input_filter("H2_REQUEST", task, r, r->connection);
-
- /* replace the core http filter that formats response headers
- * in HTTP/1 with our own that collects status and headers */
- ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
- ap_add_output_filter("H2_RESPONSE", task, r, r->connection);
-
- for (f = r->input_filters; f; f = f->next) {
- if (!strcmp("H2_SECONDARY_IN", f->frec->name)) {
- f->r = r;
- break;
+ if (!strcmp("TLSv1.2", val)) {
+ /* The TLS cipher blacklist predates TLSv1.3, so it only
+ * needs to be checked for TLSv1.2 connections. */
+ val = ap_ssl_var_lookup(pool, s, c, NULL, "SSL_CIPHER");
+ if (val && *val) {
+ const char *source;
+ if (cipher_is_blacklisted(val, &source)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03052)
+ "h2_h2(%ld): tls cipher %s blacklisted by %s",
+ (long)c->id, val, source);
+ return 0;
}
}
- ap_add_output_filter("H2_TRAILERS_OUT", task, r, r->connection);
- task->filters_set = 1;
- }
- }
- return DECLINED;
-}
-
-static int h2_h2_late_fixups(request_rec *r)
-{
- /* secondary connection? */
- if (r->connection->master) {
- struct h2_task *task = h2_ctx_get_task(r->connection);
- if (task) {
- /* check if we copy vs. setaside files in this location */
- task->output.copy_files = h2_config_rgeti(r, H2_CONF_COPY_FILES);
- task->output.buffered = h2_config_rgeti(r, H2_CONF_OUTPUT_BUFFER);
- if (task->output.copy_files) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, task->c,
- "h2_secondary_out(%s): copy_files on", task->id);
- h2_beam_on_file_beam(task->output.beam, h2_beam_no_files, NULL);
+ else if (require_all) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03053)
+ "h2_h2(%ld): tls cipher is indetermined", (long)c->id);
+ return 0;
}
- check_push(r, "late_fixup");
}
}
- return DECLINED;
+ return 1;
}
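With the rename, callers translate RFC 7540 error codes to text via h2_protocol_err_description(). A one-line usage sketch, matching the logging idiom this patch switches h2_session.c over to; c and error_code are assumed to be in scope:

    ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c,
                  "stream closing with err=%d %s",
                  (int)error_code, h2_protocol_err_description(error_code));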
diff --git a/modules/http2/h2_h2.h b/modules/http2/h2_protocol.h
index 8cfb9864fe..ed48e8960b 100644
--- a/modules/http2/h2_h2.h
+++ b/modules/http2/h2_protocol.h
@@ -14,54 +14,43 @@
* limitations under the License.
*/
-#ifndef __mod_h2__h2_h2__
-#define __mod_h2__h2_h2__
+#ifndef __mod_h2__h2_protocol__
+#define __mod_h2__h2_protocol__
/**
- * List of ALPN protocol identifiers that we support in cleartext
+ * List of protocol identifiers that we support in cleartext
* negotiations. NULL terminated.
*/
-extern const char *h2_clear_protos[];
+extern const char *h2_protocol_ids_clear[];
/**
- * List of ALPN protocol identifiers that we support in TLS encrypted
- * negotiations. NULL terminated.
+ * List of protocol identifiers that we support in TLS encrypted
+ * negotiations (ALPN). NULL terminated.
*/
-extern const char *h2_tls_protos[];
+extern const char *h2_protocol_ids_tls[];
/**
* Provide a user readable description of the HTTP/2 error code-
* @param h2_error http/2 error code, as in rfc 7540, ch. 7
* @return textual description of code or that it is unknown.
*/
-const char *h2_h2_err_description(unsigned int h2_error);
+const char *h2_protocol_err_description(unsigned int h2_error);
/*
* One time, post config initialization.
*/
-apr_status_t h2_h2_init(apr_pool_t *pool, server_rec *s);
-
-/* Register apache hooks for h2 protocol
- */
-void h2_h2_register_hooks(void);
+apr_status_t h2_protocol_init(apr_pool_t *pool, server_rec *s);
/**
- * Check if the given connection fulfills the requirements as configured.
+ * Check if the given primary connection fulfills the protocol
+ * requirements for HTTP/2.
* @param c the connection
* @param require_all != 0 iff any missing connection properties make
* the test fail. For example, a cipher might not have been selected while
* the handshake is still ongoing.
- * @return != 0 iff connection requirements are met
- */
-int h2_is_acceptable_connection(conn_rec *c, request_rec *r, int require_all);
-
-/**
- * Check if the "Upgrade" HTTP/1.1 mode of protocol switching is enabled
- * for the given request.
- * @param r the request to check
- * @return != 0 iff Upgrade switching is enabled
+ * @return != 0 iff protocol requirements are met
*/
-int h2_allows_h2_upgrade(request_rec *r);
+int h2_protocol_is_acceptable_c1(conn_rec *c, request_rec *r, int require_all);
-#endif /* defined(__mod_h2__h2_h2__) */
+#endif /* defined(__mod_h2__h2_protocol__) */
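h2_protocol_is_acceptable_c1() keeps the semantics of the old check, including the require_all flag documented above. A hedged sketch of a caller gating the switch to HTTP/2 on it; the surrounding hook logic lives elsewhere (h2_switch.c/h2_c1.c) and is not reproduced here:

    /* c is the primary conn_rec; require_all=1 means any missing
     * property (e.g. no cipher selected yet) fails the check. */
    if (h2_protocol_is_acceptable_c1(c, NULL, 1)) {
        /* connection meets the HTTP/2 requirements: offer/accept "h2"/"h2c" */
    }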
diff --git a/modules/http2/h2_proxy_session.c b/modules/http2/h2_proxy_session.c
index df4b887541..1cdce4f427 100644
--- a/modules/http2/h2_proxy_session.c
+++ b/modules/http2/h2_proxy_session.c
@@ -692,8 +692,7 @@ static ssize_t stream_request_data(nghttp2_session *ngh2, int32_t stream_id,
}
}
-#ifdef H2_NG2_INVALID_HEADER_CB
-static int on_invalid_header_cb(nghttp2_session *ngh2,
+static int on_invalid_header_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
const uint8_t *name, size_t namelen,
const uint8_t *value, size_t valuelen,
@@ -711,7 +710,6 @@ static int on_invalid_header_cb(nghttp2_session *ngh2,
frame->hd.stream_id,
NGHTTP2_PROTOCOL_ERROR);
}
-#endif
h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
proxy_server_conf *conf,
@@ -753,10 +751,8 @@ h2_proxy_session *h2_proxy_session_setup(const char *id, proxy_conn_rec *p_conn,
nghttp2_session_callbacks_set_on_header_callback(cbs, on_header);
nghttp2_session_callbacks_set_before_frame_send_callback(cbs, before_frame_send);
nghttp2_session_callbacks_set_send_callback(cbs, raw_send);
-#ifdef H2_NG2_INVALID_HEADER_CB
nghttp2_session_callbacks_set_on_invalid_header_callback(cbs, on_invalid_header_cb);
-#endif
-
+
nghttp2_option_new(&option);
nghttp2_option_set_peer_max_concurrent_streams(option, 100);
nghttp2_option_set_no_auto_window_update(option, 0);
@@ -829,7 +825,7 @@ static apr_status_t open_stream(h2_proxy_session *session, const char *url,
stream->input = apr_brigade_create(stream->pool, session->c->bucket_alloc);
stream->output = apr_brigade_create(stream->pool, session->c->bucket_alloc);
- stream->req = h2_proxy_req_create(1, stream->pool, 0);
+ stream->req = h2_proxy_req_create(1, stream->pool);
status = apr_uri_parse(stream->pool, url, &puri);
if (status != APR_SUCCESS)
@@ -1141,7 +1137,7 @@ static apr_status_t session_shutdown(h2_proxy_session *session, int reason,
if (!err && reason) {
err = nghttp2_strerror(reason);
}
- nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
+ nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE, 0,
reason, (uint8_t*)err, err? strlen(err):0);
status = nghttp2_session_send(session->ngh2);
dispatch_event(session, H2_PROXYS_EV_LOCAL_GOAWAY, reason, err);
@@ -1360,8 +1356,7 @@ static void ev_stream_done(h2_proxy_session *session, int stream_id,
else if (!stream->data_received) {
apr_bucket *b;
/* if the response had no body, this is the time to flush
- * an empty brigade which will also write the resonse
- * headers */
+ * an empty brigade which will also write the response headers */
h2_proxy_stream_end_headers_out(stream);
stream->data_received = 1;
b = apr_bucket_flush_create(stream->r->connection->bucket_alloc);
diff --git a/modules/http2/h2_proxy_util.c b/modules/http2/h2_proxy_util.c
index 1e6cb277b0..c1b78fc096 100644
--- a/modules/http2/h2_proxy_util.c
+++ b/modules/http2/h2_proxy_util.c
@@ -583,8 +583,7 @@ static apr_status_t h2_headers_add_h1(apr_table_t *headers, apr_pool_t *pool,
static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const char *method,
const char *scheme, const char *authority,
- const char *path, apr_table_t *header,
- int serialize)
+ const char *path, apr_table_t *header)
{
h2_proxy_request *req = apr_pcalloc(pool, sizeof(h2_proxy_request));
@@ -594,14 +593,13 @@ static h2_proxy_request *h2_proxy_req_createn(int id, apr_pool_t *pool, const ch
req->path = path;
req->headers = header? header : apr_table_make(pool, 10);
req->request_time = apr_time_now();
- req->serialize = serialize;
-
+
return req;
}
-h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize)
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool)
{
- return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL, serialize);
+ return h2_proxy_req_createn(id, pool, NULL, NULL, NULL, NULL, NULL);
}
typedef struct {
diff --git a/modules/http2/h2_proxy_util.h b/modules/http2/h2_proxy_util.h
index 6c9edf4eac..202363dede 100644
--- a/modules/http2/h2_proxy_util.h
+++ b/modules/http2/h2_proxy_util.h
@@ -185,11 +185,10 @@ struct h2_proxy_request {
apr_time_t request_time;
- unsigned int chunked : 1; /* iff request body needs to be forwarded as chunked */
- unsigned int serialize : 1; /* iff this request is written in HTTP/1.1 serialization */
+ int chunked; /* iff request body needs to be forwarded as chunked */
};
-h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool, int serialize);
+h2_proxy_request *h2_proxy_req_create(int id, apr_pool_t *pool);
apr_status_t h2_proxy_req_make(h2_proxy_request *req, apr_pool_t *pool,
const char *method, const char *scheme,
const char *authority, const char *path,
diff --git a/modules/http2/h2_push.c b/modules/http2/h2_push.c
index 0a90a5d86f..805a217d25 100644
--- a/modules/http2/h2_push.c
+++ b/modules/http2/h2_push.c
@@ -31,7 +31,7 @@
#include <http_log.h>
#include "h2_private.h"
-#include "h2_h2.h"
+#include "h2_protocol.h"
#include "h2_util.h"
#include "h2_push.h"
#include "h2_request.h"
@@ -348,9 +348,8 @@ static int add_push(link_ctx *ctx)
}
headers = apr_table_make(ctx->pool, 5);
apr_table_do(set_push_header, headers, ctx->req->headers, NULL);
- req = h2_req_create(0, ctx->pool, method, ctx->req->scheme,
- ctx->req->authority, path, headers,
- ctx->req->serialize);
+ req = h2_request_create(0, ctx->pool, method, ctx->req->scheme,
+ ctx->req->authority, path, headers);
/* atm, we do not push on pushes */
h2_request_end_headers(req, ctx->pool, 1, 0);
push->req = req;
@@ -657,13 +656,13 @@ apr_array_header_t *h2_push_diary_update(h2_session *session, apr_array_header_t
idx = h2_push_diary_find(session->push_diary, e.hash);
if (idx >= 0) {
/* Intentional no APLOGNO */
- ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1,
"push_diary_update: already there PUSH %s", push->req->path);
move_to_last(session->push_diary, (apr_size_t)idx);
}
else {
/* Intentional no APLOGNO */
- ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c,
+ ap_log_cerror(APLOG_MARK, GCSLOG_LEVEL, 0, session->c1,
"push_diary_update: adding PUSH %s", push->req->path);
if (!npushes) {
npushes = apr_array_make(pushes->pool, 5, sizeof(h2_push_diary_entry*));
diff --git a/modules/http2/h2_request.c b/modules/http2/h2_request.c
index 7c4fb95ea4..7c9f38a26a 100644
--- a/modules/http2/h2_request.c
+++ b/modules/http2/h2_request.c
@@ -38,6 +38,22 @@
#include "h2_util.h"
+h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header)
+{
+ h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
+
+ req->method = method;
+ req->scheme = scheme;
+ req->authority = authority;
+ req->path = path;
+ req->headers = header? header : apr_table_make(pool, 10);
+ req->request_time = apr_time_now();
+
+ return req;
+}
+
typedef struct {
apr_table_t *headers;
apr_pool_t *pool;
@@ -85,9 +101,6 @@ apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
req->path = path;
req->headers = apr_table_make(pool, 10);
req->http_status = H2_HTTP_STATUS_UNSET;
- if (r->server) {
- req->serialize = h2_config_rgeti(r, H2_CONF_SER_HEADERS);
- }
x.pool = pool;
x.headers = req->headers;
@@ -195,7 +208,7 @@ apr_status_t h2_request_end_headers(h2_request *req, apr_pool_t *pool, int eos,
}
}
req->raw_bytes += raw_bytes;
-
+
return APR_SUCCESS;
}
@@ -267,7 +280,7 @@ static request_rec *my_ap_create_request(conn_rec *c)
}
#endif
-request_rec *h2_request_create_rec(const h2_request *req, conn_rec *c)
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *c)
{
int access_status = HTTP_OK;
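h2_request_create(), added above, simply pools a new h2_request, copies the pseudo-header values and, when header is NULL, allocates an empty 10-entry table. A minimal usage sketch with illustrative values; pool is assumed to be an existing apr_pool_t*:

    /* build a request for stream id 1; NULL headers let
     * h2_request_create() make an empty apr_table_t internally */
    h2_request *req = h2_request_create(1, pool, "GET", "https",
                                        "www.example.org", "/index.html", NULL);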
diff --git a/modules/http2/h2_request.h b/modules/http2/h2_request.h
index b4a1a05a08..0fc207cba3 100644
--- a/modules/http2/h2_request.h
+++ b/modules/http2/h2_request.h
@@ -19,7 +19,11 @@
#include "h2.h"
-apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
+h2_request *h2_request_create(int id, apr_pool_t *pool, const char *method,
+ const char *scheme, const char *authority,
+ const char *path, apr_table_t *header);
+
+apr_status_t h2_request_rcreate(h2_request **preq, apr_pool_t *pool,
request_rec *r);
apr_status_t h2_request_add_header(h2_request *req, apr_pool_t *pool,
@@ -43,7 +47,7 @@ h2_request *h2_request_clone(apr_pool_t *p, const h2_request *src);
* @param conn the connection to process the request on
* @return the request_rec representing the request
*/
-request_rec *h2_request_create_rec(const h2_request *req, conn_rec *conn);
+request_rec *h2_create_request_rec(const h2_request *req, conn_rec *conn);
#endif /* defined(__mod_h2__h2_request__) */
diff --git a/modules/http2/h2_session.c b/modules/http2/h2_session.c
index dc883b5b96..bc4d34f2fc 100644
--- a/modules/http2/h2_session.c
+++ b/modules/http2/h2_session.c
@@ -35,24 +35,21 @@
#include "h2_bucket_beam.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_filter.h"
-#include "h2_h2.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_headers.h"
#include "h2_stream.h"
-#include "h2_task.h"
+#include "h2_c2.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_version.h"
#include "h2_workers.h"
-static apr_status_t dispatch_master(h2_session *session);
-static apr_status_t h2_session_read(h2_session *session, int block);
-static void transit(h2_session *session, const char *action,
+static void transit(h2_session *session, const char *action,
h2_session_state nstate);
static void on_stream_state_enter(void *ctx, h2_stream *stream);
@@ -78,13 +75,10 @@ static h2_stream *get_stream(h2_session *session, int stream_id)
return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
}
-static void dispatch_event(h2_session *session, h2_session_event_t ev,
- int err, const char *msg);
-
-void h2_session_event(h2_session *session, h2_session_event_t ev,
+void h2_session_event(h2_session *session, h2_session_event_t ev,
int err, const char *msg)
{
- dispatch_event(session, ev, err, msg);
+ h2_session_dispatch_event(session, ev, err, msg);
}
static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
@@ -106,7 +100,7 @@ static int rst_unprocessed_stream(h2_stream *stream, void *ctx)
static void cleanup_unprocessed_streams(h2_session *session)
{
- h2_mplx_m_stream_do(session->mplx, rst_unprocessed_stream, session);
+ h2_mplx_c1_streams_do(session->mplx, rst_unprocessed_stream, session);
}
static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
@@ -127,7 +121,7 @@ static h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
}
/**
- * Determine the importance of streams when scheduling tasks.
+ * Determine the priority order of streams.
* - if both stream depend on the same one, compare weights
* - if one stream is closer to the root, prioritize that one
* - if both are on the same level, use the weight of their root
@@ -187,20 +181,26 @@ static ssize_t send_cb(nghttp2_session *ngh2,
int flags, void *userp)
{
h2_session *session = (h2_session *)userp;
- apr_status_t status;
+ apr_status_t rv;
(void)ngh2;
(void)flags;
-
- status = h2_conn_io_write(&session->io, (const char *)data, length);
- if (status == APR_SUCCESS) {
+
+ if (h2_c1_io_needs_flush(&session->io)) {
+ return NGHTTP2_ERR_WOULDBLOCK;
+ }
+
+ rv = h2_c1_io_add_data(&session->io, (const char *)data, length);
+ if (APR_SUCCESS == rv) {
return length;
}
- if (APR_STATUS_IS_EAGAIN(status)) {
+ else if (APR_STATUS_IS_EAGAIN(rv)) {
return NGHTTP2_ERR_WOULDBLOCK;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062)
- "h2_session: send error");
- return h2_session_status_from_apr_status(status);
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, session->c1,
+ APLOGNO(03062) "h2_session: send error");
+ return h2_session_status_from_apr_status(rv);
+ }
}
static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
@@ -210,11 +210,11 @@ static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
h2_session *session = (h2_session *)userp;
(void)ngh2;
- if (APLOGcdebug(session->c)) {
+ if (APLOGcdebug(session->c1)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03063), session,
"recv invalid FRAME[%s], frames=%ld/%ld (r/s)"),
buffer, (long)session->frames_received,
@@ -234,11 +234,13 @@ static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
stream = get_stream(session, stream_id);
if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ "h2_stream(%ld-%d): write %ld bytes of DATA",
+ session->id, (int)stream_id, (long)len);
status = h2_stream_recv_DATA(stream, flags, data, len);
- dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream data rcvd");
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03064)
"h2_stream(%ld-%d): on_data_chunk for unknown stream",
session->id, (int)stream_id);
rv = NGHTTP2_ERR_CALLBACK_FAILURE;
@@ -261,10 +263,10 @@ static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
stream = get_stream(session, stream_id);
if (stream) {
if (error_code) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(03065), stream,
"closing with err=%d %s"),
- (int)error_code, h2_h2_err_description(error_code));
+ (int)error_code, h2_protocol_err_description(error_code));
h2_stream_rst(stream, error_code);
}
}
@@ -303,7 +305,7 @@ static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
(void)flags;
stream = get_stream(session, frame->hd.stream_id);
if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(02920)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(02920)
"h2_stream(%ld-%d): on_header unknown stream",
session->id, (int)frame->hd.stream_id);
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
@@ -332,15 +334,25 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
h2_stream *stream;
apr_status_t rv = APR_SUCCESS;
- if (APLOGcdebug(session->c)) {
+ stream = frame->hd.stream_id? get_stream(session, frame->hd.stream_id) : NULL;
+ if (APLOGcdebug(session->c1)) {
char buffer[256];
-
+
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- H2_SSSN_LOG(APLOGNO(03066), session,
- "recv FRAME[%s], frames=%ld/%ld (r/s)"),
- buffer, (long)session->frames_received,
- (long)session->frames_sent);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10302), stream,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03066), session,
+ "recv FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
}
++session->frames_received;
@@ -349,16 +361,14 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
/* This can be HEADERS for a new stream, defining the request,
* or HEADER may come after DATA at the end of a stream as in
* trailers */
- stream = get_stream(session, frame->hd.stream_id);
if (stream) {
rv = h2_stream_recv_frame(stream, NGHTTP2_HEADERS, frame->hd.flags,
frame->hd.length + H2_FRAME_HDR_LEN);
}
break;
case NGHTTP2_DATA:
- stream = get_stream(session, frame->hd.stream_id);
if (stream) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(02923), stream,
"DATA, len=%ld, flags=%d"),
(long)frame->hd.length, frame->hd.flags);
@@ -368,7 +378,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
break;
case NGHTTP2_PRIORITY:
session->reprioritize = 1;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
"h2_stream(%ld-%d): PRIORITY frame "
" weight=%d, dependsOn=%d, exclusive=%d",
session->id, (int)frame->hd.stream_id,
@@ -377,20 +387,16 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
frame->priority.pri_spec.exclusive);
break;
case NGHTTP2_WINDOW_UPDATE:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
"h2_stream(%ld-%d): WINDOW_UPDATE incr=%d",
session->id, (int)frame->hd.stream_id,
frame->window_update.window_size_increment);
- if (nghttp2_session_want_write(session->ngh2)) {
- dispatch_event(session, H2_SESSION_EV_FRAME_RCVD, 0, "window update");
- }
break;
case NGHTTP2_RST_STREAM:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03067)
"h2_stream(%ld-%d): RST_STREAM by client, error=%d",
session->id, (int)frame->hd.stream_id,
(int)frame->rst_stream.error_code);
- stream = get_stream(session, frame->hd.stream_id);
if (stream && stream->initiated_on) {
/* A stream reset on a request we sent it. Normal, when the
* client does not want it. */
@@ -399,7 +405,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
else {
/* A stream reset on a request it sent us. Could happen in a browser
* when the user navigates away or cancels loading - maybe. */
- h2_mplx_m_client_rst(session->mplx, frame->hd.stream_id);
+ h2_mplx_c1_client_rst(session->mplx, frame->hd.stream_id);
++session->streams_reset;
}
break;
@@ -411,23 +417,21 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
}
else {
session->remote.accepted_max = frame->goaway.last_stream_id;
- dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
+ h2_session_dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
frame->goaway.error_code, NULL);
}
break;
case NGHTTP2_SETTINGS:
- if (APLOGctrace2(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_SSSN_MSG(session, "SETTINGS, len=%ld"), (long)frame->hd.length);
break;
default:
- if (APLOGctrace2(session->c)) {
+ if (APLOGctrace2(session->c1)) {
char buffer[256];
h2_util_frame_print(frame, buffer,
sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
H2_SSSN_MSG(session, "on_frame_rcv %s"), buffer);
}
break;
@@ -443,7 +447,7 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
* become in serving this connection. This is expressed in increasing "idle_delays".
* Eventually, the connection will timeout and we'll close it. */
session->idle_frames = H2MIN(session->idle_frames + 1, session->frames_received);
- ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror( APLOG_MARK, APLOG_TRACE2, 0, session->c1,
H2_SSSN_MSG(session, "session has %ld idle frames"),
(long)session->idle_frames);
if (session->idle_frames > 10) {
@@ -468,16 +472,6 @@ static int on_frame_recv_cb(nghttp2_session *ng2s,
return 0;
}
-static int h2_session_continue_data(h2_session *session) {
- if (h2_mplx_m_has_master_events(session->mplx)) {
- return 0;
- }
- if (h2_conn_io_needs_flush(&session->io)) {
- return 0;
- }
- return 1;
-}
-
static char immortal_zeros[H2_MAX_PADLEN];
static int on_send_data_cb(nghttp2_session *ngh2,
@@ -498,47 +492,43 @@ static int on_send_data_cb(nghttp2_session *ngh2,
(void)ngh2;
(void)source;
- if (!h2_session_continue_data(session)) {
- return NGHTTP2_ERR_WOULDBLOCK;
- }
-
ap_assert(frame->data.padlen <= (H2_MAX_PADLEN+1));
padlen = (unsigned char)frame->data.padlen;
stream = get_stream(session, stream_id);
if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c1,
APLOGNO(02924)
"h2_stream(%ld-%d): send_data, stream not found",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
H2_STRM_MSG(stream, "send_data_cb for %ld bytes"),
(long)length);
- status = h2_conn_io_write(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
+ status = h2_c1_io_add_data(&session->io, (const char *)framehd, H2_FRAME_HDR_LEN);
if (padlen && status == APR_SUCCESS) {
--padlen;
- status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
+ status = h2_c1_io_add_data(&session->io, (const char *)&padlen, 1);
}
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
H2_STRM_MSG(stream, "writing frame header"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
H2_STRM_MSG(stream, "send_data_cb, reading stream"));
apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
else if (len != length) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c1,
H2_STRM_MSG(stream, "send_data_cb, wanted %ld bytes, "
"got %ld from stream"), (long)length, (long)len);
apr_brigade_cleanup(session->bbtmp);
@@ -547,11 +537,11 @@ static int on_send_data_cb(nghttp2_session *ngh2,
if (padlen) {
b = apr_bucket_immortal_create(immortal_zeros, padlen,
- session->c->bucket_alloc);
+ session->c1->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
}
- status = h2_conn_io_pass(&session->io, session->bbtmp);
+ status = h2_c1_io_append(&session->io, session->bbtmp);
apr_brigade_cleanup(session->bbtmp);
if (status == APR_SUCCESS) {
@@ -560,7 +550,7 @@ static int on_send_data_cb(nghttp2_session *ngh2,
return 0;
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
H2_STRM_LOG(APLOGNO(02925), stream, "failed send_data_cb"));
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
@@ -584,18 +574,27 @@ static int on_frame_send_cb(nghttp2_session *ngh2,
break;
}
- if (APLOGcdebug(session->c)) {
+ stream = get_stream(session, stream_id);
+ if (APLOGcdebug(session->c1)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- H2_SSSN_LOG(APLOGNO(03068), session,
- "sent FRAME[%s], frames=%ld/%ld (r/s)"),
- buffer, (long)session->frames_received,
- (long)session->frames_sent);
+ if (stream) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10303), stream,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
+ else {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(03068), session,
+ "sent FRAME[%s], frames=%ld/%ld (r/s)"),
+ buffer, (long)session->frames_received,
+ (long)session->frames_sent);
+ }
}
- stream = get_stream(session, stream_id);
if (stream) {
h2_stream_send_frame(stream, frame->hd.type, frame->hd.flags,
frame->hd.length + H2_FRAME_HDR_LEN);
@@ -603,8 +602,7 @@ static int on_frame_send_cb(nghttp2_session *ngh2,
return 0;
}
-#ifdef H2_NG2_INVALID_HEADER_CB
-static int on_invalid_header_cb(nghttp2_session *ngh2,
+static int on_invalid_header_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
const uint8_t *name, size_t namelen,
const uint8_t *value, size_t valuelen,
@@ -613,20 +611,17 @@ static int on_invalid_header_cb(nghttp2_session *ngh2,
h2_session *session = user_data;
h2_stream *stream;
- if (APLOGcdebug(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456)
- "h2_stream(%ld-%d): invalid header '%s: %s'",
- session->id, (int)frame->hd.stream_id,
- apr_pstrndup(session->pool, (const char *)name, namelen),
- apr_pstrndup(session->pool, (const char *)value, valuelen));
- }
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03456)
+ "h2_stream(%ld-%d): invalid header '%s: %s'",
+ session->id, (int)frame->hd.stream_id,
+ apr_pstrndup(session->pool, (const char *)name, namelen),
+ apr_pstrndup(session->pool, (const char *)value, valuelen));
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
}
return 0;
}
-#endif
static ssize_t select_padding_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
@@ -650,12 +645,10 @@ static ssize_t select_padding_cb(nghttp2_session *ngh2,
&& (frame_len <= session->io.write_size)) {
padded_len = session->io.write_size;
}
- if (APLOGctrace2(session->c)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
- (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
- (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
- }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ "select padding from [%d, %d]: %d (frame length: 0x%04x, write size: %d)",
+ (int)frame_len, (int)max_payloadlen+H2_FRAME_HDR_LEN,
+ (int)(padded_len - frame_len), (int)padded_len, (int)session->io.write_size);
return padded_len - H2_FRAME_HDR_LEN;
}
return frame->hd.length;
@@ -683,9 +676,7 @@ static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
-#ifdef H2_NG2_INVALID_HEADER_CB
NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
-#endif
NGH2_SET_CALLBACK(*pcb, select_padding, select_padding_cb);
return APR_SUCCESS;
}
@@ -703,9 +694,9 @@ static apr_status_t h2_session_shutdown_notice(h2_session *session)
session->local.accepting = 0;
status = nghttp2_session_send(session->ngh2);
if (status == APR_SUCCESS) {
- status = h2_conn_io_flush(&session->io);
+ status = h2_c1_io_assure_flushed(&session->io);
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03457), session, "sent shutdown notice"));
return status;
}
@@ -731,7 +722,7 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error,
* Remove all streams greater than this number without submitting
* a RST_STREAM frame, since that should be clear from the GOAWAY
* we send. */
- session->local.accepted_max = h2_mplx_m_shutdown(session->mplx);
+ session->local.accepted_max = h2_mplx_c1_shutdown(session->mplx);
session->local.error = error;
}
else {
@@ -742,25 +733,25 @@ static apr_status_t h2_session_shutdown(h2_session *session, int error,
session->local.accepting = 0;
session->local.shutdown = 1;
- if (!session->c->aborted) {
+ if (!session->c1->aborted) {
nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
session->local.accepted_max,
error, (uint8_t*)msg, msg? strlen(msg):0);
status = nghttp2_session_send(session->ngh2);
if (status == APR_SUCCESS) {
- status = h2_conn_io_flush(&session->io);
+ status = h2_c1_io_assure_flushed(&session->io);
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03069), session,
"sent GOAWAY, err=%d, msg=%s"), error, msg? msg : "");
}
- dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
+ h2_session_dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
return status;
}
static apr_status_t session_cleanup(h2_session *session, const char *trigger)
{
- conn_rec *c = session->c;
+ conn_rec *c = session->c1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
H2_SSSN_MSG(session, "pool_cleanup"));
@@ -781,24 +772,24 @@ static apr_status_t session_cleanup(h2_session *session, const char *trigger)
}
transit(session, trigger, H2_SESSION_ST_CLEANUP);
- h2_mplx_m_release_and_join(session->mplx, session->iowait);
+ h2_mplx_c1_destroy(session->mplx);
session->mplx = NULL;
ap_assert(session->ngh2);
nghttp2_session_del(session->ngh2);
session->ngh2 = NULL;
- h2_ctx_clear(c);
-
-
+ h2_conn_ctx_detach(c);
+
return APR_SUCCESS;
}
static apr_status_t session_pool_cleanup(void *data)
{
conn_rec *c = data;
- h2_session *session;
-
- if ((session = h2_ctx_get_session(c))) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+ h2_session *session = conn_ctx? conn_ctx->session : NULL;
+
+ if (session) {
int mpm_state = 0;
int level;
@@ -810,7 +801,7 @@ static apr_status_t session_pool_cleanup(void *data)
* However, when the server is stopping, it may shutdown connections
* without running the pre_close hooks. Do not want about that. */
ap_log_cerror(APLOG_MARK, level, 0, c,
- H2_SSSN_LOG(APLOGNO(10020), session,
+ H2_SSSN_LOG(APLOGNO(10020), session,
"session cleanup triggered by pool cleanup. "
"this should have happened earlier already."));
return session_cleanup(session, "pool cleanup");
@@ -823,34 +814,16 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
{
nghttp2_session_callbacks *callbacks = NULL;
nghttp2_option *options = NULL;
- apr_allocator_t *allocator;
- apr_thread_mutex_t *mutex;
uint32_t n;
apr_pool_t *pool = NULL;
h2_session *session;
+ h2_stream *stream0;
apr_status_t status;
int rv;
*psession = NULL;
- status = apr_allocator_create(&allocator);
- if (status != APR_SUCCESS) {
- return status;
- }
- apr_allocator_max_free_set(allocator, ap_max_mem_free);
- apr_pool_create_ex(&pool, c->pool, NULL, allocator);
- if (!pool) {
- apr_allocator_destroy(allocator);
- return APR_ENOMEM;
- }
+ apr_pool_create(&pool, c->pool);
apr_pool_tag(pool, "h2_session");
- apr_allocator_owner_set(allocator, pool);
- status = apr_thread_mutex_create(&mutex, APR_THREAD_MUTEX_DEFAULT, pool);
- if (status != APR_SUCCESS) {
- apr_pool_destroy(pool);
- return APR_ENOMEM;
- }
- apr_allocator_mutex_set(allocator, mutex);
-
session = apr_pcalloc(pool, sizeof(h2_session));
if (!session) {
return APR_ENOMEM;
@@ -858,7 +831,7 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
*psession = session;
session->id = c->id;
- session->c = c;
+ session->c1 = c;
session->r = r;
session->s = s;
session->pool = pool;
@@ -871,41 +844,25 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
session->max_stream_count = h2_config_sgeti(s, H2_CONF_MAX_STREAMS);
session->max_stream_mem = h2_config_sgeti(s, H2_CONF_STREAM_MAX_MEM);
- status = apr_thread_cond_create(&session->iowait, session->pool);
- if (status != APR_SUCCESS) {
- apr_pool_destroy(pool);
- return status;
- }
-
session->in_pending = h2_iq_create(session->pool, (int)session->max_stream_count);
- if (session->in_pending == NULL) {
- apr_pool_destroy(pool);
- return APR_ENOMEM;
- }
+ session->out_c1_blocked = h2_iq_create(session->pool, (int)session->max_stream_count);
+ session->ready_to_process = h2_iq_create(session->pool, (int)session->max_stream_count);
- session->in_process = h2_iq_create(session->pool, (int)session->max_stream_count);
- if (session->in_process == NULL) {
- apr_pool_destroy(pool);
- return APR_ENOMEM;
- }
-
session->monitor = apr_pcalloc(pool, sizeof(h2_stream_monitor));
- if (session->monitor == NULL) {
- apr_pool_destroy(pool);
- return APR_ENOMEM;
- }
session->monitor->ctx = session;
session->monitor->on_state_enter = on_stream_state_enter;
session->monitor->on_state_event = on_stream_state_event;
session->monitor->on_event = on_stream_event;
-
- session->mplx = h2_mplx_m_create(c, s, session->pool, workers);
-
- /* connection input filter that feeds the session */
- session->cin = h2_filter_cin_create(session);
- ap_add_input_filter("H2_IN", session->cin, r, c);
-
- h2_conn_io_init(&session->io, c, s);
+
+ stream0 = h2_stream_create(0, session->pool, session, NULL, 0);
+ stream0->c2 = session->c1; /* stream0's connection is the main connection */
+ session->mplx = h2_mplx_c1_create(stream0, s, session->pool, workers);
+ if (!session->mplx) {
+ apr_pool_destroy(pool);
+ return APR_ENOTIMPL;
+ }
+
+ h2_c1_io_init(&session->io, c, s);
session->padding_max = h2_config_sgeti(s, H2_CONF_PADDING_BITS);
if (session->padding_max) {
session->padding_max = (0x01 << session->padding_max) - 1;
@@ -933,7 +890,11 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
/* We need to handle window updates ourself, otherwise we
* get flooded by nghttp2. */
nghttp2_option_set_no_auto_window_update(options, 1);
-
+ /* We do not want nghttp2 to keep information about closed streams, as
+ * that accumulates memory on long connections. The tradeoff is that
+ * PRIORITY settings referring to those older streams no longer work. */
+ nghttp2_option_set_no_closed_streams(options, 1);
+
rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
session, options);
nghttp2_session_callbacks_del(callbacks);
@@ -958,8 +919,8 @@ apr_status_t h2_session_create(h2_session **psession, conn_rec *c, request_rec *
"push_diary(type=%d,N=%d)"),
(int)session->max_stream_count,
(int)session->max_stream_mem,
- session->mplx->limit_active,
- session->mplx->max_active,
+ session->mplx->processing_limit,
+ session->mplx->processing_max,
session->push_diary->dtype,
(int)session->push_diary->N);
}
@@ -1040,7 +1001,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
++slen;
}
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
H2_SSSN_LOG(APLOGNO(03201), session,
"start, INITIAL_WINDOW_SIZE=%ld, MAX_CONCURRENT_STREAMS=%d"),
(long)win_size, (int)session->max_stream_count);
@@ -1048,7 +1009,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
settings, slen);
if (*rv != 0) {
status = APR_EGENERAL;
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1,
H2_SSSN_LOG(APLOGNO(02935), session,
"nghttp2_submit_settings: %s"), nghttp2_strerror(*rv));
}
@@ -1066,7 +1027,7 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
0, NGHTTP2_MAX_WINDOW_SIZE - win_size);
if (*rv != 0) {
status = APR_EGENERAL;
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c1,
H2_SSSN_LOG(APLOGNO(02970), session,
"nghttp2_submit_window_update: %s"),
nghttp2_strerror(*rv));
@@ -1076,87 +1037,6 @@ static apr_status_t h2_session_start(h2_session *session, int *rv)
return status;
}
-static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
- h2_headers *headers, apr_off_t len,
- int eos);
-
-static ssize_t stream_data_cb(nghttp2_session *ng2s,
- int32_t stream_id,
- uint8_t *buf,
- size_t length,
- uint32_t *data_flags,
- nghttp2_data_source *source,
- void *puser)
-{
- h2_session *session = (h2_session *)puser;
- apr_off_t nread = length;
- int eos = 0;
- apr_status_t status;
- h2_stream *stream;
- ap_assert(session);
-
- /* The session wants to send more DATA for the stream. We need
- * to find out how much of the requested length we can send without
- * blocking.
- * Indicate EOS when we encounter it or DEFERRED if the stream
- * should be suspended. Beware of trailers.
- */
-
- (void)ng2s;
- (void)buf;
- (void)source;
- stream = get_stream(session, stream_id);
- if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
- APLOGNO(02937)
- "h2_stream(%ld-%d): data_cb, stream not found",
- session->id, (int)stream_id);
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
-
- status = h2_stream_out_prepare(stream, &nread, &eos, NULL);
- if (nread) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_STRM_MSG(stream, "prepared no_copy, len=%ld, eos=%d"),
- (long)nread, eos);
- *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
- }
-
- switch (status) {
- case APR_SUCCESS:
- break;
-
- case APR_EOF:
- eos = 1;
- break;
-
- case APR_ECONNRESET:
- case APR_ECONNABORTED:
- return NGHTTP2_ERR_CALLBACK_FAILURE;
-
- case APR_EAGAIN:
- /* If there is no data available, our session will automatically
- * suspend this stream and not ask for more data until we resume
- * it. Remember at our h2_stream that we need to do this.
- */
- nread = 0;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- H2_STRM_LOG(APLOGNO(03071), stream, "suspending"));
- return NGHTTP2_ERR_DEFERRED;
-
- default:
- nread = 0;
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- H2_STRM_LOG(APLOGNO(02938), stream, "reading data"));
- return NGHTTP2_ERR_CALLBACK_FAILURE;
- }
-
- if (eos) {
- *data_flags |= NGHTTP2_DATA_FLAG_EOF;
- }
- return (ssize_t)nread;
-}
-
struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
h2_push *push)
{
@@ -1171,20 +1051,20 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
ngh->nv, ngh->nvlen, NULL);
}
if (status != APR_SUCCESS || nid <= 0) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c1,
H2_STRM_LOG(APLOGNO(03075), is,
"submitting push promise fail: %s"), nghttp2_strerror(nid));
return NULL;
}
++session->pushes_promised;
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(03076), is, "SERVER_PUSH %d for %s %s on %d"),
nid, push->req->method, push->req->path, is->id);
stream = h2_session_open_stream(session, nid, is->id);
if (!stream) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(03077), is,
"failed to create stream obj %d"), nid);
/* kill the push_promise */
@@ -1195,7 +1075,6 @@ struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
h2_session_set_prio(session, stream, push->priority);
h2_stream_set_request(stream, push->req);
- ++session->unsent_promises;
return stream;
}
@@ -1210,7 +1089,6 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
const h2_priority *prio)
{
apr_status_t status = APR_SUCCESS;
-#ifdef H2_NG2_CHANGE_PRIO
nghttp2_stream *s_grandpa, *s_parent, *s;
if (prio == NULL) {
@@ -1219,7 +1097,7 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
}
s = nghttp2_session_find_stream(session->ngh2, stream->id);
if (!s) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
H2_STRM_MSG(stream, "lookup of nghttp2_stream failed"));
return APR_EINVAL;
}
@@ -1268,7 +1146,7 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
if (rv < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202)
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1, APLOGNO(03202)
"h2_stream(%ld-%d): PUSH BEFORE, weight=%d, "
"depends=%d, returned=%d",
session->id, id_parent, ps.weight, ps.stream_id, rv);
@@ -1290,18 +1168,13 @@ apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(03203), stream,
"PUSH %s, weight=%d, depends=%d, returned=%d"),
ptype, ps.weight, ps.stream_id, rv);
status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
}
-#else
- (void)session;
- (void)stream;
- (void)prio;
- (void)valid_weight;
-#endif
+
return status;
}
@@ -1314,338 +1187,95 @@ int h2_session_push_enabled(h2_session *session)
NGHTTP2_SETTINGS_ENABLE_PUSH));
}
-static apr_status_t h2_session_send(h2_session *session)
+static int h2_session_want_send(h2_session *session)
{
- apr_interval_time_t saved_timeout;
- int rv;
- apr_socket_t *socket;
-
- socket = ap_get_conn_socket(session->c);
- if (socket) {
- apr_socket_timeout_get(socket, &saved_timeout);
- apr_socket_timeout_set(socket, session->s->timeout);
- }
-
- rv = nghttp2_session_send(session->ngh2);
-
- if (socket) {
- apr_socket_timeout_set(socket, saved_timeout);
- }
- session->have_written = 1;
- if (rv != 0 && rv != NGHTTP2_ERR_WOULDBLOCK) {
- if (nghttp2_is_fatal(rv)) {
- dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
- return APR_EGENERAL;
- }
- }
-
- session->unsent_promises = 0;
- session->unsent_submits = 0;
-
- return APR_SUCCESS;
+ return nghttp2_session_want_write(session->ngh2)
+ || h2_c1_io_pending(&session->io);
}
-/**
- * headers for the stream are ready.
- */
-static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
- h2_headers *headers, apr_off_t len,
- int eos)
+static apr_status_t h2_session_send(h2_session *session)
{
- apr_status_t status = APR_SUCCESS;
- const char *s;
- int rv = 0;
+ int ngrv;
+ apr_status_t rv = APR_SUCCESS;
- ap_assert(session);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_STRM_MSG(stream, "on_headers"));
- if (headers->status < 100) {
- h2_stream_rst(stream, headers->status);
- goto leave;
- }
- else if (stream->has_response) {
- h2_ngheader *nh;
-
- status = h2_res_create_ngtrailer(&nh, stream->pool, headers);
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
- (int)nh->nvlen);
- if (status == APR_SUCCESS) {
- rv = nghttp2_submit_trailer(session->ngh2, stream->id,
- nh->nv, nh->nvlen);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
- h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
- }
- goto leave;
- }
- else {
- nghttp2_data_provider provider, *pprovider = NULL;
- h2_ngheader *ngh;
- const char *note;
-
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- H2_STRM_LOG(APLOGNO(03073), stream, "submit response %d, REMOTE_WINDOW_SIZE=%u"),
- headers->status,
- (unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
-
- if (!eos || len > 0) {
- memset(&provider, 0, sizeof(provider));
- provider.source.fd = stream->id;
- provider.read_callback = stream_data_cb;
- pprovider = &provider;
- }
-
- /* If this stream is not a pushed one itself,
- * and HTTP/2 server push is enabled here,
- * and the response HTTP status is not sth >= 400,
- * and the remote side has pushing enabled,
- * -> find and perform any pushes on this stream
- * *before* we submit the stream response itself.
- * This helps clients avoid opening new streams on Link
- * headers that get pushed right afterwards.
- *
- * *) the response code is relevant, as we do not want to
- * make pushes on 401 or 403 codes and friends.
- * And if we see a 304, we do not push either
- * as the client, having this resource in its cache, might
- * also have the pushed ones as well.
- */
- if (!stream->initiated_on
- && !stream->has_response
- && stream->request && stream->request->method
- && !strcmp("GET", stream->request->method)
- && (headers->status < 400)
- && (headers->status != 304)
- && h2_session_push_enabled(session)) {
- /* PUSH is possible and enabled on server, unless the request
- * denies it, submit resources to push */
- s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
- if (!s || strcmp(s, "0")) {
- h2_stream_submit_pushes(stream, headers);
- }
- }
-
- if (!stream->pref_priority) {
- stream->pref_priority = h2_stream_get_priority(stream, headers);
- }
- h2_session_set_prio(session, stream, stream->pref_priority);
-
- note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE);
- if (note && !strcmp("on", note)) {
- int32_t connFlowIn, connFlowOut;
-
- connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
- connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
- headers = h2_headers_copy(stream->pool, headers);
- apr_table_setn(headers->headers, "conn-flow-in",
- apr_itoa(stream->pool, connFlowIn));
- apr_table_setn(headers->headers, "conn-flow-out",
- apr_itoa(stream->pool, connFlowOut));
- }
-
- if (headers->status == 103
- && !h2_config_sgeti(session->s, H2_CONF_EARLY_HINTS)) {
- /* suppress sending this to the client, it might have triggered
- * pushes and served its purpose nevertheless */
- rv = 0;
- goto leave;
- }
-
- status = h2_res_create_ngheader(&ngh, stream->pool, headers);
- if (status == APR_SUCCESS) {
- rv = nghttp2_submit_response(session->ngh2, stream->id,
- ngh->nv, ngh->nvlen, pprovider);
- stream->has_response = h2_headers_are_response(headers);
- session->have_written = 1;
-
- if (stream->initiated_on) {
- ++session->pushes_submitted;
- }
- else {
- ++session->responses_submitted;
+ ap_update_child_status(session->c1->sbh, SERVER_BUSY_WRITE, NULL);
+ while (nghttp2_session_want_write(session->ngh2)) {
+ ngrv = nghttp2_session_send(session->ngh2);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ "nghttp2_session_send: %d", (int)ngrv);
+
+ if (ngrv != 0 && ngrv != NGHTTP2_ERR_WOULDBLOCK) {
+ if (nghttp2_is_fatal(ngrv)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR,
+ ngrv, nghttp2_strerror(ngrv));
+ rv = APR_EGENERAL;
+ goto cleanup;
}
}
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
- H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
- h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
- }
+ rv = h2_c1_io_pass(&session->io);
}
-
-leave:
- if (nghttp2_is_fatal(rv)) {
- status = APR_EGENERAL;
- dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
- ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
- APLOGNO(02940) "submit_response: %s",
- nghttp2_strerror(rv));
+cleanup:
+ if (rv != APR_SUCCESS) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
+ H2_ERR_INTERNAL_ERROR, "c1 out writing");
}
-
- ++session->unsent_submits;
-
- /* Unsent push promises are written immediately, as nghttp2
- * 1.5.0 realizes internal stream data structures only on
- * send and we might need them for other submits.
- * Also, to conserve memory, we send at least every 10 submits
- * so that nghttp2 does not buffer all outbound items too
- * long.
- */
- if (status == APR_SUCCESS
- && (session->unsent_promises || session->unsent_submits > 10)) {
- status = h2_session_send(session);
- }
- return status;
+ return rv;
}
/**
- * A stream was resumed as new response/output data arrived.
+ * A stream's input state has changed.
*/
-static apr_status_t on_stream_resume(void *ctx, h2_stream *stream)
+static apr_status_t on_stream_input(void *ctx, h2_stream *stream)
{
h2_session *session = ctx;
- apr_status_t status = APR_EAGAIN;
- int rv;
- apr_off_t len = 0;
- int eos = 0;
- h2_headers *headers;
-
+ apr_status_t rv = APR_EAGAIN;
+
ap_assert(stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_STRM_MSG(stream, "on_resume"));
-
-send_headers:
- headers = NULL;
- status = h2_stream_out_prepare(stream, &len, &eos, &headers);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
- H2_STRM_MSG(stream, "prepared len=%ld, eos=%d"),
- (long)len, eos);
- if (headers) {
- status = on_stream_headers(session, stream, headers, len, eos);
- if (status != APR_SUCCESS || stream->rst_error) {
- return status;
- }
- goto send_headers;
- }
- else if (status != APR_EAGAIN) {
- /* we have DATA to send */
- if (!stream->has_response) {
- /* but no response */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
- H2_STRM_LOG(APLOGNO(03466), stream,
- "no response, RST_STREAM"));
- h2_stream_rst(stream, H2_ERR_PROTOCOL_ERROR);
- return APR_SUCCESS;
- }
- rv = nghttp2_session_resume_data(session->ngh2, stream->id);
- session->have_written = 1;
- ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
- APLOG_ERR : APLOG_DEBUG, 0, session->c,
- H2_STRM_LOG(APLOGNO(02936), stream, "resumed"));
- }
- return status;
-}
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "on_input change"));
-static void h2_session_in_flush(h2_session *session)
-{
- int id;
-
- while ((id = h2_iq_shift(session->in_process)) > 0) {
- h2_stream *stream = get_stream(session, id);
- if (stream) {
- ap_assert(!stream->scheduled);
- if (h2_stream_prep_processing(stream) == APR_SUCCESS) {
- h2_mplx_m_process(session->mplx, stream, stream_pri_cmp, session);
- }
- else {
- h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
- }
- }
+ if (stream->id == 0) {
+ /* input on primary connection available? read */
+ rv = h2_c1_read(session);
}
-
- while ((id = h2_iq_shift(session->in_pending)) > 0) {
- h2_stream *stream = get_stream(session, id);
- if (stream) {
- h2_stream_flush_input(stream);
+ else {
+ ap_assert(stream->input);
+ if (stream->state == H2_SS_CLOSED_L
+ && !h2_mplx_c1_stream_is_running(session->mplx, stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, NGHTTP2_NO_ERROR);
+ goto cleanup;
+ }
+ h2_beam_report_consumption(stream->input);
+ if (stream->state == H2_SS_CLOSED_R) {
+ /* TODO: remove this stream from input polling */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "should no longer be input polled"));
}
}
+cleanup:
+ return rv;
}
-static apr_status_t session_read(h2_session *session, apr_size_t readlen, int block)
+/**
+ * A stream's output state has changed.
+ */
+static apr_status_t on_stream_output(void *ctx, h2_stream *stream)
{
- apr_status_t status, rstatus = APR_EAGAIN;
- conn_rec *c = session->c;
- apr_off_t read_start = session->io.bytes_read;
-
- while (1) {
- /* H2_IN filter handles all incoming data against the session.
- * We just pull at the filter chain to make it happen */
- status = ap_get_brigade(c->input_filters,
- session->bbtmp, AP_MODE_READBYTES,
- block? APR_BLOCK_READ : APR_NONBLOCK_READ,
- H2MAX(APR_BUCKET_BUFF_SIZE, readlen));
- /* get rid of any possible data we do not expect to get */
- apr_brigade_cleanup(session->bbtmp);
-
- switch (status) {
- case APR_SUCCESS:
- /* successful read, reset our idle timers */
- rstatus = APR_SUCCESS;
- if (block) {
- /* successful blocked read, try unblocked to
- * get more. */
- block = 0;
- }
- break;
- case APR_EAGAIN:
- return rstatus;
- case APR_TIMEUP:
- return status;
- default:
- if (session->io.bytes_read == read_start) {
- /* first attempt failed */
- if (APR_STATUS_IS_ETIMEDOUT(status)
- || APR_STATUS_IS_ECONNABORTED(status)
- || APR_STATUS_IS_ECONNRESET(status)
- || APR_STATUS_IS_EOF(status)
- || APR_STATUS_IS_EBADF(status)) {
- /* common status for a client that has left */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
- H2_SSSN_MSG(session, "input gone"));
- }
- else {
- /* uncommon status, log on INFO so that we see this */
- ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
- H2_SSSN_LOG(APLOGNO(02950), session,
- "error reading, terminating"));
- }
- return status;
- }
- /* subsequent failure after success(es), return initial
- * status. */
- return rstatus;
- }
- if ((session->io.bytes_read - read_start) > readlen) {
- /* read enough in one go, give write a chance */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
- H2_SSSN_MSG(session, "read enough, returning"));
- break;
- }
+ h2_session *session = ctx;
+
+ ap_assert(stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "on_output change"));
+ if (stream->id == 0) {
+ /* we do not poll output of stream 0, this should not be called */
+ return APR_SUCCESS;
}
- return rstatus;
+ return h2_stream_read_output(stream);
}
-static apr_status_t h2_session_read(h2_session *session, int block)
-{
- apr_status_t status = session_read(session, session->max_stream_mem
- * H2MAX(2, session->open_streams),
- block);
- h2_session_in_flush(session);
- return status;
-}
static const char *StateNames[] = {
"INIT", /* H2_SESSION_ST_INIT */
@@ -1678,16 +1308,14 @@ static void update_child_status(h2_session *session, int status, const char *msg
(int)session->responses_submitted,
(int)session->pushes_submitted,
(int)session->pushes_reset + session->streams_reset);
- ap_update_child_status_descr(session->c->sbh, status, session->status);
+ ap_update_child_status_descr(session->c1->sbh, status, session->status);
}
}
static void transit(h2_session *session, const char *action, h2_session_state nstate)
{
- apr_time_t timeout;
int ostate, loglvl;
- const char *s;
-
+
if (session->state != nstate) {
ostate = session->state;
session->state = nstate;
@@ -1697,7 +1325,7 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
|| (ostate == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
loglvl = APLOG_TRACE1;
}
- ap_log_cerror(APLOG_MARK, loglvl, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03078), session,
"transit [%s] -- %s --> [%s]"),
h2_session_state_str(ostate), action,
@@ -1712,29 +1340,16 @@ static void transit(h2_session *session, const char *action, h2_session_state ns
* If we return to mpm right away, this connection has the
* same chance of being cleaned up by the mpm as connections
* that already served requests - not fair. */
- session->idle_sync_until = apr_time_now() + apr_time_from_sec(1);
- s = "timeout";
- timeout = session->s->timeout;
- update_child_status(session, SERVER_BUSY_READ, "idle");
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_SSSN_LOG("", session, "enter idle, timeout = %d sec"),
- (int)apr_time_sec(timeout));
- }
- else if (session->open_streams) {
- s = "timeout";
- timeout = session->s->timeout;
update_child_status(session, SERVER_BUSY_READ, "idle");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_LOG("", session, "enter idle"));
}
else {
/* normal keepalive setup */
- s = "keepalive";
- timeout = session->s->keep_alive_timeout;
- update_child_status(session, SERVER_BUSY_KEEPALIVE, "idle");
+ update_child_status(session, SERVER_BUSY_KEEPALIVE, "keepalive");
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
+ H2_SSSN_LOG("", session, "enter keepalive"));
}
- session->idle_until = apr_time_now() + timeout;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
- H2_SSSN_LOG("", session, "enter idle, %s = %d sec"),
- s, (int)apr_time_sec(timeout));
break;
case H2_SESSION_ST_DONE:
update_child_status(session, SERVER_CLOSING, "done");
@@ -1758,6 +1373,42 @@ static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
}
}
+static void h2_session_ev_input_pending(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_INIT:
+ case H2_SESSION_ST_IDLE:
+ case H2_SESSION_ST_WAIT:
+ transit(session, "input read", H2_SESSION_ST_BUSY);
+ break;
+ default:
+ break;
+ }
+}
+
+static void h2_session_ev_input_exhausted(h2_session *session, int arg, const char *msg)
+{
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ if (!h2_session_want_send(session)) {
+ if (session->open_streams == 0) {
+ transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE);
+ }
+ else {
+ transit(session, "input exhausted", H2_SESSION_ST_WAIT);
+ }
+ }
+ break;
+ case H2_SESSION_ST_WAIT:
+ if (session->open_streams == 0) {
+ transit(session, "input exhausted, no streams", H2_SESSION_ST_IDLE);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
{
cleanup_unprocessed_streams(session);
@@ -1789,7 +1440,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m
break;
default:
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03401), session,
"conn error -> shutdown"));
h2_session_shutdown(session, arg, msg, 0);
@@ -1800,7 +1451,7 @@ static void h2_session_ev_conn_error(h2_session *session, int arg, const char *m
static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
{
if (!session->local.shutdown) {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_SSSN_LOG(APLOGNO(03402), session,
"proto error -> shutdown"));
h2_session_shutdown(session, arg, msg, 0);
@@ -1815,83 +1466,6 @@ static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char
}
}
-static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
-{
- switch (session->state) {
- case H2_SESSION_ST_BUSY:
- /* Nothing to READ, nothing to WRITE on the master connection.
- * Possible causes:
- * - we wait for the client to send us sth
- * - we wait for started tasks to produce output
- * - we have finished all streams and the client has sent GO_AWAY
- */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
- H2_SSSN_MSG(session, "NO_IO event, %d streams open"),
- session->open_streams);
- h2_conn_io_flush(&session->io);
- if (session->open_streams > 0) {
- if (h2_mplx_m_awaits_data(session->mplx)) {
- /* waiting for at least one stream to produce data */
- transit(session, "no io", H2_SESSION_ST_WAIT);
- }
- else {
- /* we have streams open, and all are submitted and none
- * is suspended. The only thing keeping us from WRITEing
- * more must be the flow control.
- * This means we only wait for WINDOW_UPDATE from the
- * client and can block on READ. */
- transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
- /* Make sure we have flushed all previously written output
- * so that the client will react. */
- if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- return;
- }
- }
- }
- else if (session->local.accepting) {
- /* When we have no streams, but accept new, switch to idle */
- transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
- }
- else {
- /* We are no longer accepting new streams and there are
- * none left. Time to leave. */
- h2_session_shutdown(session, arg, msg, 0);
- transit(session, "no io", H2_SESSION_ST_DONE);
- }
- break;
- default:
- /* nop */
- break;
- }
-}
-
-static void h2_session_ev_frame_rcvd(h2_session *session, int arg, const char *msg)
-{
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_WAIT:
- transit(session, "frame received", H2_SESSION_ST_BUSY);
- break;
- default:
- /* nop */
- break;
- }
-}
-
-static void h2_session_ev_stream_change(h2_session *session, int arg, const char *msg)
-{
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
- case H2_SESSION_ST_WAIT:
- transit(session, "stream change", H2_SESSION_ST_BUSY);
- break;
- default:
- /* nop */
- break;
- }
-}
-
static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
@@ -1924,9 +1498,58 @@ static void h2_session_ev_pre_close(h2_session *session, int arg, const char *ms
h2_session_shutdown(session, arg, msg, 1);
}
+static void h2_session_ev_no_more_streams(h2_session *session)
+{
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_SSSN_LOG(APLOGNO(10304), session, "no more streams"));
+ switch (session->state) {
+ case H2_SESSION_ST_BUSY:
+ case H2_SESSION_ST_WAIT:
+ if (!h2_session_want_send(session)) {
+ if (session->local.accepting) {
+ /* We wait for new frames on c1 only. */
+ transit(session, "c1 keepalive", H2_SESSION_ST_IDLE);
+ }
+ else {
+ /* We are no longer accepting new streams.
+ * Time to leave. */
+ h2_session_shutdown(session, 0, "done", 0);
+ transit(session, "c1 done after goaway", H2_SESSION_ST_DONE);
+ }
+ }
+ else {
+ transit(session, "no more streams", H2_SESSION_ST_WAIT);
+ }
+ break;
+ default:
+ /* nop */
+ break;
+ }
+}
+
+static void ev_stream_created(h2_session *session, h2_stream *stream)
+{
+ /* nop */
+}
+
static void ev_stream_open(h2_session *session, h2_stream *stream)
{
- h2_iq_append(session->in_process, stream->id);
+ if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
+ ++session->remote.emitted_count;
+ if (stream->id > session->remote.emitted_max) {
+ session->remote.emitted_max = stream->id;
+ session->local.accepted_max = stream->id;
+ }
+ }
+ else {
+ if (stream->id > session->local.emitted_max) {
+ ++session->local.emitted_count;
+ session->remote.emitted_max = stream->id;
+ }
+ }
+ /* Stream state OPEN means we have received all request headers
+ * and can start processing the stream. */
+ h2_iq_append(session->ready_to_process, stream->id);
}
static void ev_stream_closed(h2_session *session, h2_stream *stream)
@@ -1937,77 +1560,75 @@ static void ev_stream_closed(h2_session *session, h2_stream *stream)
&& (stream->id > session->local.completed_max)) {
session->local.completed_max = stream->id;
}
- switch (session->state) {
- case H2_SESSION_ST_IDLE:
- break;
- default:
- break;
- }
-
/* The stream might have data in the buffers of the main connection.
* We can only free the allocated resources once all had been written.
* Send a special buckets on the connection that gets destroyed when
* all preceding data has been handled. On its destruction, it is safe
* to purge all resources of the stream. */
- b = h2_bucket_eos_create(session->c->bucket_alloc, stream);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
+ H2_STRM_MSG(stream, "adding h2_eos to c1 out"));
+ b = h2_bucket_eos_create(session->c1->bucket_alloc, stream);
APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
- h2_conn_io_pass(&session->io, session->bbtmp);
+ h2_c1_io_append(&session->io, session->bbtmp);
apr_brigade_cleanup(session->bbtmp);
}
static void on_stream_state_enter(void *ctx, h2_stream *stream)
{
h2_session *session = ctx;
- /* stream entered a new state */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
H2_STRM_MSG(stream, "entered state"));
switch (stream->state) {
case H2_SS_IDLE: /* stream was created */
- ++session->open_streams;
- if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
- ++session->remote.emitted_count;
- if (stream->id > session->remote.emitted_max) {
- session->remote.emitted_max = stream->id;
- session->local.accepted_max = stream->id;
- }
- }
- else {
- if (stream->id > session->local.emitted_max) {
- ++session->local.emitted_count;
- session->remote.emitted_max = stream->id;
- }
- }
+ ev_stream_created(session, stream);
break;
case H2_SS_OPEN: /* stream has request headers */
- case H2_SS_RSVD_L: /* stream has request headers */
+ case H2_SS_RSVD_L:
ev_stream_open(session, stream);
break;
- case H2_SS_CLOSED_L: /* stream output was closed */
+ case H2_SS_CLOSED_L: /* stream output was closed, but remote end is not */
+ /* If the stream is still being processed, it could still be reading
+ * its input (in theory; normal http request handling does not).
+ * But when processing is done, we need to cancel the stream as no
+ * one is consuming the input any longer.
+ * This happens, for example, on a large POST when the response
+ * is ready early due to the POST being denied. */
+ if (!h2_mplx_c1_stream_is_running(session->mplx, stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
+ H2_STRM_LOG(APLOGNO(10305), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, H2_ERR_NO_ERROR);
+ }
break;
case H2_SS_CLOSED_R: /* stream input was closed */
break;
case H2_SS_CLOSED: /* stream in+out were closed */
- --session->open_streams;
ev_stream_closed(session, stream);
break;
case H2_SS_CLEANUP:
nghttp2_session_set_stream_user_data(session->ngh2, stream->id, NULL);
- h2_mplx_m_stream_cleanup(session->mplx, stream);
+ h2_mplx_c1_stream_cleanup(session->mplx, stream, &session->open_streams);
+ if (session->open_streams == 0) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_NO_MORE_STREAMS,
+ 0, "stream done");
+ }
break;
default:
break;
}
- dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, "stream state change");
}
-static void on_stream_event(void *ctx, h2_stream *stream,
- h2_stream_event_t ev)
+static void on_stream_event(void *ctx, h2_stream *stream, h2_stream_event_t ev)
{
h2_session *session = ctx;
switch (ev) {
case H2_SEV_IN_DATA_PENDING:
h2_iq_append(session->in_pending, stream->id);
break;
+ case H2_SEV_OUT_C1_BLOCK:
+ h2_iq_append(session->out_c1_blocked, stream->id);
+ break;
default:
/* NOP */
break;
@@ -2031,13 +1652,19 @@ static void on_stream_state_event(void *ctx, h2_stream *stream,
}
}
-static void dispatch_event(h2_session *session, h2_session_event_t ev,
- int arg, const char *msg)
+void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg)
{
switch (ev) {
case H2_SESSION_EV_INIT:
h2_session_ev_init(session, arg, msg);
break;
+ case H2_SESSION_EV_INPUT_PENDING:
+ h2_session_ev_input_pending(session, arg, msg);
+ break;
+ case H2_SESSION_EV_INPUT_EXHAUSTED:
+ h2_session_ev_input_exhausted(session, arg, msg);
+ break;
case H2_SESSION_EV_LOCAL_GOAWAY:
h2_session_ev_local_goaway(session, arg, msg);
break;
@@ -2053,12 +1680,6 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
case H2_SESSION_EV_CONN_TIMEOUT:
h2_session_ev_conn_timeout(session, arg, msg);
break;
- case H2_SESSION_EV_NO_IO:
- h2_session_ev_no_io(session, arg, msg);
- break;
- case H2_SESSION_EV_FRAME_RCVD:
- h2_session_ev_frame_rcvd(session, arg, msg);
- break;
case H2_SESSION_EV_NGH2_DONE:
h2_session_ev_ngh2_done(session, arg, msg);
break;
@@ -2068,309 +1689,186 @@ static void dispatch_event(h2_session *session, h2_session_event_t ev,
case H2_SESSION_EV_PRE_CLOSE:
h2_session_ev_pre_close(session, arg, msg);
break;
- case H2_SESSION_EV_STREAM_CHANGE:
- h2_session_ev_stream_change(session, arg, msg);
+ case H2_SESSION_EV_NO_MORE_STREAMS:
+ h2_session_ev_no_more_streams(session);
break;
default:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
H2_SSSN_MSG(session, "unknown event %d"), ev);
break;
}
}
-/* trigger window updates, stream resumes and submits */
-static apr_status_t dispatch_master(h2_session *session) {
- apr_status_t status;
-
- status = h2_mplx_m_dispatch_master_events(session->mplx,
- on_stream_resume, session);
- if (status == APR_EAGAIN) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
- H2_SSSN_MSG(session, "no master event available"));
- }
- else if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
- H2_SSSN_MSG(session, "dispatch error"));
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "dispatch error");
+static void unblock_c1_out(h2_session *session) {
+ int sid;
+
+ while ((sid = h2_iq_shift(session->out_c1_blocked)) > 0) {
+ nghttp2_session_resume_data(session->ngh2, sid);
}
- return status;
}
-static const int MAX_WAIT_MICROS = 200 * 1000;
-
apr_status_t h2_session_process(h2_session *session, int async)
{
apr_status_t status = APR_SUCCESS;
- conn_rec *c = session->c;
+ conn_rec *c = session->c1;
int rv, mpm_state, trace = APLOGctrace3(c);
apr_time_t now;
-
+
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
H2_SSSN_MSG(session, "process start, async=%d"), async);
}
-
+
+ if (H2_SESSION_ST_INIT == session->state) {
+ ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
+ if (!h2_protocol_is_acceptable_c1(c, session->r, 1)) {
+ update_child_status(session, SERVER_BUSY_READ,
+ "inadequate security");
+ h2_session_shutdown(session,
+ NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
+ }
+ else {
+ update_child_status(session, SERVER_BUSY_READ, "init");
+ status = h2_session_start(session, &rv);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(03079), session,
+ "started on %s:%d"),
+ session->s->server_hostname,
+ c->local_addr->port);
+ if (status != APR_SUCCESS) {
+ h2_session_dispatch_event(session,
+ H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+ else {
+ h2_session_dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
+ }
+ }
+ }
+
while (session->state != H2_SESSION_ST_DONE) {
now = apr_time_now();
- session->have_read = session->have_written = 0;
if (session->local.accepting
&& !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
if (mpm_state == AP_MPMQ_STOPPING) {
- dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
+ h2_session_dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
}
}
-
session->status[0] = '\0';
+ if (h2_session_want_send(session)) {
+ h2_session_send(session);
+ }
+ else if (!nghttp2_session_want_read(session->ngh2)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
+ }
+
+ if (!h2_iq_empty(session->ready_to_process)) {
+ h2_mplx_c1_process(session->mplx, session->ready_to_process,
+ get_stream, stream_pri_cmp, session,
+ &session->open_streams);
+ transit(session, "scheduled stream", H2_SESSION_ST_BUSY);
+ }
+
+ if (!h2_iq_empty(session->in_pending)) {
+ h2_mplx_c1_fwd_input(session->mplx, session->in_pending,
+ get_stream, session);
+ transit(session, "forwarded input", H2_SESSION_ST_BUSY);
+ }
+
+ if (!h2_iq_empty(session->out_c1_blocked)) {
+ unblock_c1_out(session);
+ transit(session, "unblocked output", H2_SESSION_ST_BUSY);
+ }
+
+ if (session->reprioritize) {
+ h2_mplx_c1_reprioritize(session->mplx, stream_pri_cmp, session);
+ session->reprioritize = 0;
+ }
+
+ if (h2_session_want_send(session)) {
+ h2_session_send(session);
+ }
+
+ status = h2_c1_io_assure_flushed(&session->io);
+ if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ }
+
switch (session->state) {
- case H2_SESSION_ST_INIT:
- ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
- if (!h2_is_acceptable_connection(c, session->r, 1)) {
- update_child_status(session, SERVER_BUSY_READ,
- "inadequate security");
- h2_session_shutdown(session,
- NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
- }
- else {
- update_child_status(session, SERVER_BUSY_READ, "init");
- status = h2_session_start(session, &rv);
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
- H2_SSSN_LOG(APLOGNO(03079), session,
- "started on %s:%d"),
- session->s->server_hostname,
- c->local_addr->port);
- if (status != APR_SUCCESS) {
- dispatch_event(session,
- H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
- dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
- }
- break;
-
- case H2_SESSION_ST_IDLE:
- if (session->idle_until && (now + session->idle_delay) > session->idle_until) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
- H2_SSSN_MSG(session, "idle, timeout reached, closing"));
- if (session->idle_delay) {
- apr_table_setn(session->c->notes, "short-lingering-close", "1");
- }
- dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
- goto out;
- }
-
- if (session->idle_delay) {
- /* we are less interested in spending time on this connection */
- ap_log_cerror( APLOG_MARK, APLOG_TRACE2, status, c,
- H2_SSSN_MSG(session, "session is idle (%ld ms), idle wait %ld sec left"),
- (long)apr_time_as_msec(session->idle_delay),
- (long)apr_time_sec(session->idle_until - now));
- apr_sleep(session->idle_delay);
- session->idle_delay = 0;
- }
+ case H2_SESSION_ST_INIT:
+ ap_assert(0);
+ h2_c1_read(session);
+ break;
- h2_conn_io_flush(&session->io);
- if (async && !session->r && (now > session->idle_sync_until)) {
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session,
- "nonblock read, %d streams open"),
- session->open_streams);
- }
- status = h2_session_read(session, 0);
-
- if (status == APR_SUCCESS) {
- session->have_read = 1;
- }
- else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
- status = h2_mplx_m_idle(session->mplx);
- if (status == APR_EAGAIN) {
- break;
- }
- else if (status != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_ENHANCE_YOUR_CALM, "less is more");
- }
- status = APR_EAGAIN;
- goto out;
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
- H2_SSSN_LOG(APLOGNO(03403), session,
- "no data, error"));
- dispatch_event(session,
- H2_SESSION_EV_CONN_ERROR, 0, "timeout");
- }
- }
- else {
- /* make certain, we send everything before we idle */
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session,
- "sync, stutter 1-sec, %d streams open"),
- session->open_streams);
- }
- /* We wait in smaller increments, using a 1 second timeout.
- * That gives us the chance to check for MPMQ_STOPPING often.
- */
- status = h2_mplx_m_idle(session->mplx);
- if (status == APR_EAGAIN) {
- break;
- }
- else if (status != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_ENHANCE_YOUR_CALM, "less is more");
- }
- h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1));
- status = h2_session_read(session, 1);
- if (status == APR_SUCCESS) {
- session->have_read = 1;
- }
- else if (status == APR_EAGAIN) {
- /* nothing to read */
- }
- else if (APR_STATUS_IS_TIMEUP(status)) {
- /* continue reading handling */
- }
- else if (APR_STATUS_IS_ECONNABORTED(status)
- || APR_STATUS_IS_ECONNRESET(status)
- || APR_STATUS_IS_EOF(status)
- || APR_STATUS_IS_EBADF(status)) {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session, "input gone"));
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
- else {
- ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
- H2_SSSN_MSG(session,
- "(1 sec timeout) read failed"));
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
- }
- }
- if (nghttp2_session_want_write(session->ngh2)) {
- ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
- status = h2_session_send(session);
- if (status == APR_SUCCESS) {
- status = h2_conn_io_flush(&session->io);
- }
- if (status != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "writing");
- break;
- }
- }
- break;
-
- case H2_SESSION_ST_BUSY:
- if (nghttp2_session_want_read(session->ngh2)) {
- ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL);
- h2_filter_cin_timeout_set(session->cin, session->s->timeout);
- status = h2_session_read(session, 0);
- if (status == APR_SUCCESS) {
- session->have_read = 1;
- }
- else if (status == APR_EAGAIN) {
- /* nothing to read */
- }
- else if (APR_STATUS_IS_TIMEUP(status)) {
- dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
- break;
- }
- else {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
+ case H2_SESSION_ST_IDLE:
+ ap_assert(session->open_streams == 0);
+ ap_assert(nghttp2_session_want_read(session->ngh2));
+ if (!h2_session_want_send(session)) {
+ /* Give any new incoming request a short grace period to
+ * arrive while we are still hot, and return to the mpm's
+ * connection handling if nothing happens. */
+ h2_mplx_c1_poll(session->mplx, apr_time_from_msec(100),
+ on_stream_input, on_stream_output, session);
+ if (H2_SESSION_ST_IDLE == session->state) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c,
+ H2_SSSN_LOG(APLOGNO(10306), session,
+ "returning to mpm c1 monitoring"));
+ goto leaving;
}
+ }
+ else {
+ transit(session, "c1 io pending", H2_SESSION_ST_BUSY);
+ }
+ break;
- status = dispatch_master(session);
- if (status != APR_SUCCESS && status != APR_EAGAIN) {
- break;
- }
-
- if (nghttp2_session_want_write(session->ngh2)) {
- ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
- status = h2_session_send(session);
- if (status == APR_SUCCESS) {
- status = h2_conn_io_flush(&session->io);
- }
- if (status != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
- H2_ERR_INTERNAL_ERROR, "writing");
- break;
- }
- }
-
- if (session->have_read || session->have_written) {
- if (session->wait_us) {
- session->wait_us = 0;
- }
- }
- else if (!nghttp2_session_want_write(session->ngh2)) {
- dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL);
- }
+ case H2_SESSION_ST_BUSY:
+ /* IO happening in and out. Make sure we react to c2 events
+ * inbetween send and receive. */
+ status = h2_mplx_c1_poll(session->mplx, 0,
+ on_stream_input, on_stream_output, session);
+ if (APR_SUCCESS != status && !APR_STATUS_IS_TIMEUP(status)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
break;
-
- case H2_SESSION_ST_WAIT:
- if (session->wait_us <= 0) {
- session->wait_us = 10;
- if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- break;
- }
- }
- else {
- /* repeating, increase timer for graceful backoff */
- session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
- }
+ }
+ h2_c1_read(session);
+ break;
- if (trace) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
- "h2_session: wait for data, %ld micros",
- (long)session->wait_us);
- }
- status = h2_mplx_m_out_trywait(session->mplx, session->wait_us,
- session->iowait);
- if (status == APR_SUCCESS) {
- session->wait_us = 0;
- dispatch_event(session, H2_SESSION_EV_STREAM_CHANGE, 0, NULL);
- }
- else if (APR_STATUS_IS_TIMEUP(status)) {
- /* go back to checking all inputs again */
- transit(session, "wait cycle", session->local.shutdown?
- H2_SESSION_ST_DONE : H2_SESSION_ST_BUSY);
- }
- else if (APR_STATUS_IS_ECONNRESET(status)
- || APR_STATUS_IS_ECONNABORTED(status)) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
- H2_SSSN_LOG(APLOGNO(03404), session,
- "waiting on conditional"));
- h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
- "cond wait error", 0);
- }
+ case H2_SESSION_ST_WAIT:
+ status = h2_c1_io_assure_flushed(&session->io);
+ if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
break;
-
- default:
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
- H2_SSSN_LOG(APLOGNO(03080), session,
- "unknown state"));
- dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
+ }
+ /* No IO happening and input is exhausted. Make sure we have
+ * flushed any pending output, then wait with the c1 connection
+ * timeout for something to happen on our c1/c2 sockets/pipes. */
+ status = h2_mplx_c1_poll(session->mplx, session->s->timeout,
+ on_stream_input, on_stream_output, session);
+ if (APR_STATUS_IS_TIMEUP(status)) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
break;
- }
+ }
+ else if (APR_SUCCESS != status) {
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
+ break;
+ }
+ break;
- if (!nghttp2_session_want_read(session->ngh2)
- && !nghttp2_session_want_write(session->ngh2)) {
- dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
- }
- if (session->reprioritize) {
- h2_mplx_m_reprioritize(session->mplx, stream_pri_cmp, session);
- session->reprioritize = 0;
+ case H2_SESSION_ST_DONE:
+ h2_c1_read(session);
+ break;
+
+ default:
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
+ H2_SSSN_LOG(APLOGNO(03080), session,
+ "unknown state"));
+ h2_session_dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
+ break;
}
}
-
-out:
+
+leaving:
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
H2_SSSN_MSG(session, "process returns"));
@@ -2380,7 +1878,7 @@ out:
&& (APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status))) {
- dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
+ h2_session_dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
return (session->state == H2_SESSION_ST_DONE)? APR_EOF : APR_SUCCESS;
@@ -2390,14 +1888,14 @@ apr_status_t h2_session_pre_close(h2_session *session, int async)
{
apr_status_t status;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
H2_SSSN_MSG(session, "pre_close"));
- dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
+ h2_session_dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
(session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL);
status = session_cleanup(session, "pre_close");
if (status == APR_SUCCESS) {
/* no one should hold a reference to this session any longer and
- * the h2_ctx was removed from the connection.
+ * the h2_conn_ctx was removed from the connection.
* Take the pool (and thus all subpools etc. down now, instead of
* during cleanup of main connection pool. */
apr_pool_destroy(session->pool);
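
The reworked h2_session_process() above drops the old stutter-read and backoff timers: in IDLE and WAIT the session now hands a timeout to h2_mplx_c1_poll() and lets a pollset wake it on c1/c2 activity. Below is a minimal, standalone sketch of that waiting primitive using only public APR calls on a POSIX platform; names such as wait_for_input are illustrative and not part of mod_http2.

#include <string.h>
#include <apr_general.h>
#include <apr_pools.h>
#include <apr_time.h>
#include <apr_file_io.h>
#include <apr_poll.h>

/* Wait up to 'timeout' for readability on a registered descriptor,
 * roughly the way the session waits for c1/c2 activity. */
static apr_status_t wait_for_input(apr_pollset_t *ps, apr_interval_time_t timeout)
{
    const apr_pollfd_t *results;
    apr_int32_t nresults;
    return apr_pollset_poll(ps, timeout, &nresults, &results);
}

int main(void)
{
    apr_pool_t *pool;
    apr_file_t *rd, *wr;
    apr_pollset_t *ps;
    apr_pollfd_t pfd;
    apr_status_t rv;

    apr_initialize();
    apr_pool_create(&pool, NULL);
    apr_file_pipe_create(&rd, &wr, pool);
    apr_pollset_create(&ps, 1, pool, 0);

    memset(&pfd, 0, sizeof(pfd));
    pfd.p = pool;
    pfd.desc_type = APR_POLL_FILE;
    pfd.desc.f = rd;
    pfd.reqevents = APR_POLLIN;
    apr_pollset_add(ps, &pfd);

    /* nothing written yet: returns APR_TIMEUP after ~100ms, comparable
     * to the short IDLE grace period used above */
    rv = wait_for_input(ps, apr_time_from_msec(100));

    apr_file_putc('x', wr);
    /* data pending now: returns APR_SUCCESS right away */
    rv = wait_for_input(ps, apr_time_from_msec(100));
    (void)rv;

    apr_pool_destroy(pool);
    apr_terminate();
    return 0;
}

In mod_http2 this mechanism lives inside h2_mplx, which watches the main (c1) connection alongside the pipes of the secondary (c2) connections and invokes the on_stream_input/on_stream_output callbacks shown above.
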
diff --git a/modules/http2/h2_session.h b/modules/http2/h2_session.h
index 3fd3088a0f..4680b3c869 100644
--- a/modules/http2/h2_session.h
+++ b/modules/http2/h2_session.h
@@ -17,25 +17,15 @@
#ifndef __mod_h2__h2_session__
#define __mod_h2__h2_session__
-#include "h2_conn_io.h"
+#include "h2_c1_io.h"
/**
* A HTTP/2 connection, a session with a specific client.
*
* h2_session sits on top of a httpd conn_rec* instance and takes complete
* control of the connection data. It receives protocol frames from the
- * client. For new HTTP/2 streams it creates h2_task(s) that are sent
- * via callback to a dispatcher (see h2_conn.c).
- * h2_session keeps h2_io's for each ongoing stream which buffer the
- * payload for that stream.
- *
- * New incoming HEADER frames are converted into a h2_stream+h2_task instance
- * that both represent a HTTP/2 stream, but may have separate lifetimes. This
- * allows h2_task to be scheduled in other threads without semaphores
- * all over the place. It allows task memory to be freed independent of
- * session lifetime and sessions may close down while tasks are still running.
- *
- *
+ * client. For new HTTP/2 streams it creates secondary connections
+ * to execute the requests in h2 workers.
*/
#include "h2.h"
@@ -44,7 +34,6 @@ struct apr_thread_mutext_t;
struct apr_thread_cond_t;
struct h2_ctx;
struct h2_config;
-struct h2_filter_cin;
struct h2_ihash_t;
struct h2_mplx;
struct h2_priority;
@@ -53,38 +42,37 @@ struct h2_push_diary;
struct h2_session;
struct h2_stream;
struct h2_stream_monitor;
-struct h2_task;
struct h2_workers;
struct nghttp2_session;
typedef enum {
H2_SESSION_EV_INIT, /* session was initialized */
+ H2_SESSION_EV_INPUT_PENDING, /* c1 input may have data pending */
+ H2_SESSION_EV_INPUT_EXHAUSTED, /* c1 input exhausted */
H2_SESSION_EV_LOCAL_GOAWAY, /* we send a GOAWAY */
H2_SESSION_EV_REMOTE_GOAWAY, /* remote send us a GOAWAY */
H2_SESSION_EV_CONN_ERROR, /* connection error */
H2_SESSION_EV_PROTO_ERROR, /* protocol error */
H2_SESSION_EV_CONN_TIMEOUT, /* connection timeout */
- H2_SESSION_EV_NO_IO, /* nothing has been read or written */
- H2_SESSION_EV_FRAME_RCVD, /* a frame has been received */
H2_SESSION_EV_NGH2_DONE, /* nghttp2 wants neither read nor write anything */
H2_SESSION_EV_MPM_STOPPING, /* the process is stopping */
H2_SESSION_EV_PRE_CLOSE, /* connection will close after this */
- H2_SESSION_EV_STREAM_CHANGE, /* a stream (state/input/output) changed */
+ H2_SESSION_EV_NO_MORE_STREAMS, /* no more streams to process */
} h2_session_event_t;
typedef struct h2_session {
long id; /* identifier of this session, unique
* inside a httpd process */
- conn_rec *c; /* the connection this session serves */
+ conn_rec *c1; /* the main connection this session serves */
request_rec *r; /* the request that started this in case
* of 'h2c', NULL otherwise */
server_rec *s; /* server/vhost we're starting on */
apr_pool_t *pool; /* pool to use in session */
struct h2_mplx *mplx; /* multiplexer for stream data */
- struct h2_workers *workers; /* for executing stream tasks */
- struct h2_filter_cin *cin; /* connection input filter context */
- h2_conn_io io; /* io on httpd conn filters */
+ struct h2_workers *workers; /* for executing streams */
+ struct h2_c1_io_in_ctx_t *cin; /* connection input filter context */
+ h2_c1_io io; /* io on httpd conn filters */
int padding_max; /* max number of padding bytes */
int padding_always; /* padding has precedence over I/O optimizations */
struct nghttp2_session *ngh2; /* the nghttp2 session (internal use) */
@@ -96,17 +84,13 @@ typedef struct h2_session {
unsigned int reprioritize : 1; /* scheduled streams priority changed */
unsigned int flush : 1; /* flushing output necessary */
- unsigned int have_read : 1; /* session has read client data */
- unsigned int have_written : 1; /* session did write data to client */
apr_interval_time_t wait_us; /* timeout during BUSY_WAIT state, micro secs */
struct h2_push_diary *push_diary; /* remember pushes, avoid duplicates */
struct h2_stream_monitor *monitor;/* monitor callbacks for streams */
- int open_streams; /* number of client streams open */
- int unsent_submits; /* number of submitted, but not yet written responses. */
- int unsent_promises; /* number of submitted, but not yet written push promises */
-
+ int open_streams; /* number of streams processing */
+
int responses_submitted; /* number of http/2 responses submitted */
int streams_reset; /* number of http/2 streams reset by client */
int pushes_promised; /* number of http/2 push promises submitted */
@@ -119,20 +103,18 @@ typedef struct h2_session {
apr_size_t max_stream_count; /* max number of open streams */
apr_size_t max_stream_mem; /* max buffer memory for a single stream */
- apr_time_t idle_until; /* Time we shut down due to sheer boredom */
- apr_time_t idle_sync_until; /* Time we sync wait until keepalive handling kicks in */
apr_size_t idle_frames; /* number of rcvd frames that kept session in idle state */
apr_interval_time_t idle_delay; /* Time we delay processing rcvd frames in idle state */
apr_bucket_brigade *bbtmp; /* brigade for keeping temporary data */
- struct apr_thread_cond_t *iowait; /* our cond when trywaiting for data */
-
+
char status[64]; /* status message for scoreboard */
int last_status_code; /* the one already reported */
const char *last_status_msg; /* the one already reported */
struct h2_iqueue *in_pending; /* all streams with input pending */
- struct h2_iqueue *in_process; /* all streams ready for processing on a secondary */
+ struct h2_iqueue *out_c1_blocked; /* all streams with output blocked on c1 buffer full */
+ struct h2_iqueue *ready_to_process; /* all streams ready for processing */
} h2_session;
@@ -153,7 +135,7 @@ apr_status_t h2_session_create(h2_session **psession,
struct h2_workers *workers);
void h2_session_event(h2_session *session, h2_session_event_t ev,
- int err, const char *msg);
+ int err, const char *msg);
/**
* Process the given HTTP/2 session until it is ended or a fatal
@@ -177,11 +159,6 @@ apr_status_t h2_session_pre_close(h2_session *session, int async);
void h2_session_abort(h2_session *session, apr_status_t reason);
/**
- * Close and deallocate the given session.
- */
-void h2_session_close(h2_session *session);
-
-/**
* Returns if client settings have push enabled.
* @param != 0 iff push is enabled in client settings
*/
@@ -203,6 +180,17 @@ apr_status_t h2_session_set_prio(h2_session *session,
struct h2_stream *stream,
const struct h2_priority *prio);
+/**
+ * Dispatch an event that happened during session processing.
+ * @param session the session
+ * @param ev the event that happened
+ * @param arg integer argument (event type dependent)
+ * @param msg descriptive message
+ */
+void h2_session_dispatch_event(h2_session *session, h2_session_event_t ev,
+ int arg, const char *msg);
+
+
#define H2_SSSN_MSG(s, msg) \
"h2_session(%ld,%s,%d): "msg, s->id, h2_session_state_str(s->state), \
s->open_streams
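
The slimmed-down event set above (INPUT_PENDING, INPUT_EXHAUSTED and NO_MORE_STREAMS replacing NO_IO, FRAME_RCVD and STREAM_CHANGE) drives the session as a plain event/state machine, and h2_session_dispatch_event() is now the entry point for feeding it. A compact, self-contained sketch of that dispatch pattern, using toy names rather than the mod_http2 API:

#include <stdio.h>

typedef enum { ST_INIT, ST_BUSY, ST_IDLE, ST_WAIT, ST_DONE } toy_state_t;
typedef enum { EV_INPUT_PENDING, EV_INPUT_EXHAUSTED, EV_NO_MORE_STREAMS } toy_event_t;
typedef struct { toy_state_t state; } toy_session;

static void transit(toy_session *s, const char *why, toy_state_t next)
{
    printf("transit %d -> %d (%s)\n", s->state, next, why);
    s->state = next;
}

/* one handler per event; each decides from the current state
 * whether a transition applies */
static void on_input_pending(toy_session *s)
{
    if (s->state == ST_INIT || s->state == ST_IDLE || s->state == ST_WAIT)
        transit(s, "input read", ST_BUSY);
}

static void on_no_more_streams(toy_session *s)
{
    if (s->state == ST_BUSY || s->state == ST_WAIT)
        transit(s, "keepalive", ST_IDLE);
}

void toy_dispatch(toy_session *s, toy_event_t ev)
{
    switch (ev) {
    case EV_INPUT_PENDING:   on_input_pending(s);   break;
    case EV_NO_MORE_STREAMS: on_no_more_streams(s); break;
    default:                 break; /* remaining events elided */
    }
}

int main(void)
{
    toy_session s = { ST_IDLE };
    toy_dispatch(&s, EV_INPUT_PENDING);   /* IDLE -> BUSY */
    toy_dispatch(&s, EV_NO_MORE_STREAMS); /* BUSY -> IDLE */
    return 0;
}

The real handlers in h2_session.c additionally consult nghttp2 (want_read/want_write) and the c1 output buffer before deciding on a transition.
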
diff --git a/modules/http2/h2_stream.c b/modules/http2/h2_stream.c
index 4fec5377f7..b5f46243b6 100644
--- a/modules/http2/h2_stream.c
+++ b/modules/http2/h2_stream.c
@@ -29,22 +29,22 @@
#include "h2_private.h"
#include "h2.h"
#include "h2_bucket_beam.h"
-#include "h2_conn.h"
+#include "h2_c1.h"
#include "h2_config.h"
-#include "h2_h2.h"
+#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
-#include "h2_task.h"
-#include "h2_ctx.h"
-#include "h2_task.h"
+#include "h2_c2.h"
+#include "h2_conn_ctx.h"
+#include "h2_c2.h"
#include "h2_util.h"
-static const char *h2_ss_str(h2_stream_state_t state)
+static const char *h2_ss_str(const h2_stream_state_t state)
{
switch (state) {
case H2_SS_IDLE:
@@ -68,7 +68,7 @@ static const char *h2_ss_str(h2_stream_state_t state)
}
}
-const char *h2_stream_state_str(h2_stream *stream)
+const char *h2_stream_state_str(const h2_stream *stream)
{
return h2_ss_str(stream->state);
}
@@ -121,6 +121,7 @@ static int trans_on_event[][H2_SS_MAX] = {
{ S_ERR, S_ERR, S_ERR, S_CL_R, S_ERR, S_CLS, S_NOP, S_NOP, },/* EV_CLOSED_R*/
{ S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_CLS, S_NOP, S_NOP, },/* EV_CANCELLED*/
{ S_NOP, S_XXX, S_XXX, S_XXX, S_XXX, S_CLS, S_CLN, S_XXX, },/* EV_EOS_SENT*/
+{ S_NOP, S_XXX, S_CLS, S_XXX, S_XXX, S_CLS, S_XXX, S_XXX, },/* EV_IN_ERROR*/
};
static int on_map(h2_stream_state_t state, int map[H2_SS_MAX])
@@ -169,10 +170,18 @@ static int on_event(h2_stream* stream, h2_stream_event_t ev)
return stream->state;
}
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser);
+
static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
{
- if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
- conn_rec *c = s->session->c;
+ if (APLOG_C_IS_LEVEL(s->session->c1, lvl)) {
+ conn_rec *c = s->session->c1;
char buffer[4 * 1024];
apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
@@ -182,76 +191,77 @@ static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
}
}
-static apr_status_t setup_input(h2_stream *stream) {
+apr_status_t h2_stream_setup_input(h2_stream *stream)
+{
if (stream->input == NULL) {
- int empty = (stream->input_eof
+ int empty = (stream->input_closed
&& (!stream->in_buffer
|| APR_BRIGADE_EMPTY(stream->in_buffer)));
if (!empty) {
- h2_beam_create(&stream->input, stream->pool, stream->id,
- "input", H2_BEAM_OWNER_SEND, 0,
- stream->session->s->timeout);
- h2_beam_send_from(stream->input, stream->pool);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "setup input beam"));
+ h2_beam_create(&stream->input, stream->session->c1,
+ stream->pool, stream->id,
+ "input", 0, stream->session->s->timeout);
}
}
return APR_SUCCESS;
}
-static apr_status_t close_input(h2_stream *stream)
+static void input_append_bucket(h2_stream *stream, apr_bucket *b)
{
- conn_rec *c = stream->session->c;
- apr_status_t status = APR_SUCCESS;
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(
+ stream->pool, stream->session->c1->bucket_alloc);
+ }
+ APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
+}
- stream->input_eof = 1;
- if (stream->input && h2_beam_is_closed(stream->input)) {
- return APR_SUCCESS;
+static void input_append_data(h2_stream *stream, const char *data, apr_size_t len)
+{
+ if (!stream->in_buffer) {
+ stream->in_buffer = apr_brigade_create(
+ stream->pool, stream->session->c1->bucket_alloc);
}
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ apr_brigade_write(stream->in_buffer, NULL, NULL, data, len);
+}
+
+
+static apr_status_t close_input(h2_stream *stream)
+{
+ conn_rec *c = stream->session->c1;
+ apr_status_t rv = APR_SUCCESS;
+ apr_bucket *b;
+
+ if (stream->input_closed) goto cleanup;
+
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "closing input"));
- if (stream->rst_error) {
- return APR_ECONNRESET;
- }
-
- if (stream->trailers && !apr_is_empty_table(stream->trailers)) {
- apr_bucket *b;
+ if (!stream->rst_error
+ && stream->trailers_in
+ && !apr_is_empty_table(stream->trailers_in)) {
h2_headers *r;
- if (!stream->in_buffer) {
- stream->in_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
- }
-
- r = h2_headers_create(HTTP_OK, stream->trailers, NULL,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "adding trailers"));
+ r = h2_headers_create(HTTP_OK, stream->trailers_in, NULL,
stream->in_trailer_octets, stream->pool);
- stream->trailers = NULL;
+ stream->trailers_in = NULL;
b = h2_bucket_headers_create(c->bucket_alloc, r);
- APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
-
- b = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(stream->in_buffer, b);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- H2_STRM_MSG(stream, "added trailers"));
- h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
+ input_append_bucket(stream, b);
}
- if (stream->input) {
- h2_stream_flush_input(stream);
- return h2_beam_close(stream->input);
- }
- return status;
-}
-static apr_status_t close_output(h2_stream *stream)
-{
- if (!stream->output || h2_beam_is_closed(stream->output)) {
- return APR_SUCCESS;
+ stream->input_closed = 1;
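+ /* append an EOS bucket to mark the end of input */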
+ if (stream->in_buffer || stream->input) {
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ input_append_bucket(stream, b);
+ h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- H2_STRM_MSG(stream, "closing output"));
- return h2_beam_leave(stream->output);
+cleanup:
+ return rv;
}
-static void on_state_enter(h2_stream *stream)
+static void on_state_enter(h2_stream *stream)
{
if (stream->monitor && stream->monitor->on_state_enter) {
stream->monitor->on_state_enter(stream->monitor->ctx, stream);
@@ -271,7 +281,7 @@ static void on_state_invalid(h2_stream *stream)
stream->monitor->on_state_invalid(stream->monitor->ctx, stream);
}
/* stream got an event/frame invalid in its state */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "invalid state event"));
switch (stream->state) {
case H2_SS_OPEN:
@@ -292,13 +302,13 @@ static apr_status_t transit(h2_stream *stream, int new_state)
return APR_SUCCESS;
}
else if (new_state < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1,
H2_STRM_LOG(APLOGNO(03081), stream, "invalid transition"));
on_state_invalid(stream);
return APR_EINVAL;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "transit to [%s]"), h2_ss_str(new_state));
stream->state = new_state;
switch (new_state) {
@@ -312,14 +322,12 @@ static apr_status_t transit(h2_stream *stream, int new_state)
case H2_SS_OPEN:
break;
case H2_SS_CLOSED_L:
- close_output(stream);
break;
case H2_SS_CLOSED_R:
close_input(stream);
break;
case H2_SS_CLOSED:
close_input(stream);
- close_output(stream);
if (stream->out_buffer) {
apr_brigade_cleanup(stream->out_buffer);
}
@@ -340,11 +348,11 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
{
int new_state;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "dispatch event %d"), ev);
new_state = on_event(stream, ev);
if (new_state < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c1,
H2_STRM_LOG(APLOGNO(10002), stream, "invalid event %d"), ev);
on_state_invalid(stream);
AP_DEBUG_ASSERT(new_state > S_XXX);
@@ -352,7 +360,7 @@ void h2_stream_dispatch(h2_stream *stream, h2_stream_event_t ev)
}
else if (new_state == stream->state) {
/* nop */
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
H2_STRM_MSG(stream, "non-state event %d"), ev);
return;
}
@@ -366,7 +374,6 @@ static void set_policy_for(h2_stream *stream, h2_request *r)
{
int enabled = h2_session_push_enabled(stream->session);
stream->push_policy = h2_push_policy_determine(r->headers, stream->pool, enabled);
- r->serialize = h2_config_sgeti(stream->session->s, H2_CONF_SER_HEADERS);
}
apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_t frame_len)
@@ -376,7 +383,7 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
new_state = on_frame_send(stream->state, ftype);
if (new_state < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "invalid frame %d send"), ftype);
AP_DEBUG_ASSERT(new_state > S_XXX);
return transit(stream, new_state);
@@ -404,8 +411,6 @@ apr_status_t h2_stream_send_frame(h2_stream *stream, int ftype, int flags, size_
default:
break;
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- H2_STRM_MSG(stream, "send frame %d, eos=%d"), ftype, eos);
status = transit(stream, new_state);
if (status == APR_SUCCESS && eos) {
status = transit(stream, on_event(stream, H2_SEV_CLOSED_L));
@@ -421,7 +426,7 @@ apr_status_t h2_stream_recv_frame(h2_stream *stream, int ftype, int flags, size_
new_state = on_frame_recv(stream->state, ftype);
if (new_state < 0) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "invalid frame %d recv"), ftype);
AP_DEBUG_ASSERT(new_state > S_XXX);
return transit(stream, new_state);
@@ -471,14 +476,20 @@ apr_status_t h2_stream_flush_input(h2_stream *stream)
{
apr_status_t status = APR_SUCCESS;
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c1,
+ H2_STRM_MSG(stream, "flush input"));
if (stream->in_buffer && !APR_BRIGADE_EMPTY(stream->in_buffer)) {
- setup_input(stream);
- status = h2_beam_send(stream->input, stream->in_buffer, APR_BLOCK_READ);
+ if (!stream->input) {
+ h2_stream_setup_input(stream);
+ }
+ status = h2_beam_send(stream->input, stream->session->c1,
+ stream->in_buffer, APR_BLOCK_READ);
stream->in_last_write = apr_time_now();
- }
- if (stream->input_eof
- && stream->input && !h2_beam_is_closed(stream->input)) {
- status = h2_beam_close(stream->input);
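+ /* sending the buffered input failed while the stream is already
+ * closed on our side: signal an input error event */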
+ if (APR_SUCCESS != status && stream->state == H2_SS_CLOSED_L) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c1,
+ H2_STRM_MSG(stream, "send input error"));
+ h2_stream_dispatch(stream, H2_SEV_IN_ERROR);
+ }
}
return status;
}
@@ -491,34 +502,23 @@ apr_status_t h2_stream_recv_DATA(h2_stream *stream, uint8_t flags,
stream->in_data_frames++;
if (len > 0) {
- if (APLOGctrace3(session->c)) {
+ if (APLOGctrace3(session->c1)) {
const char *load = apr_pstrndup(stream->pool, (const char *)data, len);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, session->c1,
H2_STRM_MSG(stream, "recv DATA, len=%d: -->%s<--"),
(int)len, load);
}
else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c1,
H2_STRM_MSG(stream, "recv DATA, len=%d"), (int)len);
}
stream->in_data_octets += len;
- if (!stream->in_buffer) {
- stream->in_buffer = apr_brigade_create(stream->pool,
- session->c->bucket_alloc);
- }
- apr_brigade_write(stream->in_buffer, NULL, NULL, (const char *)data, len);
+ input_append_data(stream, (const char*)data, len);
h2_stream_dispatch(stream, H2_SEV_IN_DATA_PENDING);
}
return status;
}
-static void prep_output(h2_stream *stream) {
- conn_rec *c = stream->session->c;
- if (!stream->out_buffer) {
- stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
- }
-}
-
h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
h2_stream_monitor *monitor, int initiated_on)
{
@@ -531,15 +531,14 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
stream->pool = pool;
stream->session = session;
stream->monitor = monitor;
- stream->max_mem = session->max_stream_mem;
-
-#ifdef H2_NG2_LOCAL_WIN_SIZE
- stream->in_window_size =
- nghttp2_session_get_stream_local_window_size(
- stream->session->ngh2, stream->id);
-#endif
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
+ if (id) {
+ stream->in_window_size =
+ nghttp2_session_get_stream_local_window_size(
+ stream->session->ngh2, stream->id);
+ }
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c1,
H2_STRM_LOG(APLOGNO(03082), stream, "created"));
on_state_enter(stream);
return stream;
@@ -547,59 +546,34 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool, h2_session *session,
void h2_stream_cleanup(h2_stream *stream)
{
- apr_status_t status;
-
+ /* The stream is done on c1. Processing on a c2 may still be
+ * ongoing. The input/output beams get aborted and the stream's
+ * ends of the in/out notifications get closed.
+ */
ap_assert(stream);
if (stream->out_buffer) {
- /* remove any left over output buckets that may still have
- * references into request pools */
apr_brigade_cleanup(stream->out_buffer);
}
- if (stream->input) {
- h2_beam_abort(stream->input);
- status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ);
- if (status == APR_EAGAIN) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
- H2_STRM_MSG(stream, "wait on input drain"));
- status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
- H2_STRM_MSG(stream, "input drain returned"));
- }
- }
}
void h2_stream_destroy(h2_stream *stream)
{
ap_assert(stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c1,
H2_STRM_MSG(stream, "destroy"));
apr_pool_destroy(stream->pool);
}
-apr_status_t h2_stream_prep_processing(h2_stream *stream)
-{
- if (stream->request) {
- const h2_request *r = stream->request;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- H2_STRM_MSG(stream, "schedule %s %s://%s%s chunked=%d"),
- r->method, r->scheme, r->authority, r->path, r->chunked);
- setup_input(stream);
- stream->scheduled = 1;
- return APR_SUCCESS;
- }
- return APR_EINVAL;
-}
-
void h2_stream_rst(h2_stream *stream, int error_code)
{
stream->rst_error = error_code;
if (stream->input) {
- h2_beam_abort(stream->input);
+ h2_beam_abort(stream->input, stream->session->c1);
}
if (stream->output) {
- h2_beam_leave(stream->output);
+ h2_beam_abort(stream->output, stream->session->c1);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "reset, error=%d"), error_code);
h2_stream_dispatch(stream, H2_SEV_CANCELLED);
}
@@ -648,7 +622,7 @@ static apr_status_t add_trailer(h2_stream *stream,
const char *value, size_t vlen,
size_t max_field_len, int *pwas_added)
{
- conn_rec *c = stream->session->c;
+ conn_rec *c = stream->session->c1;
char *hname, *hvalue;
const char *existing;
@@ -662,12 +636,12 @@ static apr_status_t add_trailer(h2_stream *stream,
if (h2_req_ignore_trailer(name, nlen)) {
return APR_SUCCESS;
}
- if (!stream->trailers) {
- stream->trailers = apr_table_make(stream->pool, 5);
+ if (!stream->trailers_in) {
+ stream->trailers_in = apr_table_make(stream->pool, 5);
}
hname = apr_pstrndup(stream->pool, name, nlen);
h2_util_camel_case_header(hname, nlen);
- existing = apr_table_get(stream->trailers, hname);
+ existing = apr_table_get(stream->trailers_in, hname);
if (max_field_len
&& ((existing? strlen(existing)+2 : 0) + vlen + nlen + 2 > max_field_len)) {
/* "key: (oldval, )?nval" is too long */
@@ -675,7 +649,7 @@ static apr_status_t add_trailer(h2_stream *stream,
}
if (!existing) *pwas_added = 1;
hvalue = apr_pstrndup(stream->pool, value, vlen);
- apr_table_mergen(stream->trailers, hname, hvalue);
+ apr_table_mergen(stream->trailers_in, hname, hvalue);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
H2_STRM_MSG(stream, "added trailer '%s: %s'"), hname, hvalue);
@@ -690,7 +664,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
int error = 0, was_added = 0;
apr_status_t status = APR_SUCCESS;
- if (stream->has_response) {
+ if (stream->response) {
return APR_EINVAL;
}
@@ -698,7 +672,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
if ((vlen) > session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
if (!h2_stream_is_ready(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
H2_STRM_LOG(APLOGNO(10178), stream,
"Request pseudo header exceeds "
"LimitRequestFieldSize: %s"), name);
@@ -715,8 +689,8 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
}
else if (H2_SS_IDLE == stream->state) {
if (!stream->rtmp) {
- stream->rtmp = h2_req_create(stream->id, stream->pool,
- NULL, NULL, NULL, NULL, NULL, 0);
+ stream->rtmp = h2_request_create(stream->id, stream->pool,
+ NULL, NULL, NULL, NULL, NULL);
}
status = h2_request_add_header(stream->rtmp, stream->pool,
name, nlen, value, vlen,
@@ -736,7 +710,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
if (APR_EINVAL == status) {
/* header too long */
if (!h2_stream_is_ready(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
H2_STRM_LOG(APLOGNO(10180), stream,"Request header exceeds "
"LimitRequestFieldSize: %.*s"),
(int)H2MIN(nlen, 80), name);
@@ -754,7 +728,7 @@ apr_status_t h2_stream_add_header(h2_stream *stream,
return APR_ECONNRESET;
}
if (!h2_stream_is_ready(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, session->c1,
H2_STRM_LOG(APLOGNO(10181), stream, "Number of request headers "
"exceeds LimitRequestFields"));
}
@@ -768,7 +742,7 @@ cleanup:
return APR_EINVAL;
}
else if (status != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c1,
H2_STRM_MSG(stream, "header %s not accepted"), name);
h2_stream_dispatch(stream, H2_SEV_CANCELLED);
}
@@ -805,7 +779,7 @@ apr_status_t h2_stream_end_headers(h2_stream *stream, int eos, size_t raw_bytes)
apr_table_do(table_check_val_len, &ctx, stream->request->headers, NULL);
if (ctx.failed_key) {
if (!h2_stream_is_ready(stream)) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, 0, stream->session->c1,
H2_STRM_LOG(APLOGNO(10230), stream,"Request header exceeds "
"LimitRequestFieldSize: %.*s"),
(int)H2MIN(strlen(ctx.failed_key), 80), ctx.failed_key);
@@ -832,191 +806,254 @@ static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
return NULL;
}
-static apr_status_t add_buffered_data(h2_stream *stream, apr_off_t requested,
- apr_off_t *plen, int *peos, int *is_all,
- h2_headers **pheaders)
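+/* Receive more output from the stream's output beam into
+ * stream->out_buffer, non-blocking, until the session's
+ * max_stream_mem is buffered. */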
+static apr_status_t buffer_output_receive(h2_stream *stream)
{
+ apr_status_t rv = APR_EAGAIN;
+ apr_off_t buf_len;
+ conn_rec *c1 = stream->session->c1;
apr_bucket *b, *e;
-
- *peos = 0;
- *plen = 0;
- *is_all = 0;
- if (pheaders) {
- *pheaders = NULL;
+
+ if (!stream->output) {
+ goto cleanup;
}
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "add_buffered_data");
- b = APR_BRIGADE_FIRST(stream->out_buffer);
- while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
- e = APR_BUCKET_NEXT(b);
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_FLUSH(b)) {
- APR_BUCKET_REMOVE(b);
- apr_bucket_destroy(b);
- }
- else if (APR_BUCKET_IS_EOS(b)) {
- *peos = 1;
- return APR_SUCCESS;
- }
- else if (H2_BUCKET_IS_HEADERS(b)) {
- if (*plen > 0) {
- /* data before the response, can only return up to here */
- return APR_SUCCESS;
- }
- else if (pheaders) {
- *pheaders = h2_bucket_headers_get(b);
+ if (!stream->out_buffer) {
+ stream->out_buffer = apr_brigade_create(stream->pool, c1->bucket_alloc);
+ buf_len = 0;
+ }
+ else {
+ /* if the brigade contains a file bucket, its normally reported
+ * length might be megabytes, but the memory used is tiny. For
+ * buffering, we are only interested in the memory footprint. */
+ buf_len = h2_brigade_mem_size(stream->out_buffer);
+ }
+
+ if (buf_len >= stream->session->max_stream_mem) {
+ /* we have buffered enough. No need to read more.
+ * However, we now have output pending for which we may not
+ * receive another poll event. We need to make sure that this
+ * stream is not suspended, so we keep on processing output.
+ */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_STRM_MSG(stream, "out_buffer, already has %ld length"),
+ (long)buf_len);
+ rv = APR_SUCCESS;
+ goto cleanup;
+ }
+
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
+ rv = h2_beam_receive(stream->output, stream->session->c1, stream->out_buffer,
+ APR_NONBLOCK_READ, stream->session->max_stream_mem - buf_len);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, rv, c1,
+ H2_STRM_MSG(stream, "out_buffer, receive unsuccessful"));
+ goto cleanup;
+ }
+
+ /* get rid of buckets we have no need for */
+ if (!APR_BRIGADE_EMPTY(stream->out_buffer)) {
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_FLUSH(b)) { /* c1 output already flushes as needed */
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
- H2_STRM_MSG(stream, "prep, -> response %d"),
- (*pheaders)->status);
- return APR_SUCCESS;
- }
- else {
- return APR_EAGAIN;
}
}
- }
- else if (b->length == 0) {
- APR_BUCKET_REMOVE(b);
- apr_bucket_destroy(b);
- }
- else {
- ap_assert(b->length != (apr_size_t)-1);
- *plen += b->length;
- if (*plen >= requested) {
- *plen = requested;
- return APR_SUCCESS;
+ else if (b->length == 0) { /* zero length data */
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
}
+ b = e;
}
- b = e;
}
- *is_all = 1;
- return APR_SUCCESS;
+ H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "out_buffer, after receive");
+
+cleanup:
+ return rv;
}
-apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
- int *peos, h2_headers **pheaders)
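+/* Filter for h2_append_brigade(): pass all buckets to the c1
+ * brigade except HEADERS and EOS meta buckets. */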
+static int bucket_pass_to_c1(apr_bucket *b)
{
- apr_status_t status = APR_SUCCESS;
- apr_off_t requested, missing, max_chunk = H2_DATA_CHUNK_SIZE;
- conn_rec *c;
- int complete, was_closed = 0;
+ return !H2_BUCKET_IS_HEADERS(b) && !APR_BUCKET_IS_EOS(b);
+}
+
+apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
+ apr_off_t *plen, int *peos)
+{
+ apr_status_t rv = APR_SUCCESS;
- ap_assert(stream);
-
if (stream->rst_error) {
- *plen = 0;
- *peos = 1;
return APR_ECONNRESET;
}
-
- c = stream->session->c;
- prep_output(stream);
-
- /* determine how much we'd like to send. We cannot send more than
- * is requested. But we can reduce the size in case the master
- * connection operates in smaller chunks. (TSL warmup) */
- if (stream->session->io.write_size > 0) {
- max_chunk = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ rv = h2_append_brigade(bb, stream->out_buffer, plen, peos, bucket_pass_to_c1);
+ if (APR_SUCCESS == rv && !*peos && !*plen) {
+ rv = APR_EAGAIN;
}
- requested = (*plen > 0)? H2MIN(*plen, max_chunk) : max_chunk;
-
- /* count the buffered data until eos or a headers bucket */
- status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
-
- if (status == APR_EAGAIN) {
- /* TODO: ugly, someone needs to retrieve the response first */
- h2_mplx_m_keep_active(stream->session->mplx, stream);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- H2_STRM_MSG(stream, "prep, response eagain"));
- return status;
+ return rv;
+}
+
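+/* Look for a HEADERS bucket at the head of stream->out_buffer and,
+ * if present, submit it to nghttp2: as trailers when a response has
+ * already been submitted, otherwise as the stream's response
+ * (handling server pushes, priorities and interim responses). */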
+static apr_status_t buffer_output_process_headers(h2_stream *stream)
+{
+ conn_rec *c1 = stream->session->c1;
+ h2_headers *headers = NULL;
+ apr_status_t rv = APR_SUCCESS;
+ int ngrv = 0, is_empty;
+ h2_ngheader *nh = NULL;
+ apr_bucket *b, *e;
+
+ if (!stream->out_buffer) goto cleanup;
+
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ e = APR_BUCKET_NEXT(b);
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (H2_BUCKET_IS_HEADERS(b)) {
+ headers = h2_bucket_headers_get(b);
+ APR_BUCKET_REMOVE(b);
+ apr_bucket_destroy(b);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ H2_STRM_MSG(stream, "process headers, response %d"),
+ headers->status);
+ b = e;
+ break;
+ }
+ }
+ else {
+ if (!stream->response) {
+ /* data buckets before response headers, an error */
+ rv = APR_EINVAL;
+ }
+ /* data bucket, these need to be sent before any
+ * subsequent headers (trailers) are processed */
+ goto cleanup;
+ }
+ b = e;
}
- else if (status != APR_SUCCESS) {
- return status;
+ if (!headers) goto cleanup;
+
+ if (stream->response) {
+ rv = h2_res_create_ngtrailer(&nh, stream->pool, headers);
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(03072), stream, "submit %d trailers"),
+ (int)nh->nvlen);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10024), stream, "invalid trailers"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ goto cleanup;
+ }
+
+ ngrv = nghttp2_submit_trailer(stream->session->ngh2, stream->id, nh->nv, nh->nvlen);
}
-
- if (pheaders && *pheaders) {
- return APR_SUCCESS;
+ else if (headers->status < 100) {
+ h2_stream_rst(stream, headers->status);
+ goto cleanup;
}
-
- /* If there we do not have enough buffered data to satisfy the requested
- * length *and* we counted the _complete_ buffer (and did not stop in the middle
- * because of meta data there), lets see if we can read more from the
- * output beam */
- missing = H2MIN(requested, stream->max_mem) - *plen;
- if (complete && !*peos && missing > 0) {
- apr_status_t rv = APR_EOF;
-
- if (stream->output) {
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "pre");
- h2_beam_log(stream->output, c, APLOG_TRACE2, "pre read output");
- rv = h2_beam_receive(stream->output, stream->out_buffer,
- APR_NONBLOCK_READ, stream->max_mem - *plen, &was_closed);
- H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "post");
- h2_beam_log(stream->output, c, APLOG_TRACE2, "post read output");
+ else {
+ nghttp2_data_provider provider, *pprovider = NULL;
+
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(03073), stream,
+ "submit response %d"), headers->status);
+
+ /* If this stream is not a pushed one itself,
+ * and HTTP/2 server push is enabled here,
+ * and the response HTTP status is below 400 (*),
+ * and the remote side has pushing enabled,
+ * -> find and perform any pushes on this stream
+ * *before* we submit the stream response itself.
+ * This helps clients avoid opening new streams on Link
+ * headers that get pushed right afterwards.
+ *
+ * (*) the response code is relevant, as we do not want to
+ * make pushes on 401 or 403 codes and friends.
+ * And if we see a 304, we do not push either, as the
+ * client, having this resource in its cache, most likely
+ * has the pushed resources as well.
+ */
+ if (!stream->initiated_on
+ && !stream->response
+ && stream->request && stream->request->method
+ && !strcmp("GET", stream->request->method)
+ && (headers->status < 400)
+ && (headers->status != 304)
+ && h2_session_push_enabled(stream->session)) {
+ /* PUSH is possible and enabled on server, unless the request
+ * denies it, submit resources to push */
+ const char *s = apr_table_get(headers->notes, H2_PUSH_MODE_NOTE);
+ if (!s || strcmp(s, "0")) {
+ h2_stream_submit_pushes(stream, headers);
+ }
}
-
- if (rv == APR_SUCCESS) {
- /* count the buffer again, now that we have read output */
- status = add_buffered_data(stream, requested, plen, peos, &complete, pheaders);
+
+ if (!stream->pref_priority) {
+ stream->pref_priority = h2_stream_get_priority(stream, headers);
}
- else if (APR_STATUS_IS_EOF(rv)) {
- apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos);
- *peos = 1;
+ h2_session_set_prio(stream->session, stream, stream->pref_priority);
+
+ if (headers->status == 103
+ && !h2_config_sgeti(stream->session->s, H2_CONF_EARLY_HINTS)) {
+ /* suppress sending this to the client; it might have triggered
+ * pushes and thereby served its purpose nevertheless */
+ goto cleanup;
}
- else if (APR_STATUS_IS_EAGAIN(rv)) {
- /* we set this is the status of this call only if there
- * is no buffered data, see check below */
+ if (h2_headers_are_response(headers)) {
+ stream->response = headers;
}
- else {
- /* real error reading. Give this back directly, even though
- * we may have something buffered. */
- status = rv;
+
+ /* Do we know if this stream has no response body? */
+ is_empty = 0;
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ is_empty = 1;
+ break;
+ }
+ }
+ else { /* data, not empty */
+ break;
+ }
+ b = APR_BUCKET_NEXT(b);
}
- }
-
- if (status == APR_SUCCESS) {
- if (*peos || *plen) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- H2_STRM_MSG(stream, "prepare, len=%ld eos=%d"),
- (long)*plen, *peos);
+
+ if (!is_empty) {
+ memset(&provider, 0, sizeof(provider));
+ provider.source.fd = stream->id;
+ provider.read_callback = stream_data_cb;
+ pprovider = &provider;
+ }
+
+ rv = h2_res_create_ngheader(&nh, stream->pool, headers);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10025), stream, "invalid response"));
+ h2_stream_rst(stream, NGHTTP2_PROTOCOL_ERROR);
+ goto cleanup;
+ }
+ ngrv = nghttp2_submit_response(stream->session->ngh2, stream->id,
+ nh->nv, nh->nvlen, pprovider);
+ if (stream->initiated_on) {
+ ++stream->session->pushes_submitted;
}
else {
- status = was_closed? APR_EOF : APR_EAGAIN;
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- H2_STRM_MSG(stream, "prepare, no data"));
+ ++stream->session->responses_submitted;
}
}
- return status;
-}
-
-static int is_not_headers(apr_bucket *b)
-{
- return !H2_BUCKET_IS_HEADERS(b);
-}
-apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
- apr_off_t *plen, int *peos)
-{
- conn_rec *c = stream->session->c;
- apr_status_t status = APR_SUCCESS;
-
- if (stream->rst_error) {
- return APR_ECONNRESET;
- }
- status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers);
- if (status == APR_SUCCESS && !*peos && !*plen) {
- status = APR_EAGAIN;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
- H2_STRM_MSG(stream, "read_to, len=%ld eos=%d"),
- (long)*plen, *peos);
- return status;
+cleanup:
+ if (nghttp2_is_fatal(ngrv)) {
+ rv = APR_EGENERAL;
+ h2_session_dispatch_event(stream->session,
+ H2_SESSION_EV_PROTO_ERROR, ngrv, nghttp2_strerror(rv));
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ APLOGNO(02940) "submit_response: %s",
+ nghttp2_strerror(rv));
+ }
+ return rv;
}
-
apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
{
apr_status_t status = APR_SUCCESS;
@@ -1025,7 +1062,7 @@ apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
pushes = h2_push_collect_update(stream, stream->request, response);
if (pushes && !apr_is_empty_array(pushes)) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c1,
H2_STRM_MSG(stream, "found %d push candidates"),
pushes->nelts);
for (i = 0; i < pushes->nelts; ++i) {
@@ -1052,7 +1089,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
const char *ctype = apr_table_get(response->headers, "content-type");
if (ctype) {
/* FIXME: Not good enough, config needs to come from request->server */
- return h2_cconfig_get_priority(stream->session->c, ctype);
+ return h2_cconfig_get_priority(stream->session->c1, ctype);
}
}
return NULL;
@@ -1060,7 +1097,7 @@ const h2_priority *h2_stream_get_priority(h2_stream *stream,
int h2_stream_is_ready(h2_stream *stream)
{
- if (stream->has_response) {
+ if (stream->response) {
return 1;
}
else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) {
@@ -1093,7 +1130,6 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
consumed -= len;
}
-#ifdef H2_NG2_LOCAL_WIN_SIZE
if (1) {
int cur_size = nghttp2_session_get_stream_local_window_size(
session->ngh2, stream->id);
@@ -1131,13 +1167,221 @@ apr_status_t h2_stream_in_consumed(h2_stream *stream, apr_off_t amount)
nghttp2_session_set_local_window_size(session->ngh2,
NGHTTP2_FLAG_NONE, stream->id, win);
}
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c1,
"h2_stream(%ld-%d): consumed %ld bytes, window now %d/%d",
session->id, stream->id, (long)amount,
cur_size, stream->in_window_size);
}
-#endif
}
return APR_SUCCESS;
}
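+/* Determine the amount of DATA bytes in stream->out_buffer up to
+ * the first EOS or HEADERS bucket; sets *peos iff EOS was reached. */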
+static apr_off_t buffer_output_data_to_send(h2_stream *stream, int *peos)
+{
+ /* How much data do we have in our buffers that we can write? */
+ apr_off_t buf_len = 0;
+ apr_bucket *b;
+
+ *peos = 0;
+ if (stream->out_buffer) {
+ b = APR_BRIGADE_FIRST(stream->out_buffer);
+ while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
+ if (APR_BUCKET_IS_METADATA(b)) {
+ if (APR_BUCKET_IS_EOS(b)) {
+ *peos = 1;
+ break;
+ }
+ else if (H2_BUCKET_IS_HEADERS(b)) {
+ break;
+ }
+ }
+ else {
+ buf_len += b->length;
+ }
+ b = APR_BUCKET_NEXT(b);
+ }
+ }
+ return buf_len;
+}
+
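+/* nghttp2 read callback for a stream's DATA frames, registered as
+ * nghttp2_data_provider.read_callback when submitting the response. */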
+static ssize_t stream_data_cb(nghttp2_session *ng2s,
+ int32_t stream_id,
+ uint8_t *buf,
+ size_t length,
+ uint32_t *data_flags,
+ nghttp2_data_source *source,
+ void *puser)
+{
+ h2_session *session = (h2_session *)puser;
+ conn_rec *c1 = session->c1;
+ apr_off_t buf_len;
+ int eos;
+ apr_status_t rv;
+ h2_stream *stream;
+
+ /* nghttp2 wants to send more DATA for the stream. We need
+ * to find out how much of the requested length we can send without
+ * blocking.
+ * Indicate EOS when we encounter it or DEFERRED if the stream
+ * should be suspended. Beware of trailers.
+ */
+ ap_assert(session);
+ (void)ng2s;
+ (void)buf;
+ (void)source;
+ stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
+ if (!stream || !stream->output) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c1,
+ APLOGNO(02937)
+ "h2_stream(%ld-%d): data_cb, stream not found",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ if (!stream->response) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ APLOGNO(10299)
+ "h2_stream(%ld-%d): data_cb, no response seen yet",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ if (stream->rst_error) {
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ if (!stream->out_buffer) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ "h2_stream(%ld-%d): suspending",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+ if (h2_c1_io_needs_flush(&session->io)) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c1,
+ "h2_stream(%ld-%d): suspending on c1 out needs flush",
+ session->id, (int)stream_id);
+ h2_stream_dispatch(stream, H2_SEV_OUT_C1_BLOCK);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+
+ /* determine how much we'd like to send. We cannot send more than
+ * is requested. But we can reduce the size in case the master
+ * connection operates in smaller chunks. (TLS warmup) */
+ if (stream->session->io.write_size > 0) {
+ apr_off_t chunk_len = stream->session->io.write_size - H2_FRAME_HDR_LEN;
+ if (length > chunk_len) {
+ length = chunk_len;
+ }
+ }
+
+ /* How much data do we have in our buffers that we can write? */
+ buf_len = buffer_output_data_to_send(stream, &eos);
+ if (buf_len < length && !eos) {
+ /* read more? */
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ "h2_stream(%ld-%d): need more (read len=%ld, %ld in buffer)",
+ session->id, (int)stream_id, (long)length, (long)buf_len);
+ rv = buffer_output_receive(stream);
+ if (APR_SUCCESS == rv) {
+ /* process any headers sitting at the buffer head. */
+ rv = buffer_output_process_headers(stream);
+ if (APR_SUCCESS != rv) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ H2_STRM_LOG(APLOGNO(10300), stream,
+ "data_cb, error processing headers"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ buf_len = buffer_output_data_to_send(stream, &eos);
+ }
+ if (APR_EOF == rv) {
+ eos = 1;
+ }
+ else if (APR_SUCCESS != rv && !APR_STATUS_IS_EAGAIN(rv)) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv, c1,
+ H2_STRM_LOG(APLOGNO(02938), stream, "data_cb, reading data"));
+ return NGHTTP2_ERR_CALLBACK_FAILURE;
+ }
+ }
+
+ if (buf_len > (apr_off_t)length) {
+ eos = 0;
+ }
+ else {
+ length = (size_t)buf_len;
+ }
+ if (length) {
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_STRM_MSG(stream, "data_cb, sending len=%ld, eos=%d"),
+ (long)length, eos);
+ *data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
+ }
+ else if (!eos) {
+ /* no data available and output is not closed, need to suspend */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(03071), stream, "data_cb, suspending"));
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ "h2_stream(%ld-%d): suspending",
+ session->id, (int)stream_id);
+ return NGHTTP2_ERR_DEFERRED;
+ }
+
+ if (eos) {
+ *data_flags |= NGHTTP2_DATA_FLAG_EOF;
+ }
+ return length;
+}
+
+apr_status_t h2_stream_read_output(h2_stream *stream)
+{
+ conn_rec *c1 = stream->session->c1;
+ apr_status_t rv = APR_EAGAIN;
+ apr_off_t buf_len;
+ int eos;
+
+ /* stream->pout_recv_write signalled a change. Check what has happened,
+ * read from it and act on seeing a response/data. */
+ if (!stream->output) {
+ /* c2 has not assigned the output beam to the stream (yet). */
+ ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, c1,
+ H2_STRM_MSG(stream, "read_output, no output beam registered"));
+ rv = APR_EAGAIN;
+ goto cleanup;
+ }
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ H2_STRM_MSG(stream, "read_output"));
+
+ if (h2_stream_was_closed(stream)) {
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, c1,
+ H2_STRM_LOG(APLOGNO(10301), stream, "already closed"));
+ rv = APR_EOF;
+ goto cleanup;
+ }
+ else if (stream->state == H2_SS_CLOSED_L) {
+ /* We have delivered a response to a stream that was not closed
+ * by the client. This could be a POST with a body that we reject,
+ * and we need to RST_STREAM to end it. */
+ ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c1,
+ H2_STRM_LOG(APLOGNO(10026), stream, "remote close missing"));
+ nghttp2_submit_rst_stream(stream->session->ngh2, NGHTTP2_FLAG_NONE,
+ stream->id, NGHTTP2_NO_ERROR);
+ rv = APR_EOF;
+ goto cleanup;
+ }
+
+ buf_len = buffer_output_data_to_send(stream, &eos);
+ if (buf_len < stream->session->io.write_size) {
+ rv = buffer_output_receive(stream);
+ if (APR_SUCCESS == rv) {
+ /* process any headers sitting at the buffer head. */
+ rv = buffer_output_process_headers(stream);
+ if (APR_SUCCESS != rv) goto cleanup;
+ }
+ buf_len = buffer_output_data_to_send(stream, &eos);
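+ /* if there is data to send or EOS was reached, resume the
+ * (possibly deferred) stream in nghttp2 */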
+ if (buf_len || eos) {
+ nghttp2_session_resume_data(stream->session->ngh2, stream->id);
+ ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c1,
+ "h2_stream(%ld-%d): resumed",
+ stream->session->id, (int)stream->id);
+ }
+ }
+
+cleanup:
+ return rv;
+}
diff --git a/modules/http2/h2_stream.h b/modules/http2/h2_stream.h
index 08f7888fe4..0b1c9616b6 100644
--- a/modules/http2/h2_stream.h
+++ b/modules/http2/h2_stream.h
@@ -39,7 +39,6 @@ struct h2_priority;
struct h2_request;
struct h2_headers;
struct h2_session;
-struct h2_task;
struct h2_bucket_beam;
typedef struct h2_stream h2_stream;
@@ -73,9 +72,11 @@ struct h2_stream {
const struct h2_request *request; /* the request made in this stream */
struct h2_request *rtmp; /* request being assembled */
- apr_table_t *trailers; /* optional incoming trailers */
+ apr_table_t *trailers_in; /* optional, incoming trailers */
int request_headers_added; /* number of request headers added */
-
+
+ struct h2_headers *response; /* the final, non-interim response or NULL */
+
struct h2_bucket_beam *input;
apr_bucket_brigade *in_buffer;
int in_window_size;
@@ -83,18 +84,14 @@ struct h2_stream {
struct h2_bucket_beam *output;
apr_bucket_brigade *out_buffer;
- apr_size_t max_mem; /* maximum amount of data buffered */
int rst_error; /* stream error for RST_STREAM */
unsigned int aborted : 1; /* was aborted */
unsigned int scheduled : 1; /* stream has been scheduled */
- unsigned int has_response : 1; /* response headers are known */
- unsigned int input_eof : 1; /* no more request data coming */
- unsigned int out_checked : 1; /* output eof was double checked */
+ unsigned int input_closed : 1; /* no more request data/trailers coming */
unsigned int push_policy; /* which push policy to use for this request */
- unsigned int input_buffering : 1; /* buffer request bodies for efficiency */
- struct h2_task *task; /* assigned task to fullfill request */
+ conn_rec *c2; /* connection processing stream */
const h2_priority *pref_priority; /* preferred priority for this stream */
apr_off_t out_frames; /* # of frames sent out */
@@ -133,13 +130,9 @@ h2_stream *h2_stream_create(int id, apr_pool_t *pool,
void h2_stream_destroy(h2_stream *stream);
/**
- * Prepare the stream so that processing may start.
- *
- * This is the time to allocated resources not needed before.
- *
- * @param stream the stream to prep
+ * Set up the input for the stream.
*/
-apr_status_t h2_stream_prep_processing(h2_stream *stream);
+apr_status_t h2_stream_setup_input(h2_stream *stream);
/*
* Set a new monitor for this stream, replacing any existing one. Can
@@ -239,21 +232,10 @@ void h2_stream_rst(h2_stream *stream, int error_code);
int h2_stream_was_closed(const h2_stream *stream);
/**
- * Do a speculative read on the stream output to determine the
- * amount of data that can be read.
- *
- * @param stream the stream to speculatively read from
- * @param plen (in-/out) number of bytes requested and on return amount of bytes that
- * may be read without blocking
- * @param peos (out) != 0 iff end of stream will be reached when reading plen
- * bytes (out value).
- * @param presponse (out) the response of one became available
- * @return APR_SUCCESS if out information was computed successfully.
- * APR_EAGAIN if not data is available and end of stream has not been
- * reached yet.
+ * Inspect the c2 output for response(s) and data.
+ * @param stream the stream to read output for
*/
-apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
- int *peos, h2_headers **presponse);
+apr_status_t h2_stream_read_output(h2_stream *stream);
/**
* Read a maximum number of bytes into the bucket brigade.
@@ -282,7 +264,7 @@ apr_table_t *h2_stream_get_trailers(h2_stream *stream);
/**
* Submit any server push promises on this stream and schedule
- * the tasks connection with these.
+ * the streams for these.
*
* @param stream the stream for which to submit
*/
@@ -298,7 +280,7 @@ const struct h2_priority *h2_stream_get_priority(h2_stream *stream,
* Return a textual representation of the stream state as in RFC 7540
* nomenclator, all caps, underscores.
*/
-const char *h2_stream_state_str(h2_stream *stream);
+const char *h2_stream_state_str(const h2_stream *stream);
/**
* Determine if stream is ready for submitting a response or a RST
diff --git a/modules/http2/h2_switch.c b/modules/http2/h2_switch.c
index eb050150c9..f0c7e4dd92 100644
--- a/modules/http2/h2_switch.c
+++ b/modules/http2/h2_switch.c
@@ -31,9 +31,10 @@
#include "h2_private.h"
#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_conn.h"
-#include "h2_h2.h"
+#include "h2_conn_ctx.h"
+#include "h2_c1.h"
+#include "h2_c2.h"
+#include "h2_protocol.h"
#include "h2_switch.h"
/*******************************************************************************
@@ -54,7 +55,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
{
int proposed = 0;
int is_tls = ap_ssl_conn_is_ssl(c);
- const char **protos = is_tls? h2_tls_protos : h2_clear_protos;
+ const char **protos = is_tls? h2_protocol_ids_tls : h2_protocol_ids_clear;
if (!h2_mpm_supported()) {
return DECLINED;
@@ -68,7 +69,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
return DECLINED;
}
- if (!h2_is_acceptable_connection(c, r, 0)) {
+ if (!h2_protocol_is_acceptable_c1(c, r, 0)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03084)
"protocol propose: connection requirements not met");
return DECLINED;
@@ -81,7 +82,7 @@ static int h2_protocol_propose(conn_rec *c, request_rec *r,
*/
const char *p;
- if (!h2_allows_h2_upgrade(r)) {
+ if (!h2_c1_can_upgrade(r)) {
return DECLINED;
}
@@ -128,7 +129,7 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
const char *protocol)
{
int found = 0;
- const char **protos = ap_ssl_conn_is_ssl(c)? h2_tls_protos : h2_clear_protos;
+ const char **protos = ap_ssl_conn_is_ssl(c)? h2_protocol_ids_tls : h2_protocol_ids_clear;
const char **p = protos;
(void)s;
@@ -145,13 +146,12 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
}
if (found) {
- h2_ctx *ctx = h2_ctx_get(c, 1);
-
+ h2_conn_ctx_t *ctx;
+
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"switching protocol to '%s'", protocol);
- h2_ctx_protocol_set(ctx, protocol);
- h2_ctx_server_update(ctx, s);
-
+ ctx = h2_conn_ctx_create_for_c1(c, s, protocol);
+
if (r != NULL) {
apr_status_t status;
/* Switching in the middle of a request means that
@@ -163,16 +163,16 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
/* Ok, start an h2_conn on this one. */
- status = h2_conn_setup(c, r, s);
+ status = h2_c1_setup(c, r, s);
if (status != APR_SUCCESS) {
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03088)
"session setup");
- h2_ctx_clear(c);
+ h2_conn_ctx_detach(c);
return !OK;
}
- h2_conn_run(c);
+ h2_c1_run(c);
}
return OK;
}
@@ -182,7 +182,13 @@ static int h2_protocol_switch(conn_rec *c, request_rec *r, server_rec *s,
static const char *h2_protocol_get(const conn_rec *c)
{
- return h2_ctx_protocol_get(c);
+ h2_conn_ctx_t *ctx;
+
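+ /* secondary (c2) connections report the protocol negotiated
+ * on their master (c1) connection */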
+ if (c->master) {
+ c = c->master;
+ }
+ ctx = h2_conn_ctx_get(c);
+ return ctx? ctx->protocol : NULL;
}
void h2_switch_register_hooks(void)
diff --git a/modules/http2/h2_task.c b/modules/http2/h2_task.c
deleted file mode 100644
index 5b32656a91..0000000000
--- a/modules/http2/h2_task.c
+++ /dev/null
@@ -1,725 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <assert.h>
-#include <stddef.h>
-
-#include <apr_atomic.h>
-#include <apr_strings.h>
-
-#include <httpd.h>
-#include <http_core.h>
-#include <http_connection.h>
-#include <http_protocol.h>
-#include <http_request.h>
-#include <http_log.h>
-#include <http_vhost.h>
-#include <util_filter.h>
-#include <ap_mpm.h>
-#include <mod_core.h>
-#include <scoreboard.h>
-
-#include "h2_private.h"
-#include "h2.h"
-#include "h2_bucket_beam.h"
-#include "h2_conn.h"
-#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_from_h1.h"
-#include "h2_h2.h"
-#include "h2_mplx.h"
-#include "h2_request.h"
-#include "h2_headers.h"
-#include "h2_session.h"
-#include "h2_stream.h"
-#include "h2_task.h"
-#include "h2_util.h"
-
-static void H2_TASK_OUT_LOG(int lvl, h2_task *task, apr_bucket_brigade *bb,
- const char *tag)
-{
- if (APLOG_C_IS_LEVEL(task->c, lvl)) {
- conn_rec *c = task->c;
- char buffer[4 * 1024];
- const char *line = "(null)";
- apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
-
- len = h2_util_bb_print(buffer, bmax, tag, "", bb);
- ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s",
- task->id, len? buffer : line);
- }
-}
-
-/*******************************************************************************
- * task input handling
- ******************************************************************************/
-
-static int input_ser_header(void *ctx, const char *name, const char *value)
-{
- h2_task *task = ctx;
- apr_brigade_printf(task->input.bb, NULL, NULL, "%s: %s\r\n", name, value);
- return 1;
-}
-
-/*******************************************************************************
- * task output handling
- ******************************************************************************/
-
-static apr_status_t open_output(h2_task *task)
-{
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, task->c, APLOGNO(03348)
- "h2_task(%s): open output to %s %s %s",
- task->id, task->request->method,
- task->request->authority,
- task->request->path);
- task->output.opened = 1;
- return h2_mplx_t_out_open(task->mplx, task->stream_id, task->output.beam);
-}
-
-static void output_consumed(void *ctx, h2_bucket_beam *beam, apr_off_t length)
-{
- h2_task *task = ctx;
- if (task && h2_task_logio_add_bytes_out) {
- h2_task_logio_add_bytes_out(task->c, length);
- }
-}
-
-static apr_status_t send_out(h2_task *task, apr_bucket_brigade* bb, int block)
-{
- apr_off_t written, left;
- apr_status_t status;
-
- apr_brigade_length(bb, 0, &written);
- H2_TASK_OUT_LOG(APLOG_TRACE2, task, bb, "h2_task send_out");
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(before)");
-
- status = h2_beam_send(task->output.beam, bb,
- block? APR_BLOCK_READ : APR_NONBLOCK_READ);
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "send_out(after)");
-
- if (APR_STATUS_IS_EAGAIN(status)) {
- apr_brigade_length(bb, 0, &left);
- written -= left;
- status = APR_SUCCESS;
- }
- if (status == APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- "h2_task(%s): send_out done", task->id);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, task->c,
- "h2_task(%s): send_out (%ld bytes)",
- task->id, (long)written);
- }
- return status;
-}
-
-/* Bring the data from the brigade (which represents the result of the
- * request_rec out filter chain) into the h2_mplx for further sending
- * on the master connection.
- */
-static apr_status_t secondary_out(h2_task *task, ap_filter_t* f,
- apr_bucket_brigade* bb)
-{
- apr_bucket *b;
- apr_status_t rv = APR_SUCCESS;
- int flush = 0, blocking;
-
-send:
- /* we send block once we opened the output, so someone is there reading it */
- blocking = task->output.opened;
- for (b = APR_BRIGADE_FIRST(bb);
- b != APR_BRIGADE_SENTINEL(bb);
- b = APR_BUCKET_NEXT(b)) {
- if (APR_BUCKET_IS_FLUSH(b) || APR_BUCKET_IS_EOS(b) || AP_BUCKET_IS_EOR(b)) {
- flush = 1;
- break;
- }
- }
-
- if (task->output.bb && !APR_BRIGADE_EMPTY(task->output.bb)) {
- /* still have data buffered from previous attempt.
- * setaside and append new data and try to pass the complete data */
- if (!APR_BRIGADE_EMPTY(bb)) {
- if (APR_SUCCESS != (rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool))) {
- goto out;
- }
- }
- rv = send_out(task, task->output.bb, blocking);
- }
- else {
- /* no data buffered previously, pass brigade directly */
- rv = send_out(task, bb, blocking);
-
- if (APR_SUCCESS == rv && !APR_BRIGADE_EMPTY(bb)) {
- /* output refused to buffer it all, time to open? */
- if (!task->output.opened && APR_SUCCESS == (rv = open_output(task))) {
- /* Make another attempt to send the data. With the output open,
- * the call might be blocking and send all data, so we do not need
- * to save the brigade */
- goto send;
- }
- else if (blocking && flush) {
- /* Need to keep on doing this. */
- goto send;
- }
-
- if (APR_SUCCESS == rv) {
- /* could not write all, buffer the rest */
- ap_log_cerror(APLOG_MARK, APLOG_DEBUG, rv, task->c, APLOGNO(03405)
- "h2_secondary_out(%s): saving brigade", task->id);
- ap_assert(NULL);
- rv = ap_save_brigade(f, &task->output.bb, &bb, task->pool);
- flush = 1;
- }
- }
- }
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, task->c,
- "h2_secondary_out(%s): buffered=%d", task->id, task->output.buffered);
- if (APR_SUCCESS == rv && !task->output.opened && (flush || !task->output.buffered)) {
- /* got a flush or could not write all, time to tell someone to read */
- rv = open_output(task);
- }
-out:
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, rv, task->c,
- "h2_secondary_out(%s): secondary_out leave", task->id);
- return rv;
-}
-
-static apr_status_t output_finish(h2_task *task)
-{
- if (!task->output.opened) {
- return open_output(task);
- }
- return APR_SUCCESS;
-}
-
-/*******************************************************************************
- * task secondary connection filters
- ******************************************************************************/
-
-static apr_status_t h2_filter_secondary_in(ap_filter_t* f,
- apr_bucket_brigade* bb,
- ap_input_mode_t mode,
- apr_read_type_e block,
- apr_off_t readbytes)
-{
- h2_task *task;
- apr_status_t status = APR_SUCCESS;
- apr_bucket *b, *next;
- apr_off_t bblen;
- const int trace1 = APLOGctrace1(f->c);
- apr_size_t rmax = ((readbytes <= APR_SIZE_MAX)?
- (apr_size_t)readbytes : APR_SIZE_MAX);
-
- task = h2_ctx_get_task(f->c);
- ap_assert(task);
-
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_secondary_in(%s): read, mode=%d, block=%d, readbytes=%ld",
- task->id, mode, block, (long)readbytes);
- }
-
- if (mode == AP_MODE_INIT) {
- return ap_get_brigade(f->c->input_filters, bb, mode, block, readbytes);
- }
-
- if (f->c->aborted) {
- return APR_ECONNABORTED;
- }
-
- if (!task->input.bb) {
- return APR_EOF;
- }
-
- /* Cleanup brigades from those nasty 0 length non-meta buckets
- * that apr_brigade_split_line() sometimes produces. */
- for (b = APR_BRIGADE_FIRST(task->input.bb);
- b != APR_BRIGADE_SENTINEL(task->input.bb); b = next) {
- next = APR_BUCKET_NEXT(b);
- if (b->length == 0 && !APR_BUCKET_IS_METADATA(b)) {
- apr_bucket_delete(b);
- }
- }
-
- while (APR_BRIGADE_EMPTY(task->input.bb)) {
- /* Get more input data for our request. */
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_secondary_in(%s): get more data from mplx, block=%d, "
- "readbytes=%ld", task->id, block, (long)readbytes);
- }
- if (task->input.beam) {
- status = h2_beam_receive(task->input.beam, task->input.bb, block,
- 128*1024, NULL);
- }
- else {
- status = APR_EOF;
- }
-
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
- "h2_secondary_in(%s): read returned", task->id);
- }
- if (APR_STATUS_IS_EAGAIN(status)
- && (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) {
- /* chunked input handling does not seem to like it if we
- * return with APR_EAGAIN from a GETLINE read...
- * upload 100k test on test-ser.example.org hangs */
- status = APR_SUCCESS;
- }
- else if (APR_STATUS_IS_EOF(status)) {
- break;
- }
- else if (status != APR_SUCCESS) {
- return status;
- }
-
- if (trace1) {
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
- "input.beam recv raw", task->input.bb);
- }
- if (h2_task_logio_add_bytes_in) {
- apr_brigade_length(bb, 0, &bblen);
- h2_task_logio_add_bytes_in(f->c, bblen);
- }
- }
-
- /* Nothing there, no more data to get. Return. */
- if (status == APR_EOF && APR_BRIGADE_EMPTY(task->input.bb)) {
- return status;
- }
-
- if (trace1) {
- h2_util_bb_log(f->c, task->stream_id, APLOG_TRACE2,
- "task_input.bb", task->input.bb);
- }
-
- if (APR_BRIGADE_EMPTY(task->input.bb)) {
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, f->c,
- "h2_secondary_in(%s): no data", task->id);
- }
- return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
- }
-
- if (mode == AP_MODE_EXHAUSTIVE) {
- /* return all we have */
- APR_BRIGADE_CONCAT(bb, task->input.bb);
- }
- else if (mode == AP_MODE_READBYTES) {
- status = h2_brigade_concat_length(bb, task->input.bb, rmax);
- }
- else if (mode == AP_MODE_SPECULATIVE) {
- status = h2_brigade_copy_length(bb, task->input.bb, rmax);
- }
- else if (mode == AP_MODE_GETLINE) {
- /* we are reading a single LF line, e.g. the HTTP headers.
- * this has the nasty side effect to split the bucket, even
- * though it ends with CRLF and creates a 0 length bucket */
- status = apr_brigade_split_line(bb, task->input.bb, block,
- HUGE_STRING_LEN);
- if (APLOGctrace1(f->c)) {
- char buffer[1024];
- apr_size_t len = sizeof(buffer)-1;
- apr_brigade_flatten(bb, buffer, &len);
- buffer[len] = 0;
- if (trace1) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_secondary_in(%s): getline: %s",
- task->id, buffer);
- }
- }
- }
- else {
- /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
- * to support it. Seems to work. */
- ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
- APLOGNO(03472)
- "h2_secondary_in(%s), unsupported READ mode %d",
- task->id, mode);
- status = APR_ENOTIMPL;
- }
-
- if (trace1) {
- apr_brigade_length(bb, 0, &bblen);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, f->c,
- "h2_secondary_in(%s): %ld data bytes", task->id, (long)bblen);
- }
- return status;
-}
-
-static apr_status_t h2_filter_secondary_output(ap_filter_t* filter,
- apr_bucket_brigade* brigade)
-{
- h2_task *task = h2_ctx_get_task(filter->c);
- apr_status_t status;
-
- ap_assert(task);
- status = secondary_out(task, filter, brigade);
- if (status != APR_SUCCESS) {
- h2_task_rst(task, H2_ERR_INTERNAL_ERROR);
- }
- return status;
-}
-
-static apr_status_t h2_filter_parse_h1(ap_filter_t* f, apr_bucket_brigade* bb)
-{
- h2_task *task = h2_ctx_get_task(f->c);
- apr_status_t status;
-
- ap_assert(task);
- /* There are cases where we need to parse a serialized http/1.1
- * response. One example is a 100-continue answer in serialized mode
- * or via a mod_proxy setup */
- while (bb && !task->c->aborted && !task->output.sent_response) {
- status = h2_from_h1_parse_response(task, f, bb);
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c,
- "h2_task(%s): parsed response", task->id);
- if (APR_BRIGADE_EMPTY(bb) || status != APR_SUCCESS) {
- return status;
- }
- }
-
- return ap_pass_brigade(f->next, bb);
-}
-
-/*******************************************************************************
- * task things
- ******************************************************************************/
-
-int h2_task_can_redo(h2_task *task) {
- if (task->input.beam && h2_beam_was_received(task->input.beam)) {
- /* cannot repeat that. */
- return 0;
- }
- return (!strcmp("GET", task->request->method)
- || !strcmp("HEAD", task->request->method)
- || !strcmp("OPTIONS", task->request->method));
-}
-
-int h2_task_has_started(h2_task *task)
-{
- return task && task->started_at != 0;
-}
-
-void h2_task_redo(h2_task *task)
-{
- task->started_at = 0;
- task->worker_done = 0;
- task->rst_error = 0;
-}
-
-void h2_task_rst(h2_task *task, int error)
-{
- task->rst_error = error;
- if (task->input.beam) {
- h2_beam_leave(task->input.beam);
- }
- if (!task->worker_done) {
- h2_beam_abort(task->output.beam);
- }
- if (task->c) {
- task->c->aborted = 1;
- }
-}
-
-/*******************************************************************************
- * Register various hooks
- */
-static const char *const mod_ssl[] = { "mod_ssl.c", NULL};
-static int h2_task_pre_conn(conn_rec* c, void *arg);
-static int h2_task_process_conn(conn_rec* c);
-
-APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
-APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
-
-void h2_task_register_hooks(void)
-{
- /* This hook runs on new connections before mod_ssl has a say.
- * Its purpose is to prevent mod_ssl from touching our pseudo-connections
- * for streams.
- */
- ap_hook_pre_connection(h2_task_pre_conn,
- NULL, mod_ssl, APR_HOOK_FIRST);
- /* When the connection processing actually starts, we might
- * take over, if the connection is for a task.
- */
- ap_hook_process_connection(h2_task_process_conn,
- NULL, NULL, APR_HOOK_FIRST);
-
- ap_register_input_filter("H2_SECONDARY_IN", h2_filter_secondary_in,
- NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_SECONDARY_OUT", h2_filter_secondary_output,
- NULL, AP_FTYPE_NETWORK);
- ap_register_output_filter("H2_PARSE_H1", h2_filter_parse_h1,
- NULL, AP_FTYPE_NETWORK);
-
- ap_register_input_filter("H2_REQUEST", h2_filter_request_in,
- NULL, AP_FTYPE_PROTOCOL);
- ap_register_output_filter("H2_RESPONSE", h2_filter_headers_out,
- NULL, AP_FTYPE_PROTOCOL);
- ap_register_output_filter("H2_TRAILERS_OUT", h2_filter_trailers_out,
- NULL, AP_FTYPE_PROTOCOL);
-}
-
-/* post config init */
-apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s)
-{
- h2_task_logio_add_bytes_in = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_in);
- h2_task_logio_add_bytes_out = APR_RETRIEVE_OPTIONAL_FN(ap_logio_add_bytes_out);
-
- return APR_SUCCESS;
-}
-
-static int h2_task_pre_conn(conn_rec* c, void *arg)
-{
- h2_ctx *ctx;
-
- if (!c->master) {
- return OK;
- }
-
- ctx = h2_ctx_get(c, 0);
- (void)arg;
- if (ctx->task) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
- "h2_secondary(%s), pre_connection, adding filters", c->log_id);
- ap_add_input_filter("H2_SECONDARY_IN", NULL, NULL, c);
- ap_add_output_filter("H2_PARSE_H1", NULL, NULL, c);
- ap_add_output_filter("H2_SECONDARY_OUT", NULL, NULL, c);
- }
- return OK;
-}
-
-h2_task *h2_task_create(conn_rec *secondary, int stream_id,
- const h2_request *req, h2_mplx *m,
- h2_bucket_beam *input,
- apr_interval_time_t timeout,
- apr_size_t output_max_mem)
-{
- apr_pool_t *pool;
- h2_task *task;
-
- ap_assert(secondary);
- ap_assert(req);
-
- apr_pool_create(&pool, secondary->pool);
- apr_pool_tag(pool, "h2_task");
- task = apr_pcalloc(pool, sizeof(h2_task));
- if (task == NULL) {
- return NULL;
- }
- task->id = "000";
- task->stream_id = stream_id;
- task->c = secondary;
- task->mplx = m;
- task->pool = pool;
- task->request = req;
- task->timeout = timeout;
- task->input.beam = input;
- task->output.max_buffer = output_max_mem;
-
- return task;
-}
-
-void h2_task_destroy(h2_task *task)
-{
- if (task->output.beam) {
- h2_beam_log(task->output.beam, task->c, APLOG_TRACE2, "task_destroy");
- h2_beam_destroy(task->output.beam);
- task->output.beam = NULL;
- }
-
- if (task->eor) {
- apr_bucket_destroy(task->eor);
- }
- if (task->pool) {
- apr_pool_destroy(task->pool);
- }
-}
-
-apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id)
-{
- conn_rec *c;
-
- ap_assert(task);
- c = task->c;
- task->worker_started = 1;
-
- if (c->master) {
- /* See the discussion at <https://github.com/icing/mod_h2/issues/195>
- *
- * Each conn_rec->id is supposed to be unique at a point in time. Since
- * some modules (and maybe external code) use this id as an identifier
- * for the request_rec they handle, it needs to be unique for secondary
- * connections also.
- *
- * The MPM module assigns the connection ids and mod_unique_id uses
- * them to generate identifiers for requests. While this works for
- * HTTP/1.x, the parallel execution of several requests per
- * connection will generate duplicate identifiers under load.
- *
- * The original implementation for secondary connection identifiers used
- * to shift the master connection id up and assign the stream id to the
- * lower bits. This was cramped on 32 bit systems, but on 64bit there was
- * enough space.
- *
- * As issue 195 showed, mod_unique_id only uses the lower 32 bits of the
- * connection id, even on 64 bit systems, which leads to collisions in request ids.
- *
- * The way master connection ids are generated, there is some space "at the
- * top" of the lower 32 bits on almost all systems. If you have a setup
- * with 64k threads per child and 255 child processes, you live on the edge.
- *
- * The new implementation shifts 8 bits and XORs in the worker
- * id. This will still experience collisions with > 256 h2 workers under heavy
- * load. There seems to be no way to solve this in all possible
- * configurations by mod_h2 alone.
- */
- task->c->id = (c->master->id << 8)^worker_id;
- task->id = apr_psprintf(task->pool, "%ld-%d", task->mplx->id,
- task->stream_id);
- }
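The id scheme described in the comment above can be illustrated with a small, self-contained sketch; all values are made up, chosen only to show the arithmetic:

    /* Sketch only: how a secondary connection id is derived from the master
     * connection id and the worker id. 0xa31f and 7 are example values. */
    #include <stdio.h>

    int main(void)
    {
        long master_id = 0xa31f;  /* example master conn_rec->id */
        int  worker_id = 7;       /* example h2 worker slot id */
        long secondary_id = (master_id << 8) ^ worker_id;

        printf("secondary id: 0x%lx\n", secondary_id);  /* prints 0xa31f07 */
        return 0;
    }

With more than 256 workers the XOR starts touching bits of the shifted master id, which is the residual collision risk the comment acknowledges.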
-
- h2_beam_create(&task->output.beam, c->pool, task->stream_id, "output",
- H2_BEAM_OWNER_SEND, 0, task->timeout);
- if (!task->output.beam) {
- return APR_ENOMEM;
- }
-
- h2_beam_buffer_size_set(task->output.beam, task->output.max_buffer);
- h2_beam_send_from(task->output.beam, task->pool);
- h2_beam_on_consumed(task->output.beam, NULL, output_consumed, task);
-
- h2_ctx_create_for(c, task);
- apr_table_setn(c->notes, H2_TASK_ID_NOTE, task->id);
-
- h2_secondary_run_pre_connection(c, ap_get_conn_socket(c));
-
- task->input.bb = apr_brigade_create(task->pool, c->bucket_alloc);
- if (task->request->serialize) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): serialize request %s %s",
- task->id, task->request->method, task->request->path);
- apr_brigade_printf(task->input.bb, NULL,
- NULL, "%s %s HTTP/1.1\r\n",
- task->request->method, task->request->path);
- apr_table_do(input_ser_header, task, task->request->headers, NULL);
- apr_brigade_puts(task->input.bb, NULL, NULL, "\r\n");
- }
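When serialization is enabled, the brigade assembled above contains a plain HTTP/1.1 request head. For a hypothetical GET of /index.html with a single host header, the bytes written would look roughly like this, where \r\n marks the CRLF line endings produced by apr_brigade_printf()/apr_brigade_puts():

    GET /index.html HTTP/1.1\r\n
    host: example.org\r\n
    \r\n

The exact set and casing of header lines depends on what the client sent; the path and host here are illustrative only.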
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): process connection", task->id);
-
- task->c->current_thread = thread;
- ap_run_process_connection(c);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): processing done", task->id);
- return output_finish(task);
-}
-
-static apr_status_t h2_task_process_request(h2_task *task, conn_rec *c)
-{
- const h2_request *req = task->request;
- conn_state_t *cs = c->cs;
- request_rec *r;
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): create request_rec", task->id);
- r = h2_request_create_rec(req, c);
- if (r && (r->status == HTTP_OK)) {
- /* set timeouts for virtual host of request */
- if (task->timeout != r->server->timeout) {
- task->timeout = r->server->timeout;
- h2_beam_timeout_set(task->output.beam, task->timeout);
- if (task->input.beam) {
- h2_beam_timeout_set(task->input.beam, task->timeout);
- }
- }
-
- ap_update_child_status(c->sbh, SERVER_BUSY_WRITE, r);
-
- if (cs) {
- cs->state = CONN_STATE_HANDLER;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): start process_request", task->id);
-
- /* Add the raw bytes of the request (e.g. header frame lengths) to
- * the logio for this request. */
- if (req->raw_bytes && h2_task_logio_add_bytes_in) {
- h2_task_logio_add_bytes_in(c, req->raw_bytes);
- }
-
- ap_process_request(r);
-
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): process_request done", task->id);
-
- /* After the call to ap_process_request, the
- * request pool may have been deleted. We set
- * r=NULL here to ensure that any dereference
- * of r that might be added later in this function
- * will result in a segfault immediately instead
- * of nondeterministic failures later.
- */
- if (cs)
- cs->state = CONN_STATE_WRITE_COMPLETION;
- r = NULL;
- }
- else if (!r) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): create request_rec failed, r=NULL", task->id);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s): create request_rec failed, r->status=%d",
- task->id, r->status);
- }
-
- return APR_SUCCESS;
-}
-
-static int h2_task_process_conn(conn_rec* c)
-{
- h2_ctx *ctx;
-
- if (!c->master) {
- return DECLINED;
- }
-
- ctx = h2_ctx_get(c, 0);
- if (ctx->task) {
- if (!ctx->task->request->serialize) {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_h2, processing request directly");
- h2_task_process_request(ctx->task, c);
- return DONE;
- }
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "h2_task(%s), serialized handling", ctx->task->id);
- }
- else {
- ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
- "secondary_conn(%ld): has no task", c->id);
- }
- return DECLINED;
-}
-
diff --git a/modules/http2/h2_task.h b/modules/http2/h2_task.h
deleted file mode 100644
index 50f41b8255..0000000000
--- a/modules/http2/h2_task.h
+++ /dev/null
@@ -1,122 +0,0 @@
-/* Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __mod_h2__h2_task__
-#define __mod_h2__h2_task__
-
-#include <http_core.h>
-
-/**
- * An h2_task fakes an HTTP/1.1 request from the data in an HTTP/2 stream
- * (HEADER+CONT.+DATA) the module receives.
- *
- * In order to answer an HTTP/2 stream, we want all Apache httpd infrastructure
- * to be involved as usual, as if this stream had arrived as a separate HTTP/1.1
- * request. The basic trickery to do so was derived from google's mod_spdy
- * source. Basically, we fake a new conn_rec object, even with its own
- * socket, and give it to ap_process_connection().
- *
- * Since h2_task instances are executed in separate threads, they may have
- * different lifetimes than our h2_stream or h2_session instances. Basically,
- * a task should be as standalone as possible.
- *
- * Finally, to keep certain connection level filters, such as ourselves and
- * especially mod_ssl ones, from messing with our data, we need a filter
- * of our own to disable those.
- */
-
-struct h2_bucket_beam;
-struct h2_conn;
-struct h2_mplx;
-struct h2_task;
-struct h2_request;
-struct h2_response_parser;
-struct h2_stream;
-struct h2_worker;
-
-typedef struct h2_task h2_task;
-
-struct h2_task {
- const char *id;
- int stream_id;
- conn_rec *c;
- apr_pool_t *pool;
-
- const struct h2_request *request;
- apr_interval_time_t timeout;
- int rst_error; /* h2 related stream abort error */
-
- struct {
- struct h2_bucket_beam *beam;
- unsigned int eos : 1;
- apr_bucket_brigade *bb;
- apr_bucket_brigade *bbchunk;
- apr_off_t chunked_total;
- } input;
- struct {
- struct h2_bucket_beam *beam;
- unsigned int opened : 1;
- unsigned int sent_response : 1;
- unsigned int copy_files : 1;
- unsigned int buffered : 1;
- struct h2_response_parser *rparser;
- apr_bucket_brigade *bb;
- apr_size_t max_buffer;
- } output;
-
- struct h2_mplx *mplx;
-
- unsigned int filters_set : 1;
- unsigned int worker_started : 1; /* h2_worker started processing */
- unsigned int redo : 1; /* was throttled, should be restarted later */
-
- int worker_done; /* h2_worker finished */
- int done_done; /* task_done has been handled */
-
- apr_time_t started_at; /* when processing started */
- apr_time_t done_at; /* when processing was done */
- apr_bucket *eor;
-};
-
-h2_task *h2_task_create(conn_rec *secondary, int stream_id,
- const h2_request *req, struct h2_mplx *m,
- struct h2_bucket_beam *input,
- apr_interval_time_t timeout,
- apr_size_t output_max_mem);
-
-void h2_task_destroy(h2_task *task);
-
-apr_status_t h2_task_do(h2_task *task, apr_thread_t *thread, int worker_id);
-
-void h2_task_redo(h2_task *task);
-int h2_task_can_redo(h2_task *task);
-int h2_task_has_started(h2_task *task);
-
-/**
- * Reset the task with the given error code, resets all input/output.
- */
-void h2_task_rst(h2_task *task, int error);
-
-void h2_task_register_hooks(void);
-/*
- * One time, post config initialization.
- */
-apr_status_t h2_task_init(apr_pool_t *pool, server_rec *s);
-
-extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_in) *h2_task_logio_add_bytes_in;
-extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *h2_task_logio_add_bytes_out;
-
-#endif /* defined(__mod_h2__h2_task__) */
diff --git a/modules/http2/h2_util.c b/modules/http2/h2_util.c
index 9b2b3de968..a94c7cae99 100644
--- a/modules/http2/h2_util.c
+++ b/modules/http2/h2_util.c
@@ -352,11 +352,9 @@ static int iq_bubble_down(h2_iqueue *q, int i, int bottom,
h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity)
{
h2_iqueue *q = apr_pcalloc(pool, sizeof(h2_iqueue));
- if (q) {
- q->pool = pool;
- iq_grow(q, capacity);
- q->nelts = 0;
- }
+ q->pool = pool;
+ iq_grow(q, capacity);
+ q->nelts = 0;
return q;
}
@@ -441,7 +439,7 @@ void h2_iq_sort(h2_iqueue *q, h2_iq_cmp *cmp, void *ctx)
ni = iq_bubble_up(q, i, prev, cmp, ctx);
if (ni == prev) {
/* i bubbled one up, bubble the new i down, which
- * keeps all tasks below i sorted. */
+ * keeps all ints below i sorted. */
iq_bubble_down(q, i, last, cmp, ctx);
}
i = prev;
@@ -1157,14 +1155,11 @@ apr_size_t h2_util_table_bytes(apr_table_t *t, apr_size_t pair_extra)
static apr_status_t last_not_included(apr_bucket_brigade *bb,
apr_off_t maxlen,
- int same_alloc,
- apr_size_t *pfile_buckets_allowed,
apr_bucket **pend)
{
apr_bucket *b;
apr_status_t status = APR_SUCCESS;
- int files_allowed = pfile_buckets_allowed? (int)*pfile_buckets_allowed : 0;
-
+
if (maxlen >= 0) {
/* Find the bucket, up to which we reach maxlen/mem bytes */
for (b = APR_BRIGADE_FIRST(bb);
@@ -1189,14 +1184,12 @@ static apr_status_t last_not_included(apr_bucket_brigade *bb,
return status;
}
- if (same_alloc && APR_BUCKET_IS_FILE(b)) {
- /* we like it move it, always */
- }
- else if (files_allowed > 0 && APR_BUCKET_IS_FILE(b)) {
- /* this has no memory footprint really unless
- * it is read, disregard it in length count,
- * unless we do not move the file buckets */
- --files_allowed;
+ if (APR_BUCKET_IS_FILE(b)
+#if APR_HAS_MMAP
+ || APR_BUCKET_IS_MMAP(b)
+#endif
+ ) {
+ /* we like to move it, always */
}
else if (maxlen < (apr_off_t)b->length) {
apr_bucket_split(b, (apr_size_t)maxlen);
@@ -1308,7 +1301,7 @@ int h2_util_has_eos(apr_bucket_brigade *bb, apr_off_t len)
{
apr_bucket *b, *end;
- apr_status_t status = last_not_included(bb, len, 0, 0, &end);
+ apr_status_t status = last_not_included(bb, len, &end);
if (status != APR_SUCCESS) {
return status;
}
@@ -1343,7 +1336,7 @@ apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
else {
/* data in the brigade, limit the length returned. Check for EOS
* bucket only if we indicate data. This is required since plen == 0
- * means "the whole brigade" for h2_util_hash_eos()
+ * means "the whole brigade" for h2_util_has_eos()
*/
if (blen < *plen || *plen < 0) {
*plen = blen;
@@ -1353,82 +1346,7 @@ apr_status_t h2_util_bb_avail(apr_bucket_brigade *bb,
return APR_SUCCESS;
}
-apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
- h2_util_pass_cb *cb, void *ctx,
- apr_off_t *plen, int *peos)
-{
- apr_status_t status = APR_SUCCESS;
- int consume = (cb != NULL);
- apr_off_t written = 0;
- apr_off_t avail = *plen;
- apr_bucket *next, *b;
-
- /* Pass data in our brigade through the callback until the length
- * is satisfied or we encounter an EOS.
- */
- *peos = 0;
- for (b = APR_BRIGADE_FIRST(bb);
- (status == APR_SUCCESS) && (b != APR_BRIGADE_SENTINEL(bb));
- b = next) {
-
- if (APR_BUCKET_IS_METADATA(b)) {
- if (APR_BUCKET_IS_EOS(b)) {
- *peos = 1;
- }
- else {
- /* ignore */
- }
- }
- else if (avail <= 0) {
- break;
- }
- else {
- const char *data = NULL;
- apr_size_t data_len;
-
- if (b->length == ((apr_size_t)-1)) {
- /* read to determine length */
- status = apr_bucket_read(b, &data, &data_len, APR_NONBLOCK_READ);
- }
- else {
- data_len = b->length;
- }
-
- if (data_len > avail) {
- apr_bucket_split(b, avail);
- data_len = (apr_size_t)avail;
- }
-
- if (consume) {
- if (!data) {
- status = apr_bucket_read(b, &data, &data_len,
- APR_NONBLOCK_READ);
- }
- if (status == APR_SUCCESS) {
- status = cb(ctx, data, data_len);
- }
- }
- else {
- data_len = b->length;
- }
- avail -= data_len;
- written += data_len;
- }
-
- next = APR_BUCKET_NEXT(b);
- if (consume) {
- apr_bucket_delete(b);
- }
- }
-
- *plen = written;
- if (status == APR_SUCCESS && !*peos && !*plen) {
- return APR_EAGAIN;
- }
- return status;
-}
-
-apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
+apr_size_t h2_util_bucket_print(char *buffer, apr_size_t bmax,
apr_bucket *b, const char *sep)
{
apr_size_t off = 0;
@@ -1655,7 +1573,7 @@ static apr_status_t ngheader_create(h2_ngheader **ph, apr_pool_t *p,
return APR_ENOMEM;
}
- ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
+ ctx.ngh->nv = apr_pcalloc(p, n * sizeof(nghttp2_nv));
if (!ctx.ngh->nv) {
return APR_ENOMEM;
}
@@ -1855,27 +1773,6 @@ apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
}
/*******************************************************************************
- * h2 request handling
- ******************************************************************************/
-
-h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
- const char *scheme, const char *authority,
- const char *path, apr_table_t *header, int serialize)
-{
- h2_request *req = apr_pcalloc(pool, sizeof(h2_request));
-
- req->method = method;
- req->scheme = scheme;
- req->authority = authority;
- req->path = path;
- req->headers = header? header : apr_table_make(pool, 10);
- req->request_time = apr_time_now();
- req->serialize = serialize;
-
- return req;
-}
-
-/*******************************************************************************
* frame logging
******************************************************************************/
@@ -1992,3 +1889,26 @@ int h2_push_policy_determine(apr_table_t *headers, apr_pool_t *p, int push_enabl
return policy;
}
+void h2_util_drain_pipe(apr_file_t *pipe)
+{
+ char rb[512];
+ apr_size_t nr = sizeof(rb);
+
+ while (apr_file_read(pipe, rb, &nr) == APR_SUCCESS) {
+ /* Although we write just one byte to the other end of the pipe
+ * during wakeup, multiple threads could call the wakeup.
+ * So simply drain all data from the read side of the pipe.
+ */
+ if (nr != sizeof(rb))
+ break;
+ }
+}
+
+apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe)
+{
+ char rb[512];
+ apr_size_t nr = sizeof(rb);
+
+ return apr_file_read(pipe, rb, &nr);
+}
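These two helpers implement the read side of a wakeup pipe; the writer side simply pokes a byte into the other end. Below is a minimal sketch of that pattern with APR; the pipe setup and the function name are assumptions for illustration, not part of this patch:

    /* Sketch only: an anonymous APR pipe used as a wakeup signal.
     * The read end is created non-blocking so that draining terminates
     * once the pipe is empty; all names below are illustrative. */
    #include <apr_file_io.h>
    #include <apr_pools.h>
    #include "h2_util.h"   /* h2_util_wait_on_pipe(), h2_util_drain_pipe() */

    static apr_status_t wakeup_roundtrip(apr_pool_t *pool)
    {
        apr_file_t *read_end = NULL, *write_end = NULL;
        apr_status_t rv;

        rv = apr_file_pipe_create_ex(&read_end, &write_end,
                                     APR_WRITE_BLOCK, pool);
        if (rv != APR_SUCCESS) return rv;

        /* waker(s): the byte value is irrelevant, it only signals */
        apr_file_putc('w', write_end);
        apr_file_putc('w', write_end);  /* another thread may poke as well */

        /* sleeper: see that data arrived, then swallow all queued pokes */
        rv = h2_util_wait_on_pipe(read_end);
        if (rv == APR_SUCCESS) {
            h2_util_drain_pipe(read_end);
        }
        return rv;
    }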
diff --git a/modules/http2/h2_util.h b/modules/http2/h2_util.h
index c96570e550..4b8264d4ee 100644
--- a/modules/http2/h2_util.h
+++ b/modules/http2/h2_util.h
@@ -102,7 +102,7 @@ typedef int h2_iq_cmp(int i1, int i2, void *ctx);
h2_iqueue *h2_iq_create(apr_pool_t *pool, int capacity);
/**
- * Return != 0 iff there are no tasks in the queue.
+ * Return != 0 iff there are no ints in the queue.
* @param q the queue to check
*/
int h2_iq_empty(h2_iqueue *q);
@@ -134,11 +134,10 @@ int h2_iq_add(h2_iqueue *q, int sid, h2_iq_cmp *cmp, void *ctx);
int h2_iq_append(h2_iqueue *q, int sid);
/**
- * Remove the stream id from the queue. Return != 0 iff task
- * was found in queue.
- * @param q the task queue
+ * Remove the int from the queue. Return != 0 iff it was found.
+ * @param q the queue
* @param sid the stream id to remove
- * @return != 0 iff task was found in queue
+ * @return != 0 iff int was found in queue
*/
int h2_iq_remove(h2_iqueue *q, int sid);
@@ -148,7 +147,7 @@ int h2_iq_remove(h2_iqueue *q, int sid);
void h2_iq_clear(h2_iqueue *q);
/**
- * Sort the stream idqueue again. Call if the task ordering
+ * Sort the stream idqueue again. Call if the int ordering
* has changed.
*
* @param q the queue to sort
@@ -169,7 +168,7 @@ int h2_iq_shift(h2_iqueue *q);
/**
* Get the first max ids from the queue. All these ids will be removed.
*
- * @param q the queue to get the first task from
+ * @param q the queue to get the first ids from
* @param pint the int array to receive the values
* @param max the maximum number of ids to shift
* @return the actual number of ids shifted
@@ -420,15 +419,6 @@ apr_status_t h2_req_add_header(apr_table_t *headers, apr_pool_t *pool,
size_t max_field_len, int *pwas_added);
/*******************************************************************************
- * h2_request helpers
- ******************************************************************************/
-
-struct h2_request *h2_req_create(int id, apr_pool_t *pool, const char *method,
- const char *scheme, const char *authority,
- const char *path, apr_table_t *header,
- int serialize);
-
-/*******************************************************************************
* apr brigade helpers
******************************************************************************/
@@ -469,22 +459,6 @@ typedef apr_status_t h2_util_pass_cb(void *ctx,
const char *data, apr_off_t len);
/**
- * Read at most *plen bytes from the brigade and pass them into the
- * given callback. If cb is NULL, just return the amount of data that
- * could have been read.
- * If an EOS was/would be encountered, set *peos != 0.
- * @param bb the brigade to read from
- * @param cb the callback to invoke for the read data
- * @param ctx optional data passed to callback
- * @param plen inout, as input gives the maximum number of bytes to read,
- * on return specifies the actual/would be number of bytes
- * @param peos != 0 iff an EOS bucket was/would be encountered.
- */
-apr_status_t h2_util_bb_readx(apr_bucket_brigade *bb,
- h2_util_pass_cb *cb, void *ctx,
- apr_off_t *plen, int *peos);
-
-/**
* Print a bucket's meta data (type and length) to the buffer.
* @return number of characters printed
*/
@@ -509,14 +483,16 @@ apr_size_t h2_util_bb_print(char *buffer, apr_size_t bmax,
* @param bb the brigade to log
*/
#define h2_util_bb_log(c, sid, level, tag, bb) \
-do { \
- char buffer[4 * 1024]; \
- const char *line = "(null)"; \
- apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
- len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
- ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \
- ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \
-} while(0)
+if (APLOG_C_IS_LEVEL(c, level)) { \
+ do { \
+ char buffer[4 * 1024]; \
+ const char *line = "(null)"; \
+ apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]); \
+ len = h2_util_bb_print(buffer, bmax, (tag), "", (bb)); \
+ ap_log_cerror(APLOG_MARK, level, 0, (c), "bb_dump(%ld): %s", \
+ ((c)->master? (c)->master->id : (c)->id), (len? buffer : line)); \
+ } while(0); \
+}
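The reworked macro wraps the whole dump in APLOG_C_IS_LEVEL(), so the 4k stack buffer and the h2_util_bb_print() formatting are skipped entirely when the level is not enabled for the connection. A hypothetical call site (tag and variable names are illustrative):

    /* Only formats and logs the brigade when TRACE2 is active for c. */
    h2_util_bb_log(c, stream_id, APLOG_TRACE2, "on_send_out", bb);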
typedef int h2_bucket_gate(apr_bucket *b);
@@ -544,4 +520,14 @@ apr_status_t h2_append_brigade(apr_bucket_brigade *to,
*/
apr_off_t h2_brigade_mem_size(apr_bucket_brigade *bb);
+/**
+ * Drain a pipe used for notification.
+ */
+void h2_util_drain_pipe(apr_file_t *pipe);
+
+/**
+ * Wait on data arriving on a pipe.
+ */
+apr_status_t h2_util_wait_on_pipe(apr_file_t *pipe);
+
#endif /* defined(__mod_h2__h2_util__) */
diff --git a/modules/http2/h2_version.h b/modules/http2/h2_version.h
index 40f40a2aa5..ec1225d4d0 100644
--- a/modules/http2/h2_version.h
+++ b/modules/http2/h2_version.h
@@ -27,7 +27,7 @@
* @macro
* Version number of the http2 module as c string
*/
-#define MOD_HTTP2_VERSION "1.15.24"
+#define MOD_HTTP2_VERSION "2.0.0"
/**
* @macro
@@ -35,7 +35,7 @@
* release. This is a 24 bit number with 8 bits for major number, 8 bits
* for minor and 8 bits for patch. Version 1.2.3 becomes 0x010203.
*/
-#define MOD_HTTP2_VERSION_NUM 0x010f18
+#define MOD_HTTP2_VERSION_NUM 0x020000
#endif /* mod_h2_h2_version_h */
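For reference, the packed number follows the 8/8/8 layout described in the header comment; a tiny sketch of the encoding (the macro name here is made up for illustration):

    /* Illustrative only: 1.15.24 packs to 0x010f18, 2.0.0 to 0x020000. */
    #define PACK_MOD_HTTP2_VERSION(major, minor, patch) \
        (((major) << 16) | ((minor) << 8) | (patch))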
diff --git a/modules/http2/h2_workers.c b/modules/http2/h2_workers.c
index 28bb428200..6b9f5a3540 100644
--- a/modules/http2/h2_workers.c
+++ b/modules/http2/h2_workers.c
@@ -27,17 +27,16 @@
#include "h2.h"
#include "h2_private.h"
#include "h2_mplx.h"
-#include "h2_task.h"
+#include "h2_c2.h"
#include "h2_workers.h"
#include "h2_util.h"
typedef struct h2_slot h2_slot;
struct h2_slot {
int id;
- int sticks;
h2_slot *next;
h2_workers *workers;
- h2_task *task;
+ conn_rec *connection;
apr_thread_t *thread;
apr_thread_mutex_t *lock;
apr_thread_cond_t *not_idle;
@@ -79,7 +78,7 @@ static apr_status_t activate_slot(h2_workers *workers, h2_slot *slot)
apr_status_t rv;
slot->workers = workers;
- slot->task = NULL;
+ slot->connection = NULL;
apr_thread_mutex_lock(workers->lock);
if (!slot->lock) {
@@ -158,20 +157,14 @@ static void join_zombies(h2_workers *workers)
}
}
-static apr_status_t slot_pull_task(h2_slot *slot, h2_mplx *m)
+static apr_status_t slot_pull_c2(h2_slot *slot, h2_mplx *m)
{
apr_status_t rv;
- rv = h2_mplx_s_pop_task(m, &slot->task);
- if (slot->task) {
- /* Ok, we got something to give back to the worker for execution.
- * If we still have idle workers, we let the worker be sticky,
- * e.g. making it poll the task's h2_mplx instance for more work
- * before asking back here. */
- slot->sticks = slot->workers->max_workers;
- return rv;
+ rv = h2_mplx_worker_pop_c2(m, &slot->connection);
+ if (slot->connection) {
+ return rv;
}
- slot->sticks = 0;
return APR_EOF;
}
@@ -180,7 +173,7 @@ static h2_fifo_op_t mplx_peek(void *head, void *ctx)
h2_mplx *m = head;
h2_slot *slot = ctx;
- if (slot_pull_task(slot, m) == APR_EAGAIN) {
+ if (slot_pull_c2(slot, m) == APR_EAGAIN) {
wake_idle_worker(slot->workers);
return H2_FIFO_OP_REPUSH;
}
@@ -188,7 +181,7 @@ static h2_fifo_op_t mplx_peek(void *head, void *ctx)
}
/**
- * Get the next task for the given worker. Will block until a task arrives
+ * Get the next c2 for the given worker. Will block until a c2 arrives
* or the max_wait timer expires and more than min workers exist.
*/
static int get_next(h2_slot *slot)
@@ -198,7 +191,7 @@ static int get_next(h2_slot *slot)
apr_status_t rv;
while (!workers->aborted && !slot->timed_out) {
- ap_assert(slot->task == NULL);
+ ap_assert(slot->connection == NULL);
if (non_essential && workers->shutdown) {
/* Terminate non-essential worker on shutdown */
break;
@@ -208,7 +201,7 @@ static int get_next(h2_slot *slot)
* just leave. */
break;
}
- if (slot->task) {
+ if (slot->connection) {
return 1;
}
@@ -256,23 +249,20 @@ static void* APR_THREAD_FUNC slot_run(apr_thread_t *thread, void *wctx)
{
h2_slot *slot = wctx;
- /* Get the h2_task(s) from the ->mplxs queue. */
+ /* Get the next c2 from mplx to process. */
while (get_next(slot)) {
- ap_assert(slot->task != NULL);
do {
- h2_task_do(slot->task, thread, slot->id);
-
- /* Report the task as done. If stickyness is left, offer the
- * mplx the opportunity to give us back a new task right away.
- */
- if (!slot->workers->aborted && --slot->sticks > 0) {
- h2_mplx_s_task_done(slot->task->mplx, slot->task, &slot->task);
+ ap_assert(slot->connection != NULL);
+ h2_c2_process(slot->connection, thread, slot->id);
+ if (!slot->workers->aborted &&
+ apr_atomic_read32(&slot->workers->worker_count) < slot->workers->max_workers) {
+ h2_mplx_worker_c2_done(slot->connection, &slot->connection);
}
else {
- h2_mplx_s_task_done(slot->task->mplx, slot->task, NULL);
- slot->task = NULL;
+ h2_mplx_worker_c2_done(slot->connection, NULL);
+ slot->connection = NULL;
}
- } while (slot->task);
+ } while (slot->connection);
}
if (!slot->timed_out) {
@@ -396,17 +386,9 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pchild,
workers->min_workers, workers->max_workers,
(int)apr_time_sec(workers->max_idle_duration));
/* FIXME: the fifo set we use here has limited capacity. Once the
- * set is full, connections with new requests do a wait. Unfortunately,
- * we have optimizations in place there that makes such waiting "unfair"
- * in the sense that it may take connections a looong time to get scheduled.
- *
- * Need to rewrite this to use one of our double-linked lists and a mutex
- * to have unlimited capacity and fair scheduling.
- *
- * For now, we just make enough room to have many connections inside one
- * process.
+ * set is full, connections with new requests do a wait.
*/
- rv = h2_fifo_set_create(&workers->mplxs, pool, 8 * 1024);
+ rv = h2_fifo_set_create(&workers->mplxs, pool, 16 * 1024);
if (rv != APR_SUCCESS) goto cleanup;
rv = apr_threadattr_create(&workers->thread_attr, workers->pool);
diff --git a/modules/http2/h2_workers.h b/modules/http2/h2_workers.h
index 2aa3b3a3b3..c77cf1a6bb 100644
--- a/modules/http2/h2_workers.h
+++ b/modules/http2/h2_workers.h
@@ -17,16 +17,15 @@
#ifndef __mod_h2__h2_workers__
#define __mod_h2__h2_workers__
-/* Thread pool specific to executing h2_tasks. Has a minimum and maximum
- * number of workers it creates. Starts with minimum workers and adds
- * some on load, reduces the number again when idle.
- *
+/* Thread pool specific to executing secondary connections.
+ * Has a minimum and maximum number of workers it creates.
+ * Starts with the minimum number of workers, adds more under load,
+ * and reduces the number again when idle.
*/
struct apr_thread_mutex_t;
struct apr_thread_cond_t;
struct h2_mplx;
struct h2_request;
-struct h2_task;
struct h2_fifo;
struct h2_slot;
@@ -70,9 +69,9 @@ h2_workers *h2_workers_create(server_rec *s, apr_pool_t *pool,
int min_size, int max_size, int idle_secs);
/**
- * Registers a h2_mplx for task scheduling. If this h2_mplx runs
- * out of tasks, it will be automatically be unregistered. Should
- * new tasks arrive, it needs to be registered again.
+ * Registers an h2_mplx for scheduling. If this h2_mplx runs
+ * out of work, it will automatically be unregistered. Should
+ * new work arrive, it needs to be registered again.
*/
apr_status_t h2_workers_register(h2_workers *workers, struct h2_mplx *m);
diff --git a/modules/http2/mod_http2.c b/modules/http2/mod_http2.c
index 3f2a61fc3e..70c5ed2784 100644
--- a/modules/http2/mod_http2.c
+++ b/modules/http2/mod_http2.c
@@ -30,19 +30,18 @@
#include <nghttp2/nghttp2.h>
#include "h2_stream.h"
-#include "h2_alt_svc.h"
-#include "h2_conn.h"
-#include "h2_filter.h"
-#include "h2_task.h"
+#include "h2_c1.h"
+#include "h2_c2.h"
#include "h2_session.h"
#include "h2_config.h"
-#include "h2_ctx.h"
-#include "h2_h2.h"
+#include "h2_conn_ctx.h"
+#include "h2_protocol.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_switch.h"
#include "h2_version.h"
+#include "h2_bucket_beam.h"
static void h2_hooks(apr_pool_t *pool);
@@ -158,12 +157,12 @@ static int h2_post_config(apr_pool_t *p, apr_pool_t *plog,
h2_conn_mpm_name());
}
- status = h2_h2_init(p, s);
+ status = h2_protocol_init(p, s);
if (status == APR_SUCCESS) {
status = h2_switch_init(p, s);
}
if (status == APR_SUCCESS) {
- status = h2_task_init(p, s);
+ status = h2_c2_init(p, s);
}
return status;
@@ -185,7 +184,7 @@ static void h2_child_init(apr_pool_t *pchild, server_rec *s)
{
apr_allocator_t *allocator;
apr_thread_mutex_t *mutex;
- apr_status_t status;
+ apr_status_t rv;
/* The allocator of pchild has no mutex with MPM prefork, but we need one
* for h2 workers threads synchronization. Even though mod_http2 shouldn't
@@ -203,9 +202,12 @@ static void h2_child_init(apr_pool_t *pchild, server_rec *s)
}
/* Set up our connection processing */
- status = h2_conn_child_init(pchild, s);
- if (status != APR_SUCCESS) {
- ap_log_error(APLOG_MARK, APLOG_ERR, status, s,
+ rv = h2_c1_child_init(pchild, s);
+ if (APR_SUCCESS == rv) {
+ rv = h2_c2_child_init(pchild, s);
+ }
+ if (APR_SUCCESS != rv) {
+ ap_log_error(APLOG_MARK, APLOG_ERR, rv, s,
APLOGNO(02949) "initializing connection handling");
}
}
@@ -230,41 +232,38 @@ static void h2_hooks(apr_pool_t *pool)
*/
ap_hook_child_init(h2_child_init, NULL, NULL, APR_HOOK_MIDDLE);
#if AP_MODULE_MAGIC_AT_LEAST(20120211, 110)
- ap_hook_child_stopping(h2_conn_child_stopping, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_child_stopping(h2_c1_child_stopping, NULL, NULL, APR_HOOK_MIDDLE);
#endif
- h2_h2_register_hooks();
+
+ h2_c1_register_hooks();
h2_switch_register_hooks();
- h2_task_register_hooks();
+ h2_c2_register_hooks();
- h2_alt_svc_register_hooks();
-
- /* Setup subprocess env for certain variables
+ /* Setup subprocess env for certain variables
*/
ap_hook_fixups(h2_h2_fixups, NULL,NULL, APR_HOOK_MIDDLE);
-
- /* test http2 connection status handler */
- ap_hook_handler(h2_filter_h2_status_handler, NULL, NULL, APR_HOOK_MIDDLE);
}
static const char *val_HTTP2(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
{
return ctx? "on" : "off";
}
static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
{
- if (ctx) {
+ if (conn_ctx) {
if (r) {
- if (ctx->task) {
- h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
+ if (conn_ctx->stream_id) {
+ const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id);
if (stream && stream->push_policy != H2_PUSH_NONE) {
return "on";
}
}
}
- else if (c && h2_session_push_enabled(ctx->session)) {
+ else if (c && h2_session_push_enabled(conn_ctx->session)) {
return "on";
}
}
@@ -277,10 +276,11 @@ static const char *val_H2_PUSH(apr_pool_t *p, server_rec *s,
}
static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
{
- if (ctx) {
- if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
+ if (conn_ctx) {
+ if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) {
return "PUSHED";
}
}
@@ -288,11 +288,12 @@ static const char *val_H2_PUSHED(apr_pool_t *p, server_rec *s,
}
static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r,
+ h2_conn_ctx_t *conn_ctx)
{
- if (ctx) {
- if (ctx->task && !H2_STREAM_CLIENT_INITIATED(ctx->task->stream_id)) {
- h2_stream *stream = h2_mplx_t_stream_get(ctx->task->mplx, ctx->task);
+ if (conn_ctx) {
+ if (conn_ctx->stream_id && !H2_STREAM_CLIENT_INITIATED(conn_ctx->stream_id)) {
+ const h2_stream *stream = h2_mplx_c2_stream_get(conn_ctx->mplx, conn_ctx->stream_id);
if (stream) {
return apr_itoa(p, stream->initiated_on);
}
@@ -302,18 +303,20 @@ static const char *val_H2_PUSHED_ON(apr_pool_t *p, server_rec *s,
}
static const char *val_H2_STREAM_TAG(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
{
- if (ctx) {
- if (ctx->task) {
- return ctx->task->id;
+ if (c) {
+ h2_conn_ctx_t *conn_ctx = h2_conn_ctx_get(c);
+ if (conn_ctx) {
+ return conn_ctx->stream_id == 0? conn_ctx->id
+ : apr_psprintf(p, "%s-%d", conn_ctx->id, conn_ctx->stream_id);
}
}
return "";
}
static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx)
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx)
{
const char *cp = val_H2_STREAM_TAG(p, s, c, r, ctx);
if (cp && (cp = ap_strchr_c(cp, '-'))) {
@@ -323,7 +326,7 @@ static const char *val_H2_STREAM_ID(apr_pool_t *p, server_rec *s,
}
typedef const char *h2_var_lookup(apr_pool_t *p, server_rec *s,
- conn_rec *c, request_rec *r, h2_ctx *ctx);
+ conn_rec *c, request_rec *r, h2_conn_ctx_t *ctx);
typedef struct h2_var_def {
const char *name;
h2_var_lookup *lookup;
@@ -347,7 +350,7 @@ static h2_var_def H2_VARS[] = {
static int http2_is_h2(conn_rec *c)
{
- return h2_ctx_get(c->master? c->master : c, 0) != NULL;
+ return h2_conn_ctx_get(c->master? c->master : c) != NULL;
}
static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
@@ -358,8 +361,8 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
for (i = 0; i < H2_ALEN(H2_VARS); ++i) {
h2_var_def *vdef = &H2_VARS[i];
if (!strcmp(vdef->name, name)) {
- h2_ctx *ctx = (r? h2_ctx_get(c, 0) :
- h2_ctx_get(c->master? c->master : c, 0));
+ h2_conn_ctx_t *ctx = (r? h2_conn_ctx_get(c) :
+ h2_conn_ctx_get(c->master? c->master : c));
return (char *)vdef->lookup(p, s, c, r, ctx);
}
}
@@ -369,8 +372,9 @@ static char *http2_var_lookup(apr_pool_t *p, server_rec *s,
static int h2_h2_fixups(request_rec *r)
{
if (r->connection->master) {
- h2_ctx *ctx = h2_ctx_get(r->connection, 0);
+ h2_conn_ctx_t *ctx = h2_conn_ctx_get(r->connection);
int i;
+ apr_interval_time_t stream_timeout;
for (i = 0; ctx && i < H2_ALEN(H2_VARS); ++i) {
h2_var_def *vdef = &H2_VARS[i];
@@ -380,6 +384,10 @@ static int h2_h2_fixups(request_rec *r)
r, ctx));
}
}
+ stream_timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT);
+ if (stream_timeout > 0) {
+ h2_conn_ctx_set_timeout(ctx, stream_timeout);
+ }
}
return DECLINED;
}
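The per-request timeout picked up here corresponds to the new H2StreamTimeout directive mentioned in the changelog; a hypothetical configuration sketch (values purely illustrative):

    # Illustrative only: streams handled by this vhost time out after
    # 10 seconds, while the connection-level Timeout stays at 60.
    <VirtualHost *:443>
        Timeout 60
        H2StreamTimeout 10
    </VirtualHost>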
diff --git a/modules/http2/mod_http2.dsp b/modules/http2/mod_http2.dsp
index b18636d63a..d9ff22203a 100644
--- a/modules/http2/mod_http2.dsp
+++ b/modules/http2/mod_http2.dsp
@@ -101,10 +101,6 @@ PostBuild_Cmds=if exist $(TargetPath).manifest mt.exe -manifest $(TargetPath).ma
# Name "mod_http2 - Win32 Debug"
# Begin Source File
-SOURCE=./h2_alt_svc.c
-# End Source File
-# Begin Source File
-
SOURCE=./h2_bucket_beam.c
# End Source File
# Begin Source File
@@ -113,31 +109,31 @@ SOURCE=./h2_bucket_eos.c
# End Source File
# Begin Source File
-SOURCE=./h2_config.c
+SOURCE=./h2_c1.c
# End Source File
# Begin Source File
-SOURCE=./h2_conn.c
+SOURCE=./h2_c1_io.c
# End Source File
# Begin Source File
-SOURCE=./h2_conn_io.c
+SOURCE=./h2_c2.c
# End Source File
# Begin Source File
-SOURCE=./h2_ctx.c
+SOURCE=./h2_c2_filter.c
# End Source File
# Begin Source File
-SOURCE=./h2_filter.c
+SOURCE=./h2_config.c
# End Source File
# Begin Source File
-SOURCE=./h2_from_h1.c
+SOURCE=./h2_conn_ctx.c
# End Source File
# Begin Source File
-SOURCE=./h2_h2.c
+SOURCE=./h2_headers.c
# End Source File
# Begin Source File
@@ -145,15 +141,15 @@ SOURCE=./h2_mplx.c
# End Source File
# Begin Source File
-SOURCE=./h2_push.c
+SOURCE=./h2_protocol.c
# End Source File
# Begin Source File
-SOURCE=./h2_request.c
+SOURCE=./h2_push.c
# End Source File
# Begin Source File
-SOURCE=./h2_headers.c
+SOURCE=./h2_request.c
# End Source File
# Begin Source File
@@ -169,15 +165,15 @@ SOURCE=./h2_switch.c
# End Source File
# Begin Source File
-SOURCE=./h2_task.c
+SOURCE=./h2_util.c
# End Source File
# Begin Source File
-SOURCE=./h2_util.c
+SOURCE=./h2_workers.c
# End Source File
# Begin Source File
-SOURCE=./h2_workers.c
+SOURCE=./mod_http2.c
# End Source File
# Begin Source File
diff --git a/modules/http2/mod_proxy_http2.c b/modules/http2/mod_proxy_http2.c
index 4ea4fb9741..afba3bb32d 100644
--- a/modules/http2/mod_proxy_http2.c
+++ b/modules/http2/mod_proxy_http2.c
@@ -291,7 +291,7 @@ static int proxy_http2_handler(request_rec *r,
const char *proxyname,
apr_port_t proxyport)
{
- const char *proxy_func, *task_id;
+ const char *proxy_func;
char *locurl = url, *u;
apr_size_t slen;
int is_ssl = 0;
@@ -324,11 +324,9 @@ static int proxy_http2_handler(request_rec *r,
return DECLINED;
}
- task_id = apr_table_get(r->connection->notes, H2_TASK_ID_NOTE);
-
ctx = apr_pcalloc(r->pool, sizeof(*ctx));
ctx->master = r->connection->master? r->connection->master : r->connection;
- ctx->id = task_id? task_id : apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
+ ctx->id = apr_psprintf(r->pool, "%ld", (long)ctx->master->id);
ctx->owner = r->connection;
ctx->pool = r->pool;
ctx->server = r->server;
diff --git a/test/modules/http2/test_105_timeout.py b/test/modules/http2/test_105_timeout.py
index d8e7036310..6c0e185e5b 100644
--- a/test/modules/http2/test_105_timeout.py
+++ b/test/modules/http2/test_105_timeout.py
@@ -111,7 +111,6 @@ class TestStore:
assert piper.exitcode == 0
assert len("".join(stdout)) == 3 * 8192
- @pytest.mark.skipif(True, reason="new feature in upcoming http2")
def test_h2_105_11(self, env):
# short connection timeout, longer stream delay
# receiving the first response chunk, then timeout
@@ -126,7 +125,6 @@ class TestStore:
stdout, stderr = piper.close()
assert len("".join(stdout)) == 8192
- @pytest.mark.skipif(True, reason="new feature in upcoming http2")
def test_h2_105_12(self, env):
# long connection timeout, short stream timeout
# sending a slow POST
diff --git a/test/modules/http2/test_712_buffering.py b/test/modules/http2/test_712_buffering.py
index 12a06c8bb8..e0fcea8986 100644
--- a/test/modules/http2/test_712_buffering.py
+++ b/test/modules/http2/test_712_buffering.py
@@ -37,7 +37,6 @@ class TestBuffering:
piper = CurlPiper(env=env, url=url)
piper.stutter_check(chunks, stutter)
- @pytest.mark.skipif(True, reason="new feature in upcoming http2")
def test_h2_712_02(self, env):
# same as 712_01 but via mod_proxy_http2
#
@@ -48,7 +47,6 @@ class TestBuffering:
piper = CurlPiper(env=env, url=url)
piper.stutter_check(chunks, stutter)
- @pytest.mark.skipif(True, reason="new feature in upcoming http2")
def test_h2_712_03(self, env):
# same as 712_02 but with smaller chunks
#