summaryrefslogtreecommitdiffstats
path: root/test
diff options
context:
space:
mode:
authorStefan Eissing <icing@apache.org>2022-10-11 16:54:08 +0200
committerStefan Eissing <icing@apache.org>2022-10-11 16:54:08 +0200
commit61ebb22bf9ff55a09a452b837fdbcf67c219c9bb (patch)
tree66ff53dcf0f58fc6c880022277518dc7ae33c1c2 /test
parent *) mod_proxy_hcheck: Re-enable workers in standard ERROR state. PR 66302. (diff)
downloadapache2-61ebb22bf9ff55a09a452b837fdbcf67c219c9bb.tar.xz
apache2-61ebb22bf9ff55a09a452b837fdbcf67c219c9bb.zip
Sync with v2.0.10 from github:
* Extensive testing in production done by Alessandro Bianchi (@alexskynet) on the v2.0.x versions for stability. Many thanks! * refactored stream response handling to reflect the different phases (response/data/trailers) more clearly and help resolve cpu busy loops. * Adding more negative tests for handling of errored responses to cover edge cases. * mod_http2: fixed handling of response where neither an EOS nor an ERROR was received as a cause to reset the stream. * mod_proxy_http2: generating error buckets for faulty response bodies, to signal failure to front when response headers were already sent. v2.0.9 -------------------------------------------------------------------------------- * Fixed a bug where errors during response body handling did not lead to a proper RST_STREAM. Instead processing went into an infinite loop. Extended test cases to catch this condition. v2.0.8 -------------------------------------------------------------------------------- * Delaying input setup of a stream just before processing starts. This allows any EOS indicator arriving from the client before that to take effect. Without knowing that a stream has no input, internal processing has to simulate chunked encoding. This is not wrong, but somewhat more expensive and mod_security has been reported to be allergic to seeing 'chunked' on some requests. See <https://bz.apache.org/bugzilla/show_bug.cgi?id=66282>. * mod_proxy_http2: fixed #235 by no longer forwarding 'Host:' header when request ':authority' is known. Improved test case that did not catch that the previous 'fix' was incorrect. git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@1904522 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'test')
-rw-r--r--test/modules/http2/env.py6
-rw-r--r--test/modules/http2/htdocs/cgi/alive.json4
-rw-r--r--test/modules/http2/htdocs/cgi/hello.py9
-rw-r--r--test/modules/http2/mod_h2test/mod_h2test.c154
-rw-r--r--test/modules/http2/test_003_get.py28
-rw-r--r--test/modules/http2/test_105_timeout.py2
-rw-r--r--test/modules/http2/test_202_trailer.py2
-rw-r--r--test/modules/http2/test_203_rfc9113.py42
-rw-r--r--test/modules/http2/test_401_early_hints.py4
-rw-r--r--test/modules/http2/test_500_proxy.py25
-rw-r--r--test/modules/http2/test_600_h2proxy.py119
-rw-r--r--test/pyhttpd/conf.py2
-rw-r--r--test/pyhttpd/config.ini.in1
-rw-r--r--test/pyhttpd/env.py5
-rw-r--r--test/pyhttpd/nghttp.py3
-rw-r--r--test/pyhttpd/result.py16
16 files changed, 398 insertions, 24 deletions
diff --git a/test/modules/http2/env.py b/test/modules/http2/env.py
index 55dce59107..537d3bf37f 100644
--- a/test/modules/http2/env.py
+++ b/test/modules/http2/env.py
@@ -95,6 +95,9 @@ class H2TestEnv(HttpdTestEnv):
'AH02429', # invalid chars in response header names, see test_h2_200
'AH02430', # invalid chars in response header values, see test_h2_200
'AH10373', # SSL errors on uncompleted handshakes, see test_h2_105
+ 'AH01247', # mod_cgid sometimes freaks out on load tests
+ 'AH01110', # error by proxy reading response
+ 'AH10400', # warning that 'enablereuse' has no effect in certain configs, see test_h2_600
])
self.httpd_error_log.add_ignored_patterns([
re.compile(r'.*malformed header from script \'hecho.py\': Bad header: x.*'),
@@ -126,6 +129,9 @@ class H2Conf(HttpdConf):
"<Location \"/h2test/delay\">",
" SetHandler h2test-delay",
"</Location>",
+ "<Location \"/h2test/error\">",
+ " SetHandler h2test-error",
+ "</Location>",
]
}))
diff --git a/test/modules/http2/htdocs/cgi/alive.json b/test/modules/http2/htdocs/cgi/alive.json
new file mode 100644
index 0000000000..defe2c2e0b
--- /dev/null
+++ b/test/modules/http2/htdocs/cgi/alive.json
@@ -0,0 +1,4 @@
+{
+ "host" : "cgi",
+ "alive" : true
+}
diff --git a/test/modules/http2/htdocs/cgi/hello.py b/test/modules/http2/htdocs/cgi/hello.py
index f9aed3f1a4..20974bfdd3 100644
--- a/test/modules/http2/htdocs/cgi/hello.py
+++ b/test/modules/http2/htdocs/cgi/hello.py
@@ -6,12 +6,15 @@ print("Content-Type: application/json")
print()
print("{")
print(" \"https\" : \"%s\"," % (os.getenv('HTTPS', '')))
-print(" \"x_host\" : \"%s\"," % (os.getenv('X_HOST', '')))
-print(" \"host\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print(" \"host\" : \"%s\"," % (os.getenv('X_HOST', '') \
+ if 'X_HOST' in os.environ else os.getenv('SERVER_NAME', '')))
+print(" \"server\" : \"%s\"," % (os.getenv('SERVER_NAME', '')))
+print(" \"h2_original_host\" : \"%s\"," % (os.getenv('H2_ORIGINAL_HOST', '')))
print(" \"port\" : \"%s\"," % (os.getenv('SERVER_PORT', '')))
print(" \"protocol\" : \"%s\"," % (os.getenv('SERVER_PROTOCOL', '')))
print(" \"ssl_protocol\" : \"%s\"," % (os.getenv('SSL_PROTOCOL', '')))
print(" \"h2\" : \"%s\"," % (os.getenv('HTTP2', '')))
-print(" \"h2push\" : \"%s\"" % (os.getenv('H2PUSH', '')))
+print(" \"h2push\" : \"%s\"," % (os.getenv('H2PUSH', '')))
+print(" \"h2_stream_id\" : \"%s\"" % (os.getenv('H2_STREAM_ID', '')))
print("}")
diff --git a/test/modules/http2/mod_h2test/mod_h2test.c b/test/modules/http2/mod_h2test/mod_h2test.c
index 0b0f057e4a..b5ee8ad6e4 100644
--- a/test/modules/http2/mod_h2test/mod_h2test.c
+++ b/test/modules/http2/mod_h2test/mod_h2test.c
@@ -280,7 +280,7 @@ static int h2test_delay_handler(request_rec *r)
cleanup:
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
- "delay_handler: request cleanup, r->status=%d, aborted=%d",
+ "delay_handler: request cleanup, r->status=%d, aborte=%d",
r->status, c->aborted);
if (rv == APR_SUCCESS
|| r->status != HTTP_OK
@@ -297,7 +297,6 @@ static int h2test_trailer_handler(request_rec *r)
apr_bucket *b;
apr_status_t rv;
char buffer[8192];
- int i, chunks = 3;
long l;
int body_len = 0;
@@ -345,7 +344,7 @@ static int h2test_trailer_handler(request_rec *r)
cleanup:
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
- "trailer_handler: request cleanup, r->status=%d, aborted=%d",
+ "trailer_handler: request cleanup, r->status=%d, aborte=%d",
r->status, c->aborted);
if (rv == APR_SUCCESS
|| r->status != HTTP_OK
@@ -355,6 +354,154 @@ cleanup:
return AP_FILTER_ERROR;
}
+static int status_from_str(const char *s, apr_status_t *pstatus)
+{
+ if (!strcmp("timeout", s)) {
+ *pstatus = APR_TIMEUP;
+ return 1;
+ }
+ else if (!strcmp("reset", s)) {
+ *pstatus = APR_ECONNRESET;
+ return 1;
+ }
+ return 0;
+}
+
+static int h2test_error_handler(request_rec *r)
+{
+ conn_rec *c = r->connection;
+ apr_bucket_brigade *bb;
+ apr_bucket *b;
+ apr_status_t rv;
+ char buffer[8192];
+ int i, chunks = 3, error_bucket = 1;
+ long l;
+ apr_time_t delay = 0, body_delay = 0;
+ apr_array_header_t *args = NULL;
+ int http_status = 200;
+ apr_status_t error = APR_SUCCESS, body_error = APR_SUCCESS;
+
+ if (strcmp(r->handler, "h2test-error")) {
+ return DECLINED;
+ }
+ if (r->method_number != M_GET && r->method_number != M_POST) {
+ return DECLINED;
+ }
+
+ if (r->args) {
+ args = apr_cstr_split(r->args, "&", 1, r->pool);
+ for (i = 0; i < args->nelts; ++i) {
+ char *s, *val, *arg = APR_ARRAY_IDX(args, i, char*);
+ s = strchr(arg, '=');
+ if (s) {
+ *s = '\0';
+ val = s + 1;
+ if (!strcmp("status", arg)) {
+ http_status = (int)apr_atoi64(val);
+ if (http_status > 0) {
+ continue;
+ }
+ }
+ else if (!strcmp("error", arg)) {
+ if (status_from_str(val, &error)) {
+ continue;
+ }
+ }
+ else if (!strcmp("error_bucket", arg)) {
+ error_bucket = (int)apr_atoi64(val);
+ if (error_bucket >= 0) {
+ continue;
+ }
+ }
+ else if (!strcmp("body_error", arg)) {
+ if (status_from_str(val, &body_error)) {
+ continue;
+ }
+ }
+ else if (!strcmp("delay", arg)) {
+ rv = duration_parse(&delay, val, "s");
+ if (APR_SUCCESS == rv) {
+ continue;
+ }
+ }
+ else if (!strcmp("body_delay", arg)) {
+ rv = duration_parse(&body_delay, val, "s");
+ if (APR_SUCCESS == rv) {
+ continue;
+ }
+ }
+ }
+ ap_die(HTTP_BAD_REQUEST, r);
+ return OK;
+ }
+ }
+
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "error_handler: processing request, %s",
+ r->args? r->args : "(no args)");
+ r->status = http_status;
+ r->clength = -1;
+ r->chunked = 1;
+ apr_table_unset(r->headers_out, "Content-Length");
+ /* Discourage content-encodings */
+ apr_table_unset(r->headers_out, "Content-Encoding");
+ apr_table_setn(r->subprocess_env, "no-brotli", "1");
+ apr_table_setn(r->subprocess_env, "no-gzip", "1");
+
+ ap_set_content_type(r, "application/octet-stream");
+ bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (delay) {
+ apr_sleep(delay);
+ }
+ if (error != APR_SUCCESS) {
+ return ap_map_http_request_error(error, HTTP_BAD_REQUEST);
+ }
+ /* flush response */
+ b = apr_bucket_flush_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ if (APR_SUCCESS != rv) goto cleanup;
+
+ memset(buffer, 'X', sizeof(buffer));
+ l = sizeof(buffer);
+ for (i = 0; i < chunks; ++i) {
+ if (body_delay) {
+ apr_sleep(body_delay);
+ }
+ rv = apr_brigade_write(bb, NULL, NULL, buffer, l);
+ if (APR_SUCCESS != rv) goto cleanup;
+ rv = ap_pass_brigade(r->output_filters, bb);
+ if (APR_SUCCESS != rv) goto cleanup;
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r,
+ "error_handler: passed %ld bytes as response body", l);
+ if (body_error != APR_SUCCESS) {
+ rv = body_error;
+ goto cleanup;
+ }
+ }
+ /* we are done */
+ b = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ rv = ap_pass_brigade(r->output_filters, bb);
+ apr_brigade_cleanup(bb);
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r, "error_handler: response passed");
+
+cleanup:
+ ap_log_rerror(APLOG_MARK, APLOG_TRACE1, rv, r,
+ "error_handler: request cleanup, r->status=%d, aborted=%d",
+ r->status, c->aborted);
+ if (rv == APR_SUCCESS) {
+ return OK;
+ }
+ if (error_bucket) {
+ http_status = ap_map_http_request_error(rv, HTTP_BAD_REQUEST);
+ b = ap_bucket_error_create(http_status, NULL, r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, b);
+ ap_pass_brigade(r->output_filters, bb);
+ }
+ return AP_FILTER_ERROR;
+}
+
/* Install this module into the apache2 infrastructure.
*/
static void h2test_hooks(apr_pool_t *pool)
@@ -375,5 +522,6 @@ static void h2test_hooks(apr_pool_t *pool)
ap_hook_handler(h2test_echo_handler, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(h2test_delay_handler, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_handler(h2test_trailer_handler, NULL, NULL, APR_HOOK_MIDDLE);
+ ap_hook_handler(h2test_error_handler, NULL, NULL, APR_HOOK_MIDDLE);
}
diff --git a/test/modules/http2/test_003_get.py b/test/modules/http2/test_003_get.py
index 30f18d3524..5928448d2a 100644
--- a/test/modules/http2/test_003_get.py
+++ b/test/modules/http2/test_003_get.py
@@ -237,3 +237,31 @@ content-type: text/html
assert r.response['status'] == 200
assert 'date' in r.response['header']
assert 'server' in r.response['header']
+
+ # lets do some error tests
+ def test_h2_003_70(self, env):
+ url = env.mkurl("https", "cgi", "/h2test/error?status=500")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 500
+ url = env.mkurl("https", "cgi", "/h2test/error?error=timeout")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 408
+
+ # produce an error during response body
+ def test_h2_003_71(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout")
+ r = env.curl_get(url)
+ assert r.exit_code != 0, f"{r}"
+ url = env.mkurl("https", "cgi", "/h2test/error?body_error=reset")
+ r = env.curl_get(url)
+ assert r.exit_code != 0, f"{r}"
+
+ # produce an error, fail to generate an error bucket
+ def test_h2_003_72(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ url = env.mkurl("https", "cgi", "/h2test/error?body_error=timeout&error_bucket=0")
+ r = env.curl_get(url)
+ assert r.exit_code != 0, f"{r}"
diff --git a/test/modules/http2/test_105_timeout.py b/test/modules/http2/test_105_timeout.py
index dfa5f2dbc7..13aa8ed07a 100644
--- a/test/modules/http2/test_105_timeout.py
+++ b/test/modules/http2/test_105_timeout.py
@@ -146,4 +146,4 @@ class TestTimeout:
break
piper.close()
assert piper.response
- assert piper.response['status'] == 408
+ assert piper.response['status'] == 408, f"{piper.response}"
diff --git a/test/modules/http2/test_202_trailer.py b/test/modules/http2/test_202_trailer.py
index 8571955dd7..4b4fc42c78 100644
--- a/test/modules/http2/test_202_trailer.py
+++ b/test/modules/http2/test_202_trailer.py
@@ -86,7 +86,7 @@ class TestTrailers:
url = env.mkurl("https", "cgi", "/h2test/trailer?0")
r = env.nghttp().get(url)
assert r.response["status"] == 200
- assert len(r.response["body"]) == 0
+ assert len(r.response["body"]) == 0, f'{r.response["body"]}'
assert 'trailer' in r.response
assert 'trailer-content-length' in r.response['trailer']
assert r.response['trailer']['trailer-content-length'] == '0'
diff --git a/test/modules/http2/test_203_rfc9113.py b/test/modules/http2/test_203_rfc9113.py
new file mode 100644
index 0000000000..326462f739
--- /dev/null
+++ b/test/modules/http2/test_203_rfc9113.py
@@ -0,0 +1,42 @@
+import pytest
+
+from pyhttpd.env import HttpdTestEnv
+from .env import H2Conf
+
+
+class TestRfc9113:
+
+ @pytest.fixture(autouse=True, scope='class')
+ def _class_scope(self, env):
+ H2Conf(env).add_vhost_test1().install()
+ assert env.apache_restart() == 0
+
+ # by default, we ignore leading/trailing ws
+ # tests with leading ws are not present as curl seems to silently eat those
+ def test_h2_203_01_ws_ignore(self, env):
+ url = env.mkurl("https", "test1", "/")
+ r = env.curl_get(url, options=['-H', 'trailing-space: must not '])
+ assert r.exit_code == 0, f'curl output: {r.stderr}'
+ assert r.response["status"] == 200, f'curl output: {r.stdout}'
+ r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+ assert r.exit_code == 0, f'curl output: {r.stderr}'
+ assert r.response["status"] == 200, f'curl output: {r.stdout}'
+
+ # When enabled, leading/trailing whitespace makes the stream RST
+ # tests with leading ws are not present as curl seems to silently eat those
+ def test_h2_203_02_ws_reject(self, env):
+ if not env.h2load_is_at_least('1.50.0'):
+ pytest.skip(f'need nghttp2 >= 1.50.0')
+ conf = H2Conf(env)
+ conf.add([
+ "H2HeaderStrictness rfc9113"
+ ])
+ conf.add_vhost_test1()
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "test1", "/")
+ r = env.curl_get(url, options=['-H', 'trailing-space: must not '])
+ assert r.exit_code != 0, f'curl output: {r.stderr}'
+ r = env.curl_get(url, options=['-H', 'trailing-space: must not\t'])
+ assert r.exit_code != 0, f'curl output: {r.stderr}'
+
diff --git a/test/modules/http2/test_401_early_hints.py b/test/modules/http2/test_401_early_hints.py
index 1b851d3080..f73dcc4c8c 100644
--- a/test/modules/http2/test_401_early_hints.py
+++ b/test/modules/http2/test_401_early_hints.py
@@ -26,7 +26,7 @@ class TestEarlyHints:
assert env.apache_restart() == 0
# H2EarlyHints enabled in general, check that it works for H2PushResource
- def test_h2_401_31(self, env):
+ def test_h2_401_31(self, env, repeat):
url = env.mkurl("https", "hints", "/006-hints.html")
r = env.nghttp().get(url)
assert r.response["status"] == 200
@@ -38,7 +38,7 @@ class TestEarlyHints:
assert early["header"]["link"]
# H2EarlyHints enabled in general, but does not trigger on added response headers
- def test_h2_401_32(self, env):
+ def test_h2_401_32(self, env, repeat):
url = env.mkurl("https", "hints", "/006-nohints.html")
r = env.nghttp().get(url)
assert r.response["status"] == 200
diff --git a/test/modules/http2/test_500_proxy.py b/test/modules/http2/test_500_proxy.py
index 5eec052263..d10bcbcb0c 100644
--- a/test/modules/http2/test_500_proxy.py
+++ b/test/modules/http2/test_500_proxy.py
@@ -126,3 +126,28 @@ class TestProxy:
def test_h2_500_24(self, env):
for i in range(100):
self.nghttp_upload_stat(env, "data-1k", ["--no-content-length"])
+
+ # lets do some error tests
+ def test_h2_500_30(self, env):
+ url = env.mkurl("https", "cgi", "/proxy/h2test/error?status=500")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 500
+ url = env.mkurl("https", "cgi", "/proxy/h2test/error?error=timeout")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 408
+
+ # produce an error during response body
+ def test_h2_500_31(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout")
+ r = env.curl_get(url)
+ assert r.exit_code != 0, r
+
+ # produce an error, fail to generate an error bucket
+ def test_h2_500_32(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ url = env.mkurl("https", "cgi", "/proxy/h2test/error?body_error=timeout&error_bucket=0")
+ r = env.curl_get(url)
+ assert r.exit_code != 0, r
diff --git a/test/modules/http2/test_600_h2proxy.py b/test/modules/http2/test_600_h2proxy.py
index 0f368eda03..854195e4b9 100644
--- a/test/modules/http2/test_600_h2proxy.py
+++ b/test/modules/http2/test_600_h2proxy.py
@@ -23,7 +23,7 @@ class TestH2Proxy:
assert r.response["json"]["ssl_protocol"] != ""
assert r.response["json"]["h2"] == "on"
assert r.response["json"]["h2push"] == "off"
- assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+ assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
def test_h2_600_02(self, env):
conf = H2Conf(env, extras={
@@ -42,7 +42,8 @@ class TestH2Proxy:
assert r.response["json"]["protocol"] == "HTTP/2.0"
assert r.response["json"]["https"] == ""
# the proxied backend sees Host header as passed on front
- assert r.response["json"]["x_host"] == f"cgi.{env.http_tld}:{env.https_port}"
+ assert r.response["json"]["host"] == f"cgi.{env.http_tld}:{env.https_port}"
+ assert r.response["json"]["h2_original_host"] == ""
def test_h2_600_03(self, env):
conf = H2Conf(env, extras={
@@ -61,4 +62,116 @@ class TestH2Proxy:
assert r.response["json"]["protocol"] == "HTTP/2.0"
assert r.response["json"]["https"] == ""
# the proxied backend sees the Host used in connecting to it
- assert r.response["json"]["x_host"] == f"127.0.0.1:{env.http_port}"
+ assert r.response["json"]["host"] == f"127.0.0.1:{env.http_port}"
+ assert r.response["json"]["h2_original_host"] == ""
+
+ # check that connection reuse actually happens as configured
+ @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+ def test_h2_600_04(self, env, enable_reuse):
+ conf = H2Conf(env, extras={
+ f'cgi.{env.http_tld}': [
+ f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+ f" h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+ ]
+ })
+ conf.add_vhost_cgi()
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ assert r.json["h2_stream_id"] == "1"
+ # httpd 2.5.0 disables reuse, no matter the config
+ if enable_reuse == "on" and not env.httpd_is_at_least("2.5.0"):
+ # reuse is not guaranteed for each request, but we expect some
+ # to do it and run on an h2 stream id > 1
+ reused = False
+ for _ in range(10):
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ if int(r.json["h2_stream_id"]) > 1:
+ reused = True
+ break
+ assert reused
+ else:
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ assert r.json["h2_stream_id"] == "1"
+
+ # do some flexible setup from #235 to test proper connection selection
+ @pytest.mark.parametrize("enable_reuse", [ "on", "off" ])
+ def test_h2_600_05(self, env, enable_reuse):
+ conf = H2Conf(env, extras={
+ f'cgi.{env.http_tld}': [
+ f"ProxyPassMatch ^/h2proxy/([0-9]+)/(.*)$ "
+ f" h2c://127.0.0.1:$1/$2 enablereuse={enable_reuse} keepalive=on",
+ ]
+ })
+ conf.add_vhost_cgi()
+ conf.add([
+ f'Listen {env.http_port2}',
+ 'UseCanonicalName On',
+ 'UseCanonicalPhysicalPort On'
+ ])
+ conf.start_vhost(domains=[f'cgi.{env.http_tld}'],
+ port=5004, doc_root="htdocs/cgi")
+ conf.add("AddHandler cgi-script .py")
+ conf.end_vhost()
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port}/hello.py")
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ assert int(r.json["port"]) == env.http_port
+ # going to another backend port must create a new connection and
+ # we should see stream id one again
+ url = env.mkurl("https", "cgi", f"/h2proxy/{env.http_port2}/hello.py")
+ r = env.curl_get(url, 5)
+ assert r.response["status"] == 200
+ exp_port = env.http_port if enable_reuse == "on" \
+ and not env.httpd_is_at_least("2.5.0")\
+ else env.http_port2
+ assert int(r.json["port"]) == exp_port
+
+ # lets do some error tests
+ def test_h2_600_30(self, env):
+ conf = H2Conf(env)
+ conf.add_vhost_cgi(h2proxy_self=True)
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?status=500")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 500
+ url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?error=timeout")
+ r = env.curl_get(url)
+ assert r.exit_code == 0, r
+ assert r.response['status'] == 408
+
+ # produce an error during response body
+ def test_h2_600_31(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ conf = H2Conf(env)
+ conf.add_vhost_cgi(h2proxy_self=True)
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout")
+ r = env.curl_get(url)
+ # depending on when the error is detected in proxying, it may RST the
+ # stream (exit_code != 0) or give a 503 response.
+ if r.exit_code == 0:
+ assert r.response['status'] == 503
+
+ # produce an error, fail to generate an error bucket
+ def test_h2_600_32(self, env, repeat):
+ pytest.skip("needs fix in core protocol handling")
+ conf = H2Conf(env)
+ conf.add_vhost_cgi(h2proxy_self=True)
+ conf.install()
+ assert env.apache_restart() == 0
+ url = env.mkurl("https", "cgi", "/h2proxy/h2test/error?body_error=timeout&error_bucket=0")
+ r = env.curl_get(url)
+ # depending on when the error is detected in proxying, it may RST the
+ # stream (exit_code != 0) or give a 503 response.
+ if r.exit_code == 0:
+ assert r.response['status'] == 503
diff --git a/test/pyhttpd/conf.py b/test/pyhttpd/conf.py
index ae34e78b4b..cd3363fb73 100644
--- a/test/pyhttpd/conf.py
+++ b/test/pyhttpd/conf.py
@@ -157,8 +157,6 @@ class HttpdConf(object):
self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
port=self.env.https_port, doc_root="htdocs/cgi")
self.add_proxies("cgi", proxy_self=proxy_self, h2proxy_self=h2proxy_self)
- if domain in self._extras:
- self.add(self._extras[domain])
self.end_vhost()
self.start_vhost(domains=[domain, f"cgi-alias.{self.env.http_tld}"],
port=self.env.http_port, doc_root="htdocs/cgi")
diff --git a/test/pyhttpd/config.ini.in b/test/pyhttpd/config.ini.in
index 80cab2ba32..e1ae0707ab 100644
--- a/test/pyhttpd/config.ini.in
+++ b/test/pyhttpd/config.ini.in
@@ -25,6 +25,7 @@ gen_dir = @abs_srcdir@/../gen
http_port = 5002
https_port = 5001
proxy_port = 5003
+http_port2 = 5004
http_tld = tests.httpd.apache.org
test_dir = @abs_srcdir@
test_src_dir = @abs_srcdir@
diff --git a/test/pyhttpd/env.py b/test/pyhttpd/env.py
index 991ead9e11..af856effe4 100644
--- a/test/pyhttpd/env.py
+++ b/test/pyhttpd/env.py
@@ -244,6 +244,7 @@ class HttpdTestEnv:
self._h2load = 'h2load'
self._http_port = int(self.config.get('test', 'http_port'))
+ self._http_port2 = int(self.config.get('test', 'http_port2'))
self._https_port = int(self.config.get('test', 'https_port'))
self._proxy_port = int(self.config.get('test', 'proxy_port'))
self._http_tld = self.config.get('test', 'http_tld')
@@ -346,6 +347,10 @@ class HttpdTestEnv:
return self._http_port
@property
+ def http_port2(self) -> int:
+ return self._http_port2
+
+ @property
def https_port(self) -> int:
return self._https_port
diff --git a/test/pyhttpd/nghttp.py b/test/pyhttpd/nghttp.py
index 6dea97b55c..fe4a1aedff 100644
--- a/test/pyhttpd/nghttp.py
+++ b/test/pyhttpd/nghttp.py
@@ -121,7 +121,8 @@ class Nghttp:
prev["previous"] = response["previous"]
response["previous"] = prev
response[hkey] = s["header"]
- s["header"] = {}
+ s["header"] = {}
+ body = ''
continue
m = re.match(r'(.*)\[.*] recv DATA frame <length=(\d+), .*stream_id=(\d+)>', l)
diff --git a/test/pyhttpd/result.py b/test/pyhttpd/result.py
index 5942d35d9a..04ea825a31 100644
--- a/test/pyhttpd/result.py
+++ b/test/pyhttpd/result.py
@@ -9,21 +9,21 @@ class ExecResult:
stdout: bytes, stderr: bytes = None, duration: timedelta = None):
self._args = args
self._exit_code = exit_code
- self._raw = stdout if stdout else b''
- self._stdout = stdout.decode() if stdout is not None else ""
- self._stderr = stderr.decode() if stderr is not None else ""
+ self._stdout = stdout if stdout is not None else b''
+ self._stderr = stderr if stderr is not None else b''
self._duration = duration if duration is not None else timedelta()
self._response = None
self._results = {}
self._assets = []
# noinspection PyBroadException
try:
- self._json_out = json.loads(self._stdout)
+ out = self._stdout.decode()
+ self._json_out = json.loads(out)
except:
self._json_out = None
def __repr__(self):
- return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self.stdout}, stderr={self.stderr}]"
+ return f"ExecResult[code={self.exit_code}, args={self._args}, stdout={self._stdout}, stderr={self._stderr}]"
@property
def exit_code(self) -> int:
@@ -35,11 +35,11 @@ class ExecResult:
@property
def outraw(self) -> bytes:
- return self._raw
+ return self._stdout
@property
def stdout(self) -> str:
- return self._stdout
+ return self._stdout.decode()
@property
def json(self) -> Optional[Dict]:
@@ -48,7 +48,7 @@ class ExecResult:
@property
def stderr(self) -> str:
- return self._stderr
+ return self._stderr.decode()
@property
def duration(self) -> timedelta: