-rw-r--r--  Makefile.in                          2
-rw-r--r--  NWGNUmakefile                        5
-rw-r--r--  build/rules.mk.in                    4
-rw-r--r--  libhttpd.dsp                        20
-rw-r--r--  modules/http/byterange_filter.c    388
-rw-r--r--  modules/http/chunk_filter.c        167
-rw-r--r--  modules/http/config2.m4              2
-rw-r--r--  modules/http/http_etag.c           220
-rw-r--r--  modules/http/http_filters.c       1247
-rw-r--r--  modules/http/http_protocol.c      1715
-rw-r--r--  server/Makefile.in                   2
-rw-r--r--  server/core.c                      876
-rw-r--r--  server/core_filters.c              929
13 files changed, 2989 insertions(+), 2588 deletions(-)
diff --git a/Makefile.in b/Makefile.in
index 9be3a77526..3e28897fac 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -6,9 +6,9 @@ PROGRAM_NAME = $(progname)
PROGRAM_SOURCES = modules.c
PROGRAM_LDADD = $(HTTPD_LDFLAGS) $(PROGRAM_DEPENDENCIES) $(EXTRA_LIBS) $(AP_LIBS) $(LIBS)
PROGRAM_DEPENDENCIES = \
+ server/libmain.la \
$(BUILTIN_LIBS) \
$(MPM_LIB) \
- server/libmain.la \
os/$(OS_DIR)/libos.la
PROGRAMS = $(PROGRAM_NAME)
diff --git a/NWGNUmakefile b/NWGNUmakefile
index af16d38da9..ba31a12aef 100644
--- a/NWGNUmakefile
+++ b/NWGNUmakefile
@@ -195,11 +195,16 @@ FILES_nlm_objs = \
$(OBJDIR)/config.o \
$(OBJDIR)/connection.o \
$(OBJDIR)/core.o \
+ $(OBJDIR)/core_filters.o \
$(OBJDIR)/eoc_bucket.o \
$(OBJDIR)/error_bucket.o \
$(OBJDIR)/http_core.o \
$(OBJDIR)/http_protocol.o \
$(OBJDIR)/http_request.o \
+ $(OBJDIR)/byterange_filter.o \
+ $(OBJDIR)/chunk_filter.o \
+ $(OBJDIR)/http_etag.o \
+ $(OBJDIR)/http_filters.o \
$(OBJDIR)/listen.o \
$(OBJDIR)/log.o \
$(OBJDIR)/main.o \
diff --git a/build/rules.mk.in b/build/rules.mk.in
index 38c6fe8829..24b8bcd65c 100644
--- a/build/rules.mk.in
+++ b/build/rules.mk.in
@@ -160,7 +160,7 @@ local-extraclean: local-distclean x-local-extraclean
rm -f $(EXTRACLEAN_TARGETS) ; \
fi
-local-install: $(TARGETS) $(SHARED_TARGETS) $(INSTALL_TARGETS)
+program-install: $(TARGETS) $(SHARED_TARGETS)
@if test -n '$(PROGRAMS)'; then \
test -d $(DESTDIR)$(sbindir) || $(MKINSTALLDIRS) $(DESTDIR)$(sbindir); \
list='$(PROGRAMS)'; for i in $$list; do \
@@ -168,6 +168,8 @@ local-install: $(TARGETS) $(SHARED_TARGETS) $(INSTALL_TARGETS)
done; \
fi
+local-install: program-install $(INSTALL_TARGETS)
+
# to be filled in by the actual Makefile if extra commands are needed
x-local-depend x-local-clean x-local-distclean x-local-extraclean:
diff --git a/libhttpd.dsp b/libhttpd.dsp
index 4def46c08e..906110f84a 100644
--- a/libhttpd.dsp
+++ b/libhttpd.dsp
@@ -382,6 +382,10 @@ SOURCE=.\server\core.c
# End Source File
# Begin Source File
+SOURCE=.\server\core_filters.c
+# End Source File
+# Begin Source File
+
SOURCE=.\modules\http\http_core.c
# End Source File
# Begin Source File
@@ -394,6 +398,22 @@ SOURCE=.\modules\http\http_request.c
# End Source File
# Begin Source File
+SOURCE=.\modules\http\byterange_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\modules\http\chunk_filter.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\modules\http\http_etag.c
+# End Source File
+# Begin Source File
+
+SOURCE=.\modules\http\http_filters.c
+# End Source File
+# Begin Source File
+
SOURCE=.\server\log.c
# End Source File
# Begin Source File
diff --git a/modules/http/byterange_filter.c b/modules/http/byterange_filter.c
new file mode 100644
index 0000000000..fa520df4ba
--- /dev/null
+++ b/modules/http/byterange_filter.c
@@ -0,0 +1,388 @@
+/* Copyright 1999-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * byterange_filter.c --- HTTP byterange filter and friends.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static int parse_byterange(char *range, apr_off_t clength,
+ apr_off_t *start, apr_off_t *end)
+{
+ char *dash = strchr(range, '-');
+ char *errp;
+ apr_off_t number;
+
+ if (!dash) {
+ return 0;
+ }
+
+ if ((dash == range)) {
+ /* In the form "-5" */
+ if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) {
+ return 0;
+ }
+ *start = clength - number;
+ *end = clength - 1;
+ }
+ else {
+ *dash++ = '\0';
+ if (apr_strtoff(&number, range, &errp, 10) || *errp) {
+ return 0;
+ }
+ *start = number;
+ if (*dash) {
+ if (apr_strtoff(&number, dash, &errp, 10) || *errp) {
+ return 0;
+ }
+ *end = number;
+ }
+ else { /* "5-" */
+ *end = clength - 1;
+ }
+ }
+
+ if (*start < 0) {
+ *start = 0;
+ }
+
+ if (*end >= clength) {
+ *end = clength - 1;
+ }
+
+ if (*start > *end) {
+ return -1;
+ }
+
+ return (*start > 0 || *end < clength);
+}
+
+static int ap_set_byterange(request_rec *r);
+
+typedef struct byterange_ctx {
+ apr_bucket_brigade *bb;
+ int num_ranges;
+ char *boundary;
+ char *bound_head;
+} byterange_ctx;
+
+/*
+ * Here we try to be compatible with clients that want multipart/x-byteranges
+ * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
+ * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
+ * that the browser supports an older protocol. We also check User-Agent
+ * for Microsoft Internet Explorer 3, which needs this as well.
+ */
+static int use_range_x(request_rec *r)
+{
+ const char *ua;
+ return (apr_table_get(r->headers_in, "Request-Range")
+ || ((ua = apr_table_get(r->headers_in, "User-Agent"))
+ && ap_strstr_c(ua, "MSIE 3")));
+}
+
+#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
+#define PARTITION_ERR_FMT "apr_brigade_partition() failed " \
+ "[%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]"
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
+ apr_bucket_brigade *bb)
+{
+#define MIN_LENGTH(len1, len2) ((len1 > len2) ? len2 : len1)
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ byterange_ctx *ctx = f->ctx;
+ apr_bucket *e;
+ apr_bucket_brigade *bsend;
+ apr_off_t range_start;
+ apr_off_t range_end;
+ char *current;
+ apr_off_t bb_length;
+ apr_off_t clength = 0;
+ apr_status_t rv;
+ int found = 0;
+
+ if (!ctx) {
+ int num_ranges = ap_set_byterange(r);
+
+ /* We have nothing to do, get out of the way. */
+ if (num_ranges == 0) {
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, bb);
+ }
+
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
+ ctx->num_ranges = num_ranges;
+ /* create a brigade in case we never call ap_save_brigade() */
+ ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ if (ctx->num_ranges > 1) {
+ /* Is ap_make_content_type required here? */
+ const char *orig_ct = ap_make_content_type(r, r->content_type);
+ ctx->boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
+ (apr_uint64_t)r->request_time, (long) getpid());
+
+ ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
+ use_range_x(r) ? "/x-" : "/",
+ "byteranges; boundary=",
+ ctx->boundary, NULL));
+
+ ctx->bound_head = apr_pstrcat(r->pool,
+ CRLF "--", ctx->boundary,
+ CRLF "Content-type: ",
+ orig_ct,
+ CRLF "Content-range: bytes ",
+ NULL);
+ ap_xlate_proto_to_ascii(ctx->bound_head, strlen(ctx->bound_head));
+ }
+ }
+
+ /* We can't actually deal with byte-ranges until we have the whole brigade
+ * because the byte-ranges can be in any order, and according to the RFC,
+ * we SHOULD return the data in the same order it was requested.
+ *
+ * XXX: We really need to dump all bytes prior to the start of the earliest
+ * range, and only slurp up to the end of the latest range. By this we
+ * mean that we should peek-ahead at the lowest first byte of any range,
+ * and the highest last byte of any range.
+ */
+ if (!APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ ap_save_brigade(f, &ctx->bb, &bb, r->pool);
+ return APR_SUCCESS;
+ }
+
+ /* Prepend any earlier saved brigades. */
+ APR_BRIGADE_PREPEND(bb, ctx->bb);
+
+ /* It is possible that we won't have a content length yet, so we have to
+ * compute the length before we can actually do the byterange work.
+ */
+ apr_brigade_length(bb, 1, &bb_length);
+ clength = (apr_off_t)bb_length;
+
+ /* this brigade holds what we will be sending */
+ bsend = apr_brigade_create(r->pool, c->bucket_alloc);
+
+ while ((current = ap_getword(r->pool, &r->range, ','))
+ && (rv = parse_byterange(current, clength, &range_start,
+ &range_end))) {
+ apr_bucket *e2;
+ apr_bucket *ec;
+
+ if (rv == -1) {
+ continue;
+ }
+
+ /* these calls to apr_brigade_partition() should theoretically
+ * never fail because of the above call to apr_brigade_length(),
+ * but what the heck, we'll check for an error anyway */
+ if ((rv = apr_brigade_partition(bb, range_start, &ec)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ PARTITION_ERR_FMT, range_start, clength);
+ continue;
+ }
+ if ((rv = apr_brigade_partition(bb, range_end+1, &e2)) != APR_SUCCESS) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
+ PARTITION_ERR_FMT, range_end+1, clength);
+ continue;
+ }
+
+ found = 1;
+
+ /* For single range requests, we must produce Content-Range header.
+ * Otherwise, we need to produce the multipart boundaries.
+ */
+ if (ctx->num_ranges == 1) {
+ apr_table_setn(r->headers_out, "Content-Range",
+ apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
+ range_start, range_end, clength));
+ }
+ else {
+ char *ts;
+
+ e = apr_bucket_pool_create(ctx->bound_head, strlen(ctx->bound_head),
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
+ range_start, range_end, clength);
+ ap_xlate_proto_to_ascii(ts, strlen(ts));
+ e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ do {
+ apr_bucket *foo;
+ const char *str;
+ apr_size_t len;
+
+ if (apr_bucket_copy(ec, &foo) != APR_SUCCESS) {
+ /* this shouldn't ever happen due to the call to
+ * apr_brigade_length() above which normalizes
+ * indeterminate-length buckets. just to be sure,
+ * though, this takes care of uncopyable buckets that
+ * do somehow manage to slip through.
+ */
+ /* XXX: check for failure? */
+ apr_bucket_read(ec, &str, &len, APR_BLOCK_READ);
+ apr_bucket_copy(ec, &foo);
+ }
+ APR_BRIGADE_INSERT_TAIL(bsend, foo);
+ ec = APR_BUCKET_NEXT(ec);
+ } while (ec != e2);
+ }
+
+ if (found == 0) {
+ ap_remove_output_filter(f);
+ r->status = HTTP_OK;
+ /* bsend is assumed to be empty if we get here. */
+ e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
+ r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ return ap_pass_brigade(f->next, bsend);
+ }
+
+ if (ctx->num_ranges > 1) {
+ char *end;
+
+ /* add the final boundary */
+ end = apr_pstrcat(r->pool, CRLF "--", ctx->boundary, "--" CRLF, NULL);
+ ap_xlate_proto_to_ascii(end, strlen(end));
+ e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+ }
+
+ e = apr_bucket_eos_create(c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bsend, e);
+
+ /* we're done with the original content - all of our data is in bsend. */
+ apr_brigade_destroy(bb);
+
+ /* send our multipart output */
+ return ap_pass_brigade(f->next, bsend);
+}
+
+static int ap_set_byterange(request_rec *r)
+{
+ const char *range;
+ const char *if_range;
+ const char *match;
+ const char *ct;
+ int num_ranges;
+
+ if (r->assbackwards) {
+ return 0;
+ }
+
+ /* Check for Range request-header (HTTP/1.1) or Request-Range for
+ * backwards-compatibility with second-draft Luotonen/Franks
+ * byte-ranges (e.g. Netscape Navigator 2-3).
+ *
+ * We support this form, with Request-Range, and (farther down) we
+ * send multipart/x-byteranges instead of multipart/byteranges for
+ * Request-Range based requests to work around a bug in Netscape
+ * Navigator 2-3 and MSIE 3.
+ */
+
+ if (!(range = apr_table_get(r->headers_in, "Range"))) {
+ range = apr_table_get(r->headers_in, "Request-Range");
+ }
+
+ if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
+ return 0;
+ }
+
+ /* is content already a single range? */
+ if (apr_table_get(r->headers_out, "Content-Range")) {
+ return 0;
+ }
+
+ /* is content already a multiple range? */
+ if ((ct = apr_table_get(r->headers_out, "Content-Type"))
+ && (!strncasecmp(ct, "multipart/byteranges", 20)
+ || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
+ return 0;
+ }
+
+ /* Check the If-Range header for Etag or Date.
+ * Note that this check will return false (as required) if either
+ * of the two etags is weak.
+ */
+ if ((if_range = apr_table_get(r->headers_in, "If-Range"))) {
+ if (if_range[0] == '"') {
+ if (!(match = apr_table_get(r->headers_out, "Etag"))
+ || (strcmp(if_range, match) != 0)) {
+ return 0;
+ }
+ }
+ else if (!(match = apr_table_get(r->headers_out, "Last-Modified"))
+ || (strcmp(if_range, match) != 0)) {
+ return 0;
+ }
+ }
+
+ if (!ap_strchr_c(range, ',')) {
+ /* a single range */
+ num_ranges = 1;
+ }
+ else {
+ /* a multiple range */
+ num_ranges = 2;
+ }
+
+ r->status = HTTP_PARTIAL_CONTENT;
+ r->range = range + 6;
+
+ return num_ranges;
+}
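
For reference, here is a minimal standalone sketch (not part of the patch above) of how a single range spec such as "0-499", "-200" or "9500-" maps onto a [start, end] pair for a given content length, following the same clamping rules as parse_byterange(); it uses plain strtol() instead of apr_strtoff() only to keep the example self-contained, and demo_parse_range() is a hypothetical helper, not an httpd API.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical helper, not from the patch: clamp one range spec
     * against clength the way parse_byterange() does.  Returns 1 if the
     * range is satisfiable, 0 otherwise. */
    static int demo_parse_range(const char *spec, long clength,
                                long *start, long *end)
    {
        const char *dash = strchr(spec, '-');
        char *errp;

        if (!dash) {
            return 0;
        }
        if (dash == spec) {                /* "-N": the last N bytes */
            long n = strtol(dash + 1, &errp, 10);
            if (*errp) return 0;
            *start = clength - n;
            *end = clength - 1;
        }
        else {
            *start = strtol(spec, &errp, 10);
            if (errp != dash) return 0;
            if (dash[1]) {                 /* "M-N" */
                *end = strtol(dash + 1, &errp, 10);
                if (*errp) return 0;
            }
            else {                         /* "M-": to end of entity */
                *end = clength - 1;
            }
        }
        if (*start < 0)      *start = 0;
        if (*end >= clength) *end = clength - 1;
        return *start <= *end;
    }

    int main(void)
    {
        const char *specs[] = { "0-499", "-200", "9500-", "700-300" };
        long s, e;
        size_t i;

        for (i = 0; i < sizeof(specs) / sizeof(specs[0]); i++) {
            if (demo_parse_range(specs[i], 10000, &s, &e))
                printf("%-8s => bytes %ld-%ld/10000\n", specs[i], s, e);
            else
                printf("%-8s => unsatisfiable\n", specs[i]);
        }
        return 0;
    }
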
diff --git a/modules/http/chunk_filter.c b/modules/http/chunk_filter.c
new file mode 100644
index 0000000000..d28c755aee
--- /dev/null
+++ b/modules/http/chunk_filter.c
@@ -0,0 +1,167 @@
+/* Copyright 1999-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * chunk_filter.c --- HTTP/1.1 chunked transfer encoding filter.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "ap_mpm.h"
+#include "scoreboard.h"
+
+#include "mod_core.h"
+
+static apr_status_t chunk_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+#define ASCII_CRLF "\015\012"
+#define ASCII_ZERO "\060"
+ conn_rec *c = f->r->connection;
+ apr_bucket_brigade *more;
+ apr_bucket *e;
+ apr_status_t rv;
+
+ for (more = NULL; b; b = more, more = NULL) {
+ apr_off_t bytes = 0;
+ apr_bucket *eos = NULL;
+ apr_bucket *flush = NULL;
+ /* XXX: chunk_hdr must remain at this scope since it is used in a
+ * transient bucket.
+ */
+ char chunk_hdr[20]; /* enough space for the snprintf below */
+
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (APR_BUCKET_IS_EOS(e)) {
+ /* there shouldn't be anything after the eos */
+ eos = e;
+ break;
+ }
+ if (APR_BUCKET_IS_FLUSH(e)) {
+ flush = e;
+ }
+ else if (e->length == (apr_size_t)-1) {
+ /* unknown amount of data (e.g. a pipe) */
+ const char *data;
+ apr_size_t len;
+
+ rv = apr_bucket_read(e, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ if (len > 0) {
+ /*
+ * There may be a new next bucket representing the
+ * rest of the data stream on which a read() may
+ * block so we pass down what we have so far.
+ */
+ bytes += len;
+ more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
+ break;
+ }
+ else {
+ /* If there was nothing in this bucket then we can
+ * safely move on to the next one without pausing
+ * to pass down what we have counted up so far.
+ */
+ continue;
+ }
+ }
+ else {
+ bytes += e->length;
+ }
+ }
+
+ /*
+ * XXX: if there aren't very many bytes at this point it may
+ * be a good idea to set them aside and return for more,
+ * unless we haven't finished counting this brigade yet.
+ */
+ /* if there are content bytes, then wrap them in a chunk */
+ if (bytes > 0) {
+ apr_size_t hdr_len;
+ /*
+ * Insert the chunk header, specifying the number of bytes in
+ * the chunk.
+ */
+ hdr_len = apr_snprintf(chunk_hdr, sizeof(chunk_hdr),
+ "%" APR_UINT64_T_HEX_FMT CRLF, (apr_uint64_t)bytes);
+ ap_xlate_proto_to_ascii(chunk_hdr, hdr_len);
+ e = apr_bucket_transient_create(chunk_hdr, hdr_len,
+ c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(b, e);
+
+ /*
+ * Insert the end-of-chunk CRLF before an EOS or
+ * FLUSH bucket, or appended to the brigade
+ */
+ e = apr_bucket_immortal_create(ASCII_CRLF, 2, c->bucket_alloc);
+ if (eos != NULL) {
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+ else if (flush != NULL) {
+ APR_BUCKET_INSERT_BEFORE(flush, e);
+ }
+ else {
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ }
+
+ /* RFC 2616, Section 3.6.1
+ *
+ * If there is an EOS bucket, then prefix it with:
+ * 1) the last-chunk marker ("0" CRLF)
+ * 2) the trailer
+ * 3) the end-of-chunked body CRLF
+ *
+ * If there is no EOS bucket, then do nothing.
+ *
+ * XXX: it would be nice to combine this with the end-of-chunk
+ * marker above, but this is a bit more straight-forward for
+ * now.
+ */
+ if (eos != NULL) {
+ /* XXX: (2) trailers ... does not yet exist */
+ e = apr_bucket_immortal_create(ASCII_ZERO ASCII_CRLF
+ /* <trailers> */
+ ASCII_CRLF, 5, c->bucket_alloc);
+ APR_BUCKET_INSERT_BEFORE(eos, e);
+ }
+
+ /* pass the brigade to the next filter. */
+ rv = ap_pass_brigade(f->next, b);
+ if (rv != APR_SUCCESS || eos != NULL) {
+ return rv;
+ }
+ }
+ return APR_SUCCESS;
+}
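
As a reference point, here is a minimal standalone sketch (not part of the patch above) of the wire framing the chunk filter produces per RFC 2616 section 3.6.1: a hex chunk-size line, the chunk data, a CRLF, then the "0" last-chunk marker and a final CRLF (trailers omitted, as in the filter itself). demo_write_chunked() is an invented name for illustration only.

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical demo, not from the patch: emit one data chunk followed
     * by the last-chunk marker and the empty trailer. */
    static void demo_write_chunked(FILE *out, const char *data, size_t len)
    {
        if (len > 0) {
            fprintf(out, "%zx\r\n", len);   /* chunk-size in hex */
            fwrite(data, 1, len, out);      /* chunk-data */
            fputs("\r\n", out);             /* end of this chunk */
        }
        fputs("0\r\n\r\n", out);            /* last-chunk + end of body */
    }

    int main(void)
    {
        const char *body = "Hello, chunked world.\n";
        demo_write_chunked(stdout, body, strlen(body));
        return 0;
    }
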
diff --git a/modules/http/config2.m4 b/modules/http/config2.m4
index ecb7e6e52d..87a7cc8a20 100644
--- a/modules/http/config2.m4
+++ b/modules/http/config2.m4
@@ -2,7 +2,7 @@ dnl modules enabled in this directory by default
APACHE_MODPATH_INIT(http)
-http_objects="http_core.lo http_protocol.lo http_request.lo"
+http_objects="http_core.lo http_protocol.lo http_request.lo http_filters.lo chunk_filter.lo byterange_filter.lo http_etag.lo"
dnl mod_http should only be built as a static module for now.
dnl this will hopefully be "fixed" at some point in the future by
diff --git a/modules/http/http_etag.c b/modules/http/http_etag.c
new file mode 100644
index 0000000000..b26d303c3d
--- /dev/null
+++ b/modules/http/http_etag.c
@@ -0,0 +1,220 @@
+/* Copyright 1999-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_strings.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+
+#define APR_WANT_STRFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "httpd.h"
+#include "http_config.h"
+#include "http_connection.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+
+/* Generate the human-readable hex representation of an unsigned long
+ * (basically a faster version of 'sprintf("%lx")')
+ */
+#define HEX_DIGITS "0123456789abcdef"
+static char *etag_ulong_to_hex(char *next, unsigned long u)
+{
+ int printing = 0;
+ int shift = sizeof(unsigned long) * 8 - 4;
+ do {
+ unsigned long next_digit = ((u >> shift) & (unsigned long)0xf);
+ if (next_digit) {
+ *next++ = HEX_DIGITS[next_digit];
+ printing = 1;
+ }
+ else if (printing) {
+ *next++ = HEX_DIGITS[next_digit];
+ }
+ shift -= 4;
+ } while (shift);
+ *next++ = HEX_DIGITS[u & (unsigned long)0xf];
+ return next;
+}
+
+#define ETAG_WEAK "W/"
+#define CHARS_PER_UNSIGNED_LONG (sizeof(unsigned long) * 2)
+/*
+ * Construct an entity tag (ETag) from resource information. If it's a real
+ * file, build in some of the file characteristics. If the modification time
+ * is newer than (request-time minus 1 second), mark the ETag as weak - it
+ * could be modified again in as short an interval. We rationalize the
+ * modification time we're given to keep it from being in the future.
+ */
+AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
+{
+ char *weak;
+ apr_size_t weak_len;
+ char *etag;
+ char *next;
+ core_dir_config *cfg;
+ etag_components_t etag_bits;
+ etag_components_t bits_added;
+
+ cfg = (core_dir_config *)ap_get_module_config(r->per_dir_config,
+ &core_module);
+ etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
+
+ /*
+ * If it's a file (or we wouldn't be here) and no ETags
+ * should be set for files, return an empty string and
+ * note it for the header-sender to ignore.
+ */
+ if (etag_bits & ETAG_NONE) {
+ apr_table_setn(r->notes, "no-etag", "omit");
+ return "";
+ }
+
+ if (etag_bits == ETAG_UNSET) {
+ etag_bits = ETAG_BACKWARD;
+ }
+ /*
+ * Make an ETag header out of various pieces of information. We use
+ * the last-modified date and, if we have a real file, the
+ * length and inode number - note that this doesn't have to match
+ * the content-length (i.e. includes), it just has to be unique
+ * for the file.
+ *
+ * If the request was made within a second of the last-modified date,
+ * we send a weak tag instead of a strong one, since it could
+ * be modified again later in the second, and the validation
+ * would be incorrect.
+ */
+ if ((r->request_time - r->mtime > (1 * APR_USEC_PER_SEC)) &&
+ !force_weak) {
+ weak = NULL;
+ weak_len = 0;
+ }
+ else {
+ weak = ETAG_WEAK;
+ weak_len = sizeof(ETAG_WEAK);
+ }
+
+ if (r->finfo.filetype != 0) {
+ /*
+ * ETag gets set to [W/]"inode-size-mtime", modulo any
+ * FileETag keywords.
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
+ 3 * CHARS_PER_UNSIGNED_LONG + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ bits_added = 0;
+ if (etag_bits & ETAG_INODE) {
+ next = etag_ulong_to_hex(next, (unsigned long)r->finfo.inode);
+ bits_added |= ETAG_INODE;
+ }
+ if (etag_bits & ETAG_SIZE) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_ulong_to_hex(next, (unsigned long)r->finfo.size);
+ bits_added |= ETAG_SIZE;
+ }
+ if (etag_bits & ETAG_MTIME) {
+ if (bits_added != 0) {
+ *next++ = '-';
+ }
+ next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
+ }
+ *next++ = '"';
+ *next = '\0';
+ }
+ else {
+ /*
+ * Not a file document, so just use the mtime: [W/]"mtime"
+ */
+ etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
+ CHARS_PER_UNSIGNED_LONG + 1);
+ next = etag;
+ if (weak) {
+ while (*weak) {
+ *next++ = *weak++;
+ }
+ }
+ *next++ = '"';
+ next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
+ *next++ = '"';
+ *next = '\0';
+ }
+
+ return etag;
+}
+
+AP_DECLARE(void) ap_set_etag(request_rec *r)
+{
+ char *etag;
+ char *variant_etag, *vlv;
+ int vlv_weak;
+
+ if (!r->vlist_validator) {
+ etag = ap_make_etag(r, 0);
+
+ /* If we get a blank etag back, don't set the header. */
+ if (!etag[0]) {
+ return;
+ }
+ }
+ else {
+ /* If we have a variant list validator (vlv) due to the
+ * response being negotiated, then we create a structured
+ * entity tag which merges the variant etag with the variant
+ * list validator (vlv). This merging makes revalidation
+ * somewhat safer, ensures that caches which can deal with
+ * Vary will (eventually) be updated if the set of variants is
+ * changed, and is also a protocol requirement for transparent
+ * content negotiation.
+ */
+
+ /* if the variant list validator is weak, we make the whole
+ * structured etag weak. If we did not, then clients could
+ * have problems merging range responses if we have different
+ * variants with the same non-globally-unique strong etag.
+ */
+
+ vlv = r->vlist_validator;
+ vlv_weak = (vlv[0] == 'W');
+
+ variant_etag = ap_make_etag(r, vlv_weak);
+
+ /* If we get a blank etag back, don't append vlv and stop now. */
+ if (!variant_etag[0]) {
+ return;
+ }
+
+ /* merge variant_etag and vlv into a structured etag */
+ variant_etag[strlen(variant_etag) - 1] = '\0';
+ if (vlv_weak) {
+ vlv += 3;
+ }
+ else {
+ vlv++;
+ }
+ etag = apr_pstrcat(r->pool, variant_etag, ";", vlv, NULL);
+ }
+
+ apr_table_setn(r->headers_out, "ETag", etag);
+}
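
For illustration, here is a minimal standalone sketch (not part of the patch above) of the [W/]"inode-size-mtime" shape that ap_make_etag() emits for a plain file, using snprintf("%lx") in place of etag_ulong_to_hex(); all field values below are invented for the demo.

    #include <stdio.h>

    int main(void)
    {
        unsigned long inode = 0x2a91bUL;    /* hypothetical st_ino  */
        unsigned long size  = 4096UL;       /* hypothetical st_size */
        unsigned long mtime = 0x41fa3c00UL; /* hypothetical mtime   */
        int weak = 0;                       /* 1 => prefix with W/  */
        char etag[64];

        snprintf(etag, sizeof(etag), "%s\"%lx-%lx-%lx\"",
                 weak ? "W/" : "", inode, size, mtime);
        printf("ETag: %s\n", etag);
        return 0;
    }
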
diff --git a/modules/http/http_filters.c b/modules/http/http_filters.c
new file mode 100644
index 0000000000..0f6a037534
--- /dev/null
+++ b/modules/http/http_filters.c
@@ -0,0 +1,1247 @@
+/* Copyright 1999-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * http_filters.c --- HTTP routines which either are filters or deal with filters.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_buckets.h"
+#include "apr_lib.h"
+#include "apr_signal.h"
+
+#define APR_WANT_STDIO /* for sscanf */
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "util_filter.h"
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h"
+#include "http_main.h"
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_log.h" /* For errors detected in basic auth common
+ * support code... */
+#include "apr_date.h" /* For apr_date_parse_http and APR_DATE_BAD */
+#include "util_charset.h"
+#include "util_ebcdic.h"
+#include "util_time.h"
+
+#include "mod_core.h"
+
+#if APR_HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#if APR_HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+static long get_chunk_size(char *);
+
+typedef struct http_filter_ctx {
+ apr_off_t remaining;
+ apr_off_t limit;
+ apr_off_t limit_used;
+ enum {
+ BODY_NONE,
+ BODY_LENGTH,
+ BODY_CHUNK
+ } state;
+ int eos_sent;
+} http_ctx_t;
+
+/* This is the HTTP_INPUT filter for HTTP requests and responses from
+ * proxied servers (mod_proxy). It handles chunked and content-length
+ * bodies. This can only be inserted/used after the headers
+ * are successfully parsed.
+ */
+apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_bucket *e;
+ http_ctx_t *ctx = f->ctx;
+ apr_status_t rv;
+ apr_off_t totalread;
+
+ /* just get out of the way of things we don't want. */
+ if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+ }
+
+ if (!ctx) {
+ const char *tenc, *lenp;
+ f->ctx = ctx = apr_palloc(f->r->pool, sizeof(*ctx));
+ ctx->state = BODY_NONE;
+ ctx->remaining = 0;
+ ctx->limit_used = 0;
+ ctx->eos_sent = 0;
+
+ /* LimitRequestBody does not apply to proxied responses.
+ * Consider implementing this check in its own filter.
+ * Would adding a directive to limit the size of proxied
+ * responses be useful?
+ */
+ if (!f->r->proxyreq) {
+ ctx->limit = ap_get_limit_req_body(f->r);
+ }
+ else {
+ ctx->limit = 0;
+ }
+
+ tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
+ lenp = apr_table_get(f->r->headers_in, "Content-Length");
+
+ if (tenc) {
+ if (!strcasecmp(tenc, "chunked")) {
+ ctx->state = BODY_CHUNK;
+ }
+ }
+ else if (lenp) {
+ char *endstr;
+
+ ctx->state = BODY_LENGTH;
+ errno = 0;
+
+ /* Protects against over/underflow, non-digit chars in the
+ * string (excluding leading space) (the endstr checks)
+ * and a negative number. */
+ if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10)
+ || endstr == lenp || *endstr || ctx->remaining < 0) {
+ apr_bucket_brigade *bb;
+
+ ctx->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Invalid Content-Length");
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ /* If we have a limit in effect and we know the C-L ahead of
+ * time, stop it here if it is invalid.
+ */
+ if (ctx->limit && ctx->limit < ctx->remaining) {
+ apr_bucket_brigade *bb;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Requested content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+ }
+
+ /* If we don't have a request entity indicated by the headers, EOS.
+ * (BODY_NONE is a valid intermediate state due to trailers,
+ * but it isn't a valid starting state.)
+ *
+ * RFC 2616 Section 4.4 note 5 states that connection-close
+ * is invalid for a request entity - request bodies must be
+ * denoted by C-L or T-E: chunked.
+ *
+ * Note that since the proxy uses this filter to handle the
+ * proxied *response*, proxy responses MUST be exempt.
+ */
+ if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+
+ /* Since we're about to read data, send 100-Continue if needed.
+ * Only valid on chunked and C-L bodies where the C-L is > 0. */
+ if ((ctx->state == BODY_CHUNK ||
+ (ctx->state == BODY_LENGTH && ctx->remaining > 0)) &&
+ f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)) {
+ char *tmp;
+ apr_bucket_brigade *bb;
+
+ tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ",
+ ap_get_status_line(100), CRLF CRLF, NULL);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = apr_bucket_pool_create(tmp, strlen(tmp), f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_HEAD(bb, e);
+ e = apr_bucket_flush_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+
+ ap_pass_brigade(f->c->output_filters, bb);
+ }
+
+ /* We can't read the chunk until after sending 100 if required. */
+ if (ctx->state == BODY_CHUNK) {
+ char line[30];
+ apr_bucket_brigade *bb;
+ apr_size_t len = 30;
+ apr_off_t brigade_length;
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+
+ if (rv == APR_SUCCESS) {
+ /* We have to check the length of the brigade we got back.
+ * We will not accept partial lines.
+ */
+ rv = apr_brigade_length(bb, 1, &brigade_length);
+ if (rv == APR_SUCCESS
+ && brigade_length > f->r->server->limit_req_line) {
+ rv = APR_ENOSPC;
+ }
+ if (rv == APR_SUCCESS) {
+ rv = apr_brigade_flatten(bb, line, &len);
+ if (rv == APR_SUCCESS) {
+ ctx->remaining = get_chunk_size(line);
+ }
+ }
+ }
+ apr_brigade_cleanup(bb);
+
+ /* Detect chunksize error (such as overflow) */
+ if (rv != APR_SUCCESS || ctx->remaining < 0) {
+ ctx->remaining = 0; /* Reset it in case we have to
+ * come back here later */
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ if (!ctx->remaining) {
+ /* Handle trailers by calling ap_get_mime_headers again! */
+ ctx->state = BODY_NONE;
+ ap_get_mime_headers(f->r);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+ }
+
+ if (ctx->eos_sent) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ if (!ctx->remaining) {
+ switch (ctx->state) {
+ case BODY_NONE:
+ break;
+ case BODY_LENGTH:
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ case BODY_CHUNK:
+ {
+ char line[30];
+ apr_bucket_brigade *bb;
+ apr_size_t len = 30;
+
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+
+ /* We need to read the CRLF after the chunk. */
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+ apr_brigade_cleanup(bb);
+
+ if (rv == APR_SUCCESS) {
+ /* Read the real chunk line. */
+ rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
+ APR_BLOCK_READ, 0);
+ if (rv == APR_SUCCESS) {
+ rv = apr_brigade_flatten(bb, line, &len);
+ if (rv == APR_SUCCESS) {
+ ctx->remaining = get_chunk_size(line);
+ }
+ }
+ apr_brigade_cleanup(bb);
+ }
+
+ /* Detect chunksize error (such as overflow) */
+ if (rv != APR_SUCCESS || ctx->remaining < 0) {
+ ctx->remaining = 0; /* Reset it in case we have to
+ * come back here later */
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE,
+ NULL, f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+
+ if (!ctx->remaining) {
+ /* Handle trailers by calling ap_get_mime_headers again! */
+ ctx->state = BODY_NONE;
+ ap_get_mime_headers(f->r);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ ctx->eos_sent = 1;
+ return APR_SUCCESS;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Ensure that the caller can not go over our boundary point. */
+ if (ctx->state == BODY_LENGTH || ctx->state == BODY_CHUNK) {
+ if (ctx->remaining < readbytes) {
+ readbytes = ctx->remaining;
+ }
+ AP_DEBUG_ASSERT(readbytes > 0);
+ }
+
+ rv = ap_get_brigade(f->next, b, mode, block, readbytes);
+
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* How many bytes did we just read? */
+ apr_brigade_length(b, 0, &totalread);
+
+ /* If this happens, we have a bucket of unknown length. Die because
+ * it means our assumptions have changed. */
+ AP_DEBUG_ASSERT(totalread >= 0);
+
+ if (ctx->state != BODY_NONE) {
+ ctx->remaining -= totalread;
+ }
+
+ /* If we have no more bytes remaining on a C-L request,
+ * save the caller a roundtrip to discover EOS.
+ */
+ if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+
+ /* We have a limit in effect. */
+ if (ctx->limit) {
+ /* FIXME: Note that we might get slightly confused on chunked inputs
+ * as we'd need to compensate for the chunk lengths which may not
+ * really count. This seems to be up for interpretation. */
+ ctx->limit_used += totalread;
+ if (ctx->limit < ctx->limit_used) {
+ apr_bucket_brigade *bb;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
+ "Read content-length of %" APR_OFF_T_FMT
+ " is larger than the configured limit"
+ " of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit);
+ bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
+ e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
+ f->r->pool,
+ f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(bb, e);
+ ctx->eos_sent = 1;
+ return ap_pass_brigade(f->r->output_filters, bb);
+ }
+ }
+
+ return APR_SUCCESS;
+}
+
+/**
+ * Parse a chunk extension, detect overflow.
+ * There are two error cases:
+ * 1) If the conversion would require too many bits, a -1 is returned.
+ * 2) If the conversion used the correct number of bits, but an overflow
+ * caused only the sign bit to flip, then that negative number is
+ * returned.
+ * In general, any negative number can be considered an overflow error.
+ */
+static long get_chunk_size(char *b)
+{
+ long chunksize = 0;
+ size_t chunkbits = sizeof(long) * 8;
+
+ /* Skip leading zeros */
+ while (*b == '0') {
+ ++b;
+ }
+
+ while (apr_isxdigit(*b) && (chunkbits > 0)) {
+ int xvalue = 0;
+
+ if (*b >= '0' && *b <= '9') {
+ xvalue = *b - '0';
+ }
+ else if (*b >= 'A' && *b <= 'F') {
+ xvalue = *b - 'A' + 0xa;
+ }
+ else if (*b >= 'a' && *b <= 'f') {
+ xvalue = *b - 'a' + 0xa;
+ }
+
+ chunksize = (chunksize << 4) | xvalue;
+ chunkbits -= 4;
+ ++b;
+ }
+ if (apr_isxdigit(*b) && (chunkbits <= 0)) {
+ /* overflow */
+ return -1;
+ }
+
+ return chunksize;
+}
+
+typedef struct header_struct {
+ apr_pool_t *pool;
+ apr_bucket_brigade *bb;
+} header_struct;
+
+/* Send a single HTTP header field to the client. Note that this function
+ * is used in calls to table_do(), so their interfaces are co-dependent.
+ * In other words, don't change this one without checking table_do in alloc.c.
+ * It returns true unless there was a write error of some kind.
+ */
+static int form_header_field(header_struct *h,
+ const char *fieldname, const char *fieldval)
+{
+#if APR_CHARSET_EBCDIC
+ char *headfield;
+ apr_size_t len;
+ apr_size_t name_len;
+ apr_size_t val_len;
+ char *next;
+
+ name_len = strlen(fieldname);
+ val_len = strlen(fieldval);
+ len = name_len + val_len + 4; /* 4 for ": " plus CRLF */
+ headfield = (char *)apr_palloc(h->pool, len + 1);
+ memcpy(headfield, fieldname, name_len);
+ next = headfield + name_len;
+ *next++ = ':';
+ *next++ = ' ';
+ memcpy(next, fieldval, val_len);
+ next += val_len;
+ *next++ = CR;
+ *next++ = LF;
+ *next = 0;
+ ap_xlate_proto_to_ascii(headfield, len);
+ apr_brigade_write(h->bb, NULL, NULL, headfield, len);
+#else
+ struct iovec vec[4];
+ struct iovec *v = vec;
+ v->iov_base = (void *)fieldname;
+ v->iov_len = strlen(fieldname);
+ v++;
+ v->iov_base = ": ";
+ v->iov_len = sizeof(": ") - 1;
+ v++;
+ v->iov_base = (void *)fieldval;
+ v->iov_len = strlen(fieldval);
+ v++;
+ v->iov_base = CRLF;
+ v->iov_len = sizeof(CRLF) - 1;
+ apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
+#endif /* !APR_CHARSET_EBCDIC */
+ return 1;
+}
+
+/* This routine is called by apr_table_do and merges all instances of
+ * the passed field values into a single array that will be further
+ * processed by some later routine. Originally intended to help split
+ * and recombine multiple Vary fields, though it is generic to any field
+ * consisting of comma/space-separated tokens.
+ */
+static int uniq_field_values(void *d, const char *key, const char *val)
+{
+ apr_array_header_t *values;
+ char *start;
+ char *e;
+ char **strpp;
+ int i;
+
+ values = (apr_array_header_t *)d;
+
+ e = apr_pstrdup(values->pool, val);
+
+ do {
+ /* Find a non-empty fieldname */
+
+ while (*e == ',' || apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e == '\0') {
+ break;
+ }
+ start = e;
+ while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
+ ++e;
+ }
+ if (*e != '\0') {
+ *e++ = '\0';
+ }
+
+ /* Now add it to values if it isn't already represented.
+ * Could be replaced by a ap_array_strcasecmp() if we had one.
+ */
+ for (i = 0, strpp = (char **) values->elts; i < values->nelts;
+ ++i, ++strpp) {
+ if (*strpp && strcasecmp(*strpp, start) == 0) {
+ break;
+ }
+ }
+ if (i == values->nelts) { /* if not found */
+ *(char **)apr_array_push(values) = start;
+ }
+ } while (*e != '\0');
+
+ return 1;
+}
+
+/*
+ * Since some clients choke violently on multiple Vary fields, or
+ * Vary fields with duplicate tokens, combine any multiples and remove
+ * any duplicates.
+ */
+static void fixup_vary(request_rec *r)
+{
+ apr_array_header_t *varies;
+
+ varies = apr_array_make(r->pool, 5, sizeof(char *));
+
+ /* Extract all Vary fields from the headers_out, separate each into
+ * its comma-separated fieldname values, and then add them to varies
+ * if not already present in the array.
+ */
+ apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
+ (void *) varies, r->headers_out, "Vary", NULL);
+
+ /* If we found any, replace old Vary fields with unique-ified value */
+
+ if (varies->nelts > 0) {
+ apr_table_setn(r->headers_out, "Vary",
+ apr_array_pstrcat(r->pool, varies, ','));
+ }
+}
+
+/* Send a request's HTTP response headers to the client.
+ */
+static apr_status_t send_all_header_fields(header_struct *h,
+ const request_rec *r)
+{
+ const apr_array_header_t *elts;
+ const apr_table_entry_t *t_elt;
+ const apr_table_entry_t *t_end;
+ struct iovec *vec;
+ struct iovec *vec_next;
+
+ elts = apr_table_elts(r->headers_out);
+ if (elts->nelts == 0) {
+ return APR_SUCCESS;
+ }
+ t_elt = (const apr_table_entry_t *)(elts->elts);
+ t_end = t_elt + elts->nelts;
+ vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
+ sizeof(struct iovec));
+ vec_next = vec;
+
+ /* For each field, generate
+ * name ": " value CRLF
+ */
+ do {
+ vec_next->iov_base = (void*)(t_elt->key);
+ vec_next->iov_len = strlen(t_elt->key);
+ vec_next++;
+ vec_next->iov_base = ": ";
+ vec_next->iov_len = sizeof(": ") - 1;
+ vec_next++;
+ vec_next->iov_base = (void*)(t_elt->val);
+ vec_next->iov_len = strlen(t_elt->val);
+ vec_next++;
+ vec_next->iov_base = CRLF;
+ vec_next->iov_len = sizeof(CRLF) - 1;
+ vec_next++;
+ t_elt++;
+ } while (t_elt < t_end);
+
+#if APR_CHARSET_EBCDIC
+ {
+ apr_size_t len;
+ char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
+ }
+#else
+ return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
+#endif
+}
+
+/*
+ * Determine the protocol to use for the response. Potentially downgrade
+ * to HTTP/1.0 in some situations and/or turn off keepalives.
+ *
+ * also prepare r->status_line.
+ */
+static void basic_http_header_check(request_rec *r,
+ const char **protocol)
+{
+ if (r->assbackwards) {
+ /* no such thing as a response protocol */
+ return;
+ }
+
+ if (!r->status_line) {
+ r->status_line = ap_get_status_line(r->status);
+ }
+
+ /* Note that we must downgrade before checking for force responses. */
+ if (r->proto_num > HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
+ r->proto_num = HTTP_VERSION(1,0);
+ }
+
+ /* kludge around broken browsers when indicated by force-response-1.0
+ */
+ if (r->proto_num == HTTP_VERSION(1,0)
+ && apr_table_get(r->subprocess_env, "force-response-1.0")) {
+ *protocol = "HTTP/1.0";
+ r->connection->keepalive = AP_CONN_CLOSE;
+ }
+ else {
+ *protocol = AP_SERVER_PROTOCOL;
+ }
+
+}
+
+/* fill "bb" with a barebones/initial HTTP response header */
+static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
+ const char *protocol)
+{
+ char *date;
+ const char *server;
+ header_struct h;
+ struct iovec vec[4];
+
+ if (r->assbackwards) {
+ /* there are no headers to send */
+ return;
+ }
+
+ /* Output the HTTP/1.x Status-Line and the Date and Server fields */
+
+ vec[0].iov_base = (void *)protocol;
+ vec[0].iov_len = strlen(protocol);
+ vec[1].iov_base = (void *)" ";
+ vec[1].iov_len = sizeof(" ") - 1;
+ vec[2].iov_base = (void *)(r->status_line);
+ vec[2].iov_len = strlen(r->status_line);
+ vec[3].iov_base = (void *)CRLF;
+ vec[3].iov_len = sizeof(CRLF) - 1;
+#if APR_CHARSET_EBCDIC
+ {
+ char *tmp;
+ apr_size_t len;
+ tmp = apr_pstrcatv(r->pool, vec, 4, &len);
+ ap_xlate_proto_to_ascii(tmp, len);
+ apr_brigade_write(bb, NULL, NULL, tmp, len);
+ }
+#else
+ apr_brigade_writev(bb, NULL, NULL, vec, 4);
+#endif
+
+ date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+
+ h.pool = r->pool;
+ h.bb = bb;
+ form_header_field(&h, "Date", date);
+
+ /* keep the set-by-proxy server header, otherwise
+ * generate a new server header */
+ if (r->proxyreq != PROXYREQ_NONE) {
+ server = apr_table_get(r->headers_out, "Server");
+ if (server) {
+ form_header_field(&h, "Server", server);
+ }
+ }
+ else {
+ form_header_field(&h, "Server", ap_get_server_version());
+ }
+
+ /* unset so we don't send them again */
+ apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
+ apr_table_unset(r->headers_out, "Server");
+}
+
+AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
+{
+ const char *protocol;
+
+ basic_http_header_check(r, &protocol);
+ basic_http_header(r, bb, protocol);
+}
+
+/* Navigator versions 2.x, 3.x and 4.0 betas up to and including 4.0b2
+ * have a header parsing bug. If the terminating \r\n occur starting
+ * at offset 256, 257 or 258 of output then it will not properly parse
+ * the headers. Curiously it doesn't exhibit this problem at 512, 513.
+ * We are guessing that this is because their initial read of a new request
+ * uses a 256 byte buffer, and subsequent reads use a larger buffer.
+ * So the problem might exist at different offsets as well.
+ *
+ * This should also work on keepalive connections assuming they use the
+ * same small buffer for the first read of each new request.
+ *
+ * At any rate, we check the bytes written so far and, if we are about to
+ * tickle the bug, we instead insert a bogus padding header. Since the bug
+ * manifests as a broken image in Navigator, users blame the server. :(
+ * It is more expensive to check the User-Agent than it is to just add the
+ * bytes, so we haven't used the BrowserMatch feature here.
+ */
+static void terminate_header(apr_bucket_brigade *bb)
+{
+ char tmp[] = "X-Pad: avoid browser bug" CRLF;
+ char crlf[] = CRLF;
+ apr_off_t len;
+ apr_size_t buflen;
+
+ (void) apr_brigade_length(bb, 1, &len);
+
+ if (len >= 255 && len <= 257) {
+ buflen = strlen(tmp);
+ ap_xlate_proto_to_ascii(tmp, buflen);
+ apr_brigade_write(bb, NULL, NULL, tmp, buflen);
+ }
+ buflen = strlen(crlf);
+ ap_xlate_proto_to_ascii(crlf, buflen);
+ apr_brigade_write(bb, NULL, NULL, crlf, buflen);
+}
+
+AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
+{
+ int rv;
+ apr_bucket_brigade *b;
+ header_struct h;
+
+ if (r->method_number != M_TRACE) {
+ return DECLINED;
+ }
+
+ /* Get the original request */
+ while (r->prev) {
+ r = r->prev;
+ }
+
+ if ((rv = ap_setup_client_block(r, REQUEST_NO_BODY))) {
+ return rv;
+ }
+
+ ap_set_content_type(r, "message/http");
+
+ /* Now we recreate the request, and echo it back */
+
+ b = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ apr_brigade_putstrs(b, NULL, NULL, r->the_request, CRLF, NULL);
+ h.pool = r->pool;
+ h.bb = b;
+ apr_table_do((int (*) (void *, const char *, const char *))
+ form_header_field, (void *) &h, r->headers_in, NULL);
+ apr_brigade_puts(b, NULL, NULL, CRLF);
+ ap_pass_brigade(r->output_filters, b);
+
+ return DONE;
+}
+
+typedef struct header_filter_ctx {
+ int headers_sent;
+} header_filter_ctx;
+
+AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
+ apr_bucket_brigade *b)
+{
+ request_rec *r = f->r;
+ conn_rec *c = r->connection;
+ const char *clheader;
+ const char *protocol;
+ apr_bucket *e;
+ apr_bucket_brigade *b2;
+ header_struct h;
+ header_filter_ctx *ctx = f->ctx;
+
+ AP_DEBUG_ASSERT(!r->main);
+
+ if (r->header_only) {
+ if (!ctx) {
+ ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
+ }
+ else if (ctx->headers_sent) {
+ apr_brigade_destroy(b);
+ return OK;
+ }
+ }
+
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ if (e->type == &ap_bucket_type_error) {
+ ap_bucket_error *eb = e->data;
+
+ ap_die(eb->status, r);
+ return AP_FILTER_ERROR;
+ }
+ }
+
+ if (r->assbackwards) {
+ r->sent_bodyct = 1;
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+ }
+
+ /*
+ * Now that we are ready to send a response, we need to combine the two
+ * header field tables into a single table. If we don't do this, our
+ * later attempts to set or unset a given fieldname might be bypassed.
+ */
+ if (!apr_is_empty_table(r->err_headers_out)) {
+ r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
+ r->headers_out);
+ }
+
+ /*
+ * Remove the 'Vary' header field if the client can't handle it.
+ * Since this will have nasty effects on HTTP/1.1 caches, force
+ * the response into HTTP/1.0 mode.
+ *
+ * Note: the force-response-1.0 should come before the call to
+ * basic_http_header_check()
+ */
+ if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
+ apr_table_unset(r->headers_out, "Vary");
+ r->proto_num = HTTP_VERSION(1,0);
+ apr_table_set(r->subprocess_env, "force-response-1.0", "1");
+ }
+ else {
+ fixup_vary(r);
+ }
+
+ /*
+ * Now remove any ETag response header field if earlier processing
+ * says so (such as a 'FileETag None' directive).
+ */
+ if (apr_table_get(r->notes, "no-etag") != NULL) {
+ apr_table_unset(r->headers_out, "ETag");
+ }
+
+ /* determine the protocol and whether we should use keepalives. */
+ basic_http_header_check(r, &protocol);
+ ap_set_keepalive(r);
+
+ if (r->chunked) {
+ apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ apr_table_setn(r->headers_out, "Content-Type",
+ ap_make_content_type(r, r->content_type));
+
+ if (r->content_encoding) {
+ apr_table_setn(r->headers_out, "Content-Encoding",
+ r->content_encoding);
+ }
+
+ if (!apr_is_empty_array(r->content_languages)) {
+ int i;
+ char **languages = (char **)(r->content_languages->elts);
+ for (i = 0; i < r->content_languages->nelts; ++i) {
+ apr_table_mergen(r->headers_out, "Content-Language", languages[i]);
+ }
+ }
+
+ /*
+ * Control cacheability for non-cacheable responses if not already set by
+ * some other part of the server configuration.
+ */
+ if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
+ char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
+ ap_recent_rfc822_date(date, r->request_time);
+ apr_table_addn(r->headers_out, "Expires", date);
+ }
+
+ /* This is a hack, but I can't find any way around it. The idea is that
+ * we don't want to send out 0 Content-Lengths if it is a head request.
+ * This happens when modules try to outsmart the server, and return
+ * if they see a HEAD request. Apache 1.3 handlers were supposed to
+ * just return in that situation, and the core handled the HEAD. In
+ * 2.0, if a handler returns, then the core sends an EOS bucket down
+ * the filter stack, and the content-length filter computes a C-L of
+ * zero and that gets put in the headers, and we end up sending a
+ * zero C-L to the client. We can't just remove the C-L filter,
+ * because well behaved 2.0 handlers will send their data down the stack,
+ * and we will compute a real C-L for the head request. RBB
+ */
+ if (r->header_only
+ && (clheader = apr_table_get(r->headers_out, "Content-Length"))
+ && !strcmp(clheader, "0")) {
+ apr_table_unset(r->headers_out, "Content-Length");
+ }
+
+ b2 = apr_brigade_create(r->pool, c->bucket_alloc);
+ basic_http_header(r, b2, protocol);
+
+ h.pool = r->pool;
+ h.bb = b2;
+
+ if (r->status == HTTP_NOT_MODIFIED) {
+ apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
+ (void *) &h, r->headers_out,
+ "Connection",
+ "Keep-Alive",
+ "ETag",
+ "Content-Location",
+ "Expires",
+ "Cache-Control",
+ "Vary",
+ "Warning",
+ "WWW-Authenticate",
+ "Proxy-Authenticate",
+ "Set-Cookie",
+ "Set-Cookie2",
+ NULL);
+ }
+ else {
+ send_all_header_fields(&h, r);
+ }
+
+ terminate_header(b2);
+
+ ap_pass_brigade(f->next, b2);
+
+ if (r->header_only) {
+ apr_brigade_destroy(b);
+ ctx->headers_sent = 1;
+ return OK;
+ }
+
+ r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
+
+ if (r->chunked) {
+ /* We can't add this filter until we have already sent the headers.
+ * If we add it before this point, then the headers will be chunked
+ * as well, and that is just wrong.
+ */
+ ap_add_output_filter("CHUNK", NULL, r, r->connection);
+ }
+
+ /* Don't remove this filter until after we have added the CHUNK filter.
+ * Otherwise, f->next won't be the CHUNK filter and thus the first
+ * brigade won't be chunked properly.
+ */
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, b);
+}
+
+/* In HTTP/1.1, any method can have a body. However, most GET handlers
+ * wouldn't know what to do with a request body if they received one.
+ * This helper routine tests for and reads any message body in the request,
+ * simply discarding whatever it receives. We need to do this because
+ * failing to read the request body would cause it to be interpreted
+ * as the next request on a persistent connection.
+ *
+ * Since we return an error status if the request is malformed, this
+ * routine should be called at the beginning of a no-body handler, e.g.,
+ *
+ * if ((retval = ap_discard_request_body(r)) != OK) {
+ * return retval;
+ * }
+ */
+AP_DECLARE(int) ap_discard_request_body(request_rec *r)
+{
+ apr_bucket_brigade *bb;
+ int rv, seen_eos;
+
+ /* Sometimes we'll get in a state where the input handling has
+ * detected an error where we want to drop the connection, so if
+ * that's the case, don't read the data as that is what we're trying
+ * to avoid.
+ *
+ * This function is also a no-op on a subrequest.
+ */
+ if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
+ ap_status_drops_connection(r->status)) {
+ return OK;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ seen_eos = 0;
+ do {
+ apr_bucket *bucket;
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, HUGE_STRING_LEN);
+
+ if (rv != APR_SUCCESS) {
+ /* FIXME: If we ever have a mapping from filters (apr_status_t)
+ * to HTTP error codes, this would be a good place for them.
+ *
+ * If we received the special case AP_FILTER_ERROR, it means
+ * that the filters have already handled this error.
+ * Otherwise, we should assume we have a bad request.
+ */
+ if (rv == AP_FILTER_ERROR) {
+ apr_brigade_destroy(bb);
+ return rv;
+ }
+ else {
+ apr_brigade_destroy(bb);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ for (bucket = APR_BRIGADE_FIRST(bb);
+ bucket != APR_BRIGADE_SENTINEL(bb);
+ bucket = APR_BUCKET_NEXT(bucket))
+ {
+ const char *data;
+ apr_size_t len;
+
+ if (APR_BUCKET_IS_EOS(bucket)) {
+ seen_eos = 1;
+ break;
+ }
+
+ /* These are metadata buckets. */
+ if (bucket->length == 0) {
+ continue;
+ }
+
+ /* We MUST read because in case we have an unknown-length
+ * bucket or one that morphs, we want to exhaust it.
+ */
+ rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return HTTP_BAD_REQUEST;
+ }
+ }
+ apr_brigade_cleanup(bb);
+ } while (!seen_eos);
+
+ return OK;
+}
+
+/* Here we deal with getting the request message body from the client.
+ * Whether or not the request contains a body is signaled by the presence
+ * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
+ *
+ * Note that this is more complicated than it was in Apache 1.1 and prior
+ * versions, because chunked support means that the module does less.
+ *
+ * The proper procedure is this:
+ *
+ * 1. Call setup_client_block() near the beginning of the request
+ * handler. This will set up all the necessary properties, and will
+ * return either OK, or an error code. If the latter, the module should
+ * return that error code. The second parameter selects the policy to
+ * apply if the request message indicates a body, and how a chunked
+ * transfer-coding should be interpreted. Choose one of
+ *
+ * REQUEST_NO_BODY Send 413 error if message has any body
+ * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
+ * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
+ *
+ * In order to use the last two options, the caller MUST provide a buffer
+ * large enough to hold a chunk-size line, including any extensions.
+ *
+ * 2. When you are ready to read a body (if any), call should_client_block().
+ * This will tell the module whether or not to read input. If it is 0,
+ * the module should assume that there is no message body to read.
+ * This step also sends a 100 Continue response to HTTP/1.1 clients,
+ * so should not be called until the module is *definitely* ready to
+ * read content. (otherwise, the point of the 100 response is defeated).
+ * Never call this function more than once.
+ *
+ * 3. Finally, call get_client_block in a loop. Pass it a buffer and its size.
+ * It will put data into the buffer (not necessarily a full buffer), and
+ * return the length of the input block. When it is done reading, it will
+ * return 0 if EOF, or -1 if there was an error.
+ * If an error occurs on input, we force an end to keepalive.
+ */
+
+AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
+{
+ const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
+ const char *lenp = apr_table_get(r->headers_in, "Content-Length");
+
+ r->read_body = read_policy;
+ r->read_chunked = 0;
+ r->remaining = 0;
+
+ if (tenc) {
+ if (strcasecmp(tenc, "chunked")) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Unknown Transfer-Encoding %s", tenc);
+ return HTTP_NOT_IMPLEMENTED;
+ }
+ if (r->read_body == REQUEST_CHUNKED_ERROR) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "chunked Transfer-Encoding forbidden: %s", r->uri);
+ return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
+ }
+
+ r->read_chunked = 1;
+ }
+ else if (lenp) {
+ char *endstr;
+
+ if (apr_strtoff(&r->remaining, lenp, &endstr, 10)
+ || *endstr || r->remaining < 0) {
+ r->remaining = 0;
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "Invalid Content-Length");
+ return HTTP_BAD_REQUEST;
+ }
+ }
+
+ if ((r->read_body == REQUEST_NO_BODY)
+ && (r->read_chunked || (r->remaining > 0))) {
+ ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
+ "%s with body is not allowed for %s", r->method, r->uri);
+ return HTTP_REQUEST_ENTITY_TOO_LARGE;
+ }
+
+#ifdef AP_DEBUG
+ {
+ /* Make sure ap_getline() didn't leave any droppings. */
+ core_request_config *req_cfg =
+ (core_request_config *)ap_get_module_config(r->request_config,
+ &core_module);
+ AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
+ }
+#endif
+
+ return OK;
+}
+
+AP_DECLARE(int) ap_should_client_block(request_rec *r)
+{
+ /* First check if we have already read the request body */
+
+ if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* get_client_block is called in a loop to get the request message body.
+ * This is quite simple if the client includes a content-length
+ * (the normal case), but gets messy if the body is chunked. Note that
+ * r->remaining is used to maintain state across calls and that
+ * r->read_length is the total number of bytes given to the caller
+ * across all invocations. It is messy because we have to be careful not
+ * to read past the data provided by the client, since these reads block.
+ * Returns 0 on End-of-body, -1 on error or premature chunk end.
+ *
+ */
+AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
+ apr_size_t bufsiz)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *bb;
+
+ if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
+ return 0;
+ }
+
+ bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
+ if (bb == NULL) {
+ r->connection->keepalive = AP_CONN_CLOSE;
+ return -1;
+ }
+
+ rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
+ APR_BLOCK_READ, bufsiz);
+
+ /* We lose the failure code here. This is why ap_get_client_block should
+ * not be used.
+ */
+ if (rv != APR_SUCCESS) {
+ /* if we actually fail here, we want to just return and
+ * stop trying to read data from the client.
+ */
+ r->connection->keepalive = AP_CONN_CLOSE;
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* If this fails, it means that a filter is written incorrectly and that
+ * it needs to learn how to properly handle APR_BLOCK_READ requests by
+ * returning data when requested.
+ */
+ AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
+
+ /* Check to see if there is an EOS bucket in the brigade.
+ *
+ * If so, we have to leave a nugget for the *next* ap_get_client_block
+ * call to return 0.
+ */
+ if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
+ if (r->read_chunked) {
+ r->remaining = -1;
+ }
+ else {
+ r->remaining = 0;
+ }
+ }
+
+ rv = apr_brigade_flatten(bb, buffer, &bufsiz);
+ if (rv != APR_SUCCESS) {
+ apr_brigade_destroy(bb);
+ return -1;
+ }
+
+ /* XXX yank me? */
+ r->read_length += bufsiz;
+
+ apr_brigade_destroy(bb);
+ return bufsiz;
+}
+
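The three-step procedure documented above ap_setup_client_block() looks roughly like this in a handler. This is a sketch, not code from this patch; the handler name, buffer size and error choice are illustrative, while the ap_*_client_block() calls are the ones defined above.

    /* Hypothetical body-reading handler following the documented
     * setup / should / get sequence.
     */
    static int example_body_handler(request_rec *r)
    {
        char buf[HUGE_STRING_LEN];
        long nread;
        int rc;

        /* Step 1: choose the body policy (here: de-chunk for us). */
        if ((rc = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK)) != OK) {
            return rc;
        }

        /* Step 2: call exactly once, only when ready to read; this is the
         * point at which the 100 Continue handling described above applies.
         */
        if (ap_should_client_block(r)) {
            /* Step 3: read until 0 (end of body) or -1 (error). */
            while ((nread = ap_get_client_block(r, buf, sizeof(buf))) > 0) {
                /* ... consume nread bytes of body data ... */
            }
            if (nread < 0) {
                return HTTP_INTERNAL_SERVER_ERROR;
            }
        }

        return OK;
    }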
diff --git a/modules/http/http_protocol.c b/modules/http/http_protocol.c
index 27fecaa14b..83efcc2260 100644
--- a/modules/http/http_protocol.c
+++ b/modules/http/http_protocol.c
@@ -702,337 +702,6 @@ AP_DECLARE(const char *) ap_method_name_of(apr_pool_t *p, int methnum)
return NULL;
}
-static long get_chunk_size(char *);
-
-typedef struct http_filter_ctx {
- apr_off_t remaining;
- apr_off_t limit;
- apr_off_t limit_used;
- enum {
- BODY_NONE,
- BODY_LENGTH,
- BODY_CHUNK
- } state;
- int eos_sent;
-} http_ctx_t;
-
-/* This is the HTTP_INPUT filter for HTTP requests and responses from
- * proxied servers (mod_proxy). It handles chunked and content-length
- * bodies. This can only be inserted/used after the headers
- * are successfully parsed.
- */
-apr_status_t ap_http_filter(ap_filter_t *f, apr_bucket_brigade *b,
- ap_input_mode_t mode, apr_read_type_e block,
- apr_off_t readbytes)
-{
- apr_bucket *e;
- http_ctx_t *ctx = f->ctx;
- apr_status_t rv;
- apr_off_t totalread;
-
- /* just get out of the way of things we don't want. */
- if (mode != AP_MODE_READBYTES && mode != AP_MODE_GETLINE) {
- return ap_get_brigade(f->next, b, mode, block, readbytes);
- }
-
- if (!ctx) {
- const char *tenc, *lenp;
- f->ctx = ctx = apr_palloc(f->r->pool, sizeof(*ctx));
- ctx->state = BODY_NONE;
- ctx->remaining = 0;
- ctx->limit_used = 0;
- ctx->eos_sent = 0;
-
- /* LimitRequestBody does not apply to proxied responses.
- * Consider implementing this check in its own filter.
- * Would adding a directive to limit the size of proxied
- * responses be useful?
- */
- if (!f->r->proxyreq) {
- ctx->limit = ap_get_limit_req_body(f->r);
- }
- else {
- ctx->limit = 0;
- }
-
- tenc = apr_table_get(f->r->headers_in, "Transfer-Encoding");
- lenp = apr_table_get(f->r->headers_in, "Content-Length");
-
- if (tenc) {
- if (!strcasecmp(tenc, "chunked")) {
- ctx->state = BODY_CHUNK;
- }
- }
- else if (lenp) {
- char *endstr;
-
- ctx->state = BODY_LENGTH;
- errno = 0;
-
- /* Protects against over/underflow, non-digit chars in the
- * string (excluding leading space) (the endstr checks)
- * and a negative number. */
- if (apr_strtoff(&ctx->remaining, lenp, &endstr, 10)
- || endstr == lenp || *endstr || ctx->remaining < 0) {
- apr_bucket_brigade *bb;
-
- ctx->remaining = 0;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
- "Invalid Content-Length");
-
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
- f->r->pool, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- ctx->eos_sent = 1;
- return ap_pass_brigade(f->r->output_filters, bb);
- }
-
- /* If we have a limit in effect and we know the C-L ahead of
- * time, stop it here if it is invalid.
- */
- if (ctx->limit && ctx->limit < ctx->remaining) {
- apr_bucket_brigade *bb;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
- "Requested content-length of %" APR_OFF_T_FMT
- " is larger than the configured limit"
- " of %" APR_OFF_T_FMT, ctx->remaining, ctx->limit);
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
- f->r->pool, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- ctx->eos_sent = 1;
- return ap_pass_brigade(f->r->output_filters, bb);
- }
- }
-
- /* If we don't have a request entity indicated by the headers, EOS.
- * (BODY_NONE is a valid intermediate state due to trailers,
- * but it isn't a valid starting state.)
- *
- * RFC 2616 Section 4.4 note 5 states that connection-close
- * is invalid for a request entity - request bodies must be
- * denoted by C-L or T-E: chunked.
- *
- * Note that since the proxy uses this filter to handle the
- * proxied *response*, proxy responses MUST be exempt.
- */
- if (ctx->state == BODY_NONE && f->r->proxyreq != PROXYREQ_RESPONSE) {
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- ctx->eos_sent = 1;
- return APR_SUCCESS;
- }
-
- /* Since we're about to read data, send 100-Continue if needed.
- * Only valid on chunked and C-L bodies where the C-L is > 0. */
- if ((ctx->state == BODY_CHUNK ||
- (ctx->state == BODY_LENGTH && ctx->remaining > 0)) &&
- f->r->expecting_100 && f->r->proto_num >= HTTP_VERSION(1,1)) {
- char *tmp;
- apr_bucket_brigade *bb;
-
- tmp = apr_pstrcat(f->r->pool, AP_SERVER_PROTOCOL, " ",
- status_lines[0], CRLF CRLF, NULL);
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
- e = apr_bucket_pool_create(tmp, strlen(tmp), f->r->pool,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_HEAD(bb, e);
- e = apr_bucket_flush_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
-
- ap_pass_brigade(f->c->output_filters, bb);
- }
-
- /* We can't read the chunk until after sending 100 if required. */
- if (ctx->state == BODY_CHUNK) {
- char line[30];
- apr_bucket_brigade *bb;
- apr_size_t len = 30;
- apr_off_t brigade_length;
-
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
-
- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
- APR_BLOCK_READ, 0);
-
- if (rv == APR_SUCCESS) {
- /* We have to check the length of the brigade we got back.
- * We will not accept partial lines.
- */
- rv = apr_brigade_length(bb, 1, &brigade_length);
- if (rv == APR_SUCCESS
- && brigade_length > f->r->server->limit_req_line) {
- rv = APR_ENOSPC;
- }
- if (rv == APR_SUCCESS) {
- rv = apr_brigade_flatten(bb, line, &len);
- if (rv == APR_SUCCESS) {
- ctx->remaining = get_chunk_size(line);
- }
- }
- }
- apr_brigade_cleanup(bb);
-
- /* Detect chunksize error (such as overflow) */
- if (rv != APR_SUCCESS || ctx->remaining < 0) {
- ctx->remaining = 0; /* Reset it in case we have to
- * come back here later */
- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
- f->r->pool,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- ctx->eos_sent = 1;
- return ap_pass_brigade(f->r->output_filters, bb);
- }
-
- if (!ctx->remaining) {
- /* Handle trailers by calling ap_get_mime_headers again! */
- ctx->state = BODY_NONE;
- ap_get_mime_headers(f->r);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- ctx->eos_sent = 1;
- return APR_SUCCESS;
- }
- }
- }
-
- if (ctx->eos_sent) {
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- return APR_SUCCESS;
- }
-
- if (!ctx->remaining) {
- switch (ctx->state) {
- case BODY_NONE:
- break;
- case BODY_LENGTH:
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- ctx->eos_sent = 1;
- return APR_SUCCESS;
- case BODY_CHUNK:
- {
- char line[30];
- apr_bucket_brigade *bb;
- apr_size_t len = 30;
-
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
-
- /* We need to read the CRLF after the chunk. */
- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
- APR_BLOCK_READ, 0);
- apr_brigade_cleanup(bb);
-
- if (rv == APR_SUCCESS) {
- /* Read the real chunk line. */
- rv = ap_get_brigade(f->next, bb, AP_MODE_GETLINE,
- APR_BLOCK_READ, 0);
- if (rv == APR_SUCCESS) {
- rv = apr_brigade_flatten(bb, line, &len);
- if (rv == APR_SUCCESS) {
- ctx->remaining = get_chunk_size(line);
- }
- }
- apr_brigade_cleanup(bb);
- }
-
- /* Detect chunksize error (such as overflow) */
- if (rv != APR_SUCCESS || ctx->remaining < 0) {
- ctx->remaining = 0; /* Reset it in case we have to
- * come back here later */
- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE,
- NULL, f->r->pool,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- ctx->eos_sent = 1;
- return ap_pass_brigade(f->r->output_filters, bb);
- }
-
- if (!ctx->remaining) {
- /* Handle trailers by calling ap_get_mime_headers again! */
- ctx->state = BODY_NONE;
- ap_get_mime_headers(f->r);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- ctx->eos_sent = 1;
- return APR_SUCCESS;
- }
- }
- break;
- }
- }
-
- /* Ensure that the caller can not go over our boundary point. */
- if (ctx->state == BODY_LENGTH || ctx->state == BODY_CHUNK) {
- if (ctx->remaining < readbytes) {
- readbytes = ctx->remaining;
- }
- AP_DEBUG_ASSERT(readbytes > 0);
- }
-
- rv = ap_get_brigade(f->next, b, mode, block, readbytes);
-
- if (rv != APR_SUCCESS) {
- return rv;
- }
-
- /* How many bytes did we just read? */
- apr_brigade_length(b, 0, &totalread);
-
- /* If this happens, we have a bucket of unknown length. Die because
- * it means our assumptions have changed. */
- AP_DEBUG_ASSERT(totalread >= 0);
-
- if (ctx->state != BODY_NONE) {
- ctx->remaining -= totalread;
- }
-
- /* If we have no more bytes remaining on a C-L request,
- * save the caller a roundtrip to discover EOS.
- */
- if (ctx->state == BODY_LENGTH && ctx->remaining == 0) {
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- }
-
- /* We have a limit in effect. */
- if (ctx->limit) {
- /* FIXME: Note that we might get slightly confused on chunked inputs
- * as we'd need to compensate for the chunk lengths which may not
- * really count. This seems to be up for interpretation. */
- ctx->limit_used += totalread;
- if (ctx->limit < ctx->limit_used) {
- apr_bucket_brigade *bb;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, f->r,
- "Read content-length of %" APR_OFF_T_FMT
- " is larger than the configured limit"
- " of %" APR_OFF_T_FMT, ctx->limit_used, ctx->limit);
- bb = apr_brigade_create(f->r->pool, f->c->bucket_alloc);
- e = ap_bucket_error_create(HTTP_REQUEST_ENTITY_TOO_LARGE, NULL,
- f->r->pool,
- f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bb, e);
- ctx->eos_sent = 1;
- return ap_pass_brigade(f->r->output_filters, bb);
- }
- }
-
- return APR_SUCCESS;
-}
-
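For reference, the chunked request body that the filter above decodes looks like this on the wire (chunk sizes are hexadecimal; the trailer field name is illustrative):

    5\r\n
    hello\r\n
    6\r\n
     world\r\n
    0\r\n
    X-Trailer: example\r\n
    \r\n

get_chunk_size() parses the hex size lines, the zero-size chunk terminates the body, and any trailer fields after it are picked up by the ap_get_mime_headers() call seen above.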
/* The index is found by its offset from the x00 code of each level.
* Although this is fast, it will need to be replaced if some nutcase
* decides to define a high-numbered code before the lower numbers.
@@ -1069,253 +738,6 @@ AP_DECLARE(const char *) ap_get_status_line(int status)
return status_lines[ap_index_of_response(status)];
}
-typedef struct header_struct {
- apr_pool_t *pool;
- apr_bucket_brigade *bb;
-} header_struct;
-
-/* Send a single HTTP header field to the client. Note that this function
- * is used in calls to table_do(), so their interfaces are co-dependent.
- * In other words, don't change this one without checking table_do in alloc.c.
- * It returns true unless there was a write error of some kind.
- */
-static int form_header_field(header_struct *h,
- const char *fieldname, const char *fieldval)
-{
-#if APR_CHARSET_EBCDIC
- char *headfield;
- apr_size_t len;
- apr_size_t name_len;
- apr_size_t val_len;
- char *next;
-
- name_len = strlen(fieldname);
- val_len = strlen(fieldval);
- len = name_len + val_len + 4; /* 4 for ": " plus CRLF */
- headfield = (char *)apr_palloc(h->pool, len + 1);
- memcpy(headfield, fieldname, name_len);
- next = headfield + name_len;
- *next++ = ':';
- *next++ = ' ';
- memcpy(next, fieldval, val_len);
- next += val_len;
- *next++ = CR;
- *next++ = LF;
- *next = 0;
- ap_xlate_proto_to_ascii(headfield, len);
- apr_brigade_write(h->bb, NULL, NULL, headfield, len);
-#else
- struct iovec vec[4];
- struct iovec *v = vec;
- v->iov_base = (void *)fieldname;
- v->iov_len = strlen(fieldname);
- v++;
- v->iov_base = ": ";
- v->iov_len = sizeof(": ") - 1;
- v++;
- v->iov_base = (void *)fieldval;
- v->iov_len = strlen(fieldval);
- v++;
- v->iov_base = CRLF;
- v->iov_len = sizeof(CRLF) - 1;
- apr_brigade_writev(h->bb, NULL, NULL, vec, 4);
-#endif /* !APR_CHARSET_EBCDIC */
- return 1;
-}
-
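The four-element iovec layout built by form_header_field() — name, ": ", value, CRLF — is the usual way to emit a header field without copying. A self-contained POSIX sketch of the same layout, using plain writev() instead of the brigade API, purely for illustration:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <unistd.h>

    /* Emit "Name: value\r\n" with a single writev() call. */
    static void write_header_field(int fd, const char *name, const char *value)
    {
        struct iovec vec[4];

        vec[0].iov_base = (void *)name;  vec[0].iov_len = strlen(name);
        vec[1].iov_base = ": ";          vec[1].iov_len = 2;
        vec[2].iov_base = (void *)value; vec[2].iov_len = strlen(value);
        vec[3].iov_base = "\r\n";        vec[3].iov_len = 2;

        if (writev(fd, vec, 4) < 0) {
            perror("writev");
        }
    }

    int main(void)
    {
        write_header_field(STDOUT_FILENO, "Server", "Apache/2.x (example)");
        return 0;
    }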
-/* Send a request's HTTP response headers to the client.
- */
-static apr_status_t send_all_header_fields(header_struct *h,
- const request_rec *r)
-{
- const apr_array_header_t *elts;
- const apr_table_entry_t *t_elt;
- const apr_table_entry_t *t_end;
- struct iovec *vec;
- struct iovec *vec_next;
-
- elts = apr_table_elts(r->headers_out);
- if (elts->nelts == 0) {
- return APR_SUCCESS;
- }
- t_elt = (const apr_table_entry_t *)(elts->elts);
- t_end = t_elt + elts->nelts;
- vec = (struct iovec *)apr_palloc(h->pool, 4 * elts->nelts *
- sizeof(struct iovec));
- vec_next = vec;
-
- /* For each field, generate
- * name ": " value CRLF
- */
- do {
- vec_next->iov_base = (void*)(t_elt->key);
- vec_next->iov_len = strlen(t_elt->key);
- vec_next++;
- vec_next->iov_base = ": ";
- vec_next->iov_len = sizeof(": ") - 1;
- vec_next++;
- vec_next->iov_base = (void*)(t_elt->val);
- vec_next->iov_len = strlen(t_elt->val);
- vec_next++;
- vec_next->iov_base = CRLF;
- vec_next->iov_len = sizeof(CRLF) - 1;
- vec_next++;
- t_elt++;
- } while (t_elt < t_end);
-
-#if APR_CHARSET_EBCDIC
- {
- apr_size_t len;
- char *tmp = apr_pstrcatv(r->pool, vec, vec_next - vec, &len);
- ap_xlate_proto_to_ascii(tmp, len);
- return apr_brigade_write(h->bb, NULL, NULL, tmp, len);
- }
-#else
- return apr_brigade_writev(h->bb, NULL, NULL, vec, vec_next - vec);
-#endif
-}
-
-/*
- * Determine the protocol to use for the response. Potentially downgrade
- * to HTTP/1.0 in some situations and/or turn off keepalives.
- *
- * also prepare r->status_line.
- */
-static void basic_http_header_check(request_rec *r,
- const char **protocol)
-{
- if (r->assbackwards) {
- /* no such thing as a response protocol */
- return;
- }
-
- if (!r->status_line) {
- r->status_line = status_lines[ap_index_of_response(r->status)];
- }
-
- /* Note that we must downgrade before checking for force responses. */
- if (r->proto_num > HTTP_VERSION(1,0)
- && apr_table_get(r->subprocess_env, "downgrade-1.0")) {
- r->proto_num = HTTP_VERSION(1,0);
- }
-
- /* kludge around broken browsers when indicated by force-response-1.0
- */
- if (r->proto_num == HTTP_VERSION(1,0)
- && apr_table_get(r->subprocess_env, "force-response-1.0")) {
- *protocol = "HTTP/1.0";
- r->connection->keepalive = AP_CONN_CLOSE;
- }
- else {
- *protocol = AP_SERVER_PROTOCOL;
- }
-
-}
-
-/* fill "bb" with a barebones/initial HTTP response header */
-static void basic_http_header(request_rec *r, apr_bucket_brigade *bb,
- const char *protocol)
-{
- char *date;
- const char *server;
- header_struct h;
- struct iovec vec[4];
-
- if (r->assbackwards) {
- /* there are no headers to send */
- return;
- }
-
- /* Output the HTTP/1.x Status-Line and the Date and Server fields */
-
- vec[0].iov_base = (void *)protocol;
- vec[0].iov_len = strlen(protocol);
- vec[1].iov_base = (void *)" ";
- vec[1].iov_len = sizeof(" ") - 1;
- vec[2].iov_base = (void *)(r->status_line);
- vec[2].iov_len = strlen(r->status_line);
- vec[3].iov_base = (void *)CRLF;
- vec[3].iov_len = sizeof(CRLF) - 1;
-#if APR_CHARSET_EBCDIC
- {
- char *tmp;
- apr_size_t len;
- tmp = apr_pstrcatv(r->pool, vec, 4, &len);
- ap_xlate_proto_to_ascii(tmp, len);
- apr_brigade_write(bb, NULL, NULL, tmp, len);
- }
-#else
- apr_brigade_writev(bb, NULL, NULL, vec, 4);
-#endif
-
- date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
-
- h.pool = r->pool;
- h.bb = bb;
- form_header_field(&h, "Date", date);
-
- /* keep the set-by-proxy server header, otherwise
- * generate a new server header */
- if (r->proxyreq != PROXYREQ_NONE) {
- server = apr_table_get(r->headers_out, "Server");
- if (server) {
- form_header_field(&h, "Server", server);
- }
- }
- else {
- form_header_field(&h, "Server", ap_get_server_version());
- }
-
- /* unset so we don't send them again */
- apr_table_unset(r->headers_out, "Date"); /* Avoid bogosity */
- apr_table_unset(r->headers_out, "Server");
-}
-
-AP_DECLARE(void) ap_basic_http_header(request_rec *r, apr_bucket_brigade *bb)
-{
- const char *protocol;
-
- basic_http_header_check(r, &protocol);
- basic_http_header(r, bb, protocol);
-}
-
-/* Navigator versions 2.x, 3.x and 4.0 betas up to and including 4.0b2
- * have a header parsing bug. If the terminating \r\n occur starting
- * at offset 256, 257 or 258 of output then it will not properly parse
- * the headers. Curiously it doesn't exhibit this problem at 512, 513.
- * We are guessing that this is because their initial read of a new request
- * uses a 256 byte buffer, and subsequent reads use a larger buffer.
- * So the problem might exist at different offsets as well.
- *
- * This should also work on keepalive connections assuming they use the
- * same small buffer for the first read of each new request.
- *
- * At any rate, we check the bytes written so far and, if we are about to
- * tickle the bug, we instead insert a bogus padding header. Since the bug
- * manifests as a broken image in Navigator, users blame the server. :(
- * It is more expensive to check the User-Agent than it is to just add the
- * bytes, so we haven't used the BrowserMatch feature here.
- */
-static void terminate_header(apr_bucket_brigade *bb)
-{
- char tmp[] = "X-Pad: avoid browser bug" CRLF;
- char crlf[] = CRLF;
- apr_off_t len;
- apr_size_t buflen;
-
- (void) apr_brigade_length(bb, 1, &len);
-
- if (len >= 255 && len <= 257) {
- buflen = strlen(tmp);
- ap_xlate_proto_to_ascii(tmp, buflen);
- apr_brigade_write(bb, NULL, NULL, tmp, buflen);
- }
- buflen = strlen(crlf);
- ap_xlate_proto_to_ascii(crlf, buflen);
- apr_brigade_write(bb, NULL, NULL, crlf, buflen);
-}
-
/* Build the Allow field-value from the request handler method mask.
* Note that we always allow TRACE, since it is handled below.
*/
@@ -1366,41 +788,6 @@ static char *make_allow(request_rec *r)
return list;
}
-AP_DECLARE_NONSTD(int) ap_send_http_trace(request_rec *r)
-{
- int rv;
- apr_bucket_brigade *b;
- header_struct h;
-
- if (r->method_number != M_TRACE) {
- return DECLINED;
- }
-
- /* Get the original request */
- while (r->prev) {
- r = r->prev;
- }
-
- if ((rv = ap_setup_client_block(r, REQUEST_NO_BODY))) {
- return rv;
- }
-
- ap_set_content_type(r, "message/http");
-
- /* Now we recreate the request, and echo it back */
-
- b = apr_brigade_create(r->pool, r->connection->bucket_alloc);
- apr_brigade_putstrs(b, NULL, NULL, r->the_request, CRLF, NULL);
- h.pool = r->pool;
- h.bb = b;
- apr_table_do((int (*) (void *, const char *, const char *))
- form_header_field, (void *) &h, r->headers_in, NULL);
- apr_brigade_puts(b, NULL, NULL, CRLF);
- ap_pass_brigade(r->output_filters, b);
-
- return DONE;
-}
-
AP_DECLARE(int) ap_send_http_options(request_rec *r)
{
if (r->assbackwards) {
@@ -1416,84 +803,6 @@ AP_DECLARE(int) ap_send_http_options(request_rec *r)
return OK;
}
-/* This routine is called by apr_table_do and merges all instances of
- * the passed field values into a single array that will be further
- * processed by some later routine. Originally intended to help split
- * and recombine multiple Vary fields, though it is generic to any field
- * consisting of comma/space-separated tokens.
- */
-static int uniq_field_values(void *d, const char *key, const char *val)
-{
- apr_array_header_t *values;
- char *start;
- char *e;
- char **strpp;
- int i;
-
- values = (apr_array_header_t *)d;
-
- e = apr_pstrdup(values->pool, val);
-
- do {
- /* Find a non-empty fieldname */
-
- while (*e == ',' || apr_isspace(*e)) {
- ++e;
- }
- if (*e == '\0') {
- break;
- }
- start = e;
- while (*e != '\0' && *e != ',' && !apr_isspace(*e)) {
- ++e;
- }
- if (*e != '\0') {
- *e++ = '\0';
- }
-
- /* Now add it to values if it isn't already represented.
- * Could be replaced by a ap_array_strcasecmp() if we had one.
- */
- for (i = 0, strpp = (char **) values->elts; i < values->nelts;
- ++i, ++strpp) {
- if (*strpp && strcasecmp(*strpp, start) == 0) {
- break;
- }
- }
- if (i == values->nelts) { /* if not found */
- *(char **)apr_array_push(values) = start;
- }
- } while (*e != '\0');
-
- return 1;
-}
-
-/*
- * Since some clients choke violently on multiple Vary fields, or
- * Vary fields with duplicate tokens, combine any multiples and remove
- * any duplicates.
- */
-static void fixup_vary(request_rec *r)
-{
- apr_array_header_t *varies;
-
- varies = apr_array_make(r->pool, 5, sizeof(char *));
-
- /* Extract all Vary fields from the headers_out, separate each into
- * its comma-separated fieldname values, and then add them to varies
- * if not already present in the array.
- */
- apr_table_do((int (*)(void *, const char *, const char *))uniq_field_values,
- (void *) varies, r->headers_out, "Vary", NULL);
-
- /* If we found any, replace old Vary fields with unique-ified value */
-
- if (varies->nelts > 0) {
- apr_table_setn(r->headers_out, "Vary",
- apr_array_pstrcat(r->pool, varies, ','));
- }
-}
-
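A self-contained sketch of the token merging that uniq_field_values() and fixup_vary() perform: split comma/space-separated values and drop case-insensitive duplicates. Standard C plus strcasecmp(), not the httpd implementation, and the sample header value is made up:

    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    int main(void)
    {
        char input[] = "negotiate, accept-language, Accept-Language,accept-charset";
        char *uniq[16];
        char *tok, *save = NULL;
        int n = 0, i;

        /* Collect tokens, skipping any already present (case-insensitive). */
        for (tok = strtok_r(input, ", \t", &save); tok && n < 16;
             tok = strtok_r(NULL, ", \t", &save)) {
            int seen = 0;
            for (i = 0; i < n; i++) {
                if (strcasecmp(uniq[i], tok) == 0) {
                    seen = 1;
                    break;
                }
            }
            if (!seen) {
                uniq[n++] = tok;
            }
        }

        /* Re-emit as a single, duplicate-free Vary value. */
        printf("Vary: ");
        for (i = 0; i < n; i++) {
            printf("%s%s", i ? ", " : "", uniq[i]);
        }
        printf("\n");  /* prints: Vary: negotiate, accept-language, accept-charset */
        return 0;
    }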
AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
{
if (!ct) {
@@ -1511,506 +820,6 @@ AP_DECLARE(void) ap_set_content_type(request_rec *r, const char *ct)
}
}
-typedef struct header_filter_ctx {
- int headers_sent;
-} header_filter_ctx;
-
-AP_CORE_DECLARE_NONSTD(apr_status_t) ap_http_header_filter(ap_filter_t *f,
- apr_bucket_brigade *b)
-{
- request_rec *r = f->r;
- conn_rec *c = r->connection;
- const char *clheader;
- const char *protocol;
- apr_bucket *e;
- apr_bucket_brigade *b2;
- header_struct h;
- header_filter_ctx *ctx = f->ctx;
-
- AP_DEBUG_ASSERT(!r->main);
-
- if (r->header_only) {
- if (!ctx) {
- ctx = f->ctx = apr_pcalloc(r->pool, sizeof(header_filter_ctx));
- }
- else if (ctx->headers_sent) {
- apr_brigade_destroy(b);
- return OK;
- }
- }
-
- for (e = APR_BRIGADE_FIRST(b);
- e != APR_BRIGADE_SENTINEL(b);
- e = APR_BUCKET_NEXT(e))
- {
- if (e->type == &ap_bucket_type_error) {
- ap_bucket_error *eb = e->data;
-
- ap_die(eb->status, r);
- return AP_FILTER_ERROR;
- }
- }
-
- if (r->assbackwards) {
- r->sent_bodyct = 1;
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, b);
- }
-
- /*
- * Now that we are ready to send a response, we need to combine the two
- * header field tables into a single table. If we don't do this, our
- * later attempts to set or unset a given fieldname might be bypassed.
- */
- if (!apr_is_empty_table(r->err_headers_out)) {
- r->headers_out = apr_table_overlay(r->pool, r->err_headers_out,
- r->headers_out);
- }
-
- /*
- * Remove the 'Vary' header field if the client can't handle it.
- * Since this will have nasty effects on HTTP/1.1 caches, force
- * the response into HTTP/1.0 mode.
- *
- * Note: the force-response-1.0 should come before the call to
- * basic_http_header_check()
- */
- if (apr_table_get(r->subprocess_env, "force-no-vary") != NULL) {
- apr_table_unset(r->headers_out, "Vary");
- r->proto_num = HTTP_VERSION(1,0);
- apr_table_set(r->subprocess_env, "force-response-1.0", "1");
- }
- else {
- fixup_vary(r);
- }
-
- /*
- * Now remove any ETag response header field if earlier processing
- * says so (such as a 'FileETag None' directive).
- */
- if (apr_table_get(r->notes, "no-etag") != NULL) {
- apr_table_unset(r->headers_out, "ETag");
- }
-
- /* determine the protocol and whether we should use keepalives. */
- basic_http_header_check(r, &protocol);
- ap_set_keepalive(r);
-
- if (r->chunked) {
- apr_table_mergen(r->headers_out, "Transfer-Encoding", "chunked");
- apr_table_unset(r->headers_out, "Content-Length");
- }
-
- apr_table_setn(r->headers_out, "Content-Type",
- ap_make_content_type(r, r->content_type));
-
- if (r->content_encoding) {
- apr_table_setn(r->headers_out, "Content-Encoding",
- r->content_encoding);
- }
-
- if (!apr_is_empty_array(r->content_languages)) {
- int i;
- char **languages = (char **)(r->content_languages->elts);
- for (i = 0; i < r->content_languages->nelts; ++i) {
- apr_table_mergen(r->headers_out, "Content-Language", languages[i]);
- }
- }
-
- /*
- * Control cachability for non-cachable responses if not already set by
- * some other part of the server configuration.
- */
- if (r->no_cache && !apr_table_get(r->headers_out, "Expires")) {
- char *date = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
- ap_recent_rfc822_date(date, r->request_time);
- apr_table_addn(r->headers_out, "Expires", date);
- }
-
- /* This is a hack, but I can't find any way around it. The idea is that
- * we don't want to send out 0 Content-Lengths if it is a head request.
- * This happens when modules try to outsmart the server, and return
- * if they see a HEAD request. Apache 1.3 handlers were supposed to
- * just return in that situation, and the core handled the HEAD. In
- * 2.0, if a handler returns, then the core sends an EOS bucket down
- * the filter stack, and the content-length filter computes a C-L of
- * zero and that gets put in the headers, and we end up sending a
- * zero C-L to the client. We can't just remove the C-L filter,
- * because well behaved 2.0 handlers will send their data down the stack,
- * and we will compute a real C-L for the head request. RBB
- */
- if (r->header_only
- && (clheader = apr_table_get(r->headers_out, "Content-Length"))
- && !strcmp(clheader, "0")) {
- apr_table_unset(r->headers_out, "Content-Length");
- }
-
- b2 = apr_brigade_create(r->pool, c->bucket_alloc);
- basic_http_header(r, b2, protocol);
-
- h.pool = r->pool;
- h.bb = b2;
-
- if (r->status == HTTP_NOT_MODIFIED) {
- apr_table_do((int (*)(void *, const char *, const char *)) form_header_field,
- (void *) &h, r->headers_out,
- "Connection",
- "Keep-Alive",
- "ETag",
- "Content-Location",
- "Expires",
- "Cache-Control",
- "Vary",
- "Warning",
- "WWW-Authenticate",
- "Proxy-Authenticate",
- "Set-Cookie",
- "Set-Cookie2",
- NULL);
- }
- else {
- send_all_header_fields(&h, r);
- }
-
- terminate_header(b2);
-
- ap_pass_brigade(f->next, b2);
-
- if (r->header_only) {
- apr_brigade_destroy(b);
- ctx->headers_sent = 1;
- return OK;
- }
-
- r->sent_bodyct = 1; /* Whatever follows is real body stuff... */
-
- if (r->chunked) {
- /* We can't add this filter until we have already sent the headers.
- * If we add it before this point, then the headers will be chunked
- * as well, and that is just wrong.
- */
- ap_add_output_filter("CHUNK", NULL, r, r->connection);
- }
-
- /* Don't remove this filter until after we have added the CHUNK filter.
- * Otherwise, f->next won't be the CHUNK filter and thus the first
- * brigade won't be chunked properly.
- */
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, b);
-}
-
-/* Here we deal with getting the request message body from the client.
- * Whether or not the request contains a body is signaled by the presence
- * of a non-zero Content-Length or by a Transfer-Encoding: chunked.
- *
- * Note that this is more complicated than it was in Apache 1.1 and prior
- * versions, because chunked support means that the module does less.
- *
- * The proper procedure is this:
- *
- * 1. Call setup_client_block() near the beginning of the request
- * handler. This will set up all the necessary properties, and will
- * return either OK, or an error code. If the latter, the module should
- * return that error code. The second parameter selects the policy to
- * apply if the request message indicates a body, and how a chunked
- * transfer-coding should be interpreted. Choose one of
- *
- * REQUEST_NO_BODY Send 413 error if message has any body
- * REQUEST_CHUNKED_ERROR Send 411 error if body without Content-Length
- * REQUEST_CHUNKED_DECHUNK If chunked, remove the chunks for me.
- *
- * In order to use the last two options, the caller MUST provide a buffer
- * large enough to hold a chunk-size line, including any extensions.
- *
- * 2. When you are ready to read a body (if any), call should_client_block().
- * This will tell the module whether or not to read input. If it is 0,
- * the module should assume that there is no message body to read.
- * This step also sends a 100 Continue response to HTTP/1.1 clients,
- * so should not be called until the module is *definitely* ready to
- * read content. (otherwise, the point of the 100 response is defeated).
- * Never call this function more than once.
- *
- * 3. Finally, call get_client_block in a loop. Pass it a buffer and its size.
- * It will put data into the buffer (not necessarily a full buffer), and
- * return the length of the input block. When it is done reading, it will
- * return 0 if EOF, or -1 if there was an error.
- * If an error occurs on input, we force an end to keepalive.
- */
-
-AP_DECLARE(int) ap_setup_client_block(request_rec *r, int read_policy)
-{
- const char *tenc = apr_table_get(r->headers_in, "Transfer-Encoding");
- const char *lenp = apr_table_get(r->headers_in, "Content-Length");
-
- r->read_body = read_policy;
- r->read_chunked = 0;
- r->remaining = 0;
-
- if (tenc) {
- if (strcasecmp(tenc, "chunked")) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Unknown Transfer-Encoding %s", tenc);
- return HTTP_NOT_IMPLEMENTED;
- }
- if (r->read_body == REQUEST_CHUNKED_ERROR) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "chunked Transfer-Encoding forbidden: %s", r->uri);
- return (lenp) ? HTTP_BAD_REQUEST : HTTP_LENGTH_REQUIRED;
- }
-
- r->read_chunked = 1;
- }
- else if (lenp) {
- char *endstr;
-
- if (apr_strtoff(&r->remaining, lenp, &endstr, 10)
- || *endstr || r->remaining < 0) {
- r->remaining = 0;
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "Invalid Content-Length");
- return HTTP_BAD_REQUEST;
- }
- }
-
- if ((r->read_body == REQUEST_NO_BODY)
- && (r->read_chunked || (r->remaining > 0))) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r,
- "%s with body is not allowed for %s", r->method, r->uri);
- return HTTP_REQUEST_ENTITY_TOO_LARGE;
- }
-
-#ifdef AP_DEBUG
- {
- /* Make sure ap_getline() didn't leave any droppings. */
- core_request_config *req_cfg =
- (core_request_config *)ap_get_module_config(r->request_config,
- &core_module);
- AP_DEBUG_ASSERT(APR_BRIGADE_EMPTY(req_cfg->bb));
- }
-#endif
-
- return OK;
-}
-
-AP_DECLARE(int) ap_should_client_block(request_rec *r)
-{
- /* First check if we have already read the request body */
-
- if (r->read_length || (!r->read_chunked && (r->remaining <= 0))) {
- return 0;
- }
-
- return 1;
-}
-
-/**
- * Parse a chunk extension, detect overflow.
- * There are two error cases:
- * 1) If the conversion would require too many bits, a -1 is returned.
- * 2) If the conversion used the correct number of bits, but an overflow
- * caused only the sign bit to flip, then that negative number is
- * returned.
- * In general, any negative number can be considered an overflow error.
- */
-static long get_chunk_size(char *b)
-{
- long chunksize = 0;
- size_t chunkbits = sizeof(long) * 8;
-
- /* Skip leading zeros */
- while (*b == '0') {
- ++b;
- }
-
- while (apr_isxdigit(*b) && (chunkbits > 0)) {
- int xvalue = 0;
-
- if (*b >= '0' && *b <= '9') {
- xvalue = *b - '0';
- }
- else if (*b >= 'A' && *b <= 'F') {
- xvalue = *b - 'A' + 0xa;
- }
- else if (*b >= 'a' && *b <= 'f') {
- xvalue = *b - 'a' + 0xa;
- }
-
- chunksize = (chunksize << 4) | xvalue;
- chunkbits -= 4;
- ++b;
- }
- if (apr_isxdigit(*b) && (chunkbits <= 0)) {
- /* overflow */
- return -1;
- }
-
- return chunksize;
-}
-
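The same overflow guard can be expressed with the standard library. A standalone sketch using strtol() in base 16, shown only to illustrate the negative-return convention above; it is not the code httpd uses and it ignores chunk extensions:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a hexadecimal chunk-size line; return -1 on overflow. */
    static long parse_chunk_size(const char *line)
    {
        char *end;
        long val;

        errno = 0;
        val = strtol(line, &end, 16);
        if (errno == ERANGE || val < 0 || end == line) {
            return -1;
        }
        return val;
    }

    int main(void)
    {
        printf("%ld\n", parse_chunk_size("1a2b"));               /* 6699 */
        printf("%ld\n", parse_chunk_size("ffffffffffffffffff")); /* -1 (overflow) */
        return 0;
    }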
-/* get_client_block is called in a loop to get the request message body.
- * This is quite simple if the client includes a content-length
- * (the normal case), but gets messy if the body is chunked. Note that
- * r->remaining is used to maintain state across calls and that
- * r->read_length is the total number of bytes given to the caller
- * across all invocations. It is messy because we have to be careful not
- * to read past the data provided by the client, since these reads block.
- * Returns 0 on End-of-body, -1 on error or premature chunk end.
- *
- */
-AP_DECLARE(long) ap_get_client_block(request_rec *r, char *buffer,
- apr_size_t bufsiz)
-{
- apr_status_t rv;
- apr_bucket_brigade *bb;
-
- if (r->remaining < 0 || (!r->read_chunked && r->remaining == 0)) {
- return 0;
- }
-
- bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
- if (bb == NULL) {
- r->connection->keepalive = AP_CONN_CLOSE;
- return -1;
- }
-
- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
- APR_BLOCK_READ, bufsiz);
-
- /* We lose the failure code here. This is why ap_get_client_block should
- * not be used.
- */
- if (rv != APR_SUCCESS) {
- /* if we actually fail here, we want to just return and
- * stop trying to read data from the client.
- */
- r->connection->keepalive = AP_CONN_CLOSE;
- apr_brigade_destroy(bb);
- return -1;
- }
-
- /* If this fails, it means that a filter is written incorrectly and that
- * it needs to learn how to properly handle APR_BLOCK_READ requests by
- * returning data when requested.
- */
- AP_DEBUG_ASSERT(!APR_BRIGADE_EMPTY(bb));
-
- /* Check to see if EOS in the brigade.
- *
- * If so, we have to leave a nugget for the *next* ap_get_client_block
- * call to return 0.
- */
- if (APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
- if (r->read_chunked) {
- r->remaining = -1;
- }
- else {
- r->remaining = 0;
- }
- }
-
- rv = apr_brigade_flatten(bb, buffer, &bufsiz);
- if (rv != APR_SUCCESS) {
- apr_brigade_destroy(bb);
- return -1;
- }
-
- /* XXX yank me? */
- r->read_length += bufsiz;
-
- apr_brigade_destroy(bb);
- return bufsiz;
-}
-
-/* In HTTP/1.1, any method can have a body. However, most GET handlers
- * wouldn't know what to do with a request body if they received one.
- * This helper routine tests for and reads any message body in the request,
- * simply discarding whatever it receives. We need to do this because
- * failing to read the request body would cause it to be interpreted
- * as the next request on a persistent connection.
- *
- * Since we return an error status if the request is malformed, this
- * routine should be called at the beginning of a no-body handler, e.g.,
- *
- * if ((retval = ap_discard_request_body(r)) != OK) {
- * return retval;
- * }
- */
-AP_DECLARE(int) ap_discard_request_body(request_rec *r)
-{
- apr_bucket_brigade *bb;
- int rv, seen_eos;
-
- /* Sometimes we'll get in a state where the input handling has
- * detected an error where we want to drop the connection, so if
- * that's the case, don't read the data as that is what we're trying
- * to avoid.
- *
- * This function is also a no-op on a subrequest.
- */
- if (r->main || r->connection->keepalive == AP_CONN_CLOSE ||
- ap_status_drops_connection(r->status)) {
- return OK;
- }
-
- bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
- seen_eos = 0;
- do {
- apr_bucket *bucket;
-
- rv = ap_get_brigade(r->input_filters, bb, AP_MODE_READBYTES,
- APR_BLOCK_READ, HUGE_STRING_LEN);
-
- if (rv != APR_SUCCESS) {
- /* FIXME: If we ever have a mapping from filters (apr_status_t)
- * to HTTP error codes, this would be a good place for them.
- *
- * If we received the special case AP_FILTER_ERROR, it means
- * that the filters have already handled this error.
- * Otherwise, we should assume we have a bad request.
- */
- if (rv == AP_FILTER_ERROR) {
- apr_brigade_destroy(bb);
- return rv;
- }
- else {
- apr_brigade_destroy(bb);
- return HTTP_BAD_REQUEST;
- }
- }
-
- for (bucket = APR_BRIGADE_FIRST(bb);
- bucket != APR_BRIGADE_SENTINEL(bb);
- bucket = APR_BUCKET_NEXT(bucket))
- {
- const char *data;
- apr_size_t len;
-
- if (APR_BUCKET_IS_EOS(bucket)) {
- seen_eos = 1;
- break;
- }
-
- /* These are metadata buckets. */
- if (bucket->length == 0) {
- continue;
- }
-
- /* We MUST read because in case we have an unknown-length
- * bucket or one that morphs, we want to exhaust it.
- */
- rv = apr_bucket_read(bucket, &data, &len, APR_BLOCK_READ);
- if (rv != APR_SUCCESS) {
- apr_brigade_destroy(bb);
- return HTTP_BAD_REQUEST;
- }
- }
- apr_brigade_cleanup(bb);
- } while (!seen_eos);
-
- return OK;
-}
-
static const char *add_optional_notes(request_rec *r,
const char *prefix,
const char *key,
@@ -2594,527 +1403,3 @@ AP_DECLARE(void) ap_clear_method_list(ap_method_list_t *l)
l->method_list->nelts = 0;
}
-/* Generate the human-readable hex representation of an unsigned long
- * (basically a faster version of 'sprintf("%lx")')
- */
-#define HEX_DIGITS "0123456789abcdef"
-static char *etag_ulong_to_hex(char *next, unsigned long u)
-{
- int printing = 0;
- int shift = sizeof(unsigned long) * 8 - 4;
- do {
- unsigned long next_digit = ((u >> shift) & (unsigned long)0xf);
- if (next_digit) {
- *next++ = HEX_DIGITS[next_digit];
- printing = 1;
- }
- else if (printing) {
- *next++ = HEX_DIGITS[next_digit];
- }
- shift -= 4;
- } while (shift);
- *next++ = HEX_DIGITS[u & (unsigned long)0xf];
- return next;
-}
-
-#define ETAG_WEAK "W/"
-#define CHARS_PER_UNSIGNED_LONG (sizeof(unsigned long) * 2)
-/*
- * Construct an entity tag (ETag) from resource information. If it's a real
- * file, build in some of the file characteristics. If the modification time
- * is newer than (request-time minus 1 second), mark the ETag as weak - it
- * could be modified again in as short an interval. We rationalize the
- * modification time we're given to keep it from being in the future.
- */
-AP_DECLARE(char *) ap_make_etag(request_rec *r, int force_weak)
-{
- char *weak;
- apr_size_t weak_len;
- char *etag;
- char *next;
- core_dir_config *cfg;
- etag_components_t etag_bits;
- etag_components_t bits_added;
-
- cfg = (core_dir_config *)ap_get_module_config(r->per_dir_config,
- &core_module);
- etag_bits = (cfg->etag_bits & (~ cfg->etag_remove)) | cfg->etag_add;
-
- /*
- * If it's a file (or we wouldn't be here) and no ETags
- * should be set for files, return an empty string and
- * note it for the header-sender to ignore.
- */
- if (etag_bits & ETAG_NONE) {
- apr_table_setn(r->notes, "no-etag", "omit");
- return "";
- }
-
- if (etag_bits == ETAG_UNSET) {
- etag_bits = ETAG_BACKWARD;
- }
- /*
- * Make an ETag header out of various pieces of information. We use
- * the last-modified date and, if we have a real file, the
- * length and inode number - note that this doesn't have to match
- * the content-length (i.e. includes), it just has to be unique
- * for the file.
- *
- * If the request was made within a second of the last-modified date,
- * we send a weak tag instead of a strong one, since it could
- * be modified again later in the second, and the validation
- * would be incorrect.
- */
- if ((r->request_time - r->mtime > (1 * APR_USEC_PER_SEC)) &&
- !force_weak) {
- weak = NULL;
- weak_len = 0;
- }
- else {
- weak = ETAG_WEAK;
- weak_len = sizeof(ETAG_WEAK);
- }
-
- if (r->finfo.filetype != 0) {
- /*
- * ETag gets set to [W/]"inode-size-mtime", modulo any
- * FileETag keywords.
- */
- etag = apr_palloc(r->pool, weak_len + sizeof("\"--\"") +
- 3 * CHARS_PER_UNSIGNED_LONG + 1);
- next = etag;
- if (weak) {
- while (*weak) {
- *next++ = *weak++;
- }
- }
- *next++ = '"';
- bits_added = 0;
- if (etag_bits & ETAG_INODE) {
- next = etag_ulong_to_hex(next, (unsigned long)r->finfo.inode);
- bits_added |= ETAG_INODE;
- }
- if (etag_bits & ETAG_SIZE) {
- if (bits_added != 0) {
- *next++ = '-';
- }
- next = etag_ulong_to_hex(next, (unsigned long)r->finfo.size);
- bits_added |= ETAG_SIZE;
- }
- if (etag_bits & ETAG_MTIME) {
- if (bits_added != 0) {
- *next++ = '-';
- }
- next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
- }
- *next++ = '"';
- *next = '\0';
- }
- else {
- /*
- * Not a file document, so just use the mtime: [W/]"mtime"
- */
- etag = apr_palloc(r->pool, weak_len + sizeof("\"\"") +
- CHARS_PER_UNSIGNED_LONG + 1);
- next = etag;
- if (weak) {
- while (*weak) {
- *next++ = *weak++;
- }
- }
- *next++ = '"';
- next = etag_ulong_to_hex(next, (unsigned long)r->mtime);
- *next++ = '"';
- *next = '\0';
- }
-
- return etag;
-}
-
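For a plain file the result is [W/]"inode-size-mtime", each component in lowercase hex. A compact illustration of the strong-tag layout using snprintf() instead of etag_ulong_to_hex(); the values are made up, and the real code above also skips leading zeros and honours the FileETag keywords:

    #include <stdio.h>

    int main(void)
    {
        unsigned long inode = 0x2c39, size = 0x17b8, mtime = 0x43b2f0aaUL;
        char etag[64];

        snprintf(etag, sizeof(etag), "\"%lx-%lx-%lx\"", inode, size, mtime);
        printf("ETag: %s\n", etag);    /* ETag: "2c39-17b8-43b2f0aa" */
        printf("ETag: W/%s\n", etag);  /* weak form when mtime is too recent */
        return 0;
    }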
-AP_DECLARE(void) ap_set_etag(request_rec *r)
-{
- char *etag;
- char *variant_etag, *vlv;
- int vlv_weak;
-
- if (!r->vlist_validator) {
- etag = ap_make_etag(r, 0);
-
- /* If we get a blank etag back, don't set the header. */
- if (!etag[0]) {
- return;
- }
- }
- else {
- /* If we have a variant list validator (vlv) due to the
- * response being negotiated, then we create a structured
- * entity tag which merges the variant etag with the variant
- * list validator (vlv). This merging makes revalidation
- * somewhat safer, ensures that caches which can deal with
- * Vary will (eventually) be updated if the set of variants is
- * changed, and is also a protocol requirement for transparent
- * content negotiation.
- */
-
- /* if the variant list validator is weak, we make the whole
- * structured etag weak. If we would not, then clients could
- * have problems merging range responses if we have different
- * variants with the same non-globally-unique strong etag.
- */
-
- vlv = r->vlist_validator;
- vlv_weak = (vlv[0] == 'W');
-
- variant_etag = ap_make_etag(r, vlv_weak);
-
- /* If we get a blank etag back, don't append vlv and stop now. */
- if (!variant_etag[0]) {
- return;
- }
-
- /* merge variant_etag and vlv into a structured etag */
- variant_etag[strlen(variant_etag) - 1] = '\0';
- if (vlv_weak) {
- vlv += 3;
- }
- else {
- vlv++;
- }
- etag = apr_pstrcat(r->pool, variant_etag, ";", vlv, NULL);
- }
-
- apr_table_setn(r->headers_out, "ETag", etag);
-}
-
-static int parse_byterange(char *range, apr_off_t clength,
- apr_off_t *start, apr_off_t *end)
-{
- char *dash = strchr(range, '-');
- char *errp;
- apr_off_t number;
-
- if (!dash) {
- return 0;
- }
-
- if ((dash == range)) {
- /* In the form "-5" */
- if (apr_strtoff(&number, dash+1, &errp, 10) || *errp) {
- return 0;
- }
- *start = clength - number;
- *end = clength - 1;
- }
- else {
- *dash++ = '\0';
- if (apr_strtoff(&number, range, &errp, 10) || *errp) {
- return 0;
- }
- *start = number;
- if (*dash) {
- if (apr_strtoff(&number, dash, &errp, 10) || *errp) {
- return 0;
- }
- *end = number;
- }
- else { /* "5-" */
- *end = clength - 1;
- }
- }
-
- if (*start < 0) {
- *start = 0;
- }
-
- if (*end >= clength) {
- *end = clength - 1;
- }
-
- if (*start > *end) {
- return -1;
- }
-
- return (*start > 0 || *end < clength);
-}
-
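A worked example of the arithmetic above, assuming clength = 10000:

    "0-499"       ->  start 0,    end 499    (first 500 bytes)
    "-500"        ->  start 9500, end 9999   (final 500 bytes)
    "9500-"       ->  start 9500, end 9999   (from offset 9500 to the end)
    "9800-10200"  ->  start 9800, end 9999   (end clamped to clength - 1)
    "500-200"     ->  start > end, so parse_byterange() returns -1 and the
                      byterange filter below skips the range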
-static int ap_set_byterange(request_rec *r);
-
-typedef struct byterange_ctx {
- apr_bucket_brigade *bb;
- int num_ranges;
- char *boundary;
- char *bound_head;
-} byterange_ctx;
-
-/*
- * Here we try to be compatible with clients that want multipart/x-byteranges
- * instead of multipart/byteranges (also see above), as per HTTP/1.1. We
- * look for the Request-Range header (e.g. Netscape 2 and 3) as an indication
- * that the browser supports an older protocol. We also check User-Agent
- * for Microsoft Internet Explorer 3, which needs this as well.
- */
-static int use_range_x(request_rec *r)
-{
- const char *ua;
- return (apr_table_get(r->headers_in, "Request-Range")
- || ((ua = apr_table_get(r->headers_in, "User-Agent"))
- && ap_strstr_c(ua, "MSIE 3")));
-}
-
-#define BYTERANGE_FMT "%" APR_OFF_T_FMT "-%" APR_OFF_T_FMT "/%" APR_OFF_T_FMT
-#define PARTITION_ERR_FMT "apr_brigade_partition() failed " \
- "[%" APR_OFF_T_FMT ",%" APR_OFF_T_FMT "]"
-
-AP_CORE_DECLARE_NONSTD(apr_status_t) ap_byterange_filter(ap_filter_t *f,
- apr_bucket_brigade *bb)
-{
-#define MIN_LENGTH(len1, len2) ((len1 > len2) ? len2 : len1)
- request_rec *r = f->r;
- conn_rec *c = r->connection;
- byterange_ctx *ctx = f->ctx;
- apr_bucket *e;
- apr_bucket_brigade *bsend;
- apr_off_t range_start;
- apr_off_t range_end;
- char *current;
- apr_off_t bb_length;
- apr_off_t clength = 0;
- apr_status_t rv;
- int found = 0;
-
- if (!ctx) {
- int num_ranges = ap_set_byterange(r);
-
- /* We have nothing to do, get out of the way. */
- if (num_ranges == 0) {
- ap_remove_output_filter(f);
- return ap_pass_brigade(f->next, bb);
- }
-
- ctx = f->ctx = apr_pcalloc(r->pool, sizeof(*ctx));
- ctx->num_ranges = num_ranges;
- /* create a brigade in case we never call ap_save_brigade() */
- ctx->bb = apr_brigade_create(r->pool, c->bucket_alloc);
-
- if (ctx->num_ranges > 1) {
- /* Is ap_make_content_type required here? */
- const char *orig_ct = ap_make_content_type(r, r->content_type);
- ctx->boundary = apr_psprintf(r->pool, "%" APR_UINT64_T_HEX_FMT "%lx",
- (apr_uint64_t)r->request_time, (long) getpid());
-
- ap_set_content_type(r, apr_pstrcat(r->pool, "multipart",
- use_range_x(r) ? "/x-" : "/",
- "byteranges; boundary=",
- ctx->boundary, NULL));
-
- ctx->bound_head = apr_pstrcat(r->pool,
- CRLF "--", ctx->boundary,
- CRLF "Content-type: ",
- orig_ct,
- CRLF "Content-range: bytes ",
- NULL);
- ap_xlate_proto_to_ascii(ctx->bound_head, strlen(ctx->bound_head));
- }
- }
-
- /* We can't actually deal with byte-ranges until we have the whole brigade
- * because the byte-ranges can be in any order, and according to the RFC,
- * we SHOULD return the data in the same order it was requested.
- *
- * XXX: We really need to dump all bytes prior to the start of the earliest
- * range, and only slurp up to the end of the latest range. By this we
- * mean that we should peek-ahead at the lowest first byte of any range,
- * and the highest last byte of any range.
- */
- if (!APR_BUCKET_IS_EOS(APR_BRIGADE_LAST(bb))) {
- ap_save_brigade(f, &ctx->bb, &bb, r->pool);
- return APR_SUCCESS;
- }
-
- /* Prepend any earlier saved brigades. */
- APR_BRIGADE_PREPEND(bb, ctx->bb);
-
- /* It is possible that we won't have a content length yet, so we have to
- * compute the length before we can actually do the byterange work.
- */
- apr_brigade_length(bb, 1, &bb_length);
- clength = (apr_off_t)bb_length;
-
- /* this brigade holds what we will be sending */
- bsend = apr_brigade_create(r->pool, c->bucket_alloc);
-
- while ((current = ap_getword(r->pool, &r->range, ','))
- && (rv = parse_byterange(current, clength, &range_start,
- &range_end))) {
- apr_bucket *e2;
- apr_bucket *ec;
-
- if (rv == -1) {
- continue;
- }
-
- /* these calls to apr_brigade_partition() should theoretically
- * never fail because of the above call to apr_brigade_length(),
- * but what the heck, we'll check for an error anyway */
- if ((rv = apr_brigade_partition(bb, range_start, &ec)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- PARTITION_ERR_FMT, range_start, clength);
- continue;
- }
- if ((rv = apr_brigade_partition(bb, range_end+1, &e2)) != APR_SUCCESS) {
- ap_log_rerror(APLOG_MARK, APLOG_ERR, rv, r,
- PARTITION_ERR_FMT, range_end+1, clength);
- continue;
- }
-
- found = 1;
-
- /* For single range requests, we must produce Content-Range header.
- * Otherwise, we need to produce the multipart boundaries.
- */
- if (ctx->num_ranges == 1) {
- apr_table_setn(r->headers_out, "Content-Range",
- apr_psprintf(r->pool, "bytes " BYTERANGE_FMT,
- range_start, range_end, clength));
- }
- else {
- char *ts;
-
- e = apr_bucket_pool_create(ctx->bound_head, strlen(ctx->bound_head),
- r->pool, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
-
- ts = apr_psprintf(r->pool, BYTERANGE_FMT CRLF CRLF,
- range_start, range_end, clength);
- ap_xlate_proto_to_ascii(ts, strlen(ts));
- e = apr_bucket_pool_create(ts, strlen(ts), r->pool,
- c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
- }
-
- do {
- apr_bucket *foo;
- const char *str;
- apr_size_t len;
-
- if (apr_bucket_copy(ec, &foo) != APR_SUCCESS) {
- /* this shouldn't ever happen due to the call to
- * apr_brigade_length() above which normalizes
- * indeterminate-length buckets. just to be sure,
- * though, this takes care of uncopyable buckets that
- * do somehow manage to slip through.
- */
- /* XXX: check for failure? */
- apr_bucket_read(ec, &str, &len, APR_BLOCK_READ);
- apr_bucket_copy(ec, &foo);
- }
- APR_BRIGADE_INSERT_TAIL(bsend, foo);
- ec = APR_BUCKET_NEXT(ec);
- } while (ec != e2);
- }
-
- if (found == 0) {
- ap_remove_output_filter(f);
- r->status = HTTP_OK;
- /* bsend is assumed to be empty if we get here. */
- e = ap_bucket_error_create(HTTP_RANGE_NOT_SATISFIABLE, NULL,
- r->pool, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
- e = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
- return ap_pass_brigade(f->next, bsend);
- }
-
- if (ctx->num_ranges > 1) {
- char *end;
-
- /* add the final boundary */
- end = apr_pstrcat(r->pool, CRLF "--", ctx->boundary, "--" CRLF, NULL);
- ap_xlate_proto_to_ascii(end, strlen(end));
- e = apr_bucket_pool_create(end, strlen(end), r->pool, c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
- }
-
- e = apr_bucket_eos_create(c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(bsend, e);
-
- /* we're done with the original content - all of our data is in bsend. */
- apr_brigade_destroy(bb);
-
- /* send our multipart output */
- return ap_pass_brigade(f->next, bsend);
-}
-
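For a request with several ranges, the brigade built above produces a body of the following shape (the boundary value and lengths are illustrative; a single-range request instead gets a plain Content-Range response header, as the num_ranges == 1 branch shows):

    Content-Type: multipart/byteranges; boundary=3d6b6a416f9b5

    --3d6b6a416f9b5
    Content-type: text/plain
    Content-range: bytes 0-4/10000

    [bytes 0-4 of the resource]

    --3d6b6a416f9b5
    Content-type: text/plain
    Content-range: bytes 9995-9999/10000

    [bytes 9995-9999 of the resource]

    --3d6b6a416f9b5--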
-static int ap_set_byterange(request_rec *r)
-{
- const char *range;
- const char *if_range;
- const char *match;
- const char *ct;
- int num_ranges;
-
- if (r->assbackwards) {
- return 0;
- }
-
- /* Check for Range request-header (HTTP/1.1) or Request-Range for
- * backwards-compatibility with second-draft Luotonen/Franks
- * byte-ranges (e.g. Netscape Navigator 2-3).
- *
- * We support this form, with Request-Range, and (farther down) we
- * send multipart/x-byteranges instead of multipart/byteranges for
- * Request-Range based requests to work around a bug in Netscape
- * Navigator 2-3 and MSIE 3.
- */
-
- if (!(range = apr_table_get(r->headers_in, "Range"))) {
- range = apr_table_get(r->headers_in, "Request-Range");
- }
-
- if (!range || strncasecmp(range, "bytes=", 6) || r->status != HTTP_OK) {
- return 0;
- }
-
- /* is content already a single range? */
- if (apr_table_get(r->headers_out, "Content-Range")) {
- return 0;
- }
-
- /* is content already a multiple range? */
- if ((ct = apr_table_get(r->headers_out, "Content-Type"))
- && (!strncasecmp(ct, "multipart/byteranges", 20)
- || !strncasecmp(ct, "multipart/x-byteranges", 22))) {
- return 0;
- }
-
- /* Check the If-Range header for Etag or Date.
- * Note that this check will return false (as required) if either
- * of the two etags are weak.
- */
- if ((if_range = apr_table_get(r->headers_in, "If-Range"))) {
- if (if_range[0] == '"') {
- if (!(match = apr_table_get(r->headers_out, "Etag"))
- || (strcmp(if_range, match) != 0)) {
- return 0;
- }
- }
- else if (!(match = apr_table_get(r->headers_out, "Last-Modified"))
- || (strcmp(if_range, match) != 0)) {
- return 0;
- }
- }
-
- if (!ap_strchr_c(range, ',')) {
- /* a single range */
- num_ranges = 1;
- }
- else {
- /* a multiple range */
- num_ranges = 2;
- }
-
- r->status = HTTP_PARTIAL_CONTENT;
- r->range = range + 6;
-
- return num_ranges;
-}
diff --git a/server/Makefile.in b/server/Makefile.in
index 871d4e8dd7..5f5df99b9f 100644
--- a/server/Makefile.in
+++ b/server/Makefile.in
@@ -14,7 +14,7 @@ LTLIBRARY_SOURCES = \
mpm_common.c util_charset.c util_debug.c util_xml.c \
util_filter.c exports.c buildmark.c \
scoreboard.c error_bucket.c protocol.c core.c request.c provider.c \
- eoc_bucket.c
+ eoc_bucket.c core_filters.c
TARGETS = delete-exports $(LTLIBRARY_NAME) $(CORE_IMPLIB_FILE) export_vars.h httpd.exp
diff --git a/server/core.c b/server/core.c
index 5f27ae10de..f340585d38 100644
--- a/server/core.c
+++ b/server/core.c
@@ -93,6 +93,12 @@ AP_DECLARE_DATA ap_filter_rec_t *ap_content_length_filter_handle;
AP_DECLARE_DATA ap_filter_rec_t *ap_net_time_filter_handle;
AP_DECLARE_DATA ap_filter_rec_t *ap_core_input_filter_handle;
+extern int core_input_filter(ap_filter_t *, apr_bucket_brigade *,
+ ap_input_mode_t, apr_read_type_e, apr_off_t);
+extern int net_time_filter(ap_filter_t *, apr_bucket_brigade *,
+ ap_input_mode_t, apr_read_type_e, apr_off_t);
+extern apr_status_t core_output_filter(ap_filter_t *, apr_bucket_brigade *);
+
/* magic pointer for ErrorDocument xxx "default" */
static char errordocument_default;
@@ -3008,218 +3014,6 @@ void ap_add_output_filters_by_type(request_rec *r)
return;
}
-static apr_status_t writev_it_all(apr_socket_t *s,
- struct iovec *vec, int nvec,
- apr_size_t len, apr_size_t *nbytes)
-{
- apr_size_t bytes_written = 0;
- apr_status_t rv;
- apr_size_t n = len;
- int i = 0;
-
- *nbytes = 0;
-
- /* XXX handle checking for non-blocking socket */
- while (bytes_written != len) {
- rv = apr_socket_sendv(s, vec + i, nvec - i, &n);
- *nbytes += n;
- bytes_written += n;
- if (rv != APR_SUCCESS)
- return rv;
-
- /* If the write did not complete, adjust the iovecs and issue
- * apr_socket_sendv again
- */
- if (bytes_written < len) {
- /* Skip over the vectors that have already been written */
- apr_size_t cnt = vec[i].iov_len;
- while (n >= cnt && i + 1 < nvec) {
- i++;
- cnt += vec[i].iov_len;
- }
-
- if (n < cnt) {
- /* Handle partial write of vec i */
- vec[i].iov_base = (char *) vec[i].iov_base +
- (vec[i].iov_len - (cnt - n));
-                vec[i].iov_len = cnt - n;
- }
- }
-
- n = len - bytes_written;
- }
-
- return APR_SUCCESS;
-}
-
-/* sendfile_it_all()
- * send the entire file using sendfile()
- * handle partial writes
- * return only when all bytes have been sent or an error is encountered.
- */
-
-#if APR_HAS_SENDFILE
-static apr_status_t sendfile_it_all(core_net_rec *c,
- apr_file_t *fd,
- apr_hdtr_t *hdtr,
- apr_off_t file_offset,
- apr_size_t file_bytes_left,
- apr_size_t total_bytes_left,
- apr_size_t *bytes_sent,
- apr_int32_t flags)
-{
- apr_status_t rv;
-#ifdef AP_DEBUG
- apr_interval_time_t timeout = 0;
-#endif
-
- AP_DEBUG_ASSERT((apr_socket_timeout_get(c->client_socket, &timeout)
- == APR_SUCCESS)
- && timeout > 0); /* socket must be in timeout mode */
-
- /* Reset the bytes_sent field */
- *bytes_sent = 0;
-
- do {
- apr_size_t tmplen = file_bytes_left;
-
- rv = apr_socket_sendfile(c->client_socket, fd, hdtr, &file_offset, &tmplen,
- flags);
- *bytes_sent += tmplen;
- total_bytes_left -= tmplen;
- if (!total_bytes_left || rv != APR_SUCCESS) {
- return rv; /* normal case & error exit */
- }
-
- AP_DEBUG_ASSERT(total_bytes_left > 0 && tmplen > 0);
-
- /* partial write, oooh noooo...
- * Skip over any header data which was written
- */
- while (tmplen && hdtr->numheaders) {
- if (tmplen >= hdtr->headers[0].iov_len) {
- tmplen -= hdtr->headers[0].iov_len;
- --hdtr->numheaders;
- ++hdtr->headers;
- }
- else {
- char *iov_base = (char *)hdtr->headers[0].iov_base;
-
- hdtr->headers[0].iov_len -= tmplen;
- iov_base += tmplen;
- hdtr->headers[0].iov_base = iov_base;
- tmplen = 0;
- }
- }
-
- /* Skip over any file data which was written */
-
- if (tmplen <= file_bytes_left) {
- file_offset += tmplen;
- file_bytes_left -= tmplen;
- continue;
- }
-
- tmplen -= file_bytes_left;
- file_bytes_left = 0;
- file_offset = 0;
-
- /* Skip over any trailer data which was written */
-
- while (tmplen && hdtr->numtrailers) {
- if (tmplen >= hdtr->trailers[0].iov_len) {
- tmplen -= hdtr->trailers[0].iov_len;
- --hdtr->numtrailers;
- ++hdtr->trailers;
- }
- else {
- char *iov_base = (char *)hdtr->trailers[0].iov_base;
-
- hdtr->trailers[0].iov_len -= tmplen;
- iov_base += tmplen;
- hdtr->trailers[0].iov_base = iov_base;
- tmplen = 0;
- }
- }
- } while (1);
-}
-#endif
-
-/*
- * emulate_sendfile()
- * Sends the contents of file fd along with header/trailer bytes, if any,
- * to the network. emulate_sendfile will return only when all the bytes have been
- * sent (i.e., it handles partial writes) or on a network error condition.
- */
-static apr_status_t emulate_sendfile(core_net_rec *c, apr_file_t *fd,
- apr_hdtr_t *hdtr, apr_off_t offset,
- apr_size_t length, apr_size_t *nbytes)
-{
- apr_status_t rv = APR_SUCCESS;
- apr_size_t togo; /* Remaining number of bytes in the file to send */
- apr_size_t sendlen = 0;
- apr_size_t bytes_sent;
- apr_int32_t i;
- apr_off_t o; /* Track the file offset for partial writes */
- char buffer[8192];
-
- *nbytes = 0;
-
- /* Send the headers
- * writev_it_all handles partial writes.
- * XXX: optimization... if headers are less than MIN_WRITE_SIZE, copy
- * them into buffer
- */
- if (hdtr && hdtr->numheaders > 0 ) {
- for (i = 0; i < hdtr->numheaders; i++) {
- sendlen += hdtr->headers[i].iov_len;
- }
-
- rv = writev_it_all(c->client_socket, hdtr->headers, hdtr->numheaders,
- sendlen, &bytes_sent);
- *nbytes += bytes_sent; /* track total bytes sent */
- }
-
- /* Seek the file to 'offset' */
- if (offset >= 0 && rv == APR_SUCCESS) {
- rv = apr_file_seek(fd, APR_SET, &offset);
- }
-
- /* Send the file, making sure to handle partial writes */
- togo = length;
- while (rv == APR_SUCCESS && togo) {
- sendlen = togo > sizeof(buffer) ? sizeof(buffer) : togo;
- o = 0;
- rv = apr_file_read(fd, buffer, &sendlen);
- while (rv == APR_SUCCESS && sendlen) {
- bytes_sent = sendlen;
- rv = apr_socket_send(c->client_socket, &buffer[o], &bytes_sent);
- *nbytes += bytes_sent;
- if (rv == APR_SUCCESS) {
- sendlen -= bytes_sent; /* sendlen != bytes_sent ==> partial write */
- o += bytes_sent; /* o is where we are in the buffer */
- togo -= bytes_sent; /* track how much of the file we've sent */
- }
- }
- }
-
- /* Send the trailers
- * XXX: optimization... if it will fit, send this on the last send in the
- * loop above
- */
- sendlen = 0;
- if ( rv == APR_SUCCESS && hdtr && hdtr->numtrailers > 0 ) {
- for (i = 0; i < hdtr->numtrailers; i++) {
- sendlen += hdtr->trailers[i].iov_len;
- }
- rv = writev_it_all(c->client_socket, hdtr->trailers, hdtr->numtrailers,
- sendlen, &bytes_sent);
- *nbytes += bytes_sent;
- }
-
- return rv;
-}
-
/* Note --- ErrorDocument will now work from .htaccess files.
* The AllowOverride of Fileinfo allows webmasters to turn it off
*/
@@ -3574,8 +3368,6 @@ static int core_override_type(request_rec *r)
return OK;
}
-
-
static int default_handler(request_rec *r)
{
conn_rec *c = r->connection;
@@ -3733,664 +3525,10 @@ static int default_handler(request_rec *r)
}
}
-typedef struct net_time_filter_ctx {
- apr_socket_t *csd;
- int first_line;
-} net_time_filter_ctx_t;
-static int net_time_filter(ap_filter_t *f, apr_bucket_brigade *b,
- ap_input_mode_t mode, apr_read_type_e block,
- apr_off_t readbytes)
-{
- net_time_filter_ctx_t *ctx = f->ctx;
- int keptalive = f->c->keepalive == AP_CONN_KEEPALIVE;
-
- if (!ctx) {
- f->ctx = ctx = apr_palloc(f->r->pool, sizeof(*ctx));
- ctx->first_line = 1;
- ctx->csd = ap_get_module_config(f->c->conn_config, &core_module);
- }
-
- if (mode != AP_MODE_INIT && mode != AP_MODE_EATCRLF) {
- if (ctx->first_line) {
- apr_socket_timeout_set(ctx->csd,
- keptalive
- ? f->c->base_server->keep_alive_timeout
- : f->c->base_server->timeout);
- ctx->first_line = 0;
- }
- else {
- if (keptalive) {
- apr_socket_timeout_set(ctx->csd, f->c->base_server->timeout);
- }
- }
- }
- return ap_get_brigade(f->next, b, mode, block, readbytes);
-}
-
-/**
- * Remove all zero length buckets from the brigade.
- */
-#define BRIGADE_NORMALIZE(b) \
-do { \
- apr_bucket *e = APR_BRIGADE_FIRST(b); \
- do { \
- if (e->length == 0 && !APR_BUCKET_IS_METADATA(e)) { \
- apr_bucket *d; \
- d = APR_BUCKET_NEXT(e); \
- apr_bucket_delete(e); \
- e = d; \
- } \
- e = APR_BUCKET_NEXT(e); \
- } while (!APR_BRIGADE_EMPTY(b) && (e != APR_BRIGADE_SENTINEL(b))); \
-} while (0)
-
-static int core_input_filter(ap_filter_t *f, apr_bucket_brigade *b,
- ap_input_mode_t mode, apr_read_type_e block,
- apr_off_t readbytes)
-{
- apr_bucket *e;
- apr_status_t rv;
- core_net_rec *net = f->ctx;
- core_ctx_t *ctx = net->in_ctx;
- const char *str;
- apr_size_t len;
-
- if (mode == AP_MODE_INIT) {
- /*
- * this mode is for filters that might need to 'initialize'
- * a connection before reading request data from a client.
- * NNTP over SSL for example needs to handshake before the
- * server sends the welcome message.
- * such filters would have changed the mode before this point
- * is reached. however, protocol modules such as NNTP should
- * not need to know anything about SSL. given the example, if
- * SSL is not in the filter chain, AP_MODE_INIT is a noop.
- */
- return APR_SUCCESS;
- }
-
- if (!ctx)
- {
- ctx = apr_pcalloc(f->c->pool, sizeof(*ctx));
- ctx->b = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
-
- /* seed the brigade with the client socket. */
- e = apr_bucket_socket_create(net->client_socket, f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(ctx->b, e);
- net->in_ctx = ctx;
- }
- else if (APR_BRIGADE_EMPTY(ctx->b)) {
- return APR_EOF;
- }
-
- /* ### This is bad. */
- BRIGADE_NORMALIZE(ctx->b);
-
- /* check for empty brigade again *AFTER* BRIGADE_NORMALIZE()
- * If we have lost our socket bucket (see above), we are EOF.
- *
- * Ideally, this should be returning SUCCESS with EOS bucket, but
- * some higher-up APIs (spec. read_request_line via ap_rgetline)
- * want an error code. */
- if (APR_BRIGADE_EMPTY(ctx->b)) {
- return APR_EOF;
- }
-
- if (mode == AP_MODE_GETLINE) {
- /* we are reading a single LF line, e.g. the HTTP headers */
- rv = apr_brigade_split_line(b, ctx->b, block, HUGE_STRING_LEN);
- /* We should treat EAGAIN here the same as we do for EOF (brigade is
- * empty). We do this by returning whatever we have read. This may
- * or may not be bogus, but is consistent (for now) with EOF logic.
- */
- if (APR_STATUS_IS_EAGAIN(rv)) {
- rv = APR_SUCCESS;
- }
- return rv;
- }
-
- /* ### AP_MODE_PEEK is a horrific name for this mode because we also
- * eat any CRLFs that we see. That's not the obvious intention of
- * this mode. Determine whether anyone actually uses this or not. */
- if (mode == AP_MODE_EATCRLF) {
- apr_bucket *e;
- const char *c;
-
- /* The purpose of this loop is to ignore any CRLF (or LF) at the end
- * of a request. Many browsers send extra lines at the end of POST
- * requests. We use the PEEK method to determine if there is more
- * data on the socket, so that we know if we should delay sending the
- * end of one request until we have served the second request in a
- * pipelined situation. We don't want to actually delay sending a
-         * response if the server finds a CRLF (or LF), because that doesn't
- * mean that there is another request, just a blank line.
- */
- while (1) {
- if (APR_BRIGADE_EMPTY(ctx->b))
- return APR_EOF;
-
- e = APR_BRIGADE_FIRST(ctx->b);
-
- rv = apr_bucket_read(e, &str, &len, APR_NONBLOCK_READ);
-
- if (rv != APR_SUCCESS)
- return rv;
-
- c = str;
- while (c < str + len) {
- if (*c == APR_ASCII_LF)
- c++;
- else if (*c == APR_ASCII_CR && *(c + 1) == APR_ASCII_LF)
- c += 2;
- else
- return APR_SUCCESS;
- }
-
- /* If we reach here, we were a bucket just full of CRLFs, so
- * just toss the bucket. */
- /* FIXME: Is this the right thing to do in the core? */
- apr_bucket_delete(e);
- }
- return APR_SUCCESS;
- }
-
- /* If mode is EXHAUSTIVE, we want to just read everything until the end
- * of the brigade, which in this case means the end of the socket.
-     * To do this, we attach the brigade that has currently been set aside to
- * the brigade that was passed down, and send that brigade back.
- *
- * NOTE: This is VERY dangerous to use, and should only be done with
- * extreme caution. However, the Perchild MPM needs this feature
- * if it is ever going to work correctly again. With this, the Perchild
- * MPM can easily request the socket and all data that has been read,
- * which means that it can pass it to the correct child process.
- */
- if (mode == AP_MODE_EXHAUSTIVE) {
- apr_bucket *e;
-
- /* Tack on any buckets that were set aside. */
- APR_BRIGADE_CONCAT(b, ctx->b);
-
- /* Since we've just added all potential buckets (which will most
- * likely simply be the socket bucket) we know this is the end,
- * so tack on an EOS too. */
- /* We have read until the brigade was empty, so we know that we
- * must be EOS. */
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- return APR_SUCCESS;
- }
-
- /* read up to the amount they specified. */
- if (mode == AP_MODE_READBYTES || mode == AP_MODE_SPECULATIVE) {
- apr_bucket *e;
- apr_bucket_brigade *newbb;
-
- AP_DEBUG_ASSERT(readbytes > 0);
-
- e = APR_BRIGADE_FIRST(ctx->b);
- rv = apr_bucket_read(e, &str, &len, block);
-
- if (APR_STATUS_IS_EAGAIN(rv)) {
- return APR_SUCCESS;
- }
- else if (rv != APR_SUCCESS) {
- return rv;
- }
- else if (block == APR_BLOCK_READ && len == 0) {
- /* We wanted to read some bytes in blocking mode. We read
- * 0 bytes. Hence, we now assume we are EOS.
- *
- * When we are in normal mode, return an EOS bucket to the
- * caller.
- * When we are in speculative mode, leave ctx->b empty, so
- * that the next call returns an EOS bucket.
- */
- apr_bucket_delete(e);
-
- if (mode == AP_MODE_READBYTES) {
- e = apr_bucket_eos_create(f->c->bucket_alloc);
- APR_BRIGADE_INSERT_TAIL(b, e);
- }
- return APR_SUCCESS;
- }
-
- /* We can only return at most what we read. */
- if (len < readbytes) {
- readbytes = len;
- }
-
- rv = apr_brigade_partition(ctx->b, readbytes, &e);
- if (rv != APR_SUCCESS) {
- return rv;
- }
-
- /* Must do split before CONCAT */
- newbb = apr_brigade_split(ctx->b, e);
-
- if (mode == AP_MODE_READBYTES) {
- APR_BRIGADE_CONCAT(b, ctx->b);
- }
- else if (mode == AP_MODE_SPECULATIVE) {
- apr_bucket *copy_bucket;
-
- for (e = APR_BRIGADE_FIRST(ctx->b);
- e != APR_BRIGADE_SENTINEL(ctx->b);
- e = APR_BUCKET_NEXT(e))
- {
- rv = apr_bucket_copy(e, &copy_bucket);
- if (rv != APR_SUCCESS) {
- return rv;
- }
- APR_BRIGADE_INSERT_TAIL(b, copy_bucket);
- }
- }
-
- /* Take what was originally there and place it back on ctx->b */
- APR_BRIGADE_CONCAT(ctx->b, newbb);
- }
- return APR_SUCCESS;
-}
-
-#define MAX_IOVEC_TO_WRITE 16
-
/* Optional function coming from mod_logio, used for logging of output
* traffic
*/
-static APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *logio_add_bytes_out;
-
-static apr_status_t core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
-{
- apr_status_t rv;
- apr_bucket_brigade *more;
- conn_rec *c = f->c;
- core_net_rec *net = f->ctx;
- core_output_filter_ctx_t *ctx = net->out_ctx;
- apr_read_type_e eblock = APR_NONBLOCK_READ;
- apr_pool_t *input_pool = b->p;
-
- if (ctx == NULL) {
- ctx = apr_pcalloc(c->pool, sizeof(*ctx));
- net->out_ctx = ctx;
- }
-
- /* If we have a saved brigade, concatenate the new brigade to it */
- if (ctx->b) {
- APR_BRIGADE_CONCAT(ctx->b, b);
- b = ctx->b;
- ctx->b = NULL;
- }
-
- /* Perform multiple passes over the brigade, sending batches of output
- to the connection. */
- while (b && !APR_BRIGADE_EMPTY(b)) {
- apr_size_t nbytes = 0;
- apr_bucket *last_e = NULL; /* initialized for debugging */
- apr_bucket *e;
-
- /* one group of iovecs per pass over the brigade */
- apr_size_t nvec = 0;
- apr_size_t nvec_trailers = 0;
- struct iovec vec[MAX_IOVEC_TO_WRITE];
- struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];
-
- /* one file per pass over the brigade */
- apr_file_t *fd = NULL;
- apr_size_t flen = 0;
- apr_off_t foffset = 0;
-
- /* keep track of buckets that we've concatenated
- * to avoid small writes
- */
- apr_bucket *last_merged_bucket = NULL;
-
- /* tail of brigade if we need another pass */
- more = NULL;
-
- /* Iterate over the brigade: collect iovecs and/or a file */
- for (e = APR_BRIGADE_FIRST(b);
- e != APR_BRIGADE_SENTINEL(b);
- e = APR_BUCKET_NEXT(e))
- {
- /* keep track of the last bucket processed */
- last_e = e;
- if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
- break;
- }
- else if (APR_BUCKET_IS_FLUSH(e)) {
- if (e != APR_BRIGADE_LAST(b)) {
- more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
- }
- break;
- }
-
- /* It doesn't make any sense to use sendfile for a file bucket
- * that represents 10 bytes.
- */
- else if (APR_BUCKET_IS_FILE(e)
- && (e->length >= AP_MIN_SENDFILE_BYTES)) {
- apr_bucket_file *a = e->data;
-
- /* We can't handle more than one file bucket at a time
- * so we split here and send the file we have already
- * found.
- */
- if (fd) {
- more = apr_brigade_split(b, e);
- break;
- }
-
- fd = a->fd;
- flen = e->length;
- foffset = e->start;
- }
- else {
- const char *str;
- apr_size_t n;
-
- rv = apr_bucket_read(e, &str, &n, eblock);
- if (APR_STATUS_IS_EAGAIN(rv)) {
- /* send what we have so far since we shouldn't expect more
- * output for a while... next time we read, block
- */
- more = apr_brigade_split(b, e);
- eblock = APR_BLOCK_READ;
- break;
- }
- eblock = APR_NONBLOCK_READ;
- if (n) {
- if (!fd) {
- if (nvec == MAX_IOVEC_TO_WRITE) {
- /* woah! too many. buffer them up, for use later. */
- apr_bucket *temp, *next;
- apr_bucket_brigade *temp_brig;
-
- if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
- /* We have enough data in the iovec
- * to justify doing a writev
- */
- more = apr_brigade_split(b, e);
- break;
- }
-
- /* Create a temporary brigade as a means
- * of concatenating a bunch of buckets together
- */
- if (last_merged_bucket) {
- /* If we've concatenated together small
- * buckets already in a previous pass,
- * the initial buckets in this brigade
- * are heap buckets that may have extra
- * space left in them (because they
- * were created by apr_brigade_write()).
- * We can take advantage of this by
- * building the new temp brigade out of
- * these buckets, so that the content
- * in them doesn't have to be copied again.
- */
- apr_bucket_brigade *bb;
- bb = apr_brigade_split(b,
- APR_BUCKET_NEXT(last_merged_bucket));
- temp_brig = b;
- b = bb;
- }
- else {
- temp_brig = apr_brigade_create(f->c->pool,
- f->c->bucket_alloc);
- }
-
- temp = APR_BRIGADE_FIRST(b);
- while (temp != e) {
- apr_bucket *d;
- rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
- apr_brigade_write(temp_brig, NULL, NULL, str, n);
- d = temp;
- temp = APR_BUCKET_NEXT(temp);
- apr_bucket_delete(d);
- }
-
- nvec = 0;
- nbytes = 0;
- temp = APR_BRIGADE_FIRST(temp_brig);
- APR_BUCKET_REMOVE(temp);
- APR_BRIGADE_INSERT_HEAD(b, temp);
- apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
- vec[nvec].iov_base = (char*) str;
- vec[nvec].iov_len = n;
- nvec++;
-
- /* Just in case the temporary brigade has
- * multiple buckets, recover the rest of
- * them and put them in the brigade that
- * we're sending.
- */
- for (next = APR_BRIGADE_FIRST(temp_brig);
- next != APR_BRIGADE_SENTINEL(temp_brig);
- next = APR_BRIGADE_FIRST(temp_brig)) {
- APR_BUCKET_REMOVE(next);
- APR_BUCKET_INSERT_AFTER(temp, next);
- temp = next;
- apr_bucket_read(next, &str, &n,
- APR_BLOCK_READ);
- vec[nvec].iov_base = (char*) str;
- vec[nvec].iov_len = n;
- nvec++;
- }
-
- apr_brigade_destroy(temp_brig);
-
- last_merged_bucket = temp;
- e = temp;
- last_e = e;
- }
- else {
- vec[nvec].iov_base = (char*) str;
- vec[nvec].iov_len = n;
- nvec++;
- }
- }
- else {
- /* The bucket is a trailer to a file bucket */
-
- if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
- /* woah! too many. stop now. */
- more = apr_brigade_split(b, e);
- break;
- }
-
- vec_trailers[nvec_trailers].iov_base = (char*) str;
- vec_trailers[nvec_trailers].iov_len = n;
- nvec_trailers++;
- }
-
- nbytes += n;
- }
- }
- }
-
-
- /* Completed iterating over the brigade, now determine if we want
- * to buffer the brigade or send the brigade out on the network.
- *
- * Save if we haven't accumulated enough bytes to send, the connection
- * is not about to be closed, and:
- *
- * 1) we didn't see a file, we don't have more passes over the
- * brigade to perform, AND we didn't stop at a FLUSH bucket.
- * (IOW, we will save plain old bytes such as HTTP headers)
- * or
- * 2) we hit the EOS and have a keep-alive connection
- * (IOW, this response is a bit more complex, but we save it
- * with the hope of concatenating with another response)
- */
- if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
- && !AP_BUCKET_IS_EOC(last_e)
- && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
- || (APR_BUCKET_IS_EOS(last_e)
- && c->keepalive == AP_CONN_KEEPALIVE))) {
-
- /* NEVER save an EOS in here. If we are saving a brigade with
- * an EOS bucket, then we are doing keepalive connections, and
-             * we want to process the second request fully.
- */
- if (APR_BUCKET_IS_EOS(last_e)) {
- apr_bucket *bucket;
- int file_bucket_saved = 0;
- apr_bucket_delete(last_e);
- for (bucket = APR_BRIGADE_FIRST(b);
- bucket != APR_BRIGADE_SENTINEL(b);
- bucket = APR_BUCKET_NEXT(bucket)) {
-
- /* Do a read on each bucket to pull in the
- * data from pipe and socket buckets, so
- * that we don't leave their file descriptors
- * open indefinitely. Do the same for file
- * buckets, with one exception: allow the
- * first file bucket in the brigade to remain
- * a file bucket, so that we don't end up
- * doing an mmap+memcpy every time a client
- * requests a <8KB file over a keepalive
- * connection.
- */
- if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
- file_bucket_saved = 1;
- }
- else {
- const char *buf;
- apr_size_t len = 0;
- rv = apr_bucket_read(bucket, &buf, &len,
- APR_BLOCK_READ);
- if (rv != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
- c, "core_output_filter:"
- " Error reading from bucket.");
- return HTTP_INTERNAL_SERVER_ERROR;
- }
- }
- }
- }
- if (!ctx->deferred_write_pool) {
- apr_pool_create(&ctx->deferred_write_pool, c->pool);
- apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
- }
- ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);
-
- return APR_SUCCESS;
- }
-
- if (fd) {
- apr_hdtr_t hdtr;
- apr_size_t bytes_sent;
-
-#if APR_HAS_SENDFILE
- apr_int32_t flags = 0;
-#endif
-
- memset(&hdtr, '\0', sizeof(hdtr));
- if (nvec) {
- hdtr.numheaders = nvec;
- hdtr.headers = vec;
- }
-
- if (nvec_trailers) {
- hdtr.numtrailers = nvec_trailers;
- hdtr.trailers = vec_trailers;
- }
-
-#if APR_HAS_SENDFILE
- if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {
-
- if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
- /* Prepare the socket to be reused */
- flags |= APR_SENDFILE_DISCONNECT_SOCKET;
- }
-
- rv = sendfile_it_all(net, /* the network information */
- fd, /* the file to send */
- &hdtr, /* header and trailer iovecs */
- foffset, /* offset in the file to begin
- sending from */
- flen, /* length of file */
- nbytes + flen, /* total length including
- headers */
- &bytes_sent, /* how many bytes were
- sent */
- flags); /* apr_sendfile flags */
- }
- else
-#endif
- {
- rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
- &bytes_sent);
- }
-
- if (logio_add_bytes_out && bytes_sent > 0)
- logio_add_bytes_out(c, bytes_sent);
-
- fd = NULL;
- }
- else {
- apr_size_t bytes_sent;
-
- rv = writev_it_all(net->client_socket,
- vec, nvec,
- nbytes, &bytes_sent);
-
- if (logio_add_bytes_out && bytes_sent > 0)
- logio_add_bytes_out(c, bytes_sent);
- }
-
- apr_brigade_destroy(b);
-
- /* drive cleanups for resources which were set aside
- * this may occur before or after termination of the request which
- * created the resource
- */
- if (ctx->deferred_write_pool) {
- if (more && more->p == ctx->deferred_write_pool) {
- /* "more" belongs to the deferred_write_pool,
- * which is about to be cleared.
- */
- if (APR_BRIGADE_EMPTY(more)) {
- more = NULL;
- }
- else {
- /* uh oh... change more's lifetime
- * to the input brigade's lifetime
- */
- apr_bucket_brigade *tmp_more = more;
- more = NULL;
- ap_save_brigade(f, &more, &tmp_more, input_pool);
- }
- }
- apr_pool_clear(ctx->deferred_write_pool);
- }
-
- if (rv != APR_SUCCESS) {
- ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
- "core_output_filter: writing data to the network");
-
- if (more)
- apr_brigade_destroy(more);
-
- /* No need to check for SUCCESS, we did that above. */
- if (!APR_STATUS_IS_EAGAIN(rv)) {
- c->aborted = 1;
- }
-
- /* The client has aborted, but the request was successful. We
- * will report success, and leave it to the access and error
- * logs to note that the connection was aborted.
- */
- return APR_SUCCESS;
- }
-
- b = more;
- more = NULL;
- } /* end while () */
-
- return APR_SUCCESS;
-}
+APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *logio_add_bytes_out;
static int core_post_config(apr_pool_t *pconf, apr_pool_t *plog, apr_pool_t *ptemp, server_rec *s)
{
diff --git a/server/core_filters.c b/server/core_filters.c
new file mode 100644
index 0000000000..dc7e293953
--- /dev/null
+++ b/server/core_filters.c
@@ -0,0 +1,929 @@
+/* Copyright 2001-2004 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * core_filters.c --- Core input/output network filters.
+ */
+
+#include "apr.h"
+#include "apr_strings.h"
+#include "apr_lib.h"
+#include "apr_fnmatch.h"
+#include "apr_hash.h"
+#include "apr_thread_proc.h" /* for RLIMIT stuff */
+#include "apr_hooks.h"
+
+#define APR_WANT_IOVEC
+#define APR_WANT_STRFUNC
+#define APR_WANT_MEMFUNC
+#include "apr_want.h"
+
+#define CORE_PRIVATE
+#include "ap_config.h"
+#include "httpd.h"
+#include "http_config.h"
+#include "http_core.h"
+#include "http_protocol.h" /* For index_of_response(). Grump. */
+#include "http_request.h"
+#include "http_vhost.h"
+#include "http_main.h" /* For the default_handler below... */
+#include "http_log.h"
+#include "util_md5.h"
+#include "http_connection.h"
+#include "apr_buckets.h"
+#include "util_filter.h"
+#include "util_ebcdic.h"
+#include "mpm.h"
+#include "mpm_common.h"
+#include "scoreboard.h"
+#include "mod_core.h"
+#include "mod_proxy.h"
+#include "ap_listen.h"
+
+#include "mod_so.h" /* for ap_find_loaded_module_symbol */
+
+#define AP_MIN_SENDFILE_BYTES (256)
+
+typedef struct net_time_filter_ctx {
+ apr_socket_t *csd;
+ int first_line;
+} net_time_filter_ctx_t;
+
+int net_time_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ net_time_filter_ctx_t *ctx = f->ctx;
+ int keptalive = f->c->keepalive == AP_CONN_KEEPALIVE;
+
+ if (!ctx) {
+ f->ctx = ctx = apr_palloc(f->r->pool, sizeof(*ctx));
+ ctx->first_line = 1;
+ ctx->csd = ap_get_module_config(f->c->conn_config, &core_module);
+ }
+
+ if (mode != AP_MODE_INIT && mode != AP_MODE_EATCRLF) {
+ if (ctx->first_line) {
+ apr_socket_timeout_set(ctx->csd,
+ keptalive
+ ? f->c->base_server->keep_alive_timeout
+ : f->c->base_server->timeout);
+ ctx->first_line = 0;
+ }
+ else {
+ if (keptalive) {
+ apr_socket_timeout_set(ctx->csd, f->c->base_server->timeout);
+ }
+ }
+ }
+ return ap_get_brigade(f->next, b, mode, block, readbytes);
+}
+
+/**
+ * Remove all zero length buckets from the brigade.
+ */
+#define BRIGADE_NORMALIZE(b) \
+do { \
+ apr_bucket *e = APR_BRIGADE_FIRST(b); \
+ do { \
+ if (e->length == 0 && !APR_BUCKET_IS_METADATA(e)) { \
+ apr_bucket *d; \
+ d = APR_BUCKET_NEXT(e); \
+ apr_bucket_delete(e); \
+ e = d; \
+ } \
+ e = APR_BUCKET_NEXT(e); \
+ } while (!APR_BRIGADE_EMPTY(b) && (e != APR_BRIGADE_SENTINEL(b))); \
+} while (0)
+
+int core_input_filter(ap_filter_t *f, apr_bucket_brigade *b,
+ ap_input_mode_t mode, apr_read_type_e block,
+ apr_off_t readbytes)
+{
+ apr_bucket *e;
+ apr_status_t rv;
+ core_net_rec *net = f->ctx;
+ core_ctx_t *ctx = net->in_ctx;
+ const char *str;
+ apr_size_t len;
+
+ if (mode == AP_MODE_INIT) {
+ /*
+ * this mode is for filters that might need to 'initialize'
+ * a connection before reading request data from a client.
+ * NNTP over SSL for example needs to handshake before the
+ * server sends the welcome message.
+ * such filters would have changed the mode before this point
+ * is reached. however, protocol modules such as NNTP should
+ * not need to know anything about SSL. given the example, if
+ * SSL is not in the filter chain, AP_MODE_INIT is a noop.
+ */
+ return APR_SUCCESS;
+ }
+
+ if (!ctx)
+ {
+ ctx = apr_pcalloc(f->c->pool, sizeof(*ctx));
+ ctx->b = apr_brigade_create(f->c->pool, f->c->bucket_alloc);
+
+ /* seed the brigade with the client socket. */
+ e = apr_bucket_socket_create(net->client_socket, f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(ctx->b, e);
+ net->in_ctx = ctx;
+ }
+ else if (APR_BRIGADE_EMPTY(ctx->b)) {
+ return APR_EOF;
+ }
+
+ /* ### This is bad. */
+ BRIGADE_NORMALIZE(ctx->b);
+
+ /* check for empty brigade again *AFTER* BRIGADE_NORMALIZE()
+ * If we have lost our socket bucket (see above), we are EOF.
+ *
+ * Ideally, this should be returning SUCCESS with EOS bucket, but
+ * some higher-up APIs (spec. read_request_line via ap_rgetline)
+ * want an error code. */
+ if (APR_BRIGADE_EMPTY(ctx->b)) {
+ return APR_EOF;
+ }
+
+ if (mode == AP_MODE_GETLINE) {
+ /* we are reading a single LF line, e.g. the HTTP headers */
+ rv = apr_brigade_split_line(b, ctx->b, block, HUGE_STRING_LEN);
+ /* We should treat EAGAIN here the same as we do for EOF (brigade is
+ * empty). We do this by returning whatever we have read. This may
+ * or may not be bogus, but is consistent (for now) with EOF logic.
+ */
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ rv = APR_SUCCESS;
+ }
+ return rv;
+ }
+
+ /* ### AP_MODE_PEEK is a horrific name for this mode because we also
+ * eat any CRLFs that we see. That's not the obvious intention of
+ * this mode. Determine whether anyone actually uses this or not. */
+ if (mode == AP_MODE_EATCRLF) {
+ apr_bucket *e;
+ const char *c;
+
+ /* The purpose of this loop is to ignore any CRLF (or LF) at the end
+ * of a request. Many browsers send extra lines at the end of POST
+ * requests. We use the PEEK method to determine if there is more
+ * data on the socket, so that we know if we should delay sending the
+ * end of one request until we have served the second request in a
+ * pipelined situation. We don't want to actually delay sending a
+         * response if the server finds a CRLF (or LF), because that doesn't
+ * mean that there is another request, just a blank line.
+ */
+ while (1) {
+ if (APR_BRIGADE_EMPTY(ctx->b))
+ return APR_EOF;
+
+ e = APR_BRIGADE_FIRST(ctx->b);
+
+ rv = apr_bucket_read(e, &str, &len, APR_NONBLOCK_READ);
+
+ if (rv != APR_SUCCESS)
+ return rv;
+
+ c = str;
+ while (c < str + len) {
+ if (*c == APR_ASCII_LF)
+ c++;
+ else if (*c == APR_ASCII_CR && *(c + 1) == APR_ASCII_LF)
+ c += 2;
+ else
+ return APR_SUCCESS;
+ }
+
+ /* If we reach here, we were a bucket just full of CRLFs, so
+ * just toss the bucket. */
+ /* FIXME: Is this the right thing to do in the core? */
+ apr_bucket_delete(e);
+ }
+ return APR_SUCCESS;
+ }
+
+ /* If mode is EXHAUSTIVE, we want to just read everything until the end
+ * of the brigade, which in this case means the end of the socket.
+     * To do this, we attach the brigade that has currently been set aside to
+ * the brigade that was passed down, and send that brigade back.
+ *
+ * NOTE: This is VERY dangerous to use, and should only be done with
+ * extreme caution. However, the Perchild MPM needs this feature
+ * if it is ever going to work correctly again. With this, the Perchild
+ * MPM can easily request the socket and all data that has been read,
+ * which means that it can pass it to the correct child process.
+ */
+ if (mode == AP_MODE_EXHAUSTIVE) {
+ apr_bucket *e;
+
+ /* Tack on any buckets that were set aside. */
+ APR_BRIGADE_CONCAT(b, ctx->b);
+
+ /* Since we've just added all potential buckets (which will most
+ * likely simply be the socket bucket) we know this is the end,
+ * so tack on an EOS too. */
+ /* We have read until the brigade was empty, so we know that we
+ * must be EOS. */
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ return APR_SUCCESS;
+ }
+
+ /* read up to the amount they specified. */
+ if (mode == AP_MODE_READBYTES || mode == AP_MODE_SPECULATIVE) {
+ apr_bucket *e;
+ apr_bucket_brigade *newbb;
+
+ AP_DEBUG_ASSERT(readbytes > 0);
+
+ e = APR_BRIGADE_FIRST(ctx->b);
+ rv = apr_bucket_read(e, &str, &len, block);
+
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ return APR_SUCCESS;
+ }
+ else if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ else if (block == APR_BLOCK_READ && len == 0) {
+ /* We wanted to read some bytes in blocking mode. We read
+ * 0 bytes. Hence, we now assume we are EOS.
+ *
+ * When we are in normal mode, return an EOS bucket to the
+ * caller.
+ * When we are in speculative mode, leave ctx->b empty, so
+ * that the next call returns an EOS bucket.
+ */
+ apr_bucket_delete(e);
+
+ if (mode == AP_MODE_READBYTES) {
+ e = apr_bucket_eos_create(f->c->bucket_alloc);
+ APR_BRIGADE_INSERT_TAIL(b, e);
+ }
+ return APR_SUCCESS;
+ }
+
+ /* We can only return at most what we read. */
+ if (len < readbytes) {
+ readbytes = len;
+ }
+
+ rv = apr_brigade_partition(ctx->b, readbytes, &e);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+
+ /* Must do split before CONCAT */
+ newbb = apr_brigade_split(ctx->b, e);
+
+ if (mode == AP_MODE_READBYTES) {
+ APR_BRIGADE_CONCAT(b, ctx->b);
+ }
+ else if (mode == AP_MODE_SPECULATIVE) {
+ apr_bucket *copy_bucket;
+
+ for (e = APR_BRIGADE_FIRST(ctx->b);
+ e != APR_BRIGADE_SENTINEL(ctx->b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ rv = apr_bucket_copy(e, &copy_bucket);
+ if (rv != APR_SUCCESS) {
+ return rv;
+ }
+ APR_BRIGADE_INSERT_TAIL(b, copy_bucket);
+ }
+ }
+
+ /* Take what was originally there and place it back on ctx->b */
+ APR_BRIGADE_CONCAT(ctx->b, newbb);
+ }
+ return APR_SUCCESS;
+}
+
+static apr_status_t writev_it_all(apr_socket_t *s,
+ struct iovec *vec, int nvec,
+ apr_size_t len, apr_size_t *nbytes)
+{
+ apr_size_t bytes_written = 0;
+ apr_status_t rv;
+ apr_size_t n = len;
+ int i = 0;
+
+ *nbytes = 0;
+
+ /* XXX handle checking for non-blocking socket */
+ while (bytes_written != len) {
+ rv = apr_socket_sendv(s, vec + i, nvec - i, &n);
+ *nbytes += n;
+ bytes_written += n;
+ if (rv != APR_SUCCESS)
+ return rv;
+
+ /* If the write did not complete, adjust the iovecs and issue
+ * apr_socket_sendv again
+ */
+ if (bytes_written < len) {
+ /* Skip over the vectors that have already been written */
+ apr_size_t cnt = vec[i].iov_len;
+ while (n >= cnt && i + 1 < nvec) {
+ i++;
+ cnt += vec[i].iov_len;
+ }
+
+ if (n < cnt) {
+ /* Handle partial write of vec i */
+ vec[i].iov_base = (char *) vec[i].iov_base +
+ (vec[i].iov_len - (cnt - n));
+                vec[i].iov_len = cnt - n;
+ }
+ }
+
+ n = len - bytes_written;
+ }
+
+ return APR_SUCCESS;
+}
+
+/* sendfile_it_all()
+ * send the entire file using sendfile()
+ * handle partial writes
+ * return only when all bytes have been sent or an error is encountered.
+ */
+
+#if APR_HAS_SENDFILE
+static apr_status_t sendfile_it_all(core_net_rec *c,
+ apr_file_t *fd,
+ apr_hdtr_t *hdtr,
+ apr_off_t file_offset,
+ apr_size_t file_bytes_left,
+ apr_size_t total_bytes_left,
+ apr_size_t *bytes_sent,
+ apr_int32_t flags)
+{
+ apr_status_t rv;
+#ifdef AP_DEBUG
+ apr_interval_time_t timeout = 0;
+#endif
+
+ AP_DEBUG_ASSERT((apr_socket_timeout_get(c->client_socket, &timeout)
+ == APR_SUCCESS)
+ && timeout > 0); /* socket must be in timeout mode */
+
+ /* Reset the bytes_sent field */
+ *bytes_sent = 0;
+
+ do {
+ apr_size_t tmplen = file_bytes_left;
+
+ rv = apr_socket_sendfile(c->client_socket, fd, hdtr, &file_offset, &tmplen,
+ flags);
+ *bytes_sent += tmplen;
+ total_bytes_left -= tmplen;
+ if (!total_bytes_left || rv != APR_SUCCESS) {
+ return rv; /* normal case & error exit */
+ }
+
+ AP_DEBUG_ASSERT(total_bytes_left > 0 && tmplen > 0);
+
+ /* partial write, oooh noooo...
+ * Skip over any header data which was written
+ */
+ while (tmplen && hdtr->numheaders) {
+ if (tmplen >= hdtr->headers[0].iov_len) {
+ tmplen -= hdtr->headers[0].iov_len;
+ --hdtr->numheaders;
+ ++hdtr->headers;
+ }
+ else {
+ char *iov_base = (char *)hdtr->headers[0].iov_base;
+
+ hdtr->headers[0].iov_len -= tmplen;
+ iov_base += tmplen;
+ hdtr->headers[0].iov_base = iov_base;
+ tmplen = 0;
+ }
+ }
+
+ /* Skip over any file data which was written */
+
+ if (tmplen <= file_bytes_left) {
+ file_offset += tmplen;
+ file_bytes_left -= tmplen;
+ continue;
+ }
+
+ tmplen -= file_bytes_left;
+ file_bytes_left = 0;
+ file_offset = 0;
+
+ /* Skip over any trailer data which was written */
+
+ while (tmplen && hdtr->numtrailers) {
+ if (tmplen >= hdtr->trailers[0].iov_len) {
+ tmplen -= hdtr->trailers[0].iov_len;
+ --hdtr->numtrailers;
+ ++hdtr->trailers;
+ }
+ else {
+ char *iov_base = (char *)hdtr->trailers[0].iov_base;
+
+ hdtr->trailers[0].iov_len -= tmplen;
+ iov_base += tmplen;
+ hdtr->trailers[0].iov_base = iov_base;
+ tmplen = 0;
+ }
+ }
+ } while (1);
+}
+#endif
+
+/*
+ * emulate_sendfile()
+ * Sends the contents of file fd along with header/trailer bytes, if any,
+ * to the network. emulate_sendfile will return only when all the bytes have been
+ * sent (i.e., it handles partial writes) or on a network error condition.
+ */
+static apr_status_t emulate_sendfile(core_net_rec *c, apr_file_t *fd,
+ apr_hdtr_t *hdtr, apr_off_t offset,
+ apr_size_t length, apr_size_t *nbytes)
+{
+ apr_status_t rv = APR_SUCCESS;
+ apr_size_t togo; /* Remaining number of bytes in the file to send */
+ apr_size_t sendlen = 0;
+ apr_size_t bytes_sent;
+ apr_int32_t i;
+ apr_off_t o; /* Track the file offset for partial writes */
+ char buffer[8192];
+
+ *nbytes = 0;
+
+ /* Send the headers
+ * writev_it_all handles partial writes.
+ * XXX: optimization... if headers are less than MIN_WRITE_SIZE, copy
+ * them into buffer
+ */
+ if (hdtr && hdtr->numheaders > 0 ) {
+ for (i = 0; i < hdtr->numheaders; i++) {
+ sendlen += hdtr->headers[i].iov_len;
+ }
+
+ rv = writev_it_all(c->client_socket, hdtr->headers, hdtr->numheaders,
+ sendlen, &bytes_sent);
+ *nbytes += bytes_sent; /* track total bytes sent */
+ }
+
+ /* Seek the file to 'offset' */
+ if (offset >= 0 && rv == APR_SUCCESS) {
+ rv = apr_file_seek(fd, APR_SET, &offset);
+ }
+
+ /* Send the file, making sure to handle partial writes */
+ togo = length;
+ while (rv == APR_SUCCESS && togo) {
+ sendlen = togo > sizeof(buffer) ? sizeof(buffer) : togo;
+ o = 0;
+ rv = apr_file_read(fd, buffer, &sendlen);
+ while (rv == APR_SUCCESS && sendlen) {
+ bytes_sent = sendlen;
+ rv = apr_socket_send(c->client_socket, &buffer[o], &bytes_sent);
+ *nbytes += bytes_sent;
+ if (rv == APR_SUCCESS) {
+ sendlen -= bytes_sent; /* sendlen != bytes_sent ==> partial write */
+ o += bytes_sent; /* o is where we are in the buffer */
+ togo -= bytes_sent; /* track how much of the file we've sent */
+ }
+ }
+ }
+
+ /* Send the trailers
+ * XXX: optimization... if it will fit, send this on the last send in the
+ * loop above
+ */
+ sendlen = 0;
+ if ( rv == APR_SUCCESS && hdtr && hdtr->numtrailers > 0 ) {
+ for (i = 0; i < hdtr->numtrailers; i++) {
+ sendlen += hdtr->trailers[i].iov_len;
+ }
+ rv = writev_it_all(c->client_socket, hdtr->trailers, hdtr->numtrailers,
+ sendlen, &bytes_sent);
+ *nbytes += bytes_sent;
+ }
+
+ return rv;
+}
+
+#define MAX_IOVEC_TO_WRITE 16
+
+/* Optional function coming from mod_logio, used for logging of output
+ * traffic
+ */
+extern APR_OPTIONAL_FN_TYPE(ap_logio_add_bytes_out) *logio_add_bytes_out;
+
+apr_status_t core_output_filter(ap_filter_t *f, apr_bucket_brigade *b)
+{
+ apr_status_t rv;
+ apr_bucket_brigade *more;
+ conn_rec *c = f->c;
+ core_net_rec *net = f->ctx;
+ core_output_filter_ctx_t *ctx = net->out_ctx;
+ apr_read_type_e eblock = APR_NONBLOCK_READ;
+ apr_pool_t *input_pool = b->p;
+
+ if (ctx == NULL) {
+ ctx = apr_pcalloc(c->pool, sizeof(*ctx));
+ net->out_ctx = ctx;
+ }
+
+ /* If we have a saved brigade, concatenate the new brigade to it */
+ if (ctx->b) {
+ APR_BRIGADE_CONCAT(ctx->b, b);
+ b = ctx->b;
+ ctx->b = NULL;
+ }
+
+ /* Perform multiple passes over the brigade, sending batches of output
+ to the connection. */
+ while (b && !APR_BRIGADE_EMPTY(b)) {
+ apr_size_t nbytes = 0;
+ apr_bucket *last_e = NULL; /* initialized for debugging */
+ apr_bucket *e;
+
+ /* one group of iovecs per pass over the brigade */
+ apr_size_t nvec = 0;
+ apr_size_t nvec_trailers = 0;
+ struct iovec vec[MAX_IOVEC_TO_WRITE];
+ struct iovec vec_trailers[MAX_IOVEC_TO_WRITE];
+
+ /* one file per pass over the brigade */
+ apr_file_t *fd = NULL;
+ apr_size_t flen = 0;
+ apr_off_t foffset = 0;
+
+ /* keep track of buckets that we've concatenated
+ * to avoid small writes
+ */
+ apr_bucket *last_merged_bucket = NULL;
+
+ /* tail of brigade if we need another pass */
+ more = NULL;
+
+ /* Iterate over the brigade: collect iovecs and/or a file */
+ for (e = APR_BRIGADE_FIRST(b);
+ e != APR_BRIGADE_SENTINEL(b);
+ e = APR_BUCKET_NEXT(e))
+ {
+ /* keep track of the last bucket processed */
+ last_e = e;
+ if (APR_BUCKET_IS_EOS(e) || AP_BUCKET_IS_EOC(e)) {
+ break;
+ }
+ else if (APR_BUCKET_IS_FLUSH(e)) {
+ if (e != APR_BRIGADE_LAST(b)) {
+ more = apr_brigade_split(b, APR_BUCKET_NEXT(e));
+ }
+ break;
+ }
+
+ /* It doesn't make any sense to use sendfile for a file bucket
+ * that represents 10 bytes.
+ */
+ else if (APR_BUCKET_IS_FILE(e)
+ && (e->length >= AP_MIN_SENDFILE_BYTES)) {
+ apr_bucket_file *a = e->data;
+
+ /* We can't handle more than one file bucket at a time
+ * so we split here and send the file we have already
+ * found.
+ */
+ if (fd) {
+ more = apr_brigade_split(b, e);
+ break;
+ }
+
+ fd = a->fd;
+ flen = e->length;
+ foffset = e->start;
+ }
+ else {
+ const char *str;
+ apr_size_t n;
+
+ rv = apr_bucket_read(e, &str, &n, eblock);
+ if (APR_STATUS_IS_EAGAIN(rv)) {
+ /* send what we have so far since we shouldn't expect more
+ * output for a while... next time we read, block
+ */
+ more = apr_brigade_split(b, e);
+ eblock = APR_BLOCK_READ;
+ break;
+ }
+ eblock = APR_NONBLOCK_READ;
+ if (n) {
+ if (!fd) {
+ if (nvec == MAX_IOVEC_TO_WRITE) {
+ /* woah! too many. buffer them up, for use later. */
+ apr_bucket *temp, *next;
+ apr_bucket_brigade *temp_brig;
+
+ if (nbytes >= AP_MIN_BYTES_TO_WRITE) {
+ /* We have enough data in the iovec
+ * to justify doing a writev
+ */
+ more = apr_brigade_split(b, e);
+ break;
+ }
+
+ /* Create a temporary brigade as a means
+ * of concatenating a bunch of buckets together
+ */
+ if (last_merged_bucket) {
+ /* If we've concatenated together small
+ * buckets already in a previous pass,
+ * the initial buckets in this brigade
+ * are heap buckets that may have extra
+ * space left in them (because they
+ * were created by apr_brigade_write()).
+ * We can take advantage of this by
+ * building the new temp brigade out of
+ * these buckets, so that the content
+ * in them doesn't have to be copied again.
+ */
+ apr_bucket_brigade *bb;
+ bb = apr_brigade_split(b,
+ APR_BUCKET_NEXT(last_merged_bucket));
+ temp_brig = b;
+ b = bb;
+ }
+ else {
+ temp_brig = apr_brigade_create(f->c->pool,
+ f->c->bucket_alloc);
+ }
+
+ temp = APR_BRIGADE_FIRST(b);
+ while (temp != e) {
+ apr_bucket *d;
+ rv = apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
+ apr_brigade_write(temp_brig, NULL, NULL, str, n);
+ d = temp;
+ temp = APR_BUCKET_NEXT(temp);
+ apr_bucket_delete(d);
+ }
+
+ nvec = 0;
+ nbytes = 0;
+ temp = APR_BRIGADE_FIRST(temp_brig);
+ APR_BUCKET_REMOVE(temp);
+ APR_BRIGADE_INSERT_HEAD(b, temp);
+ apr_bucket_read(temp, &str, &n, APR_BLOCK_READ);
+ vec[nvec].iov_base = (char*) str;
+ vec[nvec].iov_len = n;
+ nvec++;
+
+ /* Just in case the temporary brigade has
+ * multiple buckets, recover the rest of
+ * them and put them in the brigade that
+ * we're sending.
+ */
+ for (next = APR_BRIGADE_FIRST(temp_brig);
+ next != APR_BRIGADE_SENTINEL(temp_brig);
+ next = APR_BRIGADE_FIRST(temp_brig)) {
+ APR_BUCKET_REMOVE(next);
+ APR_BUCKET_INSERT_AFTER(temp, next);
+ temp = next;
+ apr_bucket_read(next, &str, &n,
+ APR_BLOCK_READ);
+ vec[nvec].iov_base = (char*) str;
+ vec[nvec].iov_len = n;
+ nvec++;
+ }
+
+ apr_brigade_destroy(temp_brig);
+
+ last_merged_bucket = temp;
+ e = temp;
+ last_e = e;
+ }
+ else {
+ vec[nvec].iov_base = (char*) str;
+ vec[nvec].iov_len = n;
+ nvec++;
+ }
+ }
+ else {
+ /* The bucket is a trailer to a file bucket */
+
+ if (nvec_trailers == MAX_IOVEC_TO_WRITE) {
+ /* woah! too many. stop now. */
+ more = apr_brigade_split(b, e);
+ break;
+ }
+
+ vec_trailers[nvec_trailers].iov_base = (char*) str;
+ vec_trailers[nvec_trailers].iov_len = n;
+ nvec_trailers++;
+ }
+
+ nbytes += n;
+ }
+ }
+ }
+
+
+ /* Completed iterating over the brigade, now determine if we want
+ * to buffer the brigade or send the brigade out on the network.
+ *
+ * Save if we haven't accumulated enough bytes to send, the connection
+ * is not about to be closed, and:
+ *
+ * 1) we didn't see a file, we don't have more passes over the
+ * brigade to perform, AND we didn't stop at a FLUSH bucket.
+ * (IOW, we will save plain old bytes such as HTTP headers)
+ * or
+ * 2) we hit the EOS and have a keep-alive connection
+ * (IOW, this response is a bit more complex, but we save it
+ * with the hope of concatenating with another response)
+ */
+ if (nbytes + flen < AP_MIN_BYTES_TO_WRITE
+ && !AP_BUCKET_IS_EOC(last_e)
+ && ((!fd && !more && !APR_BUCKET_IS_FLUSH(last_e))
+ || (APR_BUCKET_IS_EOS(last_e)
+ && c->keepalive == AP_CONN_KEEPALIVE))) {
+
+ /* NEVER save an EOS in here. If we are saving a brigade with
+ * an EOS bucket, then we are doing keepalive connections, and
+             * we want to process the second request fully.
+ */
+ if (APR_BUCKET_IS_EOS(last_e)) {
+ apr_bucket *bucket;
+ int file_bucket_saved = 0;
+ apr_bucket_delete(last_e);
+ for (bucket = APR_BRIGADE_FIRST(b);
+ bucket != APR_BRIGADE_SENTINEL(b);
+ bucket = APR_BUCKET_NEXT(bucket)) {
+
+ /* Do a read on each bucket to pull in the
+ * data from pipe and socket buckets, so
+ * that we don't leave their file descriptors
+ * open indefinitely. Do the same for file
+ * buckets, with one exception: allow the
+ * first file bucket in the brigade to remain
+ * a file bucket, so that we don't end up
+ * doing an mmap+memcpy every time a client
+ * requests a <8KB file over a keepalive
+ * connection.
+ */
+ if (APR_BUCKET_IS_FILE(bucket) && !file_bucket_saved) {
+ file_bucket_saved = 1;
+ }
+ else {
+ const char *buf;
+ apr_size_t len = 0;
+ rv = apr_bucket_read(bucket, &buf, &len,
+ APR_BLOCK_READ);
+ if (rv != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_ERR, rv,
+ c, "core_output_filter:"
+ " Error reading from bucket.");
+ return HTTP_INTERNAL_SERVER_ERROR;
+ }
+ }
+ }
+ }
+ if (!ctx->deferred_write_pool) {
+ apr_pool_create(&ctx->deferred_write_pool, c->pool);
+ apr_pool_tag(ctx->deferred_write_pool, "deferred_write");
+ }
+ ap_save_brigade(f, &ctx->b, &b, ctx->deferred_write_pool);
+
+ return APR_SUCCESS;
+ }
+
+ if (fd) {
+ apr_hdtr_t hdtr;
+ apr_size_t bytes_sent;
+
+#if APR_HAS_SENDFILE
+ apr_int32_t flags = 0;
+#endif
+
+ memset(&hdtr, '\0', sizeof(hdtr));
+ if (nvec) {
+ hdtr.numheaders = nvec;
+ hdtr.headers = vec;
+ }
+
+ if (nvec_trailers) {
+ hdtr.numtrailers = nvec_trailers;
+ hdtr.trailers = vec_trailers;
+ }
+
+#if APR_HAS_SENDFILE
+ if (apr_file_flags_get(fd) & APR_SENDFILE_ENABLED) {
+
+ if (c->keepalive == AP_CONN_CLOSE && APR_BUCKET_IS_EOS(last_e)) {
+ /* Prepare the socket to be reused */
+ flags |= APR_SENDFILE_DISCONNECT_SOCKET;
+ }
+
+ rv = sendfile_it_all(net, /* the network information */
+ fd, /* the file to send */
+ &hdtr, /* header and trailer iovecs */
+ foffset, /* offset in the file to begin
+ sending from */
+ flen, /* length of file */
+ nbytes + flen, /* total length including
+ headers */
+ &bytes_sent, /* how many bytes were
+ sent */
+ flags); /* apr_sendfile flags */
+ }
+ else
+#endif
+ {
+ rv = emulate_sendfile(net, fd, &hdtr, foffset, flen,
+ &bytes_sent);
+ }
+
+ if (logio_add_bytes_out && bytes_sent > 0)
+ logio_add_bytes_out(c, bytes_sent);
+
+ fd = NULL;
+ }
+ else {
+ apr_size_t bytes_sent;
+
+ rv = writev_it_all(net->client_socket,
+ vec, nvec,
+ nbytes, &bytes_sent);
+
+ if (logio_add_bytes_out && bytes_sent > 0)
+ logio_add_bytes_out(c, bytes_sent);
+ }
+
+ apr_brigade_destroy(b);
+
+ /* drive cleanups for resources which were set aside
+ * this may occur before or after termination of the request which
+ * created the resource
+ */
+ if (ctx->deferred_write_pool) {
+ if (more && more->p == ctx->deferred_write_pool) {
+ /* "more" belongs to the deferred_write_pool,
+ * which is about to be cleared.
+ */
+ if (APR_BRIGADE_EMPTY(more)) {
+ more = NULL;
+ }
+ else {
+ /* uh oh... change more's lifetime
+ * to the input brigade's lifetime
+ */
+ apr_bucket_brigade *tmp_more = more;
+ more = NULL;
+ ap_save_brigade(f, &more, &tmp_more, input_pool);
+ }
+ }
+ apr_pool_clear(ctx->deferred_write_pool);
+ }
+
+ if (rv != APR_SUCCESS) {
+ ap_log_cerror(APLOG_MARK, APLOG_INFO, rv, c,
+ "core_output_filter: writing data to the network");
+
+ if (more)
+ apr_brigade_destroy(more);
+
+ /* No need to check for SUCCESS, we did that above. */
+ if (!APR_STATUS_IS_EAGAIN(rv)) {
+ c->aborted = 1;
+ }
+
+ /* The client has aborted, but the request was successful. We
+ * will report success, and leave it to the access and error
+ * logs to note that the connection was aborted.
+ */
+ return APR_SUCCESS;
+ }
+
+ b = more;
+ more = NULL;
+ } /* end while () */
+
+ return APR_SUCCESS;
+}
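
Both writev_it_all() and emulate_sendfile() above exist to hide partial writes
from their callers. A minimal standalone sketch of the same bookkeeping against
the plain POSIX writev() interface rather than APR (the writev_all() name is an
illustrative assumption, and non-blocking sockets are not handled, matching the
XXX note above):

#include <sys/uio.h>
#include <unistd.h>
#include <errno.h>

/* Keep calling writev() until every iovec has been fully written, skipping
 * vectors that completed and trimming the one that was only partially
 * written -- the same adjustment writev_it_all() performs above.
 */
static int writev_all(int fd, struct iovec *vec, int nvec)
{
    int i = 0;

    while (i < nvec) {
        ssize_t n = writev(fd, vec + i, nvec - i);
        if (n < 0) {
            if (errno == EINTR)
                continue;           /* retry after signal interruption */
            return -1;              /* caller inspects errno */
        }
        /* Consume whole vectors first, then trim a partially written one. */
        while (i < nvec && (size_t)n >= vec[i].iov_len) {
            n -= vec[i].iov_len;
            i++;
        }
        if (i < nvec && n > 0) {
            vec[i].iov_base = (char *)vec[i].iov_base + n;
            vec[i].iov_len -= n;
        }
    }
    return 0;
}
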