summaryrefslogtreecommitdiffstats
path: root/modules/experimental
diff options
context:
space:
mode:
authorPaul J. Reder <rederpj@apache.org>2002-10-02 20:26:52 +0200
committerPaul J. Reder <rederpj@apache.org>2002-10-02 20:26:52 +0200
commit70b40483dcab41f90f1476e1e2fb92b42952eaaf (patch)
tree59b85f159b860303a4311ef4b1e8c07c25cf3c61 /modules/experimental
parentSander reminded me that this was out of date. (diff)
downloadapache2-70b40483dcab41f90f1476e1e2fb92b42952eaaf.tar.xz
apache2-70b40483dcab41f90f1476e1e2fb92b42952eaaf.zip
Fix a core dump in mod_cache when it attempted to store uncopyable
buckets. This happened, for instance, when a file to be cached contained SSI tags to execute a CGI script (passed as a pipe bucket). [Paul J. Reder] git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@97058 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'modules/experimental')
-rw-r--r--modules/experimental/mod_cache.c33
-rw-r--r--modules/experimental/mod_mem_cache.c4
2 files changed, 33 insertions, 4 deletions
diff --git a/modules/experimental/mod_cache.c b/modules/experimental/mod_cache.c
index 78d34c04c3..4402fda646 100644
--- a/modules/experimental/mod_cache.c
+++ b/modules/experimental/mod_cache.c
@@ -679,7 +679,36 @@ static int cache_in_filter(ap_filter_t *f, apr_bucket_brigade *in)
}
APR_BRIGADE_FOREACH(e, in) {
apr_bucket *copy;
- apr_bucket_copy(e, &copy);
+ rv = apr_bucket_copy(e, &copy);
+ if (rv == APR_ENOTIMPL) {
+ const char *str;
+ apr_size_t len;
+
+ /* This takes care of uncopyable buckets. */
+ rv = apr_bucket_read(e, &str, &len, APR_BLOCK_READ);
+ if ((rv == APR_SUCCESS) &&
+ (cache->saved_size + len <=
+ conf->max_streaming_buffer_size)) {
+ rv = apr_bucket_copy(e, &copy);
+ }
+
+ if ((rv != APR_SUCCESS) ||
+ (cache->saved_size + len >
+ conf->max_streaming_buffer_size)){
+ ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, r->server,
+ "cache: not caching streamed response for "
+ "%s because length %s", url,
+ "> CacheMaxStreamingBuffer");
+
+ if (cache->saved_brigade != NULL) {
+ apr_brigade_destroy(cache->saved_brigade);
+ cache->saved_brigade = NULL;
+ cache->saved_size = 0;
+ }
+ ap_remove_output_filter(f);
+ return ap_pass_brigade(f->next, in);
+ }
+ }
APR_BRIGADE_INSERT_TAIL(cache->saved_brigade, copy);
}
cache->saved_size += size;
@@ -1055,7 +1084,7 @@ static const char *set_max_streaming_buffer(cmd_parms *parms, void *dummy,
&cache_module);
val = (apr_off_t)strtol(arg, &err, 10);
if (*err != 0) {
- return "CacheMaxStreamingBuffer value must be a percentage";
+ return "CacheMaxStreamingBuffer value must be a number";
}
conf->max_streaming_buffer_size = val;
return NULL;
diff --git a/modules/experimental/mod_mem_cache.c b/modules/experimental/mod_mem_cache.c
index 7b91505005..60d1768c2c 100644
--- a/modules/experimental/mod_mem_cache.c
+++ b/modules/experimental/mod_mem_cache.c
@@ -991,10 +991,10 @@ static apr_status_t write_body(cache_handle_t *h, request_rec *r, apr_bucket_bri
obj->count+=len;
}
}
- /* This should not happen, but if it does, we are in BIG trouble
+ /* This should not fail, but if it does, we are in BIG trouble
* cause we just stomped all over the heap.
*/
- AP_DEBUG_ASSERT(obj->count >= mobj->m_len);
+ AP_DEBUG_ASSERT(obj->count <= mobj->m_len);
}
return APR_SUCCESS;
}