From: Joe Orton
Date: Wed, 14 Jun 2006 13:16:29 +0000 (+0000)
Subject: * server/core.c (default_handler): Use apr_brigade_insert_file() to
X-Git-Tag: 2.3.0~2334
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c13f8dd6eb159ca32e6d65b074edd8bfe1ca5c1f;p=thirdparty%2Fapache%2Fhttpd.git

* server/core.c (default_handler): Use apr_brigade_insert_file() to
  append the file to the brigade.

* server/protocol.c (ap_send_fd), modules/proxy/mod_proxy_http.c
  (spool_reqbody_cl), modules/cache/mod_mem_cache.c (recall_body),
  modules/cache/mod_disk_cache.c (recall_body),
  modules/mappers/mod_negotiation.c (handle_map_file),
  modules/generators/mod_asis.c (asis_handler),
  modules/dav/fs/repos.c [DEBUG_GET_HANDLER] (dav_fs_deliver),
  modules/arch/win32/mod_isapi.c (ServerSupportFunction): Likewise.

git-svn-id: https://svn.apache.org/repos/asf/httpd/httpd/trunk@414238 13f79535-47bb-0310-9956-ffa450edef68
---

diff --git a/modules/arch/win32/mod_isapi.c b/modules/arch/win32/mod_isapi.c
index b0826b27b25..c7352d2a2f0 100644
--- a/modules/arch/win32/mod_isapi.c
+++ b/modules/arch/win32/mod_isapi.c
@@ -1050,28 +1050,7 @@ int APR_THREAD_FUNC ServerSupportFunction(isapi_cid *cid,
         }
         sent += (apr_uint32_t)fsize;
 
-#if APR_HAS_LARGE_FILES
-        if (r->finfo.size > AP_MAX_SENDFILE) {
-            /* APR_HAS_LARGE_FILES issue; must split into mutiple buckets,
-             * no greater than MAX(apr_size_t), and more granular than that
-             * in case the brigade code/filters attempt to read it directly.
-             */
-            b = apr_bucket_file_create(fd, tf->Offset, AP_MAX_SENDFILE,
-                                       r->pool, c->bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *bc;
-                apr_bucket_copy(b, &bc);
-                APR_BRIGADE_INSERT_TAIL(bb, bc);
-                b->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            b->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else
-#endif
-            b = apr_bucket_file_create(fd, tf->Offset, (apr_size_t)fsize,
-                                       r->pool, c->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(bb, b);
+        apr_brigade_insert_file(bb, fd, tf->Offset, fsize, r->pool);
 
         if (tf->pTail && tf->TailLength) {
             sent += tf->TailLength;
diff --git a/modules/cache/mod_disk_cache.c b/modules/cache/mod_disk_cache.c
index 1f14645e1a1..30b8e42bd78 100644
--- a/modules/cache/mod_disk_cache.c
+++ b/modules/cache/mod_disk_cache.c
@@ -776,9 +776,8 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_bri
     apr_bucket *e;
     disk_cache_object_t *dobj = (disk_cache_object_t*) h->cache_obj->vobj;
 
-    e = apr_bucket_file_create(dobj->fd, 0, (apr_size_t) dobj->file_size, p,
-                               bb->bucket_alloc);
-    APR_BRIGADE_INSERT_HEAD(bb, e);
+    apr_brigade_insert_file(bb, dobj->fd, 0, dobj->file_size, p);
+
     e = apr_bucket_eos_create(bb->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, e);
 
diff --git a/modules/cache/mod_file_cache.c b/modules/cache/mod_file_cache.c
index 881f8c0e4d6..025defe3d76 100644
--- a/modules/cache/mod_file_cache.c
+++ b/modules/cache/mod_file_cache.c
@@ -297,9 +297,8 @@ static int sendfile_handler(request_rec *r, a_file *file)
     apr_bucket *b;
     apr_bucket_brigade *bb = apr_brigade_create(r->pool, c->bucket_alloc);
 
-    b = apr_bucket_file_create(file->file, 0, (apr_size_t)file->finfo.size,
-                               r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);
+    apr_brigade_insert_file(bb, file->file, 0, file->finfo.size, r->pool);
+
     b = apr_bucket_eos_create(c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);
 
diff --git a/modules/cache/mod_mem_cache.c b/modules/cache/mod_mem_cache.c
index 645c782b32f..e7e947eb6ee 100644
--- a/modules/cache/mod_mem_cache.c
+++ b/modules/cache/mod_mem_cache.c
@@ -652,13 +652,14 @@ static apr_status_t recall_body(cache_handle_t *h, apr_pool_t *p, apr_bucket_bri
         /* CACHE_TYPE_FILE */
         apr_file_t *file;
         apr_os_file_put(&file, &mobj->fd, mobj->flags, p);
-        b = apr_bucket_file_create(file, 0, mobj->m_len, p, bb->bucket_alloc);
+
+        apr_brigade_insert_file(bb, file, 0, mobj->m_len, p);
     }
     else {
         /* CACHE_TYPE_HEAP */
         b = apr_bucket_immortal_create(mobj->m, mobj->m_len, bb->bucket_alloc);
+        APR_BRIGADE_INSERT_TAIL(bb, b);
     }
-    APR_BRIGADE_INSERT_TAIL(bb, b);
 
     b = apr_bucket_eos_create(bb->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);
diff --git a/modules/dav/fs/repos.c b/modules/dav/fs/repos.c
index 6f1fd4107c9..4dcb4d8e31b 100644
--- a/modules/dav/fs/repos.c
+++ b/modules/dav/fs/repos.c
@@ -980,11 +980,7 @@ static dav_error * dav_fs_deliver(const dav_resource *resource,
 
         bb = apr_brigade_create(pool, output->c->bucket_alloc);
 
-        /* ### this does not handle large files. but this is test code anyway */
-        bkt = apr_bucket_file_create(fd, 0,
-                                     (apr_size_t)resource->info->finfo.size,
-                                     pool, output->c->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(bb, bkt);
+        apr_brigade_insert_file(bb, fd, 0, resource->info->finfo.size, pool);
 
         bkt = apr_bucket_eos_create(output->c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(bb, bkt);
diff --git a/modules/generators/mod_asis.c b/modules/generators/mod_asis.c
index 25507739af4..0453bf57b93 100644
--- a/modules/generators/mod_asis.c
+++ b/modules/generators/mod_asis.c
@@ -90,28 +90,8 @@ static int asis_handler(request_rec *r)
     }
 
     bb = apr_brigade_create(r->pool, c->bucket_alloc);
-#if APR_HAS_LARGE_FILES
-    if (r->finfo.size - pos > AP_MAX_SENDFILE) {
-        /* APR_HAS_LARGE_FILES issue; must split into mutiple buckets,
-         * no greater than MAX(apr_size_t), and more granular than that
-         * in case the brigade code/filters attempt to read it directly.
-         */
-        apr_off_t fsize = r->finfo.size - pos;
-        b = apr_bucket_file_create(f, pos, AP_MAX_SENDFILE,
-                                   r->pool, c->bucket_alloc);
-        while (fsize > AP_MAX_SENDFILE) {
-            APR_BRIGADE_INSERT_TAIL(bb, b);
-            apr_bucket_copy(b, &b);
-            b->start += AP_MAX_SENDFILE;
-            fsize -= AP_MAX_SENDFILE;
-        }
-        b->length = (apr_size_t)fsize; /* Resize just the last bucket */
-    }
-    else
-#endif
-    b = apr_bucket_file_create(f, pos, (apr_size_t) (r->finfo.size - pos),
-                               r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);
+    apr_brigade_insert_file(bb, f, pos, r->finfo.size - pos, r->pool);
+
     b = apr_bucket_eos_create(c->bucket_alloc);
     APR_BRIGADE_INSERT_TAIL(bb, b);
     rv = ap_pass_brigade(r->output_filters, bb);
diff --git a/modules/mappers/mod_negotiation.c b/modules/mappers/mod_negotiation.c
index 6754bb7ff41..958515fc72d 100644
--- a/modules/mappers/mod_negotiation.c
+++ b/modules/mappers/mod_negotiation.c
@@ -3043,10 +3043,9 @@ static int handle_map_file(request_rec *r)
             return res;
         }
         bb = apr_brigade_create(r->pool, c->bucket_alloc);
-        e = apr_bucket_file_create(map, best->body,
-                                   (apr_size_t)best->bytes, r->pool,
-                                   c->bucket_alloc);
-        APR_BRIGADE_INSERT_TAIL(bb, e);
+
+        apr_brigade_insert_file(bb, map, best->body, best->bytes, r->pool);
+
         e = apr_bucket_eos_create(c->bucket_alloc);
         APR_BRIGADE_INSERT_TAIL(bb, e);
 
diff --git a/modules/proxy/mod_proxy_http.c b/modules/proxy/mod_proxy_http.c
index cddd535b4dc..517821763f6 100644
--- a/modules/proxy/mod_proxy_http.c
+++ b/modules/proxy/mod_proxy_http.c
@@ -542,28 +542,7 @@ static apr_status_t spool_reqbody_cl(apr_pool_t *p,
     terminate_headers(bucket_alloc, header_brigade);
     APR_BRIGADE_CONCAT(header_brigade, body_brigade);
     if (tmpfile) {
-        /* For platforms where the size of the file may be larger than
-         * that which can be stored in a single bucket (where the
-         * length field is an apr_size_t), split it into several
-         * buckets: */
-        if (sizeof(apr_off_t) > sizeof(apr_size_t)
-            && fsize > AP_MAX_SENDFILE) {
-            e = apr_bucket_file_create(tmpfile, 0, AP_MAX_SENDFILE, p,
-                                       bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *ce;
-                apr_bucket_copy(e, &ce);
-                APR_BRIGADE_INSERT_TAIL(header_brigade, ce);
-                e->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            e->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else {
-            e = apr_bucket_file_create(tmpfile, 0, (apr_size_t)fsize, p,
-                                       bucket_alloc);
-        }
-        APR_BRIGADE_INSERT_TAIL(header_brigade, e);
+        apr_brigade_insert_file(header_brigade, tmpfile, 0, fsize, p);
     }
     /* This is all a single brigade, pass with flush flagged */
     status = pass_brigade(bucket_alloc, r, p_conn, origin, header_brigade, 1);
diff --git a/server/core.c b/server/core.c
index 661e9be721d..8c492f103b4 100644
--- a/server/core.c
+++ b/server/core.c
@@ -3581,35 +3581,13 @@ static int default_handler(request_rec *r)
                            ap_md5digest(r->pool, fd));
         }
 
-        /* For platforms where the size of the file may be larger than
-         * that which can be stored in a single bucket (where the
-         * length field is an apr_size_t), split it into several
-         * buckets: */
-        if (sizeof(apr_off_t) > sizeof(apr_size_t)
-            && r->finfo.size > AP_MAX_SENDFILE) {
-            apr_off_t fsize = r->finfo.size;
-            e = apr_bucket_file_create(fd, 0, AP_MAX_SENDFILE, r->pool,
-                                       c->bucket_alloc);
-            while (fsize > AP_MAX_SENDFILE) {
-                apr_bucket *ce;
-                apr_bucket_copy(e, &ce);
-                APR_BRIGADE_INSERT_TAIL(bb, ce);
-                e->start += AP_MAX_SENDFILE;
-                fsize -= AP_MAX_SENDFILE;
-            }
-            e->length = (apr_size_t)fsize; /* Resize just the last bucket */
-        }
-        else {
-            e = apr_bucket_file_create(fd, 0, (apr_size_t)r->finfo.size,
-                                       r->pool, c->bucket_alloc);
-        }
+        e = apr_brigade_insert_file(bb, fd, 0, r->finfo.size, r->pool);
 
 #if APR_HAS_MMAP
         if (d->enable_mmap == ENABLE_MMAP_OFF) {
             (void)apr_bucket_file_enable_mmap(e, 0);
         }
 #endif
-        APR_BRIGADE_INSERT_TAIL(bb, e);
     }
 
     e = apr_bucket_eos_create(c->bucket_alloc);
diff --git a/server/protocol.c b/server/protocol.c
index 170e4651c2a..d063c95483d 100644
--- a/server/protocol.c
+++ b/server/protocol.c
@@ -1346,12 +1346,11 @@ AP_DECLARE(apr_status_t) ap_send_fd(apr_file_t *fd, request_rec *r,
 {
     conn_rec *c = r->connection;
     apr_bucket_brigade *bb = NULL;
-    apr_bucket *b;
    apr_status_t rv;
 
     bb = apr_brigade_create(r->pool, c->bucket_alloc);
-    b = apr_bucket_file_create(fd, offset, len, r->pool, c->bucket_alloc);
-    APR_BRIGADE_INSERT_TAIL(bb, b);
+
+    apr_brigade_insert_file(bb, fd, 0, len, r->pool);
 
    rv = ap_pass_brigade(r->output_filters, bb);
    if (rv != APR_SUCCESS) {
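
Several of the hunks above delete the same open-coded pattern: when apr_off_t is wider than apr_size_t and the file is larger than AP_MAX_SENDFILE, the file region has to be spread across several file buckets, because a single bucket's length field is only an apr_size_t. apr_brigade_insert_file() moves that logic into one place and returns a file bucket, which is why default_handler() in server/core.c above can still assign the result to e and call apr_bucket_file_enable_mmap() on it. As a rough illustration only, a helper with that behaviour could look like the sketch below, modelled directly on the removed code; the helper name is hypothetical and this is not APR's actual implementation:

    #include "httpd.h"          /* AP_MAX_SENDFILE */
    #include "apr_buckets.h"

    /* Sketch: append the [start, start+length) region of file f to brigade
     * bb, splitting it into several file buckets when the region does not
     * fit in a single bucket's apr_size_t length field.  Returns the last
     * bucket inserted so the caller can still adjust it, e.g. with
     * apr_bucket_file_enable_mmap(). */
    static apr_bucket *insert_file_sketch(apr_bucket_brigade *bb,
                                          apr_file_t *f, apr_off_t start,
                                          apr_off_t length, apr_pool_t *p)
    {
        apr_bucket *e;

        if (sizeof(apr_off_t) > sizeof(apr_size_t)
            && length > AP_MAX_SENDFILE) {
            /* Insert AP_MAX_SENDFILE-sized copies of the bucket, advancing
             * the start offset each time, then shrink the final bucket to
             * cover just the remainder. */
            e = apr_bucket_file_create(f, start, AP_MAX_SENDFILE, p,
                                       bb->bucket_alloc);
            while (length > AP_MAX_SENDFILE) {
                apr_bucket *copy;
                apr_bucket_copy(e, &copy);
                APR_BRIGADE_INSERT_TAIL(bb, copy);
                e->start += AP_MAX_SENDFILE;
                length -= AP_MAX_SENDFILE;
            }
            e->length = (apr_size_t)length;
        }
        else {
            /* The whole region fits in a single bucket. */
            e = apr_bucket_file_create(f, start, (apr_size_t)length, p,
                                       bb->bucket_alloc);
        }
        APR_BRIGADE_INSERT_TAIL(bb, e);
        return e;
    }

Because such a helper takes apr_off_t for both the offset and the length, callers like recall_body() or asis_handler() no longer need the (apr_size_t) casts or the #if APR_HAS_LARGE_FILES blocks removed above.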