From 53970609ebd72be82c529a8f5ef7fc8486b1e358 Mon Sep 17 00:00:00 2001 From: hno <> Date: Sat, 4 Jan 2003 08:28:13 +0000 Subject: [PATCH] Change async-io to use a pipe to signal I/O completion instead of relying on rapid polling. The actual completion events are still polled. Only the notification that there are completed events is done via the pipe to have select/poll react in a timely fashion. --- src/fs/aufs/aiops.cc | 40 ++++++++++++++++++++++++++-------------- src/fs/aufs/async_io.cc | 4 ++-- 2 files changed, 28 insertions(+), 16 deletions(-) diff --git a/src/fs/aufs/aiops.cc b/src/fs/aufs/aiops.cc index 7929d6803b..19e94e7b59 100644 --- a/src/fs/aufs/aiops.cc +++ b/src/fs/aufs/aiops.cc @@ -1,5 +1,5 @@ /* - * $Id: aiops.cc,v 1.18 2002/11/10 02:29:58 hno Exp $ + * $Id: aiops.cc,v 1.19 2003/01/04 01:28:13 hno Exp $ * * DEBUG: section 43 AIOPS * AUTHOR: Stewart Forster @@ -146,6 +146,8 @@ static struct { NULL, &done_requests.head }; +static int done_fd = 0; +static int done_signalled = 0; static pthread_attr_t globattr; #if HAVE_SCHED_H static struct sched_param globsched; @@ -221,10 +223,20 @@ squidaio_xstrfree(char *str) xfree(str); } +static void +squidaio_fdhandler(int fd, void *data) +{ + char buf[256]; + done_signalled = 0; + read(fd, buf, sizeof(buf)); + commSetSelect(fd, COMM_SELECT_READ, squidaio_fdhandler, NULL, 0); +} + static void squidaio_init(void) { int i; + int done_pipe[2]; squidaio_thread_t *threadp; if (squidaio_initialised) @@ -268,6 +280,15 @@ squidaio_init(void) done_queue.requests = 0; done_queue.blocked = 0; + /* Initialize done pipe signal */ + pipe(done_pipe); + done_fd = done_pipe[1]; + fd_open(done_pipe[0], FD_PIPE, "async-io completetion event: main"); + fd_open(done_pipe[1], FD_PIPE, "async-io completetion event: threads"); + commSetNonBlocking(done_pipe[0]); + commSetNonBlocking(done_pipe[1]); + commSetSelect(done_pipe[0], COMM_SELECT_READ, squidaio_fdhandler, NULL, 0); + /* Create threads and get them to sit in their wait loop 
*/ squidaio_thread_pool = memPoolCreate("aio_thread", sizeof(squidaio_thread_t)); assert(NUMTHREADS); @@ -389,6 +410,10 @@ squidaio_thread_loop(void *ptr) *done_queue.tailp = request; done_queue.tailp = &request->next; pthread_mutex_unlock(&done_queue.mutex); + if (!done_signalled) { + done_signalled = 1; + write(done_fd, "!", 1); + } threadp->requests++; } /* while forever */ return NULL; @@ -790,19 +815,6 @@ squidaio_poll_queues(void) } done_requests.tailp = &requests->next; } -#if HAVE_SCHED_H - /* Give up the CPU to allow the threads to do their work */ - /* - * For Andres thoughts about yield(), see - * http://www.squid-cache.org/mail-archive/squid-dev/200012/0001.html - */ - if (done_queue.head || request_queue.head) -#ifndef _SQUID_SOLARIS_ - sched_yield(); -#else - yield(); -#endif -#endif } squidaio_result_t * diff --git a/src/fs/aufs/async_io.cc b/src/fs/aufs/async_io.cc index 3f4fdf8ad1..0733881745 100644 --- a/src/fs/aufs/async_io.cc +++ b/src/fs/aufs/async_io.cc @@ -1,6 +1,6 @@ /* - * $Id: async_io.cc,v 1.20 2002/12/27 10:26:35 robertc Exp $ + * $Id: async_io.cc,v 1.21 2003/01/04 01:28:13 hno Exp $ * * DEBUG: section 32 Asynchronous Disk I/O * AUTHOR: Pete Bentley @@ -95,6 +95,7 @@ aioFDWasClosed(int fd) fd_close(fd); } + void aioInit(void) { @@ -104,7 +105,6 @@ aioInit(void) cachemgrRegister("squidaio_counts", "Async IO Function Counters", aioStats, 0, 1); initialised = 1; - comm_quick_poll_required(); } void -- 2.47.3