4 * DEBUG: section 43 AIOPS
5 * AUTHOR: Stewart Forster <slf@connect.com.au>
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
36 #error "_REENTRANT MUST be defined to build squid async io support."
39 #include "squid-old.h"
40 #include "DiskThreads.h"
53 #include "SquidTime.h"
56 #define RIDICULOUS_LENGTH 4096
58 enum _squidaio_thread_status
{
65 typedef enum _squidaio_thread_status squidaio_thread_status
;
67 typedef struct squidaio_request_t
{
69 struct squidaio_request_t
*next
;
70 squidaio_request_type request_type
;
83 struct stat
*tmpstatp
;
86 squidaio_result_t
*resultp
;
89 typedef struct squidaio_request_queue_t
{
90 pthread_mutex_t mutex
;
92 squidaio_request_t
*volatile head
;
93 squidaio_request_t
*volatile *volatile tailp
;
94 unsigned long requests
;
95 unsigned long blocked
; /* main failed to lock the queue */
96 } squidaio_request_queue_t
;
98 typedef struct squidaio_thread_t squidaio_thread_t
;
100 struct squidaio_thread_t
{
101 squidaio_thread_t
*next
;
103 squidaio_thread_status status
;
105 struct squidaio_request_t
*current_req
;
106 unsigned long requests
;
109 static void squidaio_queue_request(squidaio_request_t
*);
110 static void squidaio_cleanup_request(squidaio_request_t
*);
111 SQUIDCEXTERN
void *squidaio_thread_loop(void *);
112 static void squidaio_do_open(squidaio_request_t
*);
113 static void squidaio_do_read(squidaio_request_t
*);
114 static void squidaio_do_write(squidaio_request_t
*);
115 static void squidaio_do_close(squidaio_request_t
*);
116 static void squidaio_do_stat(squidaio_request_t
*);
117 static void squidaio_do_unlink(squidaio_request_t
*);
119 static void *squidaio_do_opendir(squidaio_request_t
*);
121 static void squidaio_debug(squidaio_request_t
*);
122 static void squidaio_poll_queues(void);
124 static squidaio_thread_t
*threads
= NULL
;
125 static int squidaio_initialised
= 0;
/*
 * Pool bucket sizes for squidaio_xmalloc()/squidaio_xfree().
 * Expansions are parenthesized: the originals expanded to a bare
 * "AIO_LARGE_BUFS >> n", which silently misbinds in any expression where
 * the macro meets an operator of higher precedence than >> (e.g.
 * "x & AIO_MEDIUM_BUFS" or "AIO_MEDIUM_BUFS + 1").
 */
#define AIO_LARGE_BUFS  16384
#define AIO_MEDIUM_BUFS (AIO_LARGE_BUFS >> 1)
#define AIO_SMALL_BUFS  (AIO_LARGE_BUFS >> 2)
#define AIO_TINY_BUFS   (AIO_LARGE_BUFS >> 3)
#define AIO_MICRO_BUFS  128
134 static MemAllocator
*squidaio_large_bufs
= NULL
; /* 16K */
135 static MemAllocator
*squidaio_medium_bufs
= NULL
; /* 8K */
136 static MemAllocator
*squidaio_small_bufs
= NULL
; /* 4K */
137 static MemAllocator
*squidaio_tiny_bufs
= NULL
; /* 2K */
138 static MemAllocator
*squidaio_micro_bufs
= NULL
; /* 128K */
140 static int request_queue_len
= 0;
141 static MemAllocator
*squidaio_request_pool
= NULL
;
142 static MemAllocator
*squidaio_thread_pool
= NULL
;
143 static squidaio_request_queue_t request_queue
;
146 squidaio_request_t
*head
, **tailp
;
151 NULL
, &request_queue2
.head
153 static squidaio_request_queue_t done_queue
;
156 squidaio_request_t
*head
, **tailp
;
161 NULL
, &done_requests
.head
/* Shared pthread attributes for all worker threads. */
static pthread_attr_t globattr;
#if HAVE_SCHED_H
/* Scratch scheduling parameters used during squidaio_init(). */
static struct sched_param globsched;
#endif
static pthread_t main_thread;
170 static MemAllocator
*
171 squidaio_get_pool(int size
)
173 if (size
<= AIO_LARGE_BUFS
) {
174 if (size
<= AIO_MICRO_BUFS
)
175 return squidaio_micro_bufs
;
176 else if (size
<= AIO_TINY_BUFS
)
177 return squidaio_tiny_bufs
;
178 else if (size
<= AIO_SMALL_BUFS
)
179 return squidaio_small_bufs
;
180 else if (size
<= AIO_MEDIUM_BUFS
)
181 return squidaio_medium_bufs
;
183 return squidaio_large_bufs
;
190 squidaio_xmalloc(int size
)
195 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
204 squidaio_xstrdup(const char *str
)
207 int len
= strlen(str
) + 1;
209 p
= (char *)squidaio_xmalloc(len
);
210 strncpy(p
, str
, len
);
216 squidaio_xfree(void *p
, int size
)
220 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
227 squidaio_xstrfree(char *str
)
230 int len
= strlen(str
) + 1;
232 if ((pool
= squidaio_get_pool(len
)) != NULL
) {
242 squidaio_thread_t
*threadp
;
244 if (squidaio_initialised
)
247 pthread_attr_init(&globattr
);
249 #if HAVE_PTHREAD_ATTR_SETSCOPE
251 pthread_attr_setscope(&globattr
, PTHREAD_SCOPE_SYSTEM
);
256 globsched
.sched_priority
= 1;
260 main_thread
= pthread_self();
262 #if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
264 pthread_setschedparam(main_thread
, SCHED_OTHER
, &globsched
);
269 globsched
.sched_priority
= 2;
272 #if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
274 pthread_attr_setschedparam(&globattr
, &globsched
);
278 /* Give each thread a smaller 256KB stack, should be more than sufficient */
279 pthread_attr_setstacksize(&globattr
, 256 * 1024);
281 /* Initialize request queue */
282 if (pthread_mutex_init(&(request_queue
.mutex
), NULL
))
283 fatal("Failed to create mutex");
285 if (pthread_cond_init(&(request_queue
.cond
), NULL
))
286 fatal("Failed to create condition variable");
288 request_queue
.head
= NULL
;
290 request_queue
.tailp
= &request_queue
.head
;
292 request_queue
.requests
= 0;
294 request_queue
.blocked
= 0;
296 /* Initialize done queue */
297 if (pthread_mutex_init(&(done_queue
.mutex
), NULL
))
298 fatal("Failed to create mutex");
300 if (pthread_cond_init(&(done_queue
.cond
), NULL
))
301 fatal("Failed to create condition variable");
303 done_queue
.head
= NULL
;
305 done_queue
.tailp
= &done_queue
.head
;
307 done_queue
.requests
= 0;
309 done_queue
.blocked
= 0;
311 /* Create threads and get them to sit in their wait loop */
312 squidaio_thread_pool
= memPoolCreate("aio_thread", sizeof(squidaio_thread_t
));
316 for (i
= 0; i
< NUMTHREADS
; i
++) {
317 threadp
= (squidaio_thread_t
*)squidaio_thread_pool
->alloc();
318 threadp
->status
= _THREAD_STARTING
;
319 threadp
->current_req
= NULL
;
320 threadp
->requests
= 0;
321 threadp
->next
= threads
;
324 if (pthread_create(&threadp
->thread
, &globattr
, squidaio_thread_loop
, threadp
)) {
325 fprintf(stderr
, "Thread creation failed\n");
326 threadp
->status
= _THREAD_FAILED
;
331 /* Create request pool */
332 squidaio_request_pool
= memPoolCreate("aio_request", sizeof(squidaio_request_t
));
334 squidaio_large_bufs
= memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS
);
336 squidaio_medium_bufs
= memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS
);
338 squidaio_small_bufs
= memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS
);
340 squidaio_tiny_bufs
= memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS
);
342 squidaio_micro_bufs
= memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS
);
344 squidaio_initialised
= 1;
348 squidaio_shutdown(void)
350 if (!squidaio_initialised
)
353 /* This is the same as in squidaio_sync */
355 squidaio_poll_queues();
356 } while (request_queue_len
> 0);
358 CommIO::NotifyIOClose();
360 squidaio_initialised
= 0;
364 squidaio_thread_loop(void *ptr
)
366 squidaio_thread_t
*threadp
= (squidaio_thread_t
*)ptr
;
367 squidaio_request_t
*request
;
371 * Make sure to ignore signals which may possibly get sent to
372 * the parent squid thread. Causes havoc with mutex's and
373 * condition waits otherwise
376 sigemptyset(&newSig
);
377 sigaddset(&newSig
, SIGPIPE
);
378 sigaddset(&newSig
, SIGCHLD
);
379 #if defined(_SQUID_LINUX_THREADS_)
381 sigaddset(&newSig
, SIGQUIT
);
382 sigaddset(&newSig
, SIGTRAP
);
385 sigaddset(&newSig
, SIGUSR1
);
386 sigaddset(&newSig
, SIGUSR2
);
389 sigaddset(&newSig
, SIGHUP
);
390 sigaddset(&newSig
, SIGTERM
);
391 sigaddset(&newSig
, SIGINT
);
392 sigaddset(&newSig
, SIGALRM
);
393 pthread_sigmask(SIG_BLOCK
, &newSig
, NULL
);
396 threadp
->current_req
= request
= NULL
;
398 /* Get a request to process */
399 threadp
->status
= _THREAD_WAITING
;
400 pthread_mutex_lock(&request_queue
.mutex
);
402 while (!request_queue
.head
) {
403 pthread_cond_wait(&request_queue
.cond
, &request_queue
.mutex
);
406 request
= request_queue
.head
;
409 request_queue
.head
= request
->next
;
411 if (!request_queue
.head
)
412 request_queue
.tailp
= &request_queue
.head
;
414 pthread_mutex_unlock(&request_queue
.mutex
);
416 /* process the request */
417 threadp
->status
= _THREAD_BUSY
;
419 request
->next
= NULL
;
421 threadp
->current_req
= request
;
425 if (!request
->cancelled
) {
426 switch (request
->request_type
) {
429 squidaio_do_open(request
);
433 squidaio_do_read(request
);
437 squidaio_do_write(request
);
441 squidaio_do_close(request
);
445 squidaio_do_unlink(request
);
448 #if AIO_OPENDIR /* Opendir not implemented yet */
450 case _AIO_OP_OPENDIR
:
451 squidaio_do_opendir(request
);
456 squidaio_do_stat(request
);
461 request
->err
= EINVAL
;
464 } else { /* cancelled */
466 request
->err
= EINTR
;
469 threadp
->status
= _THREAD_DONE
;
470 /* put the request in the done queue */
471 pthread_mutex_lock(&done_queue
.mutex
);
472 *done_queue
.tailp
= request
;
473 done_queue
.tailp
= &request
->next
;
474 pthread_mutex_unlock(&done_queue
.mutex
);
475 CommIO::NotifyIOCompleted();
477 } /* while forever */
480 } /* squidaio_thread_loop */
483 squidaio_queue_request(squidaio_request_t
* request
)
485 static int high_start
= 0;
486 debugs(43, 9, "squidaio_queue_request: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
487 /* Mark it as not executed (failing result, no error) */
490 /* Internal housekeeping */
491 request_queue_len
+= 1;
492 request
->resultp
->_data
= request
;
493 /* Play some tricks with the request_queue2 queue */
494 request
->next
= NULL
;
496 if (pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
497 if (request_queue2
.head
) {
498 /* Grab blocked requests */
499 *request_queue
.tailp
= request_queue2
.head
;
500 request_queue
.tailp
= request_queue2
.tailp
;
503 /* Enqueue request */
504 *request_queue
.tailp
= request
;
506 request_queue
.tailp
= &request
->next
;
508 pthread_cond_signal(&request_queue
.cond
);
510 pthread_mutex_unlock(&request_queue
.mutex
);
512 if (request_queue2
.head
) {
513 /* Clear queue of blocked requests */
514 request_queue2
.head
= NULL
;
515 request_queue2
.tailp
= &request_queue2
.head
;
518 /* Oops, the request queue is blocked, use request_queue2 */
519 *request_queue2
.tailp
= request
;
520 request_queue2
.tailp
= &request
->next
;
523 if (request_queue2
.head
) {
524 static int filter
= 0;
525 static int filter_limit
= 8;
527 if (++filter
>= filter_limit
) {
528 filter_limit
+= filter
;
530 debugs(43, 1, "squidaio_queue_request: WARNING - Queue congestion");
534 /* Warn if out of threads */
535 if (request_queue_len
> MAGIC1
) {
536 static int last_warn
= 0;
537 static int queue_high
, queue_low
;
539 if (high_start
== 0) {
540 high_start
= squid_curtime
;
541 queue_high
= request_queue_len
;
542 queue_low
= request_queue_len
;
545 if (request_queue_len
> queue_high
)
546 queue_high
= request_queue_len
;
548 if (request_queue_len
< queue_low
)
549 queue_low
= request_queue_len
;
551 if (squid_curtime
>= (last_warn
+ 15) &&
552 squid_curtime
>= (high_start
+ 5)) {
553 debugs(43, 1, "squidaio_queue_request: WARNING - Disk I/O overloading");
555 if (squid_curtime
>= (high_start
+ 15))
556 debugs(43, 1, "squidaio_queue_request: Queue Length: current=" <<
557 request_queue_len
<< ", high=" << queue_high
<<
558 ", low=" << queue_low
<< ", duration=" <<
559 (long int) (squid_curtime
- high_start
));
561 last_warn
= squid_curtime
;
567 /* Warn if seriously overloaded */
568 if (request_queue_len
> RIDICULOUS_LENGTH
) {
569 debugs(43, 0, "squidaio_queue_request: Async request queue growing uncontrollably!");
570 debugs(43, 0, "squidaio_queue_request: Syncing pending I/O operations.. (blocking)");
572 debugs(43, 0, "squidaio_queue_request: Synced");
574 } /* squidaio_queue_request */
577 squidaio_cleanup_request(squidaio_request_t
* requestp
)
579 squidaio_result_t
*resultp
= requestp
->resultp
;
580 int cancelled
= requestp
->cancelled
;
582 /* Free allocated structures and copy data back to user space if the */
583 /* request hasn't been cancelled */
585 switch (requestp
->request_type
) {
589 if (!cancelled
&& requestp
->ret
== 0)
590 memcpy(requestp
->statp
, requestp
->tmpstatp
, sizeof(struct stat
));
592 squidaio_xfree(requestp
->tmpstatp
, sizeof(struct stat
));
594 squidaio_xstrfree(requestp
->path
);
599 if (cancelled
&& requestp
->ret
>= 0)
600 /* The open() was cancelled but completed */
601 close(requestp
->ret
);
603 squidaio_xstrfree(requestp
->path
);
608 if (cancelled
&& requestp
->ret
< 0)
609 /* The close() was cancelled and never got executed */
616 case _AIO_OP_OPENDIR
:
617 squidaio_xstrfree(requestp
->path
);
631 if (resultp
!= NULL
&& !cancelled
) {
632 resultp
->aio_return
= requestp
->ret
;
633 resultp
->aio_errno
= requestp
->err
;
636 squidaio_request_pool
->freeOne(requestp
);
637 } /* squidaio_cleanup_request */
641 squidaio_cancel(squidaio_result_t
* resultp
)
643 squidaio_request_t
*request
= (squidaio_request_t
*)resultp
->_data
;
645 if (request
&& request
->resultp
== resultp
) {
646 debugs(43, 9, "squidaio_cancel: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
647 request
->cancelled
= 1;
648 request
->resultp
= NULL
;
649 resultp
->_data
= NULL
;
650 resultp
->result_type
= _AIO_OP_NONE
;
655 } /* squidaio_cancel */
659 squidaio_open(const char *path
, int oflag
, mode_t mode
, squidaio_result_t
* resultp
)
662 squidaio_request_t
*requestp
;
664 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
666 requestp
->path
= (char *) squidaio_xstrdup(path
);
668 requestp
->oflag
= oflag
;
670 requestp
->mode
= mode
;
672 requestp
->resultp
= resultp
;
674 requestp
->request_type
= _AIO_OP_OPEN
;
676 requestp
->cancelled
= 0;
678 resultp
->result_type
= _AIO_OP_OPEN
;
680 squidaio_queue_request(requestp
);
687 squidaio_do_open(squidaio_request_t
* requestp
)
689 requestp
->ret
= open(requestp
->path
, requestp
->oflag
, requestp
->mode
);
690 requestp
->err
= errno
;
695 squidaio_read(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
697 squidaio_request_t
*requestp
;
699 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
703 requestp
->bufferp
= bufp
;
705 requestp
->buflen
= bufs
;
707 requestp
->offset
= offset
;
709 requestp
->whence
= whence
;
711 requestp
->resultp
= resultp
;
713 requestp
->request_type
= _AIO_OP_READ
;
715 requestp
->cancelled
= 0;
717 resultp
->result_type
= _AIO_OP_READ
;
719 squidaio_queue_request(requestp
);
726 squidaio_do_read(squidaio_request_t
* requestp
)
728 lseek(requestp
->fd
, requestp
->offset
, requestp
->whence
);
729 requestp
->ret
= read(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
730 requestp
->err
= errno
;
735 squidaio_write(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
737 squidaio_request_t
*requestp
;
739 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
743 requestp
->bufferp
= bufp
;
745 requestp
->buflen
= bufs
;
747 requestp
->offset
= offset
;
749 requestp
->whence
= whence
;
751 requestp
->resultp
= resultp
;
753 requestp
->request_type
= _AIO_OP_WRITE
;
755 requestp
->cancelled
= 0;
757 resultp
->result_type
= _AIO_OP_WRITE
;
759 squidaio_queue_request(requestp
);
766 squidaio_do_write(squidaio_request_t
* requestp
)
768 requestp
->ret
= write(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
769 requestp
->err
= errno
;
774 squidaio_close(int fd
, squidaio_result_t
* resultp
)
776 squidaio_request_t
*requestp
;
778 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
782 requestp
->resultp
= resultp
;
784 requestp
->request_type
= _AIO_OP_CLOSE
;
786 requestp
->cancelled
= 0;
788 resultp
->result_type
= _AIO_OP_CLOSE
;
790 squidaio_queue_request(requestp
);
797 squidaio_do_close(squidaio_request_t
* requestp
)
799 requestp
->ret
= close(requestp
->fd
);
800 requestp
->err
= errno
;
806 squidaio_stat(const char *path
, struct stat
*sb
, squidaio_result_t
* resultp
)
809 squidaio_request_t
*requestp
;
811 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
813 requestp
->path
= (char *) squidaio_xstrdup(path
);
815 requestp
->statp
= sb
;
817 requestp
->tmpstatp
= (struct stat
*) squidaio_xmalloc(sizeof(struct stat
));
819 requestp
->resultp
= resultp
;
821 requestp
->request_type
= _AIO_OP_STAT
;
823 requestp
->cancelled
= 0;
825 resultp
->result_type
= _AIO_OP_STAT
;
827 squidaio_queue_request(requestp
);
834 squidaio_do_stat(squidaio_request_t
* requestp
)
836 requestp
->ret
= stat(requestp
->path
, requestp
->tmpstatp
);
837 requestp
->err
= errno
;
842 squidaio_unlink(const char *path
, squidaio_result_t
* resultp
)
845 squidaio_request_t
*requestp
;
847 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
849 requestp
->path
= squidaio_xstrdup(path
);
851 requestp
->resultp
= resultp
;
853 requestp
->request_type
= _AIO_OP_UNLINK
;
855 requestp
->cancelled
= 0;
857 resultp
->result_type
= _AIO_OP_UNLINK
;
859 squidaio_queue_request(requestp
);
866 squidaio_do_unlink(squidaio_request_t
* requestp
)
868 requestp
->ret
= unlink(requestp
->path
);
869 requestp
->err
= errno
;
#if AIO_OPENDIR
/* XXX squidaio_opendir NOT implemented yet.. */
int
squidaio_opendir(const char *path, squidaio_result_t * resultp)
{
    squidaio_request_t *requestp;

    requestp = squidaio_request_pool->alloc();

    resultp->result_type = _AIO_OP_OPENDIR;

    return -1;
}

/* Worker-side opendir stub. */
static void *
squidaio_do_opendir(squidaio_request_t * requestp)
{
    /* NOT IMPLEMENTED */
    return NULL;
}

#endif
897 squidaio_poll_queues(void)
899 /* kick "overflow" request queue */
901 if (request_queue2
.head
&&
902 pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
903 *request_queue
.tailp
= request_queue2
.head
;
904 request_queue
.tailp
= request_queue2
.tailp
;
905 pthread_cond_signal(&request_queue
.cond
);
906 pthread_mutex_unlock(&request_queue
.mutex
);
907 request_queue2
.head
= NULL
;
908 request_queue2
.tailp
= &request_queue2
.head
;
911 /* poll done queue */
912 if (done_queue
.head
&& pthread_mutex_trylock(&done_queue
.mutex
) == 0) {
914 struct squidaio_request_t
*requests
= done_queue
.head
;
915 done_queue
.head
= NULL
;
916 done_queue
.tailp
= &done_queue
.head
;
917 pthread_mutex_unlock(&done_queue
.mutex
);
918 *done_requests
.tailp
= requests
;
919 request_queue_len
-= 1;
921 while (requests
->next
) {
922 requests
= requests
->next
;
923 request_queue_len
-= 1;
926 done_requests
.tailp
= &requests
->next
;
931 squidaio_poll_done(void)
933 squidaio_request_t
*request
;
934 squidaio_result_t
*resultp
;
939 request
= done_requests
.head
;
941 if (request
== NULL
&& !polled
) {
942 CommIO::ResetNotifications();
943 squidaio_poll_queues();
945 request
= done_requests
.head
;
952 debugs(43, 9, "squidaio_poll_done: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
953 done_requests
.head
= request
->next
;
955 if (!done_requests
.head
)
956 done_requests
.tailp
= &done_requests
.head
;
958 resultp
= request
->resultp
;
960 cancelled
= request
->cancelled
;
962 squidaio_debug(request
);
964 debugs(43, 5, "DONE: " << request
->ret
<< " -> " << request
->err
);
966 squidaio_cleanup_request(request
);
972 } /* squidaio_poll_done */
975 squidaio_operations_pending(void)
977 return request_queue_len
+ (done_requests
.head
? 1 : 0);
983 /* XXX This might take a while if the queue is large.. */
986 squidaio_poll_queues();
987 } while (request_queue_len
> 0);
989 return squidaio_operations_pending();
993 squidaio_get_queue_len(void)
995 return request_queue_len
;
999 squidaio_debug(squidaio_request_t
* request
)
1001 switch (request
->request_type
) {
1004 debugs(43, 5, "OPEN of " << request
->path
<< " to FD " << request
->ret
);
1008 debugs(43, 5, "READ on fd: " << request
->fd
);
1012 debugs(43, 5, "WRITE on fd: " << request
->fd
);
1016 debugs(43, 5, "CLOSE of fd: " << request
->fd
);
1019 case _AIO_OP_UNLINK
:
1020 debugs(43, 5, "UNLINK of " << request
->path
);
1029 squidaio_stats(StoreEntry
* sentry
)
1031 squidaio_thread_t
*threadp
;
1034 if (!squidaio_initialised
)
1037 storeAppendPrintf(sentry
, "\n\nThreads Status:\n");
1039 storeAppendPrintf(sentry
, "#\tID\t# Requests\n");
1043 for (i
= 0; i
< NUMTHREADS
; i
++) {
1044 storeAppendPrintf(sentry
, "%i\t0x%lx\t%ld\n", i
+ 1, (unsigned long)threadp
->thread
, threadp
->requests
);
1045 threadp
= threadp
->next
;