4 * DEBUG: section 43 AIOPS
5 * AUTHOR: Stewart Forster <slf@connect.com.au>
7 * SQUID Web Proxy Cache http://www.squid-cache.org/
8 * ----------------------------------------------------------
10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
36 #error "_REENTRANT MUST be defined to build squid async io support."
40 #include "DiskThreads.h"
43 #include <sys/types.h>
54 #include "SquidTime.h"
57 #define RIDICULOUS_LENGTH 4096
/* Lifecycle states of a worker thread, reported via squidaio_stats(). */
enum _squidaio_thread_status {
    _THREAD_STARTING = 0,
    _THREAD_WAITING,
    _THREAD_BUSY,
    _THREAD_FAILED,
    _THREAD_DONE
};
typedef enum _squidaio_thread_status squidaio_thread_status;
68 typedef struct squidaio_request_t
{
70 struct squidaio_request_t
*next
;
71 squidaio_request_type request_type
;
84 struct stat
*tmpstatp
;
87 squidaio_result_t
*resultp
;
90 typedef struct squidaio_request_queue_t
{
91 pthread_mutex_t mutex
;
93 squidaio_request_t
*volatile head
;
94 squidaio_request_t
*volatile *volatile tailp
;
95 unsigned long requests
;
96 unsigned long blocked
; /* main failed to lock the queue */
97 } squidaio_request_queue_t
;
99 typedef struct squidaio_thread_t squidaio_thread_t
;
101 struct squidaio_thread_t
{
102 squidaio_thread_t
*next
;
104 squidaio_thread_status status
;
106 struct squidaio_request_t
*current_req
;
107 unsigned long requests
;
110 static void squidaio_queue_request(squidaio_request_t
*);
111 static void squidaio_cleanup_request(squidaio_request_t
*);
112 static void *squidaio_thread_loop(void *);
113 static void squidaio_do_open(squidaio_request_t
*);
114 static void squidaio_do_read(squidaio_request_t
*);
115 static void squidaio_do_write(squidaio_request_t
*);
116 static void squidaio_do_close(squidaio_request_t
*);
117 static void squidaio_do_stat(squidaio_request_t
*);
118 static void squidaio_do_unlink(squidaio_request_t
*);
120 static void *squidaio_do_opendir(squidaio_request_t
*);
122 static void squidaio_debug(squidaio_request_t
*);
123 static void squidaio_poll_queues(void);
125 static squidaio_thread_t
*threads
= NULL
;
126 static int squidaio_initialised
= 0;
129 #define AIO_LARGE_BUFS 16384
130 #define AIO_MEDIUM_BUFS AIO_LARGE_BUFS >> 1
131 #define AIO_SMALL_BUFS AIO_LARGE_BUFS >> 2
132 #define AIO_TINY_BUFS AIO_LARGE_BUFS >> 3
133 #define AIO_MICRO_BUFS 128
135 static MemAllocator
*squidaio_large_bufs
= NULL
; /* 16K */
136 static MemAllocator
*squidaio_medium_bufs
= NULL
; /* 8K */
137 static MemAllocator
*squidaio_small_bufs
= NULL
; /* 4K */
138 static MemAllocator
*squidaio_tiny_bufs
= NULL
; /* 2K */
139 static MemAllocator
*squidaio_micro_bufs
= NULL
; /* 128K */
141 static int request_queue_len
= 0;
142 static MemAllocator
*squidaio_request_pool
= NULL
;
143 static MemAllocator
*squidaio_thread_pool
= NULL
;
144 static squidaio_request_queue_t request_queue
;
147 squidaio_request_t
*head
, **tailp
;
152 NULL
, &request_queue2
.head
154 static squidaio_request_queue_t done_queue
;
157 squidaio_request_t
*head
, **tailp
;
162 NULL
, &done_requests
.head
164 static pthread_attr_t globattr
;
167 static struct sched_param globsched
;
169 static pthread_t main_thread
;
171 static MemAllocator
*
172 squidaio_get_pool(int size
)
174 if (size
<= AIO_LARGE_BUFS
) {
175 if (size
<= AIO_MICRO_BUFS
)
176 return squidaio_micro_bufs
;
177 else if (size
<= AIO_TINY_BUFS
)
178 return squidaio_tiny_bufs
;
179 else if (size
<= AIO_SMALL_BUFS
)
180 return squidaio_small_bufs
;
181 else if (size
<= AIO_MEDIUM_BUFS
)
182 return squidaio_medium_bufs
;
184 return squidaio_large_bufs
;
191 squidaio_xmalloc(int size
)
196 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
205 squidaio_xstrdup(const char *str
)
208 int len
= strlen(str
) + 1;
210 p
= (char *)squidaio_xmalloc(len
);
211 strncpy(p
, str
, len
);
217 squidaio_xfree(void *p
, int size
)
221 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
228 squidaio_xstrfree(char *str
)
231 int len
= strlen(str
) + 1;
233 if ((pool
= squidaio_get_pool(len
)) != NULL
) {
243 squidaio_thread_t
*threadp
;
245 if (squidaio_initialised
)
248 pthread_attr_init(&globattr
);
250 #if HAVE_PTHREAD_ATTR_SETSCOPE
252 pthread_attr_setscope(&globattr
, PTHREAD_SCOPE_SYSTEM
);
257 globsched
.sched_priority
= 1;
261 main_thread
= pthread_self();
263 #if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
265 pthread_setschedparam(main_thread
, SCHED_OTHER
, &globsched
);
270 globsched
.sched_priority
= 2;
273 #if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
275 pthread_attr_setschedparam(&globattr
, &globsched
);
279 /* Give each thread a smaller 256KB stack, should be more than sufficient */
280 pthread_attr_setstacksize(&globattr
, 256 * 1024);
282 /* Initialize request queue */
283 if (pthread_mutex_init(&(request_queue
.mutex
), NULL
))
284 fatal("Failed to create mutex");
286 if (pthread_cond_init(&(request_queue
.cond
), NULL
))
287 fatal("Failed to create condition variable");
289 request_queue
.head
= NULL
;
291 request_queue
.tailp
= &request_queue
.head
;
293 request_queue
.requests
= 0;
295 request_queue
.blocked
= 0;
297 /* Initialize done queue */
298 if (pthread_mutex_init(&(done_queue
.mutex
), NULL
))
299 fatal("Failed to create mutex");
301 if (pthread_cond_init(&(done_queue
.cond
), NULL
))
302 fatal("Failed to create condition variable");
304 done_queue
.head
= NULL
;
306 done_queue
.tailp
= &done_queue
.head
;
308 done_queue
.requests
= 0;
310 done_queue
.blocked
= 0;
312 /* Create threads and get them to sit in their wait loop */
313 squidaio_thread_pool
= memPoolCreate("aio_thread", sizeof(squidaio_thread_t
));
317 for (i
= 0; i
< NUMTHREADS
; i
++) {
318 threadp
= (squidaio_thread_t
*)squidaio_thread_pool
->alloc();
319 threadp
->status
= _THREAD_STARTING
;
320 threadp
->current_req
= NULL
;
321 threadp
->requests
= 0;
322 threadp
->next
= threads
;
325 if (pthread_create(&threadp
->thread
, &globattr
, squidaio_thread_loop
, threadp
)) {
326 fprintf(stderr
, "Thread creation failed\n");
327 threadp
->status
= _THREAD_FAILED
;
332 /* Create request pool */
333 squidaio_request_pool
= memPoolCreate("aio_request", sizeof(squidaio_request_t
));
335 squidaio_large_bufs
= memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS
);
337 squidaio_medium_bufs
= memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS
);
339 squidaio_small_bufs
= memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS
);
341 squidaio_tiny_bufs
= memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS
);
343 squidaio_micro_bufs
= memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS
);
345 squidaio_initialised
= 1;
349 squidaio_shutdown(void)
351 if (!squidaio_initialised
)
354 /* This is the same as in squidaio_sync */
356 squidaio_poll_queues();
357 } while (request_queue_len
> 0);
359 CommIO::NotifyIOClose();
361 squidaio_initialised
= 0;
365 squidaio_thread_loop(void *ptr
)
367 squidaio_thread_t
*threadp
= (squidaio_thread_t
*)ptr
;
368 squidaio_request_t
*request
;
372 * Make sure to ignore signals which may possibly get sent to
373 * the parent squid thread. Causes havoc with mutex's and
374 * condition waits otherwise
377 sigemptyset(&newSig
);
378 sigaddset(&newSig
, SIGPIPE
);
379 sigaddset(&newSig
, SIGCHLD
);
380 #ifdef _SQUID_LINUX_THREADS_
382 sigaddset(&newSig
, SIGQUIT
);
383 sigaddset(&newSig
, SIGTRAP
);
386 sigaddset(&newSig
, SIGUSR1
);
387 sigaddset(&newSig
, SIGUSR2
);
390 sigaddset(&newSig
, SIGHUP
);
391 sigaddset(&newSig
, SIGTERM
);
392 sigaddset(&newSig
, SIGINT
);
393 sigaddset(&newSig
, SIGALRM
);
394 pthread_sigmask(SIG_BLOCK
, &newSig
, NULL
);
397 threadp
->current_req
= request
= NULL
;
399 /* Get a request to process */
400 threadp
->status
= _THREAD_WAITING
;
401 pthread_mutex_lock(&request_queue
.mutex
);
403 while (!request_queue
.head
) {
404 pthread_cond_wait(&request_queue
.cond
, &request_queue
.mutex
);
407 request
= request_queue
.head
;
410 request_queue
.head
= request
->next
;
412 if (!request_queue
.head
)
413 request_queue
.tailp
= &request_queue
.head
;
415 pthread_mutex_unlock(&request_queue
.mutex
);
417 /* process the request */
418 threadp
->status
= _THREAD_BUSY
;
420 request
->next
= NULL
;
422 threadp
->current_req
= request
;
426 if (!request
->cancelled
) {
427 switch (request
->request_type
) {
430 squidaio_do_open(request
);
434 squidaio_do_read(request
);
438 squidaio_do_write(request
);
442 squidaio_do_close(request
);
446 squidaio_do_unlink(request
);
449 #if AIO_OPENDIR /* Opendir not implemented yet */
451 case _AIO_OP_OPENDIR
:
452 squidaio_do_opendir(request
);
457 squidaio_do_stat(request
);
462 request
->err
= EINVAL
;
465 } else { /* cancelled */
467 request
->err
= EINTR
;
470 threadp
->status
= _THREAD_DONE
;
471 /* put the request in the done queue */
472 pthread_mutex_lock(&done_queue
.mutex
);
473 *done_queue
.tailp
= request
;
474 done_queue
.tailp
= &request
->next
;
475 pthread_mutex_unlock(&done_queue
.mutex
);
476 CommIO::NotifyIOCompleted();
478 } /* while forever */
481 } /* squidaio_thread_loop */
484 squidaio_queue_request(squidaio_request_t
* request
)
486 static int high_start
= 0;
487 debugs(43, 9, "squidaio_queue_request: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
488 /* Mark it as not executed (failing result, no error) */
491 /* Internal housekeeping */
492 request_queue_len
+= 1;
493 request
->resultp
->_data
= request
;
494 /* Play some tricks with the request_queue2 queue */
495 request
->next
= NULL
;
497 if (pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
498 if (request_queue2
.head
) {
499 /* Grab blocked requests */
500 *request_queue
.tailp
= request_queue2
.head
;
501 request_queue
.tailp
= request_queue2
.tailp
;
504 /* Enqueue request */
505 *request_queue
.tailp
= request
;
507 request_queue
.tailp
= &request
->next
;
509 pthread_cond_signal(&request_queue
.cond
);
511 pthread_mutex_unlock(&request_queue
.mutex
);
513 if (request_queue2
.head
) {
514 /* Clear queue of blocked requests */
515 request_queue2
.head
= NULL
;
516 request_queue2
.tailp
= &request_queue2
.head
;
519 /* Oops, the request queue is blocked, use request_queue2 */
520 *request_queue2
.tailp
= request
;
521 request_queue2
.tailp
= &request
->next
;
524 if (request_queue2
.head
) {
525 static int filter
= 0;
526 static int filter_limit
= 8;
528 if (++filter
>= filter_limit
) {
529 filter_limit
+= filter
;
531 debugs(43, 1, "squidaio_queue_request: WARNING - Queue congestion");
535 /* Warn if out of threads */
536 if (request_queue_len
> MAGIC1
) {
537 static int last_warn
= 0;
538 static int queue_high
, queue_low
;
540 if (high_start
== 0) {
541 high_start
= squid_curtime
;
542 queue_high
= request_queue_len
;
543 queue_low
= request_queue_len
;
546 if (request_queue_len
> queue_high
)
547 queue_high
= request_queue_len
;
549 if (request_queue_len
< queue_low
)
550 queue_low
= request_queue_len
;
552 if (squid_curtime
>= (last_warn
+ 15) &&
553 squid_curtime
>= (high_start
+ 5)) {
554 debugs(43, 1, "squidaio_queue_request: WARNING - Disk I/O overloading");
556 if (squid_curtime
>= (high_start
+ 15))
557 debugs(43, 1, "squidaio_queue_request: Queue Length: current=" <<
558 request_queue_len
<< ", high=" << queue_high
<<
559 ", low=" << queue_low
<< ", duration=" <<
560 (long int) (squid_curtime
- high_start
));
562 last_warn
= squid_curtime
;
568 /* Warn if seriously overloaded */
569 if (request_queue_len
> RIDICULOUS_LENGTH
) {
570 debugs(43, 0, "squidaio_queue_request: Async request queue growing uncontrollably!");
571 debugs(43, 0, "squidaio_queue_request: Syncing pending I/O operations.. (blocking)");
573 debugs(43, 0, "squidaio_queue_request: Synced");
575 } /* squidaio_queue_request */
578 squidaio_cleanup_request(squidaio_request_t
* requestp
)
580 squidaio_result_t
*resultp
= requestp
->resultp
;
581 int cancelled
= requestp
->cancelled
;
583 /* Free allocated structures and copy data back to user space if the */
584 /* request hasn't been cancelled */
586 switch (requestp
->request_type
) {
590 if (!cancelled
&& requestp
->ret
== 0)
592 xmemcpy(requestp
->statp
, requestp
->tmpstatp
, sizeof(struct stat
));
594 squidaio_xfree(requestp
->tmpstatp
, sizeof(struct stat
));
596 squidaio_xstrfree(requestp
->path
);
601 if (cancelled
&& requestp
->ret
>= 0)
602 /* The open() was cancelled but completed */
603 close(requestp
->ret
);
605 squidaio_xstrfree(requestp
->path
);
610 if (cancelled
&& requestp
->ret
< 0)
611 /* The close() was cancelled and never got executed */
618 case _AIO_OP_OPENDIR
:
619 squidaio_xstrfree(requestp
->path
);
633 if (resultp
!= NULL
&& !cancelled
) {
634 resultp
->aio_return
= requestp
->ret
;
635 resultp
->aio_errno
= requestp
->err
;
638 squidaio_request_pool
->free(requestp
);
639 } /* squidaio_cleanup_request */
643 squidaio_cancel(squidaio_result_t
* resultp
)
645 squidaio_request_t
*request
= (squidaio_request_t
*)resultp
->_data
;
647 if (request
&& request
->resultp
== resultp
) {
648 debugs(43, 9, "squidaio_cancel: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
649 request
->cancelled
= 1;
650 request
->resultp
= NULL
;
651 resultp
->_data
= NULL
;
652 resultp
->result_type
= _AIO_OP_NONE
;
657 } /* squidaio_cancel */
661 squidaio_open(const char *path
, int oflag
, mode_t mode
, squidaio_result_t
* resultp
)
664 squidaio_request_t
*requestp
;
666 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
668 requestp
->path
= (char *) squidaio_xstrdup(path
);
670 requestp
->oflag
= oflag
;
672 requestp
->mode
= mode
;
674 requestp
->resultp
= resultp
;
676 requestp
->request_type
= _AIO_OP_OPEN
;
678 requestp
->cancelled
= 0;
680 resultp
->result_type
= _AIO_OP_OPEN
;
682 squidaio_queue_request(requestp
);
689 squidaio_do_open(squidaio_request_t
* requestp
)
691 requestp
->ret
= open(requestp
->path
, requestp
->oflag
, requestp
->mode
);
692 requestp
->err
= errno
;
697 squidaio_read(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
699 squidaio_request_t
*requestp
;
701 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
705 requestp
->bufferp
= bufp
;
707 requestp
->buflen
= bufs
;
709 requestp
->offset
= offset
;
711 requestp
->whence
= whence
;
713 requestp
->resultp
= resultp
;
715 requestp
->request_type
= _AIO_OP_READ
;
717 requestp
->cancelled
= 0;
719 resultp
->result_type
= _AIO_OP_READ
;
721 squidaio_queue_request(requestp
);
728 squidaio_do_read(squidaio_request_t
* requestp
)
730 lseek(requestp
->fd
, requestp
->offset
, requestp
->whence
);
731 requestp
->ret
= read(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
732 requestp
->err
= errno
;
737 squidaio_write(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
739 squidaio_request_t
*requestp
;
741 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
745 requestp
->bufferp
= bufp
;
747 requestp
->buflen
= bufs
;
749 requestp
->offset
= offset
;
751 requestp
->whence
= whence
;
753 requestp
->resultp
= resultp
;
755 requestp
->request_type
= _AIO_OP_WRITE
;
757 requestp
->cancelled
= 0;
759 resultp
->result_type
= _AIO_OP_WRITE
;
761 squidaio_queue_request(requestp
);
768 squidaio_do_write(squidaio_request_t
* requestp
)
770 requestp
->ret
= write(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
771 requestp
->err
= errno
;
776 squidaio_close(int fd
, squidaio_result_t
* resultp
)
778 squidaio_request_t
*requestp
;
780 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
784 requestp
->resultp
= resultp
;
786 requestp
->request_type
= _AIO_OP_CLOSE
;
788 requestp
->cancelled
= 0;
790 resultp
->result_type
= _AIO_OP_CLOSE
;
792 squidaio_queue_request(requestp
);
799 squidaio_do_close(squidaio_request_t
* requestp
)
801 requestp
->ret
= close(requestp
->fd
);
802 requestp
->err
= errno
;
808 squidaio_stat(const char *path
, struct stat
*sb
, squidaio_result_t
* resultp
)
811 squidaio_request_t
*requestp
;
813 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
815 requestp
->path
= (char *) squidaio_xstrdup(path
);
817 requestp
->statp
= sb
;
819 requestp
->tmpstatp
= (struct stat
*) squidaio_xmalloc(sizeof(struct stat
));
821 requestp
->resultp
= resultp
;
823 requestp
->request_type
= _AIO_OP_STAT
;
825 requestp
->cancelled
= 0;
827 resultp
->result_type
= _AIO_OP_STAT
;
829 squidaio_queue_request(requestp
);
836 squidaio_do_stat(squidaio_request_t
* requestp
)
838 requestp
->ret
= stat(requestp
->path
, requestp
->tmpstatp
);
839 requestp
->err
= errno
;
844 squidaio_unlink(const char *path
, squidaio_result_t
* resultp
)
847 squidaio_request_t
*requestp
;
849 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
851 requestp
->path
= squidaio_xstrdup(path
);
853 requestp
->resultp
= resultp
;
855 requestp
->request_type
= _AIO_OP_UNLINK
;
857 requestp
->cancelled
= 0;
859 resultp
->result_type
= _AIO_OP_UNLINK
;
861 squidaio_queue_request(requestp
);
868 squidaio_do_unlink(squidaio_request_t
* requestp
)
870 requestp
->ret
= unlink(requestp
->path
);
871 requestp
->err
= errno
;
#if AIO_OPENDIR
/* XXX squidaio_opendir NOT implemented yet.. */
int
squidaio_opendir(const char *path, squidaio_result_t * resultp)
{
    squidaio_request_t *requestp;

    requestp = squidaio_request_pool->alloc();

    resultp->result_type = _AIO_OP_OPENDIR;

    return -1;
}

/* Worker-side opendir stub; dead code until AIO_OPENDIR is enabled. */
static void *
squidaio_do_opendir(squidaio_request_t * requestp)
{
    /* NOT IMPLEMENTED */
    return NULL;
}

#endif
899 squidaio_poll_queues(void)
901 /* kick "overflow" request queue */
903 if (request_queue2
.head
&&
904 pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
905 *request_queue
.tailp
= request_queue2
.head
;
906 request_queue
.tailp
= request_queue2
.tailp
;
907 pthread_cond_signal(&request_queue
.cond
);
908 pthread_mutex_unlock(&request_queue
.mutex
);
909 request_queue2
.head
= NULL
;
910 request_queue2
.tailp
= &request_queue2
.head
;
913 /* poll done queue */
914 if (done_queue
.head
&& pthread_mutex_trylock(&done_queue
.mutex
) == 0) {
916 struct squidaio_request_t
*requests
= done_queue
.head
;
917 done_queue
.head
= NULL
;
918 done_queue
.tailp
= &done_queue
.head
;
919 pthread_mutex_unlock(&done_queue
.mutex
);
920 *done_requests
.tailp
= requests
;
921 request_queue_len
-= 1;
923 while (requests
->next
) {
924 requests
= requests
->next
;
925 request_queue_len
-= 1;
928 done_requests
.tailp
= &requests
->next
;
933 squidaio_poll_done(void)
935 squidaio_request_t
*request
;
936 squidaio_result_t
*resultp
;
941 request
= done_requests
.head
;
943 if (request
== NULL
&& !polled
) {
944 CommIO::ResetNotifications();
945 squidaio_poll_queues();
947 request
= done_requests
.head
;
954 debugs(43, 9, "squidaio_poll_done: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
955 done_requests
.head
= request
->next
;
957 if (!done_requests
.head
)
958 done_requests
.tailp
= &done_requests
.head
;
960 resultp
= request
->resultp
;
962 cancelled
= request
->cancelled
;
964 squidaio_debug(request
);
966 debugs(43, 5, "DONE: " << request
->ret
<< " -> " << request
->err
);
968 squidaio_cleanup_request(request
);
974 } /* squidaio_poll_done */
977 squidaio_operations_pending(void)
979 return request_queue_len
+ (done_requests
.head
? 1 : 0);
985 /* XXX This might take a while if the queue is large.. */
988 squidaio_poll_queues();
989 } while (request_queue_len
> 0);
991 return squidaio_operations_pending();
995 squidaio_get_queue_len(void)
997 return request_queue_len
;
1001 squidaio_debug(squidaio_request_t
* request
)
1003 switch (request
->request_type
) {
1006 debugs(43, 5, "OPEN of " << request
->path
<< " to FD " << request
->ret
);
1010 debugs(43, 5, "READ on fd: " << request
->fd
);
1014 debugs(43, 5, "WRITE on fd: " << request
->fd
);
1018 debugs(43, 5, "CLOSE of fd: " << request
->fd
);
1021 case _AIO_OP_UNLINK
:
1022 debugs(43, 5, "UNLINK of " << request
->path
);
1031 squidaio_stats(StoreEntry
* sentry
)
1033 squidaio_thread_t
*threadp
;
1036 if (!squidaio_initialised
)
1039 storeAppendPrintf(sentry
, "\n\nThreads Status:\n");
1041 storeAppendPrintf(sentry
, "#\tID\t# Requests\n");
1045 for (i
= 0; i
< NUMTHREADS
; i
++) {
1046 storeAppendPrintf(sentry
, "%i\t0x%lx\t%ld\n", i
+ 1, (unsigned long)threadp
->thread
, threadp
->requests
);
1047 threadp
= threadp
->next
;