2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 43 AIOPS */
12 #error "_REENTRANT MUST be defined to build squid async io support."
16 #include "DiskIO/DiskThreads/CommIO.h"
17 #include "DiskThreads.h"
18 #include "SquidConfig.h"
19 #include "SquidTime.h"
23 * struct stat and squidaio_xstrdup use explicit pool alloc()/freeOne().
24 * XXX: convert to MEMPROXY_CLASS() API
38 #define RIDICULOUS_LENGTH 4096
/* Lifecycle states of an I/O worker thread (see squidaio_thread_loop). */
enum _squidaio_thread_status {
    _THREAD_STARTING = 0,   /* created, not yet in its wait loop */
    _THREAD_WAITING,        /* blocked on the request-queue condvar */
    _THREAD_BUSY,           /* executing a request */
    _THREAD_FAILED,         /* pthread_create failed for this slot */
    _THREAD_DONE            /* finished a request, queuing the result */
};
typedef enum _squidaio_thread_status squidaio_thread_status;
49 typedef struct squidaio_request_t
{
51 struct squidaio_request_t
*next
;
52 squidaio_request_type request_type
;
65 struct stat
*tmpstatp
;
68 squidaio_result_t
*resultp
;
71 typedef struct squidaio_request_queue_t
{
72 pthread_mutex_t mutex
;
74 squidaio_request_t
*volatile head
;
75 squidaio_request_t
*volatile *volatile tailp
;
76 unsigned long requests
;
77 unsigned long blocked
; /* main failed to lock the queue */
78 } squidaio_request_queue_t
;
80 typedef struct squidaio_thread_t squidaio_thread_t
;
82 struct squidaio_thread_t
{
83 squidaio_thread_t
*next
;
85 squidaio_thread_status status
;
87 struct squidaio_request_t
*current_req
;
88 unsigned long requests
;
91 static void squidaio_queue_request(squidaio_request_t
*);
92 static void squidaio_cleanup_request(squidaio_request_t
*);
93 void *squidaio_thread_loop(void *);
94 static void squidaio_do_open(squidaio_request_t
*);
95 static void squidaio_do_read(squidaio_request_t
*);
96 static void squidaio_do_write(squidaio_request_t
*);
97 static void squidaio_do_close(squidaio_request_t
*);
98 static void squidaio_do_stat(squidaio_request_t
*);
99 static void squidaio_do_unlink(squidaio_request_t
*);
101 static void *squidaio_do_opendir(squidaio_request_t
*);
103 static void squidaio_debug(squidaio_request_t
*);
104 static void squidaio_poll_queues(void);
106 static squidaio_thread_t
*threads
= NULL
;
107 static int squidaio_initialised
= 0;
/*
 * Pooled buffer size classes for squidaio_xmalloc()/squidaio_xfree().
 * The derived sizes are parenthesized: an unparenthesized expansion like
 * `AIO_LARGE_BUFS >> 1` breaks silently inside larger expressions
 * (e.g. `AIO_TINY_BUFS / 2` would parse as `16384 >> (3 / 2)`).
 */
#define AIO_LARGE_BUFS  16384
#define AIO_MEDIUM_BUFS (AIO_LARGE_BUFS >> 1)
#define AIO_SMALL_BUFS  (AIO_LARGE_BUFS >> 2)
#define AIO_TINY_BUFS   (AIO_LARGE_BUFS >> 3)
#define AIO_MICRO_BUFS  128
115 static MemAllocator
*squidaio_large_bufs
= NULL
; /* 16K */
116 static MemAllocator
*squidaio_medium_bufs
= NULL
; /* 8K */
117 static MemAllocator
*squidaio_small_bufs
= NULL
; /* 4K */
118 static MemAllocator
*squidaio_tiny_bufs
= NULL
; /* 2K */
119 static MemAllocator
*squidaio_micro_bufs
= NULL
; /* 128K */
121 static int request_queue_len
= 0;
122 static MemAllocator
*squidaio_request_pool
= NULL
;
123 static MemAllocator
*squidaio_thread_pool
= NULL
;
124 static squidaio_request_queue_t request_queue
;
127 squidaio_request_t
*head
, **tailp
;
132 NULL
, &request_queue2
.head
134 static squidaio_request_queue_t done_queue
;
137 squidaio_request_t
*head
, **tailp
;
142 NULL
, &done_requests
.head
144 static pthread_attr_t globattr
;
147 static struct sched_param globsched
;
149 static pthread_t main_thread
;
151 static MemAllocator
*
152 squidaio_get_pool(int size
)
154 if (size
<= AIO_LARGE_BUFS
) {
155 if (size
<= AIO_MICRO_BUFS
)
156 return squidaio_micro_bufs
;
157 else if (size
<= AIO_TINY_BUFS
)
158 return squidaio_tiny_bufs
;
159 else if (size
<= AIO_SMALL_BUFS
)
160 return squidaio_small_bufs
;
161 else if (size
<= AIO_MEDIUM_BUFS
)
162 return squidaio_medium_bufs
;
164 return squidaio_large_bufs
;
171 squidaio_xmalloc(int size
)
176 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
185 squidaio_xstrdup(const char *str
)
188 int len
= strlen(str
) + 1;
190 p
= (char *)squidaio_xmalloc(len
);
191 strncpy(p
, str
, len
);
197 squidaio_xfree(void *p
, int size
)
201 if ((pool
= squidaio_get_pool(size
)) != NULL
) {
208 squidaio_xstrfree(char *str
)
211 int len
= strlen(str
) + 1;
213 if ((pool
= squidaio_get_pool(len
)) != NULL
) {
223 squidaio_thread_t
*threadp
;
225 if (squidaio_initialised
)
228 pthread_attr_init(&globattr
);
230 #if HAVE_PTHREAD_ATTR_SETSCOPE
232 pthread_attr_setscope(&globattr
, PTHREAD_SCOPE_SYSTEM
);
237 globsched
.sched_priority
= 1;
241 main_thread
= pthread_self();
243 #if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
245 pthread_setschedparam(main_thread
, SCHED_OTHER
, &globsched
);
250 globsched
.sched_priority
= 2;
253 #if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
255 pthread_attr_setschedparam(&globattr
, &globsched
);
259 /* Give each thread a smaller 256KB stack, should be more than sufficient */
260 pthread_attr_setstacksize(&globattr
, 256 * 1024);
262 /* Initialize request queue */
263 if (pthread_mutex_init(&(request_queue
.mutex
), NULL
))
264 fatal("Failed to create mutex");
266 if (pthread_cond_init(&(request_queue
.cond
), NULL
))
267 fatal("Failed to create condition variable");
269 request_queue
.head
= NULL
;
271 request_queue
.tailp
= &request_queue
.head
;
273 request_queue
.requests
= 0;
275 request_queue
.blocked
= 0;
277 /* Initialize done queue */
278 if (pthread_mutex_init(&(done_queue
.mutex
), NULL
))
279 fatal("Failed to create mutex");
281 if (pthread_cond_init(&(done_queue
.cond
), NULL
))
282 fatal("Failed to create condition variable");
284 done_queue
.head
= NULL
;
286 done_queue
.tailp
= &done_queue
.head
;
288 done_queue
.requests
= 0;
290 done_queue
.blocked
= 0;
292 // Initialize the thread I/O pipes before creating any threads
293 // see bug 3189 comment 5 about race conditions.
294 CommIO::Initialize();
296 /* Create threads and get them to sit in their wait loop */
297 squidaio_thread_pool
= memPoolCreate("aio_thread", sizeof(squidaio_thread_t
));
301 for (i
= 0; i
< NUMTHREADS
; ++i
) {
302 threadp
= (squidaio_thread_t
*)squidaio_thread_pool
->alloc();
303 threadp
->status
= _THREAD_STARTING
;
304 threadp
->current_req
= NULL
;
305 threadp
->requests
= 0;
306 threadp
->next
= threads
;
309 if (pthread_create(&threadp
->thread
, &globattr
, squidaio_thread_loop
, threadp
)) {
310 fprintf(stderr
, "Thread creation failed\n");
311 threadp
->status
= _THREAD_FAILED
;
316 /* Create request pool */
317 squidaio_request_pool
= memPoolCreate("aio_request", sizeof(squidaio_request_t
));
319 squidaio_large_bufs
= memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS
);
321 squidaio_medium_bufs
= memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS
);
323 squidaio_small_bufs
= memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS
);
325 squidaio_tiny_bufs
= memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS
);
327 squidaio_micro_bufs
= memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS
);
329 squidaio_initialised
= 1;
333 squidaio_shutdown(void)
335 if (!squidaio_initialised
)
338 /* This is the same as in squidaio_sync */
340 squidaio_poll_queues();
341 } while (request_queue_len
> 0);
343 CommIO::NotifyIOClose();
345 squidaio_initialised
= 0;
349 squidaio_thread_loop(void *ptr
)
351 squidaio_thread_t
*threadp
= (squidaio_thread_t
*)ptr
;
352 squidaio_request_t
*request
;
356 * Make sure to ignore signals which may possibly get sent to
357 * the parent squid thread. Causes havoc with mutex's and
358 * condition waits otherwise
361 sigemptyset(&newSig
);
362 sigaddset(&newSig
, SIGPIPE
);
363 sigaddset(&newSig
, SIGCHLD
);
364 #if defined(_SQUID_LINUX_THREADS_)
366 sigaddset(&newSig
, SIGQUIT
);
367 sigaddset(&newSig
, SIGTRAP
);
370 sigaddset(&newSig
, SIGUSR1
);
371 sigaddset(&newSig
, SIGUSR2
);
374 sigaddset(&newSig
, SIGHUP
);
375 sigaddset(&newSig
, SIGTERM
);
376 sigaddset(&newSig
, SIGINT
);
377 sigaddset(&newSig
, SIGALRM
);
378 pthread_sigmask(SIG_BLOCK
, &newSig
, NULL
);
381 threadp
->current_req
= request
= NULL
;
383 /* Get a request to process */
384 threadp
->status
= _THREAD_WAITING
;
385 pthread_mutex_lock(&request_queue
.mutex
);
387 while (!request_queue
.head
) {
388 pthread_cond_wait(&request_queue
.cond
, &request_queue
.mutex
);
391 request
= request_queue
.head
;
394 request_queue
.head
= request
->next
;
396 if (!request_queue
.head
)
397 request_queue
.tailp
= &request_queue
.head
;
399 pthread_mutex_unlock(&request_queue
.mutex
);
401 /* process the request */
402 threadp
->status
= _THREAD_BUSY
;
404 request
->next
= NULL
;
406 threadp
->current_req
= request
;
410 if (!request
->cancelled
) {
411 switch (request
->request_type
) {
414 squidaio_do_open(request
);
418 squidaio_do_read(request
);
422 squidaio_do_write(request
);
426 squidaio_do_close(request
);
430 squidaio_do_unlink(request
);
433 #if AIO_OPENDIR /* Opendir not implemented yet */
435 case _AIO_OP_OPENDIR
:
436 squidaio_do_opendir(request
);
441 squidaio_do_stat(request
);
446 request
->err
= EINVAL
;
449 } else { /* cancelled */
451 request
->err
= EINTR
;
454 threadp
->status
= _THREAD_DONE
;
455 /* put the request in the done queue */
456 pthread_mutex_lock(&done_queue
.mutex
);
457 *done_queue
.tailp
= request
;
458 done_queue
.tailp
= &request
->next
;
459 pthread_mutex_unlock(&done_queue
.mutex
);
460 CommIO::NotifyIOCompleted();
461 ++ threadp
->requests
;
462 } /* while forever */
465 } /* squidaio_thread_loop */
468 squidaio_queue_request(squidaio_request_t
* request
)
470 static int high_start
= 0;
471 debugs(43, 9, "squidaio_queue_request: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
472 /* Mark it as not executed (failing result, no error) */
475 /* Internal housekeeping */
476 request_queue_len
+= 1;
477 request
->resultp
->_data
= request
;
478 /* Play some tricks with the request_queue2 queue */
479 request
->next
= NULL
;
481 if (pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
482 if (request_queue2
.head
) {
483 /* Grab blocked requests */
484 *request_queue
.tailp
= request_queue2
.head
;
485 request_queue
.tailp
= request_queue2
.tailp
;
488 /* Enqueue request */
489 *request_queue
.tailp
= request
;
491 request_queue
.tailp
= &request
->next
;
493 pthread_cond_signal(&request_queue
.cond
);
495 pthread_mutex_unlock(&request_queue
.mutex
);
497 if (request_queue2
.head
) {
498 /* Clear queue of blocked requests */
499 request_queue2
.head
= NULL
;
500 request_queue2
.tailp
= &request_queue2
.head
;
503 /* Oops, the request queue is blocked, use request_queue2 */
504 *request_queue2
.tailp
= request
;
505 request_queue2
.tailp
= &request
->next
;
508 if (request_queue2
.head
) {
509 static int filter
= 0;
510 static int filter_limit
= 8;
512 if (++filter
>= filter_limit
) {
513 filter_limit
+= filter
;
515 debugs(43, DBG_IMPORTANT
, "squidaio_queue_request: WARNING - Queue congestion");
519 /* Warn if out of threads */
520 if (request_queue_len
> MAGIC1
) {
521 static int last_warn
= 0;
522 static int queue_high
, queue_low
;
524 if (high_start
== 0) {
525 high_start
= squid_curtime
;
526 queue_high
= request_queue_len
;
527 queue_low
= request_queue_len
;
530 if (request_queue_len
> queue_high
)
531 queue_high
= request_queue_len
;
533 if (request_queue_len
< queue_low
)
534 queue_low
= request_queue_len
;
536 if (squid_curtime
>= (last_warn
+ 15) &&
537 squid_curtime
>= (high_start
+ 5)) {
538 debugs(43, DBG_IMPORTANT
, "squidaio_queue_request: WARNING - Disk I/O overloading");
540 if (squid_curtime
>= (high_start
+ 15))
541 debugs(43, DBG_IMPORTANT
, "squidaio_queue_request: Queue Length: current=" <<
542 request_queue_len
<< ", high=" << queue_high
<<
543 ", low=" << queue_low
<< ", duration=" <<
544 (long int) (squid_curtime
- high_start
));
546 last_warn
= squid_curtime
;
552 /* Warn if seriously overloaded */
553 if (request_queue_len
> RIDICULOUS_LENGTH
) {
554 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Async request queue growing uncontrollably!");
555 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Syncing pending I/O operations.. (blocking)");
557 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Synced");
559 } /* squidaio_queue_request */
562 squidaio_cleanup_request(squidaio_request_t
* requestp
)
564 squidaio_result_t
*resultp
= requestp
->resultp
;
565 int cancelled
= requestp
->cancelled
;
567 /* Free allocated structures and copy data back to user space if the */
568 /* request hasn't been cancelled */
570 switch (requestp
->request_type
) {
574 if (!cancelled
&& requestp
->ret
== 0)
575 memcpy(requestp
->statp
, requestp
->tmpstatp
, sizeof(struct stat
));
577 squidaio_xfree(requestp
->tmpstatp
, sizeof(struct stat
));
579 squidaio_xstrfree(requestp
->path
);
584 if (cancelled
&& requestp
->ret
>= 0)
585 /* The open() was cancelled but completed */
586 close(requestp
->ret
);
588 squidaio_xstrfree(requestp
->path
);
593 if (cancelled
&& requestp
->ret
< 0)
594 /* The close() was cancelled and never got executed */
601 case _AIO_OP_OPENDIR
:
602 squidaio_xstrfree(requestp
->path
);
616 if (resultp
!= NULL
&& !cancelled
) {
617 resultp
->aio_return
= requestp
->ret
;
618 resultp
->aio_errno
= requestp
->err
;
621 squidaio_request_pool
->freeOne(requestp
);
622 } /* squidaio_cleanup_request */
625 squidaio_cancel(squidaio_result_t
* resultp
)
627 squidaio_request_t
*request
= (squidaio_request_t
*)resultp
->_data
;
629 if (request
&& request
->resultp
== resultp
) {
630 debugs(43, 9, "squidaio_cancel: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
631 request
->cancelled
= 1;
632 request
->resultp
= NULL
;
633 resultp
->_data
= NULL
;
634 resultp
->result_type
= _AIO_OP_NONE
;
639 } /* squidaio_cancel */
642 squidaio_open(const char *path
, int oflag
, mode_t mode
, squidaio_result_t
* resultp
)
645 squidaio_request_t
*requestp
;
647 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
649 requestp
->path
= (char *) squidaio_xstrdup(path
);
651 requestp
->oflag
= oflag
;
653 requestp
->mode
= mode
;
655 requestp
->resultp
= resultp
;
657 requestp
->request_type
= _AIO_OP_OPEN
;
659 requestp
->cancelled
= 0;
661 resultp
->result_type
= _AIO_OP_OPEN
;
663 squidaio_queue_request(requestp
);
669 squidaio_do_open(squidaio_request_t
* requestp
)
671 requestp
->ret
= open(requestp
->path
, requestp
->oflag
, requestp
->mode
);
672 requestp
->err
= errno
;
676 squidaio_read(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
678 squidaio_request_t
*requestp
;
680 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
684 requestp
->bufferp
= bufp
;
686 requestp
->buflen
= bufs
;
688 requestp
->offset
= offset
;
690 requestp
->whence
= whence
;
692 requestp
->resultp
= resultp
;
694 requestp
->request_type
= _AIO_OP_READ
;
696 requestp
->cancelled
= 0;
698 resultp
->result_type
= _AIO_OP_READ
;
700 squidaio_queue_request(requestp
);
706 squidaio_do_read(squidaio_request_t
* requestp
)
708 if (lseek(requestp
->fd
, requestp
->offset
, requestp
->whence
) >= 0)
709 requestp
->ret
= read(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
712 requestp
->err
= errno
;
716 squidaio_write(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
718 squidaio_request_t
*requestp
;
720 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
724 requestp
->bufferp
= bufp
;
726 requestp
->buflen
= bufs
;
728 requestp
->offset
= offset
;
730 requestp
->whence
= whence
;
732 requestp
->resultp
= resultp
;
734 requestp
->request_type
= _AIO_OP_WRITE
;
736 requestp
->cancelled
= 0;
738 resultp
->result_type
= _AIO_OP_WRITE
;
740 squidaio_queue_request(requestp
);
746 squidaio_do_write(squidaio_request_t
* requestp
)
748 requestp
->ret
= write(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
749 requestp
->err
= errno
;
753 squidaio_close(int fd
, squidaio_result_t
* resultp
)
755 squidaio_request_t
*requestp
;
757 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
761 requestp
->resultp
= resultp
;
763 requestp
->request_type
= _AIO_OP_CLOSE
;
765 requestp
->cancelled
= 0;
767 resultp
->result_type
= _AIO_OP_CLOSE
;
769 squidaio_queue_request(requestp
);
775 squidaio_do_close(squidaio_request_t
* requestp
)
777 requestp
->ret
= close(requestp
->fd
);
778 requestp
->err
= errno
;
783 squidaio_stat(const char *path
, struct stat
*sb
, squidaio_result_t
* resultp
)
786 squidaio_request_t
*requestp
;
788 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
790 requestp
->path
= (char *) squidaio_xstrdup(path
);
792 requestp
->statp
= sb
;
794 requestp
->tmpstatp
= (struct stat
*) squidaio_xmalloc(sizeof(struct stat
));
796 requestp
->resultp
= resultp
;
798 requestp
->request_type
= _AIO_OP_STAT
;
800 requestp
->cancelled
= 0;
802 resultp
->result_type
= _AIO_OP_STAT
;
804 squidaio_queue_request(requestp
);
810 squidaio_do_stat(squidaio_request_t
* requestp
)
812 requestp
->ret
= stat(requestp
->path
, requestp
->tmpstatp
);
813 requestp
->err
= errno
;
817 squidaio_unlink(const char *path
, squidaio_result_t
* resultp
)
820 squidaio_request_t
*requestp
;
822 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
824 requestp
->path
= squidaio_xstrdup(path
);
826 requestp
->resultp
= resultp
;
828 requestp
->request_type
= _AIO_OP_UNLINK
;
830 requestp
->cancelled
= 0;
832 resultp
->result_type
= _AIO_OP_UNLINK
;
834 squidaio_queue_request(requestp
);
840 squidaio_do_unlink(squidaio_request_t
* requestp
)
842 requestp
->ret
= unlink(requestp
->path
);
843 requestp
->err
= errno
;
#if AIO_OPENDIR
/* XXX squidaio_opendir NOT implemented yet.. */

int
squidaio_opendir(const char *path, squidaio_result_t * resultp)
{
    squidaio_request_t *requestp;

    requestp = squidaio_request_pool->alloc();

    resultp->result_type = _AIO_OP_OPENDIR;

    return -1;
}

static void *
squidaio_do_opendir(squidaio_request_t * requestp)
{
    /* NOT IMPLEMENTED */
    return NULL;
}

#endif
871 squidaio_poll_queues(void)
873 /* kick "overflow" request queue */
875 if (request_queue2
.head
&&
876 pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
877 *request_queue
.tailp
= request_queue2
.head
;
878 request_queue
.tailp
= request_queue2
.tailp
;
879 pthread_cond_signal(&request_queue
.cond
);
880 pthread_mutex_unlock(&request_queue
.mutex
);
881 request_queue2
.head
= NULL
;
882 request_queue2
.tailp
= &request_queue2
.head
;
885 /* poll done queue */
886 if (done_queue
.head
&& pthread_mutex_trylock(&done_queue
.mutex
) == 0) {
888 struct squidaio_request_t
*requests
= done_queue
.head
;
889 done_queue
.head
= NULL
;
890 done_queue
.tailp
= &done_queue
.head
;
891 pthread_mutex_unlock(&done_queue
.mutex
);
892 *done_requests
.tailp
= requests
;
893 request_queue_len
-= 1;
895 while (requests
->next
) {
896 requests
= requests
->next
;
897 request_queue_len
-= 1;
900 done_requests
.tailp
= &requests
->next
;
905 squidaio_poll_done(void)
907 squidaio_request_t
*request
;
908 squidaio_result_t
*resultp
;
913 request
= done_requests
.head
;
915 if (request
== NULL
&& !polled
) {
916 CommIO::ResetNotifications();
917 squidaio_poll_queues();
919 request
= done_requests
.head
;
926 debugs(43, 9, "squidaio_poll_done: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
927 done_requests
.head
= request
->next
;
929 if (!done_requests
.head
)
930 done_requests
.tailp
= &done_requests
.head
;
932 resultp
= request
->resultp
;
934 cancelled
= request
->cancelled
;
936 squidaio_debug(request
);
938 debugs(43, 5, "DONE: " << request
->ret
<< " -> " << request
->err
);
940 squidaio_cleanup_request(request
);
946 } /* squidaio_poll_done */
949 squidaio_operations_pending(void)
951 return request_queue_len
+ (done_requests
.head
? 1 : 0);
957 /* XXX This might take a while if the queue is large.. */
960 squidaio_poll_queues();
961 } while (request_queue_len
> 0);
963 return squidaio_operations_pending();
967 squidaio_get_queue_len(void)
969 return request_queue_len
;
973 squidaio_debug(squidaio_request_t
* request
)
975 switch (request
->request_type
) {
978 debugs(43, 5, "OPEN of " << request
->path
<< " to FD " << request
->ret
);
982 debugs(43, 5, "READ on fd: " << request
->fd
);
986 debugs(43, 5, "WRITE on fd: " << request
->fd
);
990 debugs(43, 5, "CLOSE of fd: " << request
->fd
);
994 debugs(43, 5, "UNLINK of " << request
->path
);
1003 squidaio_stats(StoreEntry
* sentry
)
1005 squidaio_thread_t
*threadp
;
1008 if (!squidaio_initialised
)
1011 storeAppendPrintf(sentry
, "\n\nThreads Status:\n");
1013 storeAppendPrintf(sentry
, "#\tID\t# Requests\n");
1017 for (i
= 0; i
< NUMTHREADS
; ++i
) {
1018 storeAppendPrintf(sentry
, "%i\t0x%lx\t%ld\n", i
+ 1, (unsigned long)threadp
->thread
, threadp
->requests
);
1019 threadp
= threadp
->next
;