2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 43 AIOPS */
12 #error "_REENTRANT MUST be defined to build squid async io support."
16 #include "DiskIO/DiskThreads/CommIO.h"
17 #include "DiskThreads.h"
18 #include "SquidConfig.h"
22 * struct stat and squidaio_xstrdup use explicit pool alloc()/freeOne().
23 * XXX: convert to MEMPROXY_CLASS() API
37 #define RIDICULOUS_LENGTH 4096
39 enum _squidaio_thread_status
{
46 typedef enum _squidaio_thread_status squidaio_thread_status
;
48 typedef struct squidaio_request_t
{
50 struct squidaio_request_t
*next
;
51 squidaio_request_type request_type
;
64 struct stat
*tmpstatp
;
67 squidaio_result_t
*resultp
;
70 typedef struct squidaio_request_queue_t
{
71 pthread_mutex_t mutex
;
73 squidaio_request_t
*volatile head
;
74 squidaio_request_t
*volatile *volatile tailp
;
75 unsigned long requests
;
76 unsigned long blocked
; /* main failed to lock the queue */
77 } squidaio_request_queue_t
;
79 typedef struct squidaio_thread_t squidaio_thread_t
;
81 struct squidaio_thread_t
{
82 squidaio_thread_t
*next
;
84 squidaio_thread_status status
;
86 struct squidaio_request_t
*current_req
;
87 unsigned long requests
;
90 static void squidaio_queue_request(squidaio_request_t
*);
91 static void squidaio_cleanup_request(squidaio_request_t
*);
92 void *squidaio_thread_loop(void *);
93 static void squidaio_do_open(squidaio_request_t
*);
94 static void squidaio_do_read(squidaio_request_t
*);
95 static void squidaio_do_write(squidaio_request_t
*);
96 static void squidaio_do_close(squidaio_request_t
*);
97 static void squidaio_do_stat(squidaio_request_t
*);
98 static void squidaio_do_unlink(squidaio_request_t
*);
100 static void *squidaio_do_opendir(squidaio_request_t
*);
102 static void squidaio_debug(squidaio_request_t
*);
103 static void squidaio_poll_queues(void);
105 static squidaio_thread_t
*threads
= nullptr;
106 static int squidaio_initialised
= 0;
108 #define AIO_LARGE_BUFS 16384
109 #define AIO_MEDIUM_BUFS AIO_LARGE_BUFS >> 1
110 #define AIO_SMALL_BUFS AIO_LARGE_BUFS >> 2
111 #define AIO_TINY_BUFS AIO_LARGE_BUFS >> 3
112 #define AIO_MICRO_BUFS 128
114 static Mem::Allocator
*squidaio_large_bufs
= nullptr; /* 16K */
115 static Mem::Allocator
*squidaio_medium_bufs
= nullptr; /* 8K */
116 static Mem::Allocator
*squidaio_small_bufs
= nullptr; /* 4K */
117 static Mem::Allocator
*squidaio_tiny_bufs
= nullptr; /* 2K */
118 static Mem::Allocator
*squidaio_micro_bufs
= nullptr; /* 128K */
120 static int request_queue_len
= 0;
121 static Mem::Allocator
*squidaio_request_pool
= nullptr;
122 static Mem::Allocator
*squidaio_thread_pool
= nullptr;
123 static squidaio_request_queue_t request_queue
;
126 squidaio_request_t
*head
, **tailp
;
131 nullptr, &request_queue2
.head
133 static squidaio_request_queue_t done_queue
;
136 squidaio_request_t
*head
, **tailp
;
141 nullptr, &done_requests
.head
143 static pthread_attr_t globattr
;
146 static struct sched_param globsched
;
148 static pthread_t main_thread
;
150 static Mem::Allocator
*
151 squidaio_get_pool(int size
)
153 if (size
<= AIO_LARGE_BUFS
) {
154 if (size
<= AIO_MICRO_BUFS
)
155 return squidaio_micro_bufs
;
156 else if (size
<= AIO_TINY_BUFS
)
157 return squidaio_tiny_bufs
;
158 else if (size
<= AIO_SMALL_BUFS
)
159 return squidaio_small_bufs
;
160 else if (size
<= AIO_MEDIUM_BUFS
)
161 return squidaio_medium_bufs
;
163 return squidaio_large_bufs
;
170 squidaio_xmalloc(int size
)
174 if (const auto pool
= squidaio_get_pool(size
)) {
183 squidaio_xstrdup(const char *str
)
186 int len
= strlen(str
) + 1;
188 p
= (char *)squidaio_xmalloc(len
);
189 strncpy(p
, str
, len
);
195 squidaio_xfree(void *p
, int size
)
197 if (const auto pool
= squidaio_get_pool(size
)) {
204 squidaio_xstrfree(char *str
)
206 int len
= strlen(str
) + 1;
208 if (const auto pool
= squidaio_get_pool(len
)) {
218 squidaio_thread_t
*threadp
;
220 if (squidaio_initialised
)
223 pthread_attr_init(&globattr
);
225 #if HAVE_PTHREAD_ATTR_SETSCOPE
227 pthread_attr_setscope(&globattr
, PTHREAD_SCOPE_SYSTEM
);
232 globsched
.sched_priority
= 1;
236 main_thread
= pthread_self();
238 #if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
240 pthread_setschedparam(main_thread
, SCHED_OTHER
, &globsched
);
245 globsched
.sched_priority
= 2;
248 #if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
250 pthread_attr_setschedparam(&globattr
, &globsched
);
254 /* Give each thread a smaller 256KB stack, should be more than sufficient */
255 pthread_attr_setstacksize(&globattr
, 256 * 1024);
257 /* Initialize request queue */
258 if (pthread_mutex_init(&(request_queue
.mutex
), nullptr))
259 fatal("Failed to create mutex");
261 if (pthread_cond_init(&(request_queue
.cond
), nullptr))
262 fatal("Failed to create condition variable");
264 request_queue
.head
= nullptr;
266 request_queue
.tailp
= &request_queue
.head
;
268 request_queue
.requests
= 0;
270 request_queue
.blocked
= 0;
272 /* Initialize done queue */
273 if (pthread_mutex_init(&(done_queue
.mutex
), nullptr))
274 fatal("Failed to create mutex");
276 if (pthread_cond_init(&(done_queue
.cond
), nullptr))
277 fatal("Failed to create condition variable");
279 done_queue
.head
= nullptr;
281 done_queue
.tailp
= &done_queue
.head
;
283 done_queue
.requests
= 0;
285 done_queue
.blocked
= 0;
287 // Initialize the thread I/O pipes before creating any threads
288 // see bug 3189 comment 5 about race conditions.
289 CommIO::Initialize();
291 /* Create threads and get them to sit in their wait loop */
292 squidaio_thread_pool
= memPoolCreate("aio_thread", sizeof(squidaio_thread_t
));
294 assert(NUMTHREADS
!= 0);
296 for (i
= 0; i
< NUMTHREADS
; ++i
) {
297 threadp
= (squidaio_thread_t
*)squidaio_thread_pool
->alloc();
298 threadp
->status
= _THREAD_STARTING
;
299 threadp
->current_req
= nullptr;
300 threadp
->requests
= 0;
301 threadp
->next
= threads
;
304 if (pthread_create(&threadp
->thread
, &globattr
, squidaio_thread_loop
, threadp
)) {
305 fprintf(stderr
, "Thread creation failed\n");
306 threadp
->status
= _THREAD_FAILED
;
311 /* Create request pool */
312 squidaio_request_pool
= memPoolCreate("aio_request", sizeof(squidaio_request_t
));
314 squidaio_large_bufs
= memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS
);
316 squidaio_medium_bufs
= memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS
);
318 squidaio_small_bufs
= memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS
);
320 squidaio_tiny_bufs
= memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS
);
322 squidaio_micro_bufs
= memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS
);
324 squidaio_initialised
= 1;
328 squidaio_shutdown(void)
330 if (!squidaio_initialised
)
333 /* This is the same as in squidaio_sync */
335 squidaio_poll_queues();
336 } while (request_queue_len
> 0);
338 CommIO::NotifyIOClose();
340 squidaio_initialised
= 0;
344 squidaio_thread_loop(void *ptr
)
346 squidaio_thread_t
*threadp
= (squidaio_thread_t
*)ptr
;
347 squidaio_request_t
*request
;
351 * Make sure to ignore signals which may possibly get sent to
352 * the parent squid thread. Causes havoc with mutex's and
353 * condition waits otherwise
356 sigemptyset(&newSig
);
357 sigaddset(&newSig
, SIGPIPE
);
358 sigaddset(&newSig
, SIGCHLD
);
359 #if defined(_SQUID_LINUX_THREADS_)
361 sigaddset(&newSig
, SIGQUIT
);
362 sigaddset(&newSig
, SIGTRAP
);
365 sigaddset(&newSig
, SIGUSR1
);
366 sigaddset(&newSig
, SIGUSR2
);
369 sigaddset(&newSig
, SIGHUP
);
370 sigaddset(&newSig
, SIGTERM
);
371 sigaddset(&newSig
, SIGINT
);
372 sigaddset(&newSig
, SIGALRM
);
373 pthread_sigmask(SIG_BLOCK
, &newSig
, nullptr);
376 threadp
->current_req
= request
= nullptr;
378 /* Get a request to process */
379 threadp
->status
= _THREAD_WAITING
;
380 pthread_mutex_lock(&request_queue
.mutex
);
382 while (!request_queue
.head
) {
383 pthread_cond_wait(&request_queue
.cond
, &request_queue
.mutex
);
386 request
= request_queue
.head
;
389 request_queue
.head
= request
->next
;
391 if (!request_queue
.head
)
392 request_queue
.tailp
= &request_queue
.head
;
394 pthread_mutex_unlock(&request_queue
.mutex
);
396 /* process the request */
397 threadp
->status
= _THREAD_BUSY
;
399 request
->next
= nullptr;
401 threadp
->current_req
= request
;
405 if (!request
->cancelled
) {
406 switch (request
->request_type
) {
409 squidaio_do_open(request
);
413 squidaio_do_read(request
);
417 squidaio_do_write(request
);
421 squidaio_do_close(request
);
425 squidaio_do_unlink(request
);
428 #if AIO_OPENDIR /* Opendir not implemented yet */
430 case _AIO_OP_OPENDIR
:
431 squidaio_do_opendir(request
);
436 squidaio_do_stat(request
);
441 request
->err
= EINVAL
;
444 } else { /* cancelled */
446 request
->err
= EINTR
;
449 threadp
->status
= _THREAD_DONE
;
450 /* put the request in the done queue */
451 pthread_mutex_lock(&done_queue
.mutex
);
452 *done_queue
.tailp
= request
;
453 done_queue
.tailp
= &request
->next
;
454 pthread_mutex_unlock(&done_queue
.mutex
);
455 CommIO::NotifyIOCompleted();
456 ++ threadp
->requests
;
457 } /* while forever */
460 } /* squidaio_thread_loop */
463 squidaio_queue_request(squidaio_request_t
* request
)
465 static int high_start
= 0;
466 debugs(43, 9, "squidaio_queue_request: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
467 /* Mark it as not executed (failing result, no error) */
470 /* Internal housekeeping */
471 request_queue_len
+= 1;
472 request
->resultp
->_data
= request
;
473 /* Play some tricks with the request_queue2 queue */
474 request
->next
= nullptr;
476 if (pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
477 if (request_queue2
.head
) {
478 /* Grab blocked requests */
479 *request_queue
.tailp
= request_queue2
.head
;
480 request_queue
.tailp
= request_queue2
.tailp
;
483 /* Enqueue request */
484 *request_queue
.tailp
= request
;
486 request_queue
.tailp
= &request
->next
;
488 pthread_cond_signal(&request_queue
.cond
);
490 pthread_mutex_unlock(&request_queue
.mutex
);
492 if (request_queue2
.head
) {
493 /* Clear queue of blocked requests */
494 request_queue2
.head
= nullptr;
495 request_queue2
.tailp
= &request_queue2
.head
;
498 /* Oops, the request queue is blocked, use request_queue2 */
499 *request_queue2
.tailp
= request
;
500 request_queue2
.tailp
= &request
->next
;
503 if (request_queue2
.head
) {
504 static uint64_t filter
= 0;
505 static uint64_t filter_limit
= 8192;
507 if (++filter
>= filter_limit
) {
508 filter_limit
+= filter
;
510 debugs(43, DBG_IMPORTANT
, "WARNING: squidaio_queue_request: Queue congestion (growing to " << filter_limit
<< ")");
514 /* Warn if out of threads */
515 if (request_queue_len
> MAGIC1
) {
516 static int last_warn
= 0;
517 static int queue_high
, queue_low
;
519 if (high_start
== 0) {
520 high_start
= squid_curtime
;
521 queue_high
= request_queue_len
;
522 queue_low
= request_queue_len
;
525 if (request_queue_len
> queue_high
)
526 queue_high
= request_queue_len
;
528 if (request_queue_len
< queue_low
)
529 queue_low
= request_queue_len
;
531 if (squid_curtime
>= (last_warn
+ 15) &&
532 squid_curtime
>= (high_start
+ 5)) {
533 debugs(43, DBG_IMPORTANT
, "WARNING: squidaio_queue_request: Disk I/O overloading");
535 if (squid_curtime
>= (high_start
+ 15))
536 debugs(43, DBG_IMPORTANT
, "squidaio_queue_request: Queue Length: current=" <<
537 request_queue_len
<< ", high=" << queue_high
<<
538 ", low=" << queue_low
<< ", duration=" <<
539 (long int) (squid_curtime
- high_start
));
541 last_warn
= squid_curtime
;
547 /* Warn if seriously overloaded */
548 if (request_queue_len
> RIDICULOUS_LENGTH
) {
549 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Async request queue growing uncontrollably!");
550 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Syncing pending I/O operations.. (blocking)");
552 debugs(43, DBG_CRITICAL
, "squidaio_queue_request: Synced");
554 } /* squidaio_queue_request */
557 squidaio_cleanup_request(squidaio_request_t
* requestp
)
559 squidaio_result_t
*resultp
= requestp
->resultp
;
560 int cancelled
= requestp
->cancelled
;
562 /* Free allocated structures and copy data back to user space if the */
563 /* request hasn't been cancelled */
565 switch (requestp
->request_type
) {
569 if (!cancelled
&& requestp
->ret
== 0)
570 memcpy(requestp
->statp
, requestp
->tmpstatp
, sizeof(struct stat
));
572 squidaio_xfree(requestp
->tmpstatp
, sizeof(struct stat
));
574 squidaio_xstrfree(requestp
->path
);
579 if (cancelled
&& requestp
->ret
>= 0)
580 /* The open() was cancelled but completed */
581 close(requestp
->ret
);
583 squidaio_xstrfree(requestp
->path
);
588 if (cancelled
&& requestp
->ret
< 0)
589 /* The close() was cancelled and never got executed */
596 case _AIO_OP_OPENDIR
:
597 squidaio_xstrfree(requestp
->path
);
611 if (resultp
!= nullptr && !cancelled
) {
612 resultp
->aio_return
= requestp
->ret
;
613 resultp
->aio_errno
= requestp
->err
;
616 squidaio_request_pool
->freeOne(requestp
);
617 } /* squidaio_cleanup_request */
620 squidaio_cancel(squidaio_result_t
* resultp
)
622 squidaio_request_t
*request
= (squidaio_request_t
*)resultp
->_data
;
624 if (request
&& request
->resultp
== resultp
) {
625 debugs(43, 9, "squidaio_cancel: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
626 request
->cancelled
= 1;
627 request
->resultp
= nullptr;
628 resultp
->_data
= nullptr;
629 resultp
->result_type
= _AIO_OP_NONE
;
634 } /* squidaio_cancel */
637 squidaio_open(const char *path
, int oflag
, mode_t mode
, squidaio_result_t
* resultp
)
640 squidaio_request_t
*requestp
;
642 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
644 requestp
->path
= (char *) squidaio_xstrdup(path
);
646 requestp
->oflag
= oflag
;
648 requestp
->mode
= mode
;
650 requestp
->resultp
= resultp
;
652 requestp
->request_type
= _AIO_OP_OPEN
;
654 requestp
->cancelled
= 0;
656 resultp
->result_type
= _AIO_OP_OPEN
;
658 squidaio_queue_request(requestp
);
664 squidaio_do_open(squidaio_request_t
* requestp
)
666 requestp
->ret
= open(requestp
->path
, requestp
->oflag
, requestp
->mode
);
667 requestp
->err
= errno
;
671 squidaio_read(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
673 squidaio_request_t
*requestp
;
675 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
679 requestp
->bufferp
= bufp
;
681 requestp
->buflen
= bufs
;
683 requestp
->offset
= offset
;
685 requestp
->whence
= whence
;
687 requestp
->resultp
= resultp
;
689 requestp
->request_type
= _AIO_OP_READ
;
691 requestp
->cancelled
= 0;
693 resultp
->result_type
= _AIO_OP_READ
;
695 squidaio_queue_request(requestp
);
701 squidaio_do_read(squidaio_request_t
* requestp
)
703 if (lseek(requestp
->fd
, requestp
->offset
, requestp
->whence
) >= 0)
704 requestp
->ret
= read(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
707 requestp
->err
= errno
;
711 squidaio_write(int fd
, char *bufp
, size_t bufs
, off_t offset
, int whence
, squidaio_result_t
* resultp
)
713 squidaio_request_t
*requestp
;
715 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
719 requestp
->bufferp
= bufp
;
721 requestp
->buflen
= bufs
;
723 requestp
->offset
= offset
;
725 requestp
->whence
= whence
;
727 requestp
->resultp
= resultp
;
729 requestp
->request_type
= _AIO_OP_WRITE
;
731 requestp
->cancelled
= 0;
733 resultp
->result_type
= _AIO_OP_WRITE
;
735 squidaio_queue_request(requestp
);
741 squidaio_do_write(squidaio_request_t
* requestp
)
743 requestp
->ret
= write(requestp
->fd
, requestp
->bufferp
, requestp
->buflen
);
744 requestp
->err
= errno
;
748 squidaio_close(int fd
, squidaio_result_t
* resultp
)
750 squidaio_request_t
*requestp
;
752 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
756 requestp
->resultp
= resultp
;
758 requestp
->request_type
= _AIO_OP_CLOSE
;
760 requestp
->cancelled
= 0;
762 resultp
->result_type
= _AIO_OP_CLOSE
;
764 squidaio_queue_request(requestp
);
770 squidaio_do_close(squidaio_request_t
* requestp
)
772 requestp
->ret
= close(requestp
->fd
);
773 requestp
->err
= errno
;
778 squidaio_stat(const char *path
, struct stat
*sb
, squidaio_result_t
* resultp
)
781 squidaio_request_t
*requestp
;
783 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
785 requestp
->path
= (char *) squidaio_xstrdup(path
);
787 requestp
->statp
= sb
;
789 requestp
->tmpstatp
= (struct stat
*) squidaio_xmalloc(sizeof(struct stat
));
791 requestp
->resultp
= resultp
;
793 requestp
->request_type
= _AIO_OP_STAT
;
795 requestp
->cancelled
= 0;
797 resultp
->result_type
= _AIO_OP_STAT
;
799 squidaio_queue_request(requestp
);
805 squidaio_do_stat(squidaio_request_t
* requestp
)
807 requestp
->ret
= stat(requestp
->path
, requestp
->tmpstatp
);
808 requestp
->err
= errno
;
812 squidaio_unlink(const char *path
, squidaio_result_t
* resultp
)
815 squidaio_request_t
*requestp
;
817 requestp
= (squidaio_request_t
*)squidaio_request_pool
->alloc();
819 requestp
->path
= squidaio_xstrdup(path
);
821 requestp
->resultp
= resultp
;
823 requestp
->request_type
= _AIO_OP_UNLINK
;
825 requestp
->cancelled
= 0;
827 resultp
->result_type
= _AIO_OP_UNLINK
;
829 squidaio_queue_request(requestp
);
835 squidaio_do_unlink(squidaio_request_t
* requestp
)
837 requestp
->ret
= unlink(requestp
->path
);
838 requestp
->err
= errno
;
#if AIO_OPENDIR
/* XXX squidaio_opendir NOT implemented yet.. */

int
squidaio_opendir(const char *path, squidaio_result_t * resultp)
{
    squidaio_request_t *requestp;

    requestp = squidaio_request_pool->alloc();

    resultp->result_type = _AIO_OP_OPENDIR;

    return -1;
}

static void *
squidaio_do_opendir(squidaio_request_t * requestp)
{
    /* NOT IMPLEMENTED */
    return nullptr;
}

#endif
866 squidaio_poll_queues(void)
868 /* kick "overflow" request queue */
870 if (request_queue2
.head
&&
871 pthread_mutex_trylock(&request_queue
.mutex
) == 0) {
872 *request_queue
.tailp
= request_queue2
.head
;
873 request_queue
.tailp
= request_queue2
.tailp
;
874 pthread_cond_signal(&request_queue
.cond
);
875 pthread_mutex_unlock(&request_queue
.mutex
);
876 request_queue2
.head
= nullptr;
877 request_queue2
.tailp
= &request_queue2
.head
;
880 /* poll done queue */
881 if (done_queue
.head
&& pthread_mutex_trylock(&done_queue
.mutex
) == 0) {
883 struct squidaio_request_t
*requests
= done_queue
.head
;
884 done_queue
.head
= nullptr;
885 done_queue
.tailp
= &done_queue
.head
;
886 pthread_mutex_unlock(&done_queue
.mutex
);
887 *done_requests
.tailp
= requests
;
888 request_queue_len
-= 1;
890 while (requests
->next
) {
891 requests
= requests
->next
;
892 request_queue_len
-= 1;
895 done_requests
.tailp
= &requests
->next
;
900 squidaio_poll_done(void)
902 squidaio_request_t
*request
;
903 squidaio_result_t
*resultp
;
908 request
= done_requests
.head
;
910 if (request
== nullptr && !polled
) {
911 CommIO::ResetNotifications();
912 squidaio_poll_queues();
914 request
= done_requests
.head
;
921 debugs(43, 9, "squidaio_poll_done: " << request
<< " type=" << request
->request_type
<< " result=" << request
->resultp
);
922 done_requests
.head
= request
->next
;
924 if (!done_requests
.head
)
925 done_requests
.tailp
= &done_requests
.head
;
927 resultp
= request
->resultp
;
929 cancelled
= request
->cancelled
;
931 squidaio_debug(request
);
933 debugs(43, 5, "DONE: " << request
->ret
<< " -> " << request
->err
);
935 squidaio_cleanup_request(request
);
941 } /* squidaio_poll_done */
944 squidaio_operations_pending(void)
946 return request_queue_len
+ (done_requests
.head
? 1 : 0);
952 /* XXX This might take a while if the queue is large.. */
955 squidaio_poll_queues();
956 } while (request_queue_len
> 0);
958 return squidaio_operations_pending();
962 squidaio_get_queue_len(void)
964 return request_queue_len
;
968 squidaio_debug(squidaio_request_t
* request
)
970 switch (request
->request_type
) {
973 debugs(43, 5, "OPEN of " << request
->path
<< " to FD " << request
->ret
);
977 debugs(43, 5, "READ on fd: " << request
->fd
);
981 debugs(43, 5, "WRITE on fd: " << request
->fd
);
985 debugs(43, 5, "CLOSE of fd: " << request
->fd
);
989 debugs(43, 5, "UNLINK of " << request
->path
);
998 squidaio_stats(StoreEntry
* sentry
)
1000 squidaio_thread_t
*threadp
;
1003 if (!squidaio_initialised
)
1006 storeAppendPrintf(sentry
, "\n\nThreads Status:\n");
1008 storeAppendPrintf(sentry
, "#\tID\t# Requests\n");
1012 for (i
= 0; i
< NUMTHREADS
; ++i
) {
1013 storeAppendPrintf(sentry
, "%i\t0x%lx\t%ld\n", i
+ 1, (unsigned long)threadp
->thread
, threadp
->requests
);
1014 threadp
= threadp
->next
;