src/fs/aufs/aiops.cc
cd748f27 1/*
99cce4cb 2 * $Id: aiops.cc,v 1.13 2002/07/21 00:25:45 hno Exp $
cd748f27 3 *
4 * DEBUG: section 43 AIOPS
5 * AUTHOR: Stewart Forster <slf@connect.com.au>
6 *
2b6662ba 7 * SQUID Web Proxy Cache http://www.squid-cache.org/
cd748f27 8 * ----------------------------------------------------------
9 *
2b6662ba 10 * Squid is the result of efforts by numerous individuals from
11 * the Internet community; see the CONTRIBUTORS file for full
12 * details. Many organizations have provided support for Squid's
13 * development; see the SPONSORS file for full details. Squid is
14 * Copyrighted (C) 2001 by the Regents of the University of
15 * California; see the COPYRIGHT file for full details. Squid
16 * incorporates software developed and/or copyrighted by other
17 * sources; see the CREDITS file for full details.
cd748f27 18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
32 *
33 */
34
35#include "squid.h"
36#include "store_asyncufs.h"
37
38#include <stdio.h>
39#include <sys/types.h>
40#include <sys/stat.h>
41#include <fcntl.h>
42#include <pthread.h>
43#include <errno.h>
44#include <dirent.h>
45#include <signal.h>
46#if HAVE_SCHED_H
47#include <sched.h>
48#endif
49
50#define RIDICULOUS_LENGTH 4096
51
c04d4f40 52enum _squidaio_thread_status {
cd748f27 53 _THREAD_STARTING = 0,
54 _THREAD_WAITING,
55 _THREAD_BUSY,
56 _THREAD_FAILED,
57 _THREAD_DONE
58};
c04d4f40 59typedef enum _squidaio_thread_status squidaio_thread_status;
cd748f27 60
c04d4f40 61enum _squidaio_request_type {
cd748f27 62 _AIO_OP_NONE = 0,
63 _AIO_OP_OPEN,
64 _AIO_OP_READ,
65 _AIO_OP_WRITE,
66 _AIO_OP_CLOSE,
67 _AIO_OP_UNLINK,
15a47d1d 68 _AIO_OP_TRUNCATE,
cd748f27 69 _AIO_OP_OPENDIR,
70 _AIO_OP_STAT
71};
c04d4f40 72typedef enum _squidaio_request_type squidaio_request_type;
cd748f27 73
c04d4f40 74typedef struct squidaio_request_t {
75 struct squidaio_request_t *next;
76 squidaio_request_type request_type;
cd748f27 77 int cancelled;
78 char *path;
79 int oflag;
80 mode_t mode;
81 int fd;
82 char *bufferp;
83 char *tmpbufp;
84 int buflen;
85 off_t offset;
86 int whence;
87 int ret;
88 int err;
89 struct stat *tmpstatp;
90 struct stat *statp;
c04d4f40 91 squidaio_result_t *resultp;
92} squidaio_request_t;
cd748f27 93
c04d4f40 94typedef struct squidaio_request_queue_t {
55f0e6f7 95 pthread_mutex_t mutex;
96 pthread_cond_t cond;
c04d4f40 97 squidaio_request_t *volatile head;
98 squidaio_request_t *volatile *volatile tailp;
55f0e6f7 99 unsigned long requests;
f0debecb 100 unsigned long blocked; /* main failed to lock the queue */
c04d4f40 101} squidaio_request_queue_t;
55f0e6f7 102
c04d4f40 103typedef struct squidaio_thread_t squidaio_thread_t;
104struct squidaio_thread_t {
105 squidaio_thread_t *next;
cd748f27 106 pthread_t thread;
c04d4f40 107 squidaio_thread_status status;
108 struct squidaio_request_t *current_req;
55f0e6f7 109 unsigned long requests;
110};
cd748f27 111
c04d4f40 112static void squidaio_init(void);
113static void squidaio_queue_request(squidaio_request_t *);
114static void squidaio_cleanup_request(squidaio_request_t *);
115static void *squidaio_thread_loop(void *);
116static void squidaio_do_open(squidaio_request_t *);
117static void squidaio_do_read(squidaio_request_t *);
118static void squidaio_do_write(squidaio_request_t *);
119static void squidaio_do_close(squidaio_request_t *);
120static void squidaio_do_stat(squidaio_request_t *);
121static void squidaio_do_unlink(squidaio_request_t *);
122static void squidaio_do_truncate(squidaio_request_t *);
cd748f27 123#if AIO_OPENDIR
c04d4f40 124static void *squidaio_do_opendir(squidaio_request_t *);
cd748f27 125#endif
c04d4f40 126static void squidaio_debug(squidaio_request_t *);
127static void squidaio_poll_queues(void);
cd748f27 128
c04d4f40 129static squidaio_thread_t *threads = NULL;
130static int squidaio_initialised = 0;
cd748f27 131
55f0e6f7 132
58cd5bbd 133#define AIO_LARGE_BUFS 16384
 134#define AIO_MEDIUM_BUFS (AIO_LARGE_BUFS >> 1)
 135#define AIO_SMALL_BUFS (AIO_LARGE_BUFS >> 2)
 136#define AIO_TINY_BUFS (AIO_LARGE_BUFS >> 3)
55f0e6f7 137#define AIO_MICRO_BUFS 128
58cd5bbd 138
c04d4f40 139static MemPool *squidaio_large_bufs = NULL; /* 16K */
9bea1d5b 140static MemPool *squidaio_medium_bufs = NULL; /* 8K */
c04d4f40 141static MemPool *squidaio_small_bufs = NULL; /* 4K */
142static MemPool *squidaio_tiny_bufs = NULL; /* 2K */
 143static MemPool *squidaio_micro_bufs = NULL; /* 128 bytes */
58cd5bbd 144
cd748f27 145static int request_queue_len = 0;
c04d4f40 146static MemPool *squidaio_request_pool = NULL;
147static MemPool *squidaio_thread_pool = NULL;
148static squidaio_request_queue_t request_queue;
f0debecb 149static struct {
c04d4f40 150 squidaio_request_t *head, **tailp;
f0debecb 151} request_queue2 = {
152
153 NULL, &request_queue2.head
154};
c04d4f40 155static squidaio_request_queue_t done_queue;
f0debecb 156static struct {
c04d4f40 157 squidaio_request_t *head, **tailp;
f0debecb 158} done_requests = {
159
160 NULL, &done_requests.head
161};
cd748f27 162static pthread_attr_t globattr;
5e5c622b 163#if HAVE_SCHED_H
cd748f27 164static struct sched_param globsched;
5e5c622b 165#endif
cd748f27 166static pthread_t main_thread;
167
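/*
 * Map an allocation size to the smallest buffer pool that can hold it.
 * Sizes above AIO_LARGE_BUFS are not pooled and fall back to plain
 * xmalloc()/xfree() in squidaio_xmalloc()/squidaio_xfree().
 */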
58cd5bbd 168static MemPool *
c04d4f40 169squidaio_get_pool(int size)
58cd5bbd 170{
171 MemPool *p;
172 if (size <= AIO_LARGE_BUFS) {
f0debecb 173 if (size <= AIO_MICRO_BUFS)
c04d4f40 174 p = squidaio_micro_bufs;
55f0e6f7 175 else if (size <= AIO_TINY_BUFS)
c04d4f40 176 p = squidaio_tiny_bufs;
f0debecb 177 else if (size <= AIO_SMALL_BUFS)
c04d4f40 178 p = squidaio_small_bufs;
f0debecb 179 else if (size <= AIO_MEDIUM_BUFS)
c04d4f40 180 p = squidaio_medium_bufs;
f0debecb 181 else
c04d4f40 182 p = squidaio_large_bufs;
58cd5bbd 183 } else
184 p = NULL;
185 return p;
186}
187
188static void *
c04d4f40 189squidaio_xmalloc(int size)
58cd5bbd 190{
191 void *p;
192 MemPool *pool;
193
c04d4f40 194 if ((pool = squidaio_get_pool(size)) != NULL) {
58cd5bbd 195 p = memPoolAlloc(pool);
196 } else
197 p = xmalloc(size);
198
199 return p;
200}
201
55f0e6f7 202static char *
c04d4f40 203squidaio_xstrdup(const char *str)
55f0e6f7 204{
205 char *p;
f0debecb 206 int len = strlen(str) + 1;
55f0e6f7 207
c04d4f40 208 p = squidaio_xmalloc(len);
55f0e6f7 209 strncpy(p, str, len);
210
211 return p;
212}
213
58cd5bbd 214static void
c04d4f40 215squidaio_xfree(void *p, int size)
58cd5bbd 216{
217 MemPool *pool;
218
c04d4f40 219 if ((pool = squidaio_get_pool(size)) != NULL) {
f0debecb 220 memPoolFree(pool, p);
58cd5bbd 221 } else
f0debecb 222 xfree(p);
58cd5bbd 223}
224
55f0e6f7 225static void
c04d4f40 226squidaio_xstrfree(char *str)
55f0e6f7 227{
228 MemPool *pool;
f0debecb 229 int len = strlen(str) + 1;
55f0e6f7 230
c04d4f40 231 if ((pool = squidaio_get_pool(len)) != NULL) {
f0debecb 232 memPoolFree(pool, str);
55f0e6f7 233 } else
f0debecb 234 xfree(str);
55f0e6f7 235}
236
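/*
 * One-time initialisation: set up the request and done queues, spawn
 * NUMTHREADS worker threads running squidaio_thread_loop(), and create
 * the memory pools used for requests and I/O buffers.
 */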
cd748f27 237static void
c04d4f40 238squidaio_init(void)
cd748f27 239{
240 int i;
c04d4f40 241 squidaio_thread_t *threadp;
cd748f27 242
c04d4f40 243 if (squidaio_initialised)
cd748f27 244 return;
245
246 pthread_attr_init(&globattr);
247#if HAVE_PTHREAD_ATTR_SETSCOPE
248 pthread_attr_setscope(&globattr, PTHREAD_SCOPE_SYSTEM);
249#endif
5e5c622b 250#if HAVE_SCHED_H
cd748f27 251 globsched.sched_priority = 1;
5e5c622b 252#endif
cd748f27 253 main_thread = pthread_self();
5e5c622b 254#if HAVE_SCHED_H && HAVE_PTHREAD_SETSCHEDPARAM
cd748f27 255 pthread_setschedparam(main_thread, SCHED_OTHER, &globsched);
256#endif
5e5c622b 257#if HAVE_SCHED_H
cd748f27 258 globsched.sched_priority = 2;
5e5c622b 259#endif
260#if HAVE_SCHED_H && HAVE_PTHREAD_ATTR_SETSCHEDPARAM
cd748f27 261 pthread_attr_setschedparam(&globattr, &globsched);
262#endif
263
55f0e6f7 264 /* Initialize request queue */
265 if (pthread_mutex_init(&(request_queue.mutex), NULL))
266 fatal("Failed to create mutex");
267 if (pthread_cond_init(&(request_queue.cond), NULL))
268 fatal("Failed to create condition variable");
269 request_queue.head = NULL;
270 request_queue.tailp = &request_queue.head;
271 request_queue.requests = 0;
272 request_queue.blocked = 0;
273
274 /* Initialize done queue */
275 if (pthread_mutex_init(&(done_queue.mutex), NULL))
276 fatal("Failed to create mutex");
277 if (pthread_cond_init(&(done_queue.cond), NULL))
278 fatal("Failed to create condition variable");
279 done_queue.head = NULL;
280 done_queue.tailp = &done_queue.head;
281 done_queue.requests = 0;
282 done_queue.blocked = 0;
cd748f27 283
55f0e6f7 284 /* Create threads and get them to sit in their wait loop */
c04d4f40 285 squidaio_thread_pool = memPoolCreate("aio_thread", sizeof(squidaio_thread_t));
cd748f27 286 for (i = 0; i < NUMTHREADS; i++) {
c04d4f40 287 threadp = memPoolAlloc(squidaio_thread_pool);
cd748f27 288 threadp->status = _THREAD_STARTING;
55f0e6f7 289 threadp->current_req = NULL;
290 threadp->requests = 0;
291 threadp->next = threads;
292 threads = threadp;
c04d4f40 293 if (pthread_create(&threadp->thread, &globattr, squidaio_thread_loop, threadp)) {
cd748f27 294 fprintf(stderr, "Thread creation failed\n");
295 threadp->status = _THREAD_FAILED;
296 continue;
297 }
cd748f27 298 }
299
300 /* Create request pool */
c04d4f40 301 squidaio_request_pool = memPoolCreate("aio_request", sizeof(squidaio_request_t));
302 squidaio_large_bufs = memPoolCreate("squidaio_large_bufs", AIO_LARGE_BUFS);
303 squidaio_medium_bufs = memPoolCreate("squidaio_medium_bufs", AIO_MEDIUM_BUFS);
304 squidaio_small_bufs = memPoolCreate("squidaio_small_bufs", AIO_SMALL_BUFS);
305 squidaio_tiny_bufs = memPoolCreate("squidaio_tiny_bufs", AIO_TINY_BUFS);
306 squidaio_micro_bufs = memPoolCreate("squidaio_micro_bufs", AIO_MICRO_BUFS);
307
308 squidaio_initialised = 1;
cd748f27 309}
310
311
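/*
 * Worker thread body.  Each thread blocks the signals handled by the main
 * squid process, then loops forever: wait on request_queue.cond for work,
 * pop one request, execute it (unless it was cancelled), and append the
 * finished request to done_queue for the main thread to collect.
 */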
312static void *
c04d4f40 313squidaio_thread_loop(void *ptr)
cd748f27 314{
c04d4f40 315 squidaio_thread_t *threadp = ptr;
316 squidaio_request_t *request;
cd748f27 317 sigset_t new;
cd748f27 318
319 /*
 320 * Block signals which may get sent to the parent squid
 321 * process; they cause havoc with mutexes and condition
 322 * waits otherwise.
323 */
324
325 sigemptyset(&new);
326 sigaddset(&new, SIGPIPE);
327 sigaddset(&new, SIGCHLD);
328#ifdef _SQUID_LINUX_THREADS_
329 sigaddset(&new, SIGQUIT);
330 sigaddset(&new, SIGTRAP);
331#else
332 sigaddset(&new, SIGUSR1);
333 sigaddset(&new, SIGUSR2);
334#endif
335 sigaddset(&new, SIGHUP);
336 sigaddset(&new, SIGTERM);
337 sigaddset(&new, SIGINT);
338 sigaddset(&new, SIGALRM);
339 pthread_sigmask(SIG_BLOCK, &new, NULL);
340
cd748f27 341 while (1) {
55f0e6f7 342 threadp->current_req = request = NULL;
344 /* Get a request to process */
f0debecb 345 threadp->status = _THREAD_WAITING;
55f0e6f7 346 pthread_mutex_lock(&request_queue.mutex);
f0debecb 347 while (!request_queue.head) {
55f0e6f7 348 pthread_cond_wait(&request_queue.cond, &request_queue.mutex);
cd748f27 349 }
55f0e6f7 350 request = request_queue.head;
351 if (request)
352 request_queue.head = request->next;
f0debecb 353 if (!request_queue.head)
354 request_queue.tailp = &request_queue.head;
55f0e6f7 355 pthread_mutex_unlock(&request_queue.mutex);
356 /* process the request */
357 threadp->status = _THREAD_BUSY;
358 request->next = NULL;
359 threadp->current_req = request;
cd748f27 360 errno = 0;
361 if (!request->cancelled) {
362 switch (request->request_type) {
363 case _AIO_OP_OPEN:
c04d4f40 364 squidaio_do_open(request);
cd748f27 365 break;
366 case _AIO_OP_READ:
c04d4f40 367 squidaio_do_read(request);
cd748f27 368 break;
369 case _AIO_OP_WRITE:
c04d4f40 370 squidaio_do_write(request);
cd748f27 371 break;
372 case _AIO_OP_CLOSE:
c04d4f40 373 squidaio_do_close(request);
cd748f27 374 break;
375 case _AIO_OP_UNLINK:
c04d4f40 376 squidaio_do_unlink(request);
cd748f27 377 break;
15a47d1d 378 case _AIO_OP_TRUNCATE:
c04d4f40 379 squidaio_do_truncate(request);
15a47d1d 380 break;
cd748f27 381#if AIO_OPENDIR /* Opendir not implemented yet */
382 case _AIO_OP_OPENDIR:
c04d4f40 383 squidaio_do_opendir(request);
cd748f27 384 break;
385#endif
386 case _AIO_OP_STAT:
c04d4f40 387 squidaio_do_stat(request);
cd748f27 388 break;
389 default:
390 request->ret = -1;
391 request->err = EINVAL;
392 break;
393 }
394 } else { /* cancelled */
395 request->ret = -1;
396 request->err = EINTR;
397 }
55f0e6f7 398 threadp->status = _THREAD_DONE;
399 /* put the request in the done queue */
400 pthread_mutex_lock(&done_queue.mutex);
401 *done_queue.tailp = request;
402 done_queue.tailp = &request->next;
403 pthread_mutex_unlock(&done_queue.mutex);
404 threadp->requests++;
405 } /* while forever */
cd748f27 406 return NULL;
c04d4f40 407} /* squidaio_thread_loop */
cd748f27 408
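/*
 * Called from the main thread to hand a request to the worker threads.
 * The queue mutex is only ever tried (pthread_mutex_trylock), never waited
 * on, so the main thread cannot block behind a busy worker; if the mutex
 * is held, the request is parked on the request_queue2 overflow chain and
 * merged into request_queue on a later attempt.  Also emits the queue
 * congestion / disk overload warnings and the RIDICULOUS_LENGTH safety
 * valve that forces a blocking squidaio_sync().
 */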
409static void
c04d4f40 410squidaio_queue_request(squidaio_request_t * request)
cd748f27 411{
cd748f27 412 static int high_start = 0;
99cce4cb 413 debug(43, 9) ("squidaio_queue_request: %p type=%d result=%p\n",
f0debecb 414 request, request->request_type, request->resultp);
cd748f27 415 /* Mark it as not executed (failing result, no error) */
55f0e6f7 416 request->ret = -1;
417 request->err = 0;
418 /* Internal housekeeping */
419 request_queue_len += 1;
420 request->resultp->_data = request;
421 /* Play some tricks with the request_queue2 queue */
422 request->next = NULL;
423 if (!request_queue2.head) {
424 if (pthread_mutex_trylock(&request_queue.mutex) == 0) {
425 /* Normal path */
426 *request_queue.tailp = request;
427 request_queue.tailp = &request->next;
428 pthread_cond_signal(&request_queue.cond);
429 pthread_mutex_unlock(&request_queue.mutex);
f0debecb 430 } else {
55f0e6f7 431 /* Oops, the request queue is blocked, use request_queue2 */
432 *request_queue2.tailp = request;
433 request_queue2.tailp = &request->next;
f0debecb 434 }
55f0e6f7 435 } else {
436 /* Secondary path. We have blocked requests to deal with */
437 /* add the request to the chain */
438 *request_queue2.tailp = request;
439 if (pthread_mutex_trylock(&request_queue.mutex) == 0) {
440 /* Ok, the queue is no longer blocked */
441 *request_queue.tailp = request_queue2.head;
442 request_queue.tailp = &request->next;
443 pthread_cond_signal(&request_queue.cond);
444 pthread_mutex_unlock(&request_queue.mutex);
445 request_queue2.head = NULL;
446 request_queue2.tailp = &request_queue2.head;
447 } else {
448 /* still blocked, bump the blocked request chain */
449 request_queue2.tailp = &request->next;
450 }
451 }
452 if (request_queue2.head) {
453 static int filter = 0;
454 static int filter_limit = 8;
455 if (++filter >= filter_limit) {
456 filter_limit += filter;
457 filter = 0;
c04d4f40 458 debug(43, 1) ("squidaio_queue_request: WARNING - Queue congestion\n");
55f0e6f7 459 }
cd748f27 460 }
cd748f27 461 /* Warn if out of threads */
55f0e6f7 462 if (request_queue_len > MAGIC1) {
463 static int last_warn = 0;
464 static int queue_high, queue_low;
cd748f27 465 if (high_start == 0) {
466 high_start = squid_curtime;
467 queue_high = request_queue_len;
468 queue_low = request_queue_len;
469 }
470 if (request_queue_len > queue_high)
471 queue_high = request_queue_len;
472 if (request_queue_len < queue_low)
473 queue_low = request_queue_len;
474 if (squid_curtime >= (last_warn + 15) &&
55f0e6f7 475 squid_curtime >= (high_start + 5)) {
c04d4f40 476 debug(43, 1) ("squidaio_queue_request: WARNING - Disk I/O overloading\n");
55f0e6f7 477 if (squid_curtime >= (high_start + 15))
33272404 478 debug(43, 1) ("squidaio_queue_request: Queue Length: current=%d, high=%d, low=%d, duration=%ld\n",
479 request_queue_len, queue_high, queue_low, (long int) (squid_curtime - high_start));
cd748f27 480 last_warn = squid_curtime;
481 }
482 } else {
483 high_start = 0;
484 }
55f0e6f7 485 /* Warn if seriously overloaded */
cd748f27 486 if (request_queue_len > RIDICULOUS_LENGTH) {
c04d4f40 487 debug(43, 0) ("squidaio_queue_request: Async request queue growing uncontrollably!\n");
488 debug(43, 0) ("squidaio_queue_request: Syncing pending I/O operations.. (blocking)\n");
489 squidaio_sync();
490 debug(43, 0) ("squidaio_queue_request: Synced\n");
cd748f27 491 }
c04d4f40 492} /* squidaio_queue_request */
cd748f27 493
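/*
 * Runs in the main thread once a request has been collected from the done
 * queue: copy temporary read/stat data back to the caller (unless the
 * request was cancelled), release the temporaries, fill in the caller's
 * squidaio_result_t, and return the request to its pool.
 */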
cd748f27 494static void
c04d4f40 495squidaio_cleanup_request(squidaio_request_t * requestp)
cd748f27 496{
c04d4f40 497 squidaio_result_t *resultp = requestp->resultp;
cd748f27 498 int cancelled = requestp->cancelled;
499
500 /* Free allocated structures and copy data back to user space if the */
501 /* request hasn't been cancelled */
502 switch (requestp->request_type) {
503 case _AIO_OP_STAT:
504 if (!cancelled && requestp->ret == 0)
505 xmemcpy(requestp->statp, requestp->tmpstatp, sizeof(struct stat));
c04d4f40 506 squidaio_xfree(requestp->tmpstatp, sizeof(struct stat));
507 squidaio_xstrfree(requestp->path);
55f0e6f7 508 break;
cd748f27 509 case _AIO_OP_OPEN:
510 if (cancelled && requestp->ret >= 0)
511 /* The open() was cancelled but completed */
512 close(requestp->ret);
c04d4f40 513 squidaio_xstrfree(requestp->path);
cd748f27 514 break;
515 case _AIO_OP_CLOSE:
516 if (cancelled && requestp->ret < 0)
517 /* The close() was cancelled and never got executed */
518 close(requestp->fd);
519 break;
520 case _AIO_OP_UNLINK:
15a47d1d 521 case _AIO_OP_TRUNCATE:
cd748f27 522 case _AIO_OP_OPENDIR:
c04d4f40 523 squidaio_xstrfree(requestp->path);
cd748f27 524 break;
525 case _AIO_OP_READ:
526 if (!cancelled && requestp->ret > 0)
527 xmemcpy(requestp->bufferp, requestp->tmpbufp, requestp->ret);
c04d4f40 528 squidaio_xfree(requestp->tmpbufp, requestp->buflen);
55f0e6f7 529 break;
cd748f27 530 case _AIO_OP_WRITE:
c04d4f40 531 squidaio_xfree(requestp->tmpbufp, requestp->buflen);
cd748f27 532 break;
533 default:
534 break;
535 }
536 if (resultp != NULL && !cancelled) {
537 resultp->aio_return = requestp->ret;
538 resultp->aio_errno = requestp->err;
539 }
c04d4f40 540 memPoolFree(squidaio_request_pool, requestp);
541} /* squidaio_cleanup_request */
cd748f27 542
543
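/*
 * Best-effort cancellation.  The request is only flagged as cancelled and
 * detached from its squidaio_result_t; a worker thread that has already
 * picked it up will still run the operation, but the result is discarded
 * and cleanup later closes any file descriptor left dangling by a
 * cancelled open() or close().  Returns 0 if the request was found,
 * 1 otherwise.
 */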
544int
c04d4f40 545squidaio_cancel(squidaio_result_t * resultp)
cd748f27 546{
c04d4f40 547 squidaio_request_t *request = resultp->_data;
cd748f27 548
55f0e6f7 549 if (request && request->resultp == resultp) {
99cce4cb 550 debug(43, 9) ("squidaio_cancel: %p type=%d result=%p\n",
f0debecb 551 request, request->request_type, request->resultp);
55f0e6f7 552 request->cancelled = 1;
553 request->resultp = NULL;
554 resultp->_data = NULL;
555 return 0;
556 }
cd748f27 557 return 1;
c04d4f40 558} /* squidaio_cancel */
cd748f27 559
560
561int
c04d4f40 562squidaio_open(const char *path, int oflag, mode_t mode, squidaio_result_t * resultp)
cd748f27 563{
c04d4f40 564 squidaio_request_t *requestp;
cd748f27 565
c04d4f40 566 if (!squidaio_initialised)
567 squidaio_init();
568 requestp = memPoolAlloc(squidaio_request_pool);
569 requestp->path = (char *) squidaio_xstrdup(path);
cd748f27 570 requestp->oflag = oflag;
571 requestp->mode = mode;
572 requestp->resultp = resultp;
573 requestp->request_type = _AIO_OP_OPEN;
574 requestp->cancelled = 0;
575
c04d4f40 576 squidaio_queue_request(requestp);
cd748f27 577 return 0;
578}
579
580
581static void
c04d4f40 582squidaio_do_open(squidaio_request_t * requestp)
cd748f27 583{
584 requestp->ret = open(requestp->path, requestp->oflag, requestp->mode);
585 requestp->err = errno;
586}
587
588
589int
c04d4f40 590squidaio_read(int fd, char *bufp, int bufs, off_t offset, int whence, squidaio_result_t * resultp)
cd748f27 591{
c04d4f40 592 squidaio_request_t *requestp;
cd748f27 593
c04d4f40 594 if (!squidaio_initialised)
595 squidaio_init();
596 requestp = memPoolAlloc(squidaio_request_pool);
cd748f27 597 requestp->fd = fd;
598 requestp->bufferp = bufp;
c04d4f40 599 requestp->tmpbufp = (char *) squidaio_xmalloc(bufs);
cd748f27 600 requestp->buflen = bufs;
601 requestp->offset = offset;
602 requestp->whence = whence;
603 requestp->resultp = resultp;
604 requestp->request_type = _AIO_OP_READ;
605 requestp->cancelled = 0;
606
c04d4f40 607 squidaio_queue_request(requestp);
cd748f27 608 return 0;
609}
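/*
 * Illustrative only (not part of the original source): a minimal sketch of
 * how a caller might drive the read API, assuming the squidaio_result_t
 * fields used elsewhere in this file (aio_return, aio_errno) and a
 * hypothetical caller-owned buffer and result object.
 *
 *     static char example_buf[AIO_SMALL_BUFS];
 *     static squidaio_result_t example_result;
 *
 *     squidaio_read(fd, example_buf, sizeof(example_buf), 0, SEEK_SET,
 *         &example_result);
 *     ...
 *     // later, from the main event loop:
 *     squidaio_result_t *done = squidaio_poll_done();
 *     if (done == &example_result && done->aio_return > 0)
 *         // example_buf now holds done->aio_return bytes
 *         ...;
 */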
610
611
612static void
c04d4f40 613squidaio_do_read(squidaio_request_t * requestp)
cd748f27 614{
615 lseek(requestp->fd, requestp->offset, requestp->whence);
616 requestp->ret = read(requestp->fd, requestp->tmpbufp, requestp->buflen);
617 requestp->err = errno;
618}
619
620
621int
c04d4f40 622squidaio_write(int fd, char *bufp, int bufs, off_t offset, int whence, squidaio_result_t * resultp)
cd748f27 623{
c04d4f40 624 squidaio_request_t *requestp;
cd748f27 625
c04d4f40 626 if (!squidaio_initialised)
627 squidaio_init();
628 requestp = memPoolAlloc(squidaio_request_pool);
cd748f27 629 requestp->fd = fd;
c04d4f40 630 requestp->tmpbufp = (char *) squidaio_xmalloc(bufs);
cd748f27 631 xmemcpy(requestp->tmpbufp, bufp, bufs);
632 requestp->buflen = bufs;
633 requestp->offset = offset;
634 requestp->whence = whence;
635 requestp->resultp = resultp;
636 requestp->request_type = _AIO_OP_WRITE;
637 requestp->cancelled = 0;
638
c04d4f40 639 squidaio_queue_request(requestp);
cd748f27 640 return 0;
641}
642
643
644static void
c04d4f40 645squidaio_do_write(squidaio_request_t * requestp)
cd748f27 646{
647 requestp->ret = write(requestp->fd, requestp->tmpbufp, requestp->buflen);
648 requestp->err = errno;
649}
650
651
652int
c04d4f40 653squidaio_close(int fd, squidaio_result_t * resultp)
cd748f27 654{
c04d4f40 655 squidaio_request_t *requestp;
cd748f27 656
c04d4f40 657 if (!squidaio_initialised)
658 squidaio_init();
659 requestp = memPoolAlloc(squidaio_request_pool);
cd748f27 660 requestp->fd = fd;
661 requestp->resultp = resultp;
662 requestp->request_type = _AIO_OP_CLOSE;
663 requestp->cancelled = 0;
664
c04d4f40 665 squidaio_queue_request(requestp);
cd748f27 666 return 0;
667}
668
669
670static void
c04d4f40 671squidaio_do_close(squidaio_request_t * requestp)
cd748f27 672{
673 requestp->ret = close(requestp->fd);
674 requestp->err = errno;
675}
676
677
678int
c04d4f40 679squidaio_stat(const char *path, struct stat *sb, squidaio_result_t * resultp)
cd748f27 680{
c04d4f40 681 squidaio_request_t *requestp;
cd748f27 682
c04d4f40 683 if (!squidaio_initialised)
684 squidaio_init();
685 requestp = memPoolAlloc(squidaio_request_pool);
686 requestp->path = (char *) squidaio_xstrdup(path);
cd748f27 687 requestp->statp = sb;
c04d4f40 688 requestp->tmpstatp = (struct stat *) squidaio_xmalloc(sizeof(struct stat));
cd748f27 689 requestp->resultp = resultp;
690 requestp->request_type = _AIO_OP_STAT;
691 requestp->cancelled = 0;
692
c04d4f40 693 squidaio_queue_request(requestp);
cd748f27 694 return 0;
695}
696
697
698static void
c04d4f40 699squidaio_do_stat(squidaio_request_t * requestp)
cd748f27 700{
701 requestp->ret = stat(requestp->path, requestp->tmpstatp);
702 requestp->err = errno;
703}
704
705
706int
c04d4f40 707squidaio_unlink(const char *path, squidaio_result_t * resultp)
cd748f27 708{
c04d4f40 709 squidaio_request_t *requestp;
cd748f27 710
c04d4f40 711 if (!squidaio_initialised)
712 squidaio_init();
713 requestp = memPoolAlloc(squidaio_request_pool);
714 requestp->path = squidaio_xstrdup(path);
cd748f27 715 requestp->resultp = resultp;
716 requestp->request_type = _AIO_OP_UNLINK;
717 requestp->cancelled = 0;
718
c04d4f40 719 squidaio_queue_request(requestp);
cd748f27 720 return 0;
721}
722
723
724static void
c04d4f40 725squidaio_do_unlink(squidaio_request_t * requestp)
cd748f27 726{
727 requestp->ret = unlink(requestp->path);
728 requestp->err = errno;
729}
730
15a47d1d 731int
c04d4f40 732squidaio_truncate(const char *path, off_t length, squidaio_result_t * resultp)
15a47d1d 733{
c04d4f40 734 squidaio_request_t *requestp;
15a47d1d 735
c04d4f40 736 if (!squidaio_initialised)
737 squidaio_init();
738 requestp = memPoolAlloc(squidaio_request_pool);
739 requestp->path = (char *) squidaio_xstrdup(path);
15a47d1d 740 requestp->offset = length;
15a47d1d 741 requestp->resultp = resultp;
742 requestp->request_type = _AIO_OP_TRUNCATE;
743 requestp->cancelled = 0;
744
c04d4f40 745 squidaio_queue_request(requestp);
15a47d1d 746 return 0;
747}
748
749
750static void
c04d4f40 751squidaio_do_truncate(squidaio_request_t * requestp)
15a47d1d 752{
753 requestp->ret = truncate(requestp->path, requestp->offset);
754 requestp->err = errno;
755}
756
cd748f27 757
758#if AIO_OPENDIR
c04d4f40 759/* XXX squidaio_opendir NOT implemented yet.. */
cd748f27 760
761int
c04d4f40 762squidaio_opendir(const char *path, squidaio_result_t * resultp)
cd748f27 763{
c04d4f40 764 squidaio_request_t *requestp;
cd748f27 765 int len;
766
c04d4f40 767 if (!squidaio_initialised)
768 squidaio_init();
769 requestp = memPoolAlloc(squidaio_request_pool);
cd748f27 770 return -1;
771}
772
773static void
c04d4f40 774squidaio_do_opendir(squidaio_request_t * requestp)
cd748f27 775{
776 /* NOT IMPLEMENTED */
777}
778
779#endif
780
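/*
 * Called from the main thread.  Moves any requests parked on the
 * request_queue2 overflow chain onto request_queue (if the mutex can be
 * acquired without blocking), moves everything on done_queue onto the
 * private done_requests list for squidaio_poll_done() to consume, and,
 * where sched_yield() is available, yields the CPU so the worker threads
 * can run.
 */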
55f0e6f7 781static void
c04d4f40 782squidaio_poll_queues(void)
55f0e6f7 783{
784 /* kick "overflow" request queue */
785 if (request_queue2.head &&
4672d0cd 786 pthread_mutex_trylock(&request_queue.mutex) == 0) {
55f0e6f7 787 *request_queue.tailp = request_queue2.head;
788 request_queue.tailp = request_queue2.tailp;
789 pthread_cond_signal(&request_queue.cond);
790 pthread_mutex_unlock(&request_queue.mutex);
791 request_queue2.head = NULL;
792 request_queue2.tailp = &request_queue2.head;
793 }
794 /* poll done queue */
795 if (done_queue.head && pthread_mutex_trylock(&done_queue.mutex) == 0) {
c04d4f40 796 struct squidaio_request_t *requests = done_queue.head;
55f0e6f7 797 done_queue.head = NULL;
798 done_queue.tailp = &done_queue.head;
799 pthread_mutex_unlock(&done_queue.mutex);
800 *done_requests.tailp = requests;
801 request_queue_len -= 1;
4672d0cd 802 while (requests->next) {
55f0e6f7 803 requests = requests->next;
804 request_queue_len -= 1;
cd748f27 805 }
55f0e6f7 806 done_requests.tailp = &requests->next;
807 }
5e5c622b 808#if HAVE_SCHED_H
55f0e6f7 809 /* Give up the CPU to allow the threads to do their work */
4672d0cd 810 /*
 811 * For Andres' thoughts about yield(), see
812 * http://www.squid-cache.org/mail-archive/squid-dev/200012/0001.html
813 */
55f0e6f7 814 if (done_queue.head || request_queue.head)
4672d0cd 815#ifndef _SQUID_SOLARIS_
55f0e6f7 816 sched_yield();
4672d0cd 817#else
818 yield();
819#endif
5e5c622b 820#endif
55f0e6f7 821}
cd748f27 822
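/*
 * Returns the result of the next completed request, or NULL if none is
 * ready.  The queues are polled at most once per call; cancelled requests
 * are cleaned up and skipped rather than returned to the caller.
 */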
c04d4f40 823squidaio_result_t *
824squidaio_poll_done(void)
cd748f27 825{
c04d4f40 826 squidaio_request_t *request;
827 squidaio_result_t *resultp;
cd748f27 828 int cancelled;
55f0e6f7 829 int polled = 0;
cd748f27 830
831 AIO_REPOLL:
55f0e6f7 832 request = done_requests.head;
833 if (request == NULL && !polled) {
c04d4f40 834 squidaio_poll_queues();
55f0e6f7 835 polled = 1;
836 request = done_requests.head;
837 }
838 if (!request) {
cd748f27 839 return NULL;
840 }
99cce4cb 841 debug(43, 9) ("squidaio_poll_done: %p type=%d result=%p\n",
f0debecb 842 request, request->request_type, request->resultp);
55f0e6f7 843 done_requests.head = request->next;
844 if (!done_requests.head)
845 done_requests.tailp = &done_requests.head;
846 resultp = request->resultp;
847 cancelled = request->cancelled;
c04d4f40 848 squidaio_debug(request);
55f0e6f7 849 debug(43, 5) ("DONE: %d -> %d\n", request->ret, request->err);
c04d4f40 850 squidaio_cleanup_request(request);
cd748f27 851 if (cancelled)
852 goto AIO_REPOLL;
853 return resultp;
c04d4f40 854} /* squidaio_poll_done */
cd748f27 855
856int
c04d4f40 857squidaio_operations_pending(void)
cd748f27 858{
55f0e6f7 859 return request_queue_len + (done_requests.head ? 1 : 0);
cd748f27 860}
861
862int
c04d4f40 863squidaio_sync(void)
cd748f27 864{
55f0e6f7 865 /* XXX This might take a while if the queue is large.. */
cd748f27 866 do {
c04d4f40 867 squidaio_poll_queues();
cd748f27 868 } while (request_queue_len > 0);
c04d4f40 869 return squidaio_operations_pending();
cd748f27 870}
871
872int
c04d4f40 873squidaio_get_queue_len(void)
cd748f27 874{
875 return request_queue_len;
876}
877
878static void
c04d4f40 879squidaio_debug(squidaio_request_t * request)
cd748f27 880{
55f0e6f7 881 switch (request->request_type) {
cd748f27 882 case _AIO_OP_OPEN:
55f0e6f7 883 debug(43, 5) ("OPEN of %s to FD %d\n", request->path, request->ret);
cd748f27 884 break;
885 case _AIO_OP_READ:
55f0e6f7 886 debug(43, 5) ("READ on fd: %d\n", request->fd);
cd748f27 887 break;
888 case _AIO_OP_WRITE:
55f0e6f7 889 debug(43, 5) ("WRITE on fd: %d\n", request->fd);
cd748f27 890 break;
891 case _AIO_OP_CLOSE:
55f0e6f7 892 debug(43, 5) ("CLOSE of fd: %d\n", request->fd);
cd748f27 893 break;
894 case _AIO_OP_UNLINK:
55f0e6f7 895 debug(43, 5) ("UNLINK of %s\n", request->path);
cd748f27 896 break;
15a47d1d 897 case _AIO_OP_TRUNCATE:
55f0e6f7 898 debug(43, 5) ("TRUNCATE of %s\n", request->path);
15a47d1d 899 break;
cd748f27 900 default:
901 break;
902 }
903}