/* Handle general operations.
   Copyright (C) 1997-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <aio_misc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/time.h>
#ifndef aio_create_helper_thread
# define aio_create_helper_thread __aio_create_helper_thread

static inline int
__aio_create_helper_thread (pthread_t *threadp, void *(*tf) (void *),
			    void *arg)
{
  pthread_attr_t attr;

  /* Make sure the thread is created detached.  */
  pthread_attr_init (&attr);
  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);

  int ret = pthread_create (threadp, &attr, tf, arg);

  (void) pthread_attr_destroy (&attr);
  return ret;
}
#endif
static void add_request_to_runlist (struct requestlist *newrequest);
/* Pool of request list entries.  */
static struct requestlist **pool;

/* Number of total and allocated pool entries.  */
static size_t pool_max_size;
static size_t pool_size;
/* We implement a two dimensional array but allocate each row separately.
   The macro below determines how many entries should be used per row.
   It should preferably be a power of two.  */
#define ENTRIES_PER_ROW	32

/* How many rows we allocate at once.  */
#define ROWS_STEP	8
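
/* The first row is sized for optim.aio_num entries (see get_elem below);
   later rows hold ENTRIES_PER_ROW entries each.  Rows are only recorded
   in `pool' so that free_res can release them; the entries themselves
   circulate through `freelist' and the request lists.  */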
/* List of available entries.  */
static struct requestlist *freelist;

/* List of requests waiting to be processed.  */
static struct requestlist *runlist;
/* List of all currently processed requests.  */
static struct requestlist *requests;
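
/* `requests' is kept sorted by file descriptor: entries for distinct
   descriptors are chained through next_fd/last_fd, and all pending
   requests for one descriptor hang off its first entry through
   next_prio, ordered by decreasing __abs_prio.  */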
/* Number of threads currently running.  */
static int nthreads;

/* Number of threads waiting for work to arrive.  */
static int idle_thread_count;
/* These are the values used to optimize the use of AIO.  The user can
   overwrite them by using the `aio_init' function.  */
static struct aioinit optim =
{
  20,	/* int aio_threads;	Maximum number of threads.  */
  64,	/* int aio_num;		Number of expected simultaneous requests.  */
  0,
  0,
  0,
  0,
  1,	/* int aio_idle_time;	Seconds before an idle thread may exit.  */
  0
};
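
/* For illustration, a caller tunes these before the first request is
   submitted, e.g. (hypothetical values):

     struct aioinit init = { .aio_threads = 4, .aio_num = 64,
			     .aio_idle_time = 1 };
     aio_init (&init);  */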
/* Since the list is global we need a mutex protecting it.  */
pthread_mutex_t __aio_requests_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

/* When you add a request to the list and there are idle threads present,
   you signal this condition variable.  When a thread finishes work, it waits
   on this condition variable for a time before it actually exits.  */
pthread_cond_t __aio_new_request_notification = PTHREAD_COND_INITIALIZER;
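
/* The protocol: __aio_enqueue_request signals the condition variable
   (with the mutex held) whenever new work arrives while a thread is
   idle; handle_fildes_io does a timed wait on it and lets the thread
   exit if the wait times out with the run list still empty.  */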
/* Functions to handle request list pool.  */
static struct requestlist *
get_elem (void)
{
  struct requestlist *result;

  if (freelist == NULL)
    {
      struct requestlist *new_row;
      int cnt;

      assert (sizeof (struct aiocb) == sizeof (struct aiocb64));

      if (pool_size + 1 >= pool_max_size)
	{
	  size_t new_max_size = pool_max_size + ROWS_STEP;
	  struct requestlist **new_tab;

	  new_tab = (struct requestlist **)
	    realloc (pool, new_max_size * sizeof (struct requestlist *));

	  if (new_tab == NULL)
	    return NULL;

	  pool_max_size = new_max_size;
	  pool = new_tab;
	}

      /* Allocate the new row.  */
      cnt = pool_size == 0 ? optim.aio_num : ENTRIES_PER_ROW;
      new_row = (struct requestlist *) calloc (cnt,
					       sizeof (struct requestlist));
      if (new_row == NULL)
	return NULL;

      pool[pool_size++] = new_row;

      /* Put all the new entries in the freelist.  */
      do
	{
	  new_row->next_prio = freelist;
	  freelist = new_row++;
	}
      while (--cnt > 0);
    }

  result = freelist;
  freelist = freelist->next_prio;

  return result;
}
void
__aio_free_request (struct requestlist *elem)
{
  elem->running = no;
  elem->next_prio = freelist;
  freelist = elem;
}
struct requestlist *
__aio_find_req (aiocb_union *elem)
{
  struct requestlist *runp = requests;
  int fildes = elem->aiocb.aio_fildes;

  while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
    runp = runp->next_fd;

  if (runp != NULL)
    {
      if (runp->aiocbp->aiocb.aio_fildes != fildes)
	runp = NULL;
      else
	while (runp != NULL && runp->aiocbp != elem)
	  runp = runp->next_prio;
    }

  return runp;
}
struct requestlist *
__aio_find_req_fd (int fildes)
{
  struct requestlist *runp = requests;

  while (runp != NULL && runp->aiocbp->aiocb.aio_fildes < fildes)
    runp = runp->next_fd;

  return (runp != NULL && runp->aiocbp->aiocb.aio_fildes == fildes
	  ? runp : NULL);
}
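
/* Remove REQ from the lists.  LAST, if not NULL, is REQ's predecessor in
   the priority chain of its descriptor; a nonzero ALL means the whole
   chain starting at REQ is being detached at once.  */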
void
__aio_remove_request (struct requestlist *last, struct requestlist *req,
		      int all)
{
  assert (req->running == yes || req->running == queued
	  || req->running == done);

  if (last != NULL)
    last->next_prio = all ? NULL : req->next_prio;
  else
    {
      if (all || req->next_prio == NULL)
	{
	  if (req->last_fd != NULL)
	    req->last_fd->next_fd = req->next_fd;
	  else
	    requests = req->next_fd;
	  if (req->next_fd != NULL)
	    req->next_fd->last_fd = req->last_fd;
	}
      else
	{
	  if (req->last_fd != NULL)
	    req->last_fd->next_fd = req->next_prio;
	  else
	    requests = req->next_prio;

	  if (req->next_fd != NULL)
	    req->next_fd->last_fd = req->next_prio;

	  req->next_prio->last_fd = req->last_fd;
	  req->next_prio->next_fd = req->next_fd;

	  /* Mark this entry as runnable.  */
	  req->next_prio->running = yes;
	}

      if (req->running == yes)
	{
	  struct requestlist *runp = runlist;

	  last = NULL;
	  while (runp != NULL)
	    {
	      if (runp == req)
		{
		  if (last == NULL)
		    runlist = runp->next_run;
		  else
		    last->next_run = runp->next_run;
		  break;
		}
	      last = runp;
	      runp = runp->next_run;
	    }
	}
    }
}
/* The thread handler.  */
static void *handle_fildes_io (void *arg);
/* User optimization.  */
void
__aio_init (const struct aioinit *init)
{
  /* Get the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  /* Only allow writing new values if the table is not yet allocated.  */
  if (pool == NULL)
    {
      optim.aio_threads = init->aio_threads < 1 ? 1 : init->aio_threads;
      assert (powerof2 (ENTRIES_PER_ROW));
      optim.aio_num = (init->aio_num < ENTRIES_PER_ROW
		       ? ENTRIES_PER_ROW
		       : init->aio_num & ~(ENTRIES_PER_ROW - 1));
    }

  if (init->aio_idle_time != 0)
    optim.aio_idle_time = init->aio_idle_time;

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);
}
weak_alias (__aio_init, aio_init)
/* The main function of the async I/O handling.  It enqueues requests
   and if necessary starts and handles threads.  */
struct requestlist *
__aio_enqueue_request (aiocb_union *aiocbp, int operation)
{
  int result = 0;
  int policy, prio;
  struct sched_param param;
  struct requestlist *last, *runp, *newp;
  int running = no;

  if (operation == LIO_SYNC || operation == LIO_DSYNC)
    aiocbp->aiocb.aio_reqprio = 0;
  else if (aiocbp->aiocb.aio_reqprio < 0
#ifdef AIO_PRIO_DELTA_MAX
	   || aiocbp->aiocb.aio_reqprio > AIO_PRIO_DELTA_MAX
#endif
	   )
    {
      /* Invalid priority value.  */
      __set_errno (EINVAL);
      aiocbp->aiocb.__error_code = EINVAL;
      aiocbp->aiocb.__return_value = -1;
      return NULL;
    }

  /* Compute priority for this request.  */
  pthread_getschedparam (pthread_self (), &policy, &param);
  prio = param.sched_priority - aiocbp->aiocb.aio_reqprio;

  /* Get the mutex.  */
  pthread_mutex_lock (&__aio_requests_mutex);

  last = NULL;
  runp = requests;
  /* First look whether the current file descriptor is currently
     worked with.  */
  while (runp != NULL
	 && runp->aiocbp->aiocb.aio_fildes < aiocbp->aiocb.aio_fildes)
    {
      last = runp;
      runp = runp->next_fd;
    }

  /* Get a new element for the waiting list.  */
  newp = get_elem ();
  if (newp == NULL)
    {
      pthread_mutex_unlock (&__aio_requests_mutex);
      __set_errno (EAGAIN);
      return NULL;
    }
  newp->aiocbp = aiocbp;
  newp->waiting = NULL;

  aiocbp->aiocb.__abs_prio = prio;
  aiocbp->aiocb.__policy = policy;
  aiocbp->aiocb.aio_lio_opcode = operation;
  aiocbp->aiocb.__error_code = EINPROGRESS;
  aiocbp->aiocb.__return_value = 0;

  if (runp != NULL
      && runp->aiocbp->aiocb.aio_fildes == aiocbp->aiocb.aio_fildes)
    {
      /* The current file descriptor is worked on.  It makes no sense
	 to start another thread since this new thread would fight
	 with the running thread for the resources.  But we also cannot
	 say that the thread processing this descriptor shall immediately
	 after finishing the current job process this request if there
	 are other threads in the running queue which have a higher
	 priority.  */

      /* Simply enqueue it after the running one according to the
	 priority.  */
      last = NULL;
      while (runp->next_prio != NULL
	     && runp->next_prio->aiocbp->aiocb.__abs_prio >= prio)
	{
	  last = runp;
	  runp = runp->next_prio;
	}

      newp->next_prio = runp->next_prio;
      runp->next_prio = newp;

      running = queued;
    }
  else
    {
      running = yes;

      /* Enqueue this request for a new descriptor.  */
      if (last == NULL)
	{
	  newp->last_fd = NULL;
	  newp->next_fd = requests;
	  if (requests != NULL)
	    requests->last_fd = newp;
	  requests = newp;
	}
      else
	{
	  newp->next_fd = last->next_fd;
	  newp->last_fd = last;
	  last->next_fd = newp;
	  if (newp->next_fd != NULL)
	    newp->next_fd->last_fd = newp;
	}

      newp->next_prio = NULL;
      last = NULL;
    }

  if (running == yes)
    {
      /* We try to create a new thread for this file descriptor.  The
	 function which gets called will handle all available requests
	 for this descriptor and when all are processed it will
	 terminate.

	 If no new thread can be created or if the specified limit of
	 threads for AIO is reached we queue the request.  */

      /* See if we need to and are able to create a thread.  */
      if (nthreads < optim.aio_threads && idle_thread_count == 0)
	{
	  pthread_t thid;

	  running = newp->running = allocated;

	  /* Now try to start a thread.  */
	  result = aio_create_helper_thread (&thid, handle_fildes_io, newp);
	  if (result == 0)
	    /* We managed to enqueue the request.  All errors which can
	       happen now can be recognized by calls to `aio_return' and
	       `aio_error'.  */
	    ++nthreads;
	  else
	    {
	      /* Reset the running flag.  The new request is not running.  */
	      running = newp->running = yes;

	      if (nthreads == 0)
		{
		  /* We cannot create a thread at the moment and there is
		     also no other thread running.  This is a problem.
		     `errno' is set to EAGAIN if this is only a temporary
		     problem.  */
		  __aio_remove_request (last, newp, 0);
		}
	      else
		result = 0;
	    }
	}
    }

  /* Enqueue the request in the run queue if it is not yet running.  */
  if (running == yes && result == 0)
    {
      add_request_to_runlist (newp);

      /* If there is a thread waiting for work, then let it know that we
	 have just given it something to do.  */
      if (idle_thread_count > 0)
	pthread_cond_signal (&__aio_new_request_notification);
    }

  if (result == 0)
    newp->running = running;
  else
    {
      /* Something went wrong.  */
      __aio_free_request (newp);
      aiocbp->aiocb.__error_code = result;
      __set_errno (result);
      newp = NULL;
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__aio_requests_mutex);

  return newp;
}
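
/* For reference, the public wrappers reduce to thin calls into the
   function above; aio_read, for example, is essentially:

     int
     aio_read (struct aiocb *aiocbp)
     {
       return (__aio_enqueue_request ((aiocb_union *) aiocbp, LIO_READ)
	       == NULL ? -1 : 0);
     }  */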
static void *
handle_fildes_io (void *arg)
{
  pthread_t self = pthread_self ();
  struct sched_param param;
  struct requestlist *runp = (struct requestlist *) arg;
  aiocb_union *aiocbp;
  int policy;
  int fildes;

  pthread_getschedparam (self, &policy, &param);

  do
    {
      /* If runp is NULL, then we were created to service the work queue
	 in general, not to handle any particular request.  In that case we
	 skip the "do work" stuff on the first pass, and go directly to the
	 "get work off the work queue" part of this loop, which is near the
	 end.  */
      if (runp == NULL)
	pthread_mutex_lock (&__aio_requests_mutex);
      else
	{
	  /* Hopefully this request is marked as running.  */
	  assert (runp->running == allocated);

	  /* Update our variables.  */
	  aiocbp = runp->aiocbp;
	  fildes = aiocbp->aiocb.aio_fildes;

	  /* Change the priority to the requested value (if necessary).  */
	  if (aiocbp->aiocb.__abs_prio != param.sched_priority
	      || aiocbp->aiocb.__policy != policy)
	    {
	      param.sched_priority = aiocbp->aiocb.__abs_prio;
	      policy = aiocbp->aiocb.__policy;
	      pthread_setschedparam (self, policy, &param);
	    }

	  /* Process request pointed to by RUNP.  We must not be disturbed
	     by signals.  */
	  if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_READ)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pread64 (fildes, (void *)
						 aiocbp->aiocb64.aio_buf,
						 aiocbp->aiocb64.aio_nbytes,
						 aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__libc_pread (fildes,
						    (void *)
						    aiocbp->aiocb.aio_buf,
						    aiocbp->aiocb.aio_nbytes,
						    aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pread on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   read.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (read (fildes,
					    (void *) aiocbp->aiocb64.aio_buf,
					    aiocbp->aiocb64.aio_nbytes));
	    }
	  else if ((aiocbp->aiocb.aio_lio_opcode & 127) == LIO_WRITE)
	    {
	      if (sizeof (off_t) != sizeof (off64_t)
		  && aiocbp->aiocb.aio_lio_opcode & 128)
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__pwrite64 (fildes, (const void *)
						  aiocbp->aiocb64.aio_buf,
						  aiocbp->aiocb64.aio_nbytes,
						  aiocbp->aiocb64.aio_offset));
	      else
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (__libc_pwrite (fildes, (const void *)
						     aiocbp->aiocb.aio_buf,
						     aiocbp->aiocb.aio_nbytes,
						     aiocbp->aiocb.aio_offset));

	      if (aiocbp->aiocb.__return_value == -1 && errno == ESPIPE)
		/* The Linux kernel is different from others.  It returns
		   ESPIPE if using pwrite on a socket.  Other platforms
		   simply ignore the offset parameter and behave like
		   write.  */
		aiocbp->aiocb.__return_value =
		  TEMP_FAILURE_RETRY (write (fildes,
					     (void *) aiocbp->aiocb64.aio_buf,
					     aiocbp->aiocb64.aio_nbytes));
	    }
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_DSYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fdatasync (fildes));
	  else if (aiocbp->aiocb.aio_lio_opcode == LIO_SYNC)
	    aiocbp->aiocb.__return_value =
	      TEMP_FAILURE_RETRY (fsync (fildes));
	  else
	    {
	      /* This is an invalid opcode.  */
	      aiocbp->aiocb.__return_value = -1;
	      __set_errno (EINVAL);
	    }

	  /* Get the mutex.  */
	  pthread_mutex_lock (&__aio_requests_mutex);

	  if (aiocbp->aiocb.__return_value == -1)
	    aiocbp->aiocb.__error_code = errno;
	  else
	    aiocbp->aiocb.__error_code = 0;

	  /* Send the signal to notify about finished processing of the
	     request.  */
	  __aio_notify (runp);

	  /* For debugging purposes we reset the running flag of the
	     finished request.  */
	  assert (runp->running == allocated);
	  runp->running = done;

	  /* Now dequeue the current request.  */
	  __aio_remove_request (NULL, runp, 0);
	  if (runp->next_prio != NULL)
	    add_request_to_runlist (runp->next_prio);

	  /* Free the old element.  */
	  __aio_free_request (runp);
	}

      runp = runlist;

      /* If the runlist is empty, then we sleep for a while, waiting for
	 something to arrive in it.  */
      if (runp == NULL && optim.aio_idle_time >= 0)
	{
	  struct timespec now;
	  struct timespec wakeup_time;

	  ++idle_thread_count;
	  __clock_gettime (CLOCK_REALTIME, &now);
	  wakeup_time.tv_sec = now.tv_sec + optim.aio_idle_time;
	  wakeup_time.tv_nsec = now.tv_nsec;
	  if (wakeup_time.tv_nsec >= 1000000000)
	    {
	      wakeup_time.tv_nsec -= 1000000000;
	      ++wakeup_time.tv_sec;
	    }
	  pthread_cond_timedwait (&__aio_new_request_notification,
				  &__aio_requests_mutex,
				  &wakeup_time);
	  --idle_thread_count;
	  runp = runlist;
	}

      if (runp == NULL)
	--nthreads;
      else
	{
	  assert (runp->running == yes);
	  runp->running = allocated;
	  runlist = runp->next_run;

	  /* If we have a request to process, and there's still another in
	     the run list, then we need to either wake up or create a new
	     thread to service the request that is still in the run list.  */
	  if (runlist != NULL)
	    {
	      /* There are at least two items in the work queue to work on.
		 If there are other idle threads, then we should wake them
		 up for these other work elements; otherwise, we should try
		 to create a new thread.  */
	      if (idle_thread_count > 0)
		pthread_cond_signal (&__aio_new_request_notification);
	      else if (nthreads < optim.aio_threads)
		{
		  pthread_t thid;
		  pthread_attr_t attr;

		  /* Make sure the thread is created detached.  */
		  pthread_attr_init (&attr);
		  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);

		  /* Now try to start a thread.  If we fail, no big deal,
		     because we know that there is at least one thread (us)
		     that is working on AIO operations.  */
		  if (pthread_create (&thid, &attr, handle_fildes_io, NULL)
		      == 0)
		    ++nthreads;
		}
	    }
	}

      /* Release the mutex.  */
      pthread_mutex_unlock (&__aio_requests_mutex);
    }
  while (runp != NULL);

  return NULL;
}
/* Free allocated resources.  */
libc_freeres_fn (free_res)
{
  size_t row;

  for (row = 0; row < pool_max_size; ++row)
    free (pool[row]);

  free (pool);
}
/* Add newrequest to the runlist.  The __abs_prio field of newrequest must
   be correctly set to do this.  Also, you had better set newrequest's
   "running" flag to "yes" before you release your lock or you'll throw an
   assertion.  */
static void
add_request_to_runlist (struct requestlist *newrequest)
{
  int prio = newrequest->aiocbp->aiocb.__abs_prio;
  struct requestlist *runp;

  if (runlist == NULL || runlist->aiocbp->aiocb.__abs_prio < prio)
    {
      newrequest->next_run = runlist;
      runlist = newrequest;
    }
  else
    {
      runp = runlist;

      while (runp->next_run != NULL
	     && runp->next_run->aiocbp->aiocb.__abs_prio >= prio)
	runp = runp->next_run;

      newrequest->next_run = runp->next_run;
      runp->next_run = newrequest;
    }
}