/* Copyright (C) 1993-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.

   As a special exception, if you link the code in this file with
   files compiled with a GNU compiler to produce an executable,
   that does not cause the resulting executable to be covered by
   the GNU Lesser General Public License.  This exception does not
   however invalidate any other reasons why the executable file
   might be covered by the GNU Lesser General Public License.
   This exception applies to code released by its copyright holders
   in files containing the exception.  */

/* Generic or default I/O operations.  */

#include "libioP.h"
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <sched.h>

#ifdef _IO_MTSAFE_IO
static _IO_lock_t list_all_lock = _IO_lock_initializer;
#endif

/* Stream currently being worked on (and locked) by the list-walking
   code below; flush_cleanup uses it to release that stream's lock if
   the walk is interrupted by cancellation.  */
static FILE *run_fp;

#ifdef _IO_MTSAFE_IO
static void
flush_cleanup (void *not_used)
{
  if (run_fp != NULL)
    _IO_funlockfile (run_fp);
  _IO_lock_unlock (list_all_lock);
}
#endif

void
_IO_un_link (struct _IO_FILE_plus *fp)
{
  if (fp->file._flags & _IO_LINKED)
    {
      FILE **f;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (FILE *) fp;
      _IO_flockfile ((FILE *) fp);
#endif
      if (_IO_list_all == NULL)
        ;
      else if (fp == _IO_list_all)
        _IO_list_all = (struct _IO_FILE_plus *) _IO_list_all->file._chain;
      else
        for (f = &_IO_list_all->file._chain; *f; f = &(*f)->_chain)
          if (*f == (FILE *) fp)
            {
              *f = fp->file._chain;
              break;
            }
      fp->file._flags &= ~_IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
libc_hidden_def (_IO_un_link)

void
_IO_link_in (struct _IO_FILE_plus *fp)
{
  if ((fp->file._flags & _IO_LINKED) == 0)
    {
      fp->file._flags |= _IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (FILE *) fp;
      _IO_flockfile ((FILE *) fp);
#endif
      fp->file._chain = (FILE *) _IO_list_all;
      _IO_list_all = fp;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
libc_hidden_def (_IO_link_in)

/* Return the minimum _pos over all of FP's markers, bounded above by the
   length of the main get area up to END_P.
   Assumes the current get area is the main get area.  */
ssize_t _IO_least_marker (FILE *fp, char *end_p);

ssize_t
_IO_least_marker (FILE *fp, char *end_p)
{
  ssize_t least_so_far = end_p - fp->_IO_read_base;
  struct _IO_marker *mark;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    if (mark->_pos < least_so_far)
      least_so_far = mark->_pos;
  return least_so_far;
}

/* Switch current get area from backup buffer to (start of) main get area.  */

void
_IO_switch_to_main_get_area (FILE *fp)
{
  char *tmp;
  fp->_flags &= ~_IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end.  */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base.  */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr.  */
  fp->_IO_read_ptr = fp->_IO_read_base;
}

/* Switch current get area from main get area to (end of) backup area.  */

void
_IO_switch_to_backup_area (FILE *fp)
{
  char *tmp;
  fp->_flags |= _IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end.  */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base.  */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr.  */
  fp->_IO_read_ptr = fp->_IO_read_end;
}

int
_IO_switch_to_get_mode (FILE *fp)
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      if (fp->_IO_write_ptr > fp->_IO_read_end)
        fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end
    = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
libc_hidden_def (_IO_switch_to_get_mode)

void
_IO_free_backup_area (FILE *fp)
{
  if (_IO_in_backup (fp))
    _IO_switch_to_main_get_area (fp);  /* Just in case.  */
  free (fp->_IO_save_base);
  fp->_IO_save_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_IO_backup_base = NULL;
}
libc_hidden_def (_IO_free_backup_area)

int
__overflow (FILE *f, int ch)
{
  /* This is a single-byte stream.  */
  if (f->_mode == 0)
    _IO_fwide (f, -1);
  return _IO_OVERFLOW (f, ch);
}
libc_hidden_def (__overflow)
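
/* Illustrative sketch (not part of the original file): __overflow is the
   slow path behind putc-style output.  A typical fast path writes into
   the put area while there is room and only calls __overflow once
   _IO_write_ptr reaches _IO_write_end.  The helper name example_putc is
   hypothetical and the block is compiled out.  */
#if 0
static int
example_putc (int ch, FILE *fp)
{
  if (fp->_IO_write_ptr < fp->_IO_write_end)
    return (unsigned char) (*fp->_IO_write_ptr++ = ch);
  /* Put area full (or stream unbuffered): let the overflow hook flush
     and/or allocate a buffer, then store CH.  */
  return __overflow (fp, (unsigned char) ch);
}
#endif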

static int
save_for_backup (FILE *fp, char *end_p)
{
  /* Append [_IO_read_base..end_p] to backup area.  */
  ssize_t least_mark = _IO_least_marker (fp, end_p);
  /* needed_size is how much space we need in the backup area.  */
  size_t needed_size = (end_p - fp->_IO_read_base) - least_mark;
  /* FIXME: Dubious arithmetic if pointers are NULL */
  size_t current_Bsize = fp->_IO_save_end - fp->_IO_save_base;
  size_t avail;  /* Extra space available for future expansion.  */
  ssize_t delta;
  struct _IO_marker *mark;
  if (needed_size > current_Bsize)
    {
      char *new_buffer;
      avail = 100;
      new_buffer = (char *) malloc (avail + needed_size);
      if (new_buffer == NULL)
        return EOF;  /* FIXME */
      if (least_mark < 0)
        {
          __mempcpy (__mempcpy (new_buffer + avail,
                                fp->_IO_save_end + least_mark,
                                -least_mark),
                     fp->_IO_read_base,
                     end_p - fp->_IO_read_base);
        }
      else
        memcpy (new_buffer + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
      free (fp->_IO_save_base);
      fp->_IO_save_base = new_buffer;
      fp->_IO_save_end = new_buffer + avail + needed_size;
    }
  else
    {
      avail = current_Bsize - needed_size;
      if (least_mark < 0)
        {
          memmove (fp->_IO_save_base + avail,
                   fp->_IO_save_end + least_mark,
                   -least_mark);
          memcpy (fp->_IO_save_base + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
        }
      else if (needed_size > 0)
        memcpy (fp->_IO_save_base + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
    }
  fp->_IO_backup_base = fp->_IO_save_base + avail;
  /* Adjust all the streammarkers.  */
  delta = end_p - fp->_IO_read_base;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_pos -= delta;
  return 0;
}

int
__underflow (FILE *fp)
{
  if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UNDERFLOW (fp);
}
libc_hidden_def (__underflow)

int
__uflow (FILE *fp)
{
  if (_IO_vtable_offset (fp) == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr++;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
}
libc_hidden_def (__uflow)
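
/* Illustrative sketch (not part of the original file): __underflow refills
   the get area and peeks at the next byte without consuming it, while
   __uflow consumes it.  A getc-style fast path therefore looks roughly
   like this; the helper name example_getc is hypothetical and the block
   is compiled out.  */
#if 0
static int
example_getc (FILE *fp)
{
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  /* Get area exhausted: refill it and consume one byte.  */
  return __uflow (fp);
}
#endif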

/* Install [B, EB) as F's buffer.  A nonzero A means the buffer was
   allocated by the library and may be freed later; A == 0 marks it as a
   user-supplied buffer by setting _IO_USER_BUF, so it is never freed.  */
void
_IO_setb (FILE *f, char *b, char *eb, int a)
{
  if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF))
    free (f->_IO_buf_base);
  f->_IO_buf_base = b;
  f->_IO_buf_end = eb;
  if (a)
    f->_flags &= ~_IO_USER_BUF;
  else
    f->_flags |= _IO_USER_BUF;
}
libc_hidden_def (_IO_setb)

void
_IO_doallocbuf (FILE *fp)
{
  if (fp->_IO_buf_base)
    return;
  if (!(fp->_flags & _IO_UNBUFFERED) || fp->_mode > 0)
    if (_IO_DOALLOCATE (fp) != EOF)
      return;
  _IO_setb (fp, fp->_shortbuf, fp->_shortbuf + 1, 0);
}
libc_hidden_def (_IO_doallocbuf)

int
_IO_default_underflow (FILE *fp)
{
  return EOF;
}

int
_IO_default_uflow (FILE *fp)
{
  int ch = _IO_UNDERFLOW (fp);
  if (ch == EOF)
    return EOF;
  return *(unsigned char *) fp->_IO_read_ptr++;
}
libc_hidden_def (_IO_default_uflow)

size_t
_IO_default_xsputn (FILE *f, const void *data, size_t n)
{
  const char *s = (char *) data;
  size_t more = n;
  if (more <= 0)
    return 0;
  for (;;)
    {
      /* Space available.  */
      if (f->_IO_write_ptr < f->_IO_write_end)
        {
          size_t count = f->_IO_write_end - f->_IO_write_ptr;
          if (count > more)
            count = more;
          if (count > 20)
            {
              f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
              s += count;
            }
          else if (count)
            {
              char *p = f->_IO_write_ptr;
              ssize_t i;
              for (i = count; --i >= 0; )
                *p++ = *s++;
              f->_IO_write_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || _IO_OVERFLOW (f, (unsigned char) *s++) == EOF)
        break;
      more--;
    }
  return n - more;
}
libc_hidden_def (_IO_default_xsputn)

size_t
_IO_sgetn (FILE *fp, void *data, size_t n)
{
  /* FIXME handle putback buffer here! */
  return _IO_XSGETN (fp, data, n);
}
libc_hidden_def (_IO_sgetn)

size_t
_IO_default_xsgetn (FILE *fp, void *data, size_t n)
{
  size_t more = n;
  char *s = (char *) data;
  for (;;)
    {
      /* Data available.  */
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        {
          size_t count = fp->_IO_read_end - fp->_IO_read_ptr;
          if (count > more)
            count = more;
          if (count > 20)
            {
              s = __mempcpy (s, fp->_IO_read_ptr, count);
              fp->_IO_read_ptr += count;
            }
          else if (count)
            {
              char *p = fp->_IO_read_ptr;
              int i = (int) count;
              while (--i >= 0)
                *s++ = *p++;
              fp->_IO_read_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __underflow (fp) == EOF)
        break;
    }
  return n - more;
}
libc_hidden_def (_IO_default_xsgetn)

FILE *
_IO_default_setbuf (FILE *fp, char *p, ssize_t len)
{
  if (_IO_SYNC (fp) == EOF)
    return NULL;
  if (p == NULL || len == 0)
    {
      fp->_flags |= _IO_UNBUFFERED;
      _IO_setb (fp, fp->_shortbuf, fp->_shortbuf + 1, 0);
    }
  else
    {
      fp->_flags &= ~_IO_UNBUFFERED;
      _IO_setb (fp, p, p + len, 0);
    }
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0;
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0;
  return fp;
}

off64_t
_IO_default_seekpos (FILE *fp, off64_t pos, int mode)
{
  return _IO_SEEKOFF (fp, pos, 0, mode);
}

int
_IO_default_doallocate (FILE *fp)
{
  char *buf;

  buf = malloc (BUFSIZ);
  if (__glibc_unlikely (buf == NULL))
    return EOF;

  _IO_setb (fp, buf, buf + BUFSIZ, 1);
  return 1;
}
libc_hidden_def (_IO_default_doallocate)

void
_IO_init_internal (FILE *fp, int flags)
{
  _IO_no_init (fp, flags, -1, NULL, NULL);
}

void
_IO_init (FILE *fp, int flags)
{
  IO_set_accept_foreign_vtables (&_IO_vtable_check);
  _IO_init_internal (fp, flags);
}

static int stdio_needs_locking;

/* In a single-threaded process most stdio locks can be omitted.  After
   _IO_enable_locks is called, locks are no longer optimized away.
   It must first be called while the process is still single-threaded.

   This lock optimization can be disabled on a per-file basis by setting
   _IO_FLAGS2_NEED_LOCK, because a file can have user-defined callbacks
   or can be locked with flockfile and then a thread may be created
   between a lock and unlock, so omitting the lock is not valid.

   Here we have to make sure that the flag is set on all existing files
   and on files created later.  */
void
_IO_enable_locks (void)
{
  _IO_ITER i;

  if (stdio_needs_locking)
    return;
  stdio_needs_locking = 1;
  for (i = _IO_iter_begin (); i != _IO_iter_end (); i = _IO_iter_next (i))
    _IO_iter_file (i)->_flags2 |= _IO_FLAGS2_NEED_LOCK;
}
libc_hidden_def (_IO_enable_locks)
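
/* Illustrative sketch (not part of the original file): per the comment
   above, a plausible caller of _IO_enable_locks is thread-creation code,
   which would invoke it once, while the process is still single-threaded,
   before the first thread starts running.  The helper name
   example_spawn_first_thread and its argument are hypothetical; the block
   is compiled out.  */
#if 0
static void
example_spawn_first_thread (void (*start_routine) (void))
{
  /* From this point on, stdio locks are never elided.  */
  _IO_enable_locks ();

  /* ... actually create the thread running START_ROUTINE ... */
}
#endif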

void
_IO_old_init (FILE *fp, int flags)
{
  fp->_flags = _IO_MAGIC|flags;
  fp->_flags2 = 0;
  if (stdio_needs_locking)
    fp->_flags2 |= _IO_FLAGS2_NEED_LOCK;
  fp->_IO_buf_base = NULL;
  fp->_IO_buf_end = NULL;
  fp->_IO_read_base = NULL;
  fp->_IO_read_ptr = NULL;
  fp->_IO_read_end = NULL;
  fp->_IO_write_base = NULL;
  fp->_IO_write_ptr = NULL;
  fp->_IO_write_end = NULL;
  fp->_chain = NULL; /* Not necessary. */

  fp->_IO_save_base = NULL;
  fp->_IO_backup_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_markers = NULL;
  fp->_cur_column = 0;
#if _IO_JUMPS_OFFSET
  fp->_vtable_offset = 0;
#endif
#ifdef _IO_MTSAFE_IO
  if (fp->_lock != NULL)
    _IO_lock_init (*fp->_lock);
#endif
}

void
_IO_no_init (FILE *fp, int flags, int orientation,
             struct _IO_wide_data *wd, const struct _IO_jump_t *jmp)
{
  _IO_old_init (fp, flags);
  fp->_mode = orientation;
  if (orientation >= 0)
    {
      fp->_wide_data = wd;
      fp->_wide_data->_IO_buf_base = NULL;
      fp->_wide_data->_IO_buf_end = NULL;
      fp->_wide_data->_IO_read_base = NULL;
      fp->_wide_data->_IO_read_ptr = NULL;
      fp->_wide_data->_IO_read_end = NULL;
      fp->_wide_data->_IO_write_base = NULL;
      fp->_wide_data->_IO_write_ptr = NULL;
      fp->_wide_data->_IO_write_end = NULL;
      fp->_wide_data->_IO_save_base = NULL;
      fp->_wide_data->_IO_backup_base = NULL;
      fp->_wide_data->_IO_save_end = NULL;

      fp->_wide_data->_wide_vtable = jmp;
    }
  else
    /* Cause predictable crash when a wide function is called on a byte
       stream.  */
    fp->_wide_data = (struct _IO_wide_data *) -1L;
  fp->_freeres_list = NULL;
}

int
_IO_default_sync (FILE *fp)
{
  return 0;
}

/* The way the C++ classes are mapped into the C functions in the
   current implementation, this function can get called twice!  */

void
_IO_default_finish (FILE *fp, int dummy)
{
  struct _IO_marker *mark;
  if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF))
    {
      free (fp->_IO_buf_base);
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
    }

  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_sbuf = NULL;

  if (fp->_IO_save_base)
    {
      free (fp->_IO_save_base);
      fp->_IO_save_base = NULL;
    }

  _IO_un_link ((struct _IO_FILE_plus *) fp);

#ifdef _IO_MTSAFE_IO
  if (fp->_lock != NULL)
    _IO_lock_fini (*fp->_lock);
#endif
}
libc_hidden_def (_IO_default_finish)

off64_t
_IO_default_seekoff (FILE *fp, off64_t offset, int dir, int mode)
{
  return _IO_pos_BAD;
}

int
_IO_sputbackc (FILE *fp, int c)
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base
      && (unsigned char) fp->_IO_read_ptr[-1] == (unsigned char) c)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) c;
    }
  else
    result = _IO_PBACKFAIL (fp, c);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
libc_hidden_def (_IO_sputbackc)

int
_IO_sungetc (FILE *fp)
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) *fp->_IO_read_ptr;
    }
  else
    result = _IO_PBACKFAIL (fp, EOF);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
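
/* Illustrative sketch (not part of the original file): _IO_sputbackc
   implements ungetc-style pushback.  If C matches the byte just read it
   simply steps _IO_read_ptr back; otherwise the stream's pbackfail hook
   (for the default vtable, _IO_default_pbackfail below) stores C in a
   backup area.  The helper name example_ungetc is hypothetical and the
   block is compiled out.  */
#if 0
static int
example_ungetc (int c, FILE *fp)
{
  if (c == EOF)
    return EOF;
  return _IO_sputbackc (fp, (unsigned char) c);
}
#endif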

unsigned
_IO_adjust_column (unsigned start, const char *line, int count)
{
  const char *ptr = line + count;
  while (ptr > line)
    if (*--ptr == '\n')
      return line + count - ptr - 1;
  return start + count;
}
libc_hidden_def (_IO_adjust_column)
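
/* Example (added for clarity, not in the original file):
   _IO_adjust_column (4, "ab\ncd", 5) returns 2, the number of bytes after
   the last newline, ignoring START; with no newline in LINE, e.g.
   _IO_adjust_column (4, "abcd", 4), it returns START + COUNT = 8.  */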

int
_IO_flush_all_lockp (int do_lock)
{
  int result = 0;
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain)
    {
      run_fp = fp;
      if (do_lock)
        _IO_flockfile (fp);

      if (((fp->_mode <= 0 && fp->_IO_write_ptr > fp->_IO_write_base)
           || (_IO_vtable_offset (fp) == 0
               && fp->_mode > 0 && (fp->_wide_data->_IO_write_ptr
                                    > fp->_wide_data->_IO_write_base))
           )
          && _IO_OVERFLOW (fp, EOF) == EOF)
        result = EOF;

      if (do_lock)
        _IO_funlockfile (fp);
      run_fp = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif

  return result;
}


int
_IO_flush_all (void)
{
  /* We want locking.  */
  return _IO_flush_all_lockp (1);
}
libc_hidden_def (_IO_flush_all)

void
_IO_flush_all_linebuffered (void)
{
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp != NULL; fp = fp->_chain)
    {
      run_fp = fp;
      _IO_flockfile (fp);

      if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF)
        _IO_OVERFLOW (fp, EOF);

      _IO_funlockfile (fp);
      run_fp = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif
}
libc_hidden_def (_IO_flush_all_linebuffered)
weak_alias (_IO_flush_all_linebuffered, _flushlbf)


/* The following is a bit tricky.  In general, we want to unbuffer the
   streams so that all output which follows is seen.  If we are not
   looking for memory leaks it does not make much sense to free the
   actual buffer because this will happen anyway once the program
   terminates.  If we do want to look for memory leaks we have to free
   the buffers.  Whether something is freed is determined by the
   functions in the libc_freeres section.  Those are called as part of
   the atexit routine, just like _IO_cleanup.  The problem is that we do
   not know whether the freeres code is called first or _IO_cleanup.
   If the former is the case, we set the DEALLOC_BUFFERS variable to
   true and _IO_unbuffer_all will take care of the rest.  If
   _IO_unbuffer_all is called first we add the streams to a list
   which the freeres function later can walk through.  */
static void _IO_unbuffer_all (void);

static bool dealloc_buffers;
static FILE *freeres_list;

static void
_IO_unbuffer_all (void)
{
  FILE *fp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  for (fp = (FILE *) _IO_list_all; fp; fp = fp->_chain)
    {
      if (! (fp->_flags & _IO_UNBUFFERED)
          /* Iff the stream is un-oriented, it wasn't used.  */
          && fp->_mode != 0)
        {
#ifdef _IO_MTSAFE_IO
          int cnt;
#define MAXTRIES 2
          for (cnt = 0; cnt < MAXTRIES; ++cnt)
            if (fp->_lock == NULL || _IO_lock_trylock (*fp->_lock) == 0)
              break;
            else
              /* Give the other thread time to finish up its use of the
                 stream.  */
              __sched_yield ();
#endif

          if (! dealloc_buffers && !(fp->_flags & _IO_USER_BUF))
            {
              fp->_flags |= _IO_USER_BUF;

              fp->_freeres_list = freeres_list;
              freeres_list = fp;
              fp->_freeres_buf = fp->_IO_buf_base;
            }

          _IO_SETBUF (fp, NULL, 0);

          if (fp->_mode > 0)
            _IO_wsetb (fp, NULL, NULL, 0);

#ifdef _IO_MTSAFE_IO
          if (cnt < MAXTRIES && fp->_lock != NULL)
            _IO_lock_unlock (*fp->_lock);
#endif
        }

      /* Make sure the wide char functions can never be used again.  */
      fp->_mode = -1;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif
}


libc_freeres_fn (buffer_free)
{
  dealloc_buffers = true;

  while (freeres_list != NULL)
    {
      free (freeres_list->_freeres_buf);

      freeres_list = freeres_list->_freeres_list;
    }
}


int
_IO_cleanup (void)
{
  /* We do *not* want locking.  Some threads might use streams but
     that is their problem, we flush them underneath them.  */
  int result = _IO_flush_all_lockp (0);

  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out.  */
  _IO_unbuffer_all ();

  return result;
}


void
_IO_init_marker (struct _IO_marker *marker, FILE *fp)
{
  marker->_sbuf = fp;
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);
  if (_IO_in_backup (fp))
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end;
  else
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain?  */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}

void
_IO_remove_marker (struct _IO_marker *marker)
{
  /* Unlink from sb's chain.  */
  struct _IO_marker **ptr = &marker->_sbuf->_markers;
  for (; ; ptr = &(*ptr)->_next)
    {
      if (*ptr == NULL)
        break;
      else if (*ptr == marker)
        {
          *ptr = marker->_next;
          return;
        }
    }
  /* FIXME: if _sbuf has a backup area that is no longer needed,
     should we delete it now, or wait until the next underflow?  */
}

#define BAD_DELTA EOF

int
_IO_marker_difference (struct _IO_marker *mark1, struct _IO_marker *mark2)
{
  return mark1->_pos - mark2->_pos;
}

/* Return difference between MARK and current position of MARK's stream.  */
int
_IO_marker_delta (struct _IO_marker *mark)
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    return BAD_DELTA;
  if (_IO_in_backup (mark->_sbuf))
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}

int
_IO_seekmark (FILE *fp, struct _IO_marker *mark, int delta)
{
  if (mark->_sbuf != fp)
    return EOF;
  if (mark->_pos >= 0)
    {
      if (_IO_in_backup (fp))
        _IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  else
    {
      if (!_IO_in_backup (fp))
        _IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  return 0;
}
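
/* Illustrative sketch (not part of the original file): the marker code
   above gives mark/rewind semantics on the get area.  A marker records the
   current read position; after reading further (which may move the marked
   data into the backup area via save_for_backup), _IO_seekmark moves the
   read pointer back to the mark.  The helper name example_peek_two_bytes
   is hypothetical and the block is compiled out.  */
#if 0
static int
example_peek_two_bytes (FILE *fp, unsigned char buf[2])
{
  struct _IO_marker mark;
  int c0, c1;

  _IO_init_marker (&mark, fp);
  c0 = __uflow (fp);
  c1 = __uflow (fp);
  _IO_seekmark (fp, &mark, 0);  /* Rewind to the marked position.  */
  _IO_remove_marker (&mark);

  if (c0 == EOF || c1 == EOF)
    return EOF;
  buf[0] = c0;
  buf[1] = c1;
  return 0;
}
#endif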

void
_IO_unsave_markers (FILE *fp)
{
  struct _IO_marker *mark = fp->_markers;
  if (mark)
    {
      fp->_markers = 0;
    }

  if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
}
libc_hidden_def (_IO_unsave_markers)

int
_IO_default_pbackfail (FILE *fp, int c)
{
  if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp)
      && (unsigned char) fp->_IO_read_ptr[-1] == c)
    --fp->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode).  FIXME!  */
      if (!_IO_in_backup (fp))
        {
          /* We need to keep the invariant that the main get area
             logically follows the backup area.  */
          if (fp->_IO_read_ptr > fp->_IO_read_base && _IO_have_backup (fp))
            {
              if (save_for_backup (fp, fp->_IO_read_ptr))
                return EOF;
            }
          else if (!_IO_have_backup (fp))
            {
              /* No backup buffer: allocate one.  */
              /* Use nshort buffer, if unused? (probably not)  FIXME */
              int backup_size = 128;
              char *bbuf = (char *) malloc (backup_size);
              if (bbuf == NULL)
                return EOF;
              fp->_IO_save_base = bbuf;
              fp->_IO_save_end = fp->_IO_save_base + backup_size;
              fp->_IO_backup_base = fp->_IO_save_end;
            }
          fp->_IO_read_base = fp->_IO_read_ptr;
          _IO_switch_to_backup_area (fp);
        }
      else if (fp->_IO_read_ptr <= fp->_IO_read_base)
        {
          /* Increase size of existing backup buffer.  */
          size_t new_size;
          size_t old_size = fp->_IO_read_end - fp->_IO_read_base;
          char *new_buf;
          new_size = 2 * old_size;
          new_buf = (char *) malloc (new_size);
          if (new_buf == NULL)
            return EOF;
          memcpy (new_buf + (new_size - old_size), fp->_IO_read_base,
                  old_size);
          free (fp->_IO_read_base);
          _IO_setg (fp, new_buf, new_buf + (new_size - old_size),
                    new_buf + new_size);
          fp->_IO_backup_base = fp->_IO_read_ptr;
        }

      *--fp->_IO_read_ptr = c;
    }
  return (unsigned char) c;
}
libc_hidden_def (_IO_default_pbackfail)

off64_t
_IO_default_seek (FILE *fp, off64_t offset, int dir)
{
  return _IO_pos_BAD;
}

int
_IO_default_stat (FILE *fp, void *st)
{
  return EOF;
}

ssize_t
_IO_default_read (FILE *fp, void *data, ssize_t n)
{
  return -1;
}

ssize_t
_IO_default_write (FILE *fp, const void *data, ssize_t n)
{
  return 0;
}

int
_IO_default_showmanyc (FILE *fp)
{
  return -1;
}

void
_IO_default_imbue (FILE *fp, void *locale)
{
}

_IO_ITER
_IO_iter_begin (void)
{
  return (_IO_ITER) _IO_list_all;
}
libc_hidden_def (_IO_iter_begin)

_IO_ITER
_IO_iter_end (void)
{
  return NULL;
}
libc_hidden_def (_IO_iter_end)

_IO_ITER
_IO_iter_next (_IO_ITER iter)
{
  return iter->_chain;
}
libc_hidden_def (_IO_iter_next)

FILE *
_IO_iter_file (_IO_ITER iter)
{
  return iter;
}
libc_hidden_def (_IO_iter_file)
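
/* Illustrative sketch (not part of the original file): walking every open
   stream with the iterator interface, the same pattern _IO_enable_locks
   uses above.  The list lock defined below is held around the walk.  The
   helper name example_count_streams_with_pending_output is hypothetical
   and the block is compiled out.  */
#if 0
static int
example_count_streams_with_pending_output (void)
{
  int n = 0;
  _IO_ITER it;

  _IO_list_lock ();
  for (it = _IO_iter_begin (); it != _IO_iter_end (); it = _IO_iter_next (it))
    {
      FILE *fp = _IO_iter_file (it);
      /* Count streams whose narrow put area holds unflushed bytes.  */
      if (fp->_IO_write_ptr > fp->_IO_write_base)
        ++n;
    }
  _IO_list_unlock ();
  return n;
}
#endif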

void
_IO_list_lock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_lock (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_lock)

void
_IO_list_unlock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_unlock)

void
_IO_list_resetlock (void)
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (list_all_lock);
#endif
}
libc_hidden_def (_IO_list_resetlock)

text_set_element (__libc_atexit, _IO_cleanup);