1 /* Copyright (C) 1993, 1995, 1997, 1998 Free Software Foundation, Inc.
2 This file is part of the GNU IO Library.
4 This library is free software; you can redistribute it and/or
5 modify it under the terms of the GNU General Public License as
6 published by the Free Software Foundation; either version 2, or (at
7 your option) any later version.
9 This library is distributed in the hope that it will be useful, but
10 WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 General Public License for more details.
14 You should have received a copy of the GNU General Public License
15 along with this library; see the file COPYING. If not, write to
16 the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
17 MA 02111-1307, USA.
19 As a special exception, if you link this library with files
20 compiled with a GNU compiler to produce an executable, this does
21 not cause the resulting executable to be covered by the GNU General
22 Public License. This exception does not however invalidate any
23 other reasons why the executable file might be covered by the GNU
24 General Public License. */
26 /* Generic or default I/O operations. */
38 if (fp
->_flags
& _IO_LINKED
)
41 for (f
= &_IO_list_all
; *f
!= NULL
; f
= &(*f
)->_chain
)
49 fp
->_flags
&= ~_IO_LINKED
;
57 if ((fp
->_flags
& _IO_LINKED
) == 0)
59 fp
->_flags
|= _IO_LINKED
;
60 fp
->_chain
= _IO_list_all
;
65 /* Return minimum _pos markers
66 Assumes the current get area is the main get area. */
67 static _IO_ssize_t _IO_least_marker
__P ((_IO_FILE
*fp
, char *end_p
));
70 _IO_least_marker (fp
, end_p
)
74 _IO_ssize_t least_so_far
= end_p
- fp
->_IO_read_base
;
75 struct _IO_marker
*mark
;
76 for (mark
= fp
->_markers
; mark
!= NULL
; mark
= mark
->_next
)
77 if (mark
->_pos
< least_so_far
)
78 least_so_far
= mark
->_pos
;
82 /* Switch current get area from backup buffer to (start of) main get area. */
85 _IO_switch_to_main_get_area (fp
)
89 fp
->_flags
&= ~_IO_IN_BACKUP
;
90 /* Swap _IO_read_end and _IO_save_end. */
91 tmp
= fp
->_IO_read_end
;
92 fp
->_IO_read_end
= fp
->_IO_save_end
;
93 fp
->_IO_save_end
= tmp
;
94 /* Swap _IO_read_base and _IO_save_base. */
95 tmp
= fp
->_IO_read_base
;
96 fp
->_IO_read_base
= fp
->_IO_save_base
;
97 fp
->_IO_save_base
= tmp
;
98 /* Set _IO_read_ptr. */
99 fp
->_IO_read_ptr
= fp
->_IO_read_base
;
102 /* Switch current get area from main get area to (end of) backup area. */
105 _IO_switch_to_backup_area (fp
)
109 fp
->_flags
|= _IO_IN_BACKUP
;
110 /* Swap _IO_read_end and _IO_save_end. */
111 tmp
= fp
->_IO_read_end
;
112 fp
->_IO_read_end
= fp
->_IO_save_end
;
113 fp
->_IO_save_end
= tmp
;
114 /* Swap _IO_read_base and _IO_save_base. */
115 tmp
= fp
->_IO_read_base
;
116 fp
->_IO_read_base
= fp
->_IO_save_base
;
117 fp
->_IO_save_base
= tmp
;
118 /* Set _IO_read_ptr. */
119 fp
->_IO_read_ptr
= fp
->_IO_read_end
;
123 _IO_switch_to_get_mode (fp
)
126 if (fp
->_IO_write_ptr
> fp
->_IO_write_base
)
127 if (_IO_OVERFLOW (fp
, EOF
) == EOF
)
129 if (_IO_in_backup (fp
))
130 fp
->_IO_read_base
= fp
->_IO_backup_base
;
133 fp
->_IO_read_base
= fp
->_IO_buf_base
;
134 if (fp
->_IO_write_ptr
> fp
->_IO_read_end
)
135 fp
->_IO_read_end
= fp
->_IO_write_ptr
;
137 fp
->_IO_read_ptr
= fp
->_IO_write_ptr
;
139 fp
->_IO_write_base
= fp
->_IO_write_ptr
= fp
->_IO_write_end
= fp
->_IO_read_ptr
;
141 fp
->_flags
&= ~_IO_CURRENTLY_PUTTING
;
146 _IO_free_backup_area (fp
)
149 if (_IO_in_backup (fp
))
150 _IO_switch_to_main_get_area (fp
); /* Just in case. */
151 free (fp
->_IO_save_base
);
152 fp
->_IO_save_base
= NULL
;
153 fp
->_IO_save_end
= NULL
;
154 fp
->_IO_backup_base
= NULL
;
159 _IO_switch_to_put_mode (fp
)
162 fp
->_IO_write_base
= fp
->_IO_read_ptr
;
163 fp
->_IO_write_ptr
= fp
->_IO_read_ptr
;
164 /* Following is wrong if line- or un-buffered? */
165 fp
->_IO_write_end
= (fp
->_flags
& _IO_IN_BACKUP
166 ? fp
->_IO_read_end
: fp
->_IO_buf_end
);
168 fp
->_IO_read_ptr
= fp
->_IO_read_end
;
169 fp
->_IO_read_base
= fp
->_IO_read_end
;
171 fp
->_flags
|= _IO_CURRENTLY_PUTTING
;
181 return _IO_OVERFLOW (f
, ch
);
184 static int save_for_backup
__P ((_IO_FILE
*fp
, char *end_p
))
194 save_for_backup (fp
, end_p
)
198 /* Append [_IO_read_base..end_p] to backup area. */
199 _IO_ssize_t least_mark
= _IO_least_marker (fp
, end_p
);
200 /* needed_size is how much space we need in the backup area. */
201 _IO_size_t needed_size
= (end_p
- fp
->_IO_read_base
) - least_mark
;
202 /* FIXME: Dubious arithmetic if pointers are NULL */
203 _IO_size_t current_Bsize
= fp
->_IO_save_end
- fp
->_IO_save_base
;
204 _IO_size_t avail
; /* Extra space available for future expansion. */
206 struct _IO_marker
*mark
;
207 if (needed_size
> current_Bsize
)
211 new_buffer
= (char *) malloc (avail
+ needed_size
);
212 if (new_buffer
== NULL
)
213 return EOF
; /* FIXME */
217 __mempcpy (__mempcpy (new_buffer
+ avail
,
218 fp
->_IO_save_end
+ least_mark
,
221 end_p
- fp
->_IO_read_base
);
223 memcpy (new_buffer
+ avail
,
224 fp
->_IO_save_end
+ least_mark
,
226 memcpy (new_buffer
+ avail
- least_mark
,
228 end_p
- fp
->_IO_read_base
);
232 memcpy (new_buffer
+ avail
,
233 fp
->_IO_read_base
+ least_mark
,
235 if (fp
->_IO_save_base
)
236 free (fp
->_IO_save_base
);
237 fp
->_IO_save_base
= new_buffer
;
238 fp
->_IO_save_end
= new_buffer
+ avail
+ needed_size
;
242 avail
= current_Bsize
- needed_size
;
245 memmove (fp
->_IO_save_base
+ avail
,
246 fp
->_IO_save_end
+ least_mark
,
248 memcpy (fp
->_IO_save_base
+ avail
- least_mark
,
250 end_p
- fp
->_IO_read_base
);
252 else if (needed_size
> 0)
253 memcpy (fp
->_IO_save_base
+ avail
,
254 fp
->_IO_read_base
+ least_mark
,
257 fp
->_IO_backup_base
= fp
->_IO_save_base
+ avail
;
258 /* Adjust all the streammarkers. */
259 delta
= end_p
- fp
->_IO_read_base
;
260 for (mark
= fp
->_markers
; mark
!= NULL
; mark
= mark
->_next
)
269 if (_IO_in_put_mode (fp
))
270 if (_IO_switch_to_get_mode (fp
) == EOF
)
272 if (fp
->_IO_read_ptr
< fp
->_IO_read_end
)
273 return *(unsigned char *) fp
->_IO_read_ptr
;
274 if (_IO_in_backup (fp
))
276 _IO_switch_to_main_get_area (fp
);
277 if (fp
->_IO_read_ptr
< fp
->_IO_read_end
)
278 return *(unsigned char *) fp
->_IO_read_ptr
;
280 if (_IO_have_markers (fp
))
282 if (save_for_backup (fp
, fp
->_IO_read_end
))
285 else if (_IO_have_backup (fp
))
286 _IO_free_backup_area (fp
);
287 return _IO_UNDERFLOW (fp
);
294 if (_IO_in_put_mode (fp
))
295 if (_IO_switch_to_get_mode (fp
) == EOF
)
297 if (fp
->_IO_read_ptr
< fp
->_IO_read_end
)
298 return *(unsigned char *) fp
->_IO_read_ptr
++;
299 if (_IO_in_backup (fp
))
301 _IO_switch_to_main_get_area (fp
);
302 if (fp
->_IO_read_ptr
< fp
->_IO_read_end
)
303 return *(unsigned char *) fp
->_IO_read_ptr
++;
305 if (_IO_have_markers (fp
))
307 if (save_for_backup (fp
, fp
->_IO_read_end
))
310 else if (_IO_have_backup (fp
))
311 _IO_free_backup_area (fp
);
312 return _IO_UFLOW (fp
);
316 _IO_setb (f
, b
, eb
, a
)
322 if (f
->_IO_buf_base
&& !(f
->_flags
& _IO_USER_BUF
))
323 FREE_BUF (f
->_IO_buf_base
, _IO_blen (f
));
327 f
->_flags
&= ~_IO_USER_BUF
;
329 f
->_flags
|= _IO_USER_BUF
;
336 if (fp
->_IO_buf_base
)
338 if (!(fp
->_flags
& _IO_UNBUFFERED
))
339 if (_IO_DOALLOCATE (fp
) != EOF
)
341 _IO_setb (fp
, fp
->_shortbuf
, fp
->_shortbuf
+1, 0);
345 _IO_default_underflow (fp
)
352 _IO_default_uflow (fp
)
355 int ch
= _IO_UNDERFLOW (fp
);
358 return *(unsigned char *) fp
->_IO_read_ptr
++;
362 _IO_default_xsputn (f
, data
, n
)
367 const char *s
= (char *) data
;
373 /* Space available. */
374 _IO_ssize_t count
= f
->_IO_write_end
- f
->_IO_write_ptr
;
377 if ((_IO_size_t
) count
> more
)
382 f
->_IO_write_ptr
= __mempcpy (f
->_IO_write_ptr
, s
, count
);
384 memcpy (f
->_IO_write_ptr
, s
, count
);
385 f
->_IO_write_ptr
+= count
;
393 char *p
= f
->_IO_write_ptr
;
395 for (i
= count
; --i
>= 0; )
397 f
->_IO_write_ptr
= p
;
401 if (more
== 0 || __overflow (f
, (unsigned char) *s
++) == EOF
)
409 _IO_sgetn (fp
, data
, n
)
414 /* FIXME handle putback buffer here! */
415 return _IO_XSGETN (fp
, data
, n
);
419 _IO_default_xsgetn (fp
, data
, n
)
425 char *s
= (char*) data
;
428 /* Data available. */
429 _IO_ssize_t count
= fp
->_IO_read_end
- fp
->_IO_read_ptr
;
432 if ((_IO_size_t
) count
> more
)
437 s
= __mempcpy (s
, fp
->_IO_read_ptr
, count
);
439 memcpy (s
, fp
->_IO_read_ptr
, count
);
442 fp
->_IO_read_ptr
+= count
;
448 char *p
= fp
->_IO_read_ptr
;
452 fp
->_IO_read_ptr
= p
;
456 if (more
== 0 || __underflow (fp
) == EOF
)
463 /* Seems not to be needed. --drepper */
473 _IO_default_setbuf (fp
, p
, len
)
478 if (_IO_SYNC (fp
) == EOF
)
480 if (p
== NULL
|| len
== 0)
482 fp
->_flags
|= _IO_UNBUFFERED
;
483 _IO_setb (fp
, fp
->_shortbuf
, fp
->_shortbuf
+1, 0);
487 fp
->_flags
&= ~_IO_UNBUFFERED
;
488 _IO_setb (fp
, p
, p
+len
, 0);
490 fp
->_IO_write_base
= fp
->_IO_write_ptr
= fp
->_IO_write_end
= 0;
491 fp
->_IO_read_base
= fp
->_IO_read_ptr
= fp
->_IO_read_end
= 0;
496 _IO_default_seekpos (fp
, pos
, mode
)
501 return _IO_SEEKOFF (fp
, _IO_pos_as_off (pos
), 0, mode
);
505 _IO_default_doallocate (fp
)
510 ALLOC_BUF (buf
, _IO_BUFSIZ
, EOF
);
511 _IO_setb (fp
, buf
, buf
+_IO_BUFSIZ
, 1);
520 fp
->_flags
= _IO_MAGIC
|flags
;
521 fp
->_IO_buf_base
= NULL
;
522 fp
->_IO_buf_end
= NULL
;
523 fp
->_IO_read_base
= NULL
;
524 fp
->_IO_read_ptr
= NULL
;
525 fp
->_IO_read_end
= NULL
;
526 fp
->_IO_write_base
= NULL
;
527 fp
->_IO_write_ptr
= NULL
;
528 fp
->_IO_write_end
= NULL
;
529 fp
->_chain
= NULL
; /* Not necessary. */
531 fp
->_IO_save_base
= NULL
;
532 fp
->_IO_backup_base
= NULL
;
533 fp
->_IO_save_end
= NULL
;
537 fp
->_vtable_offset
= 0;
540 _IO_lock_init (*fp
->_lock
);
545 _IO_default_sync (fp
)
551 /* The way the C++ classes are mapped into the C functions in the
552 current implementation, this function can get called twice! */
555 _IO_default_finish (fp
, dummy
)
559 struct _IO_marker
*mark
;
560 if (fp
->_IO_buf_base
&& !(fp
->_flags
& _IO_USER_BUF
))
562 FREE_BUF (fp
->_IO_buf_base
, _IO_blen (fp
));
563 fp
->_IO_buf_base
= fp
->_IO_buf_end
= NULL
;
566 for (mark
= fp
->_markers
; mark
!= NULL
; mark
= mark
->_next
)
569 if (fp
->_IO_save_base
)
571 free (fp
->_IO_save_base
);
572 fp
->_IO_save_base
= NULL
;
576 _IO_lock_fini (*fp
->_lock
);
583 _IO_default_seekoff (fp
, offset
, dir
, mode
)
593 _IO_sputbackc (fp
, c
)
599 if (fp
->_IO_read_ptr
> fp
->_IO_read_base
600 && (unsigned char)fp
->_IO_read_ptr
[-1] == (unsigned char)c
)
603 result
= (unsigned char) c
;
606 result
= _IO_PBACKFAIL (fp
, c
);
609 fp
->_flags
&= ~_IO_EOF_SEEN
;
620 if (fp
->_IO_read_ptr
> fp
->_IO_read_base
)
623 result
= (unsigned char) *fp
->_IO_read_ptr
;
626 result
= _IO_PBACKFAIL (fp
, EOF
);
629 fp
->_flags
&= ~_IO_EOF_SEEN
;
634 #if 0 /* Work in progress */
635 /* Seems not to be needed. */
638 _IO_set_column (fp
, c
)
645 fp
->_column
= c
- (fp
->_IO_write_ptr
- fp
->_IO_write_base
);
649 _IO_set_column (fp
, i
)
653 fp
->_cur_column
= i
+ 1;
661 _IO_adjust_column (start
, line
, count
)
666 const char *ptr
= line
+ count
;
669 return line
+ count
- ptr
- 1;
670 return start
+ count
;
674 /* Seems not to be needed. --drepper */
680 return _IO_adjust_column (fp
->_cur_column
- 1,
682 fp
->_IO_write_ptr
- fp
->_IO_write_base
);
692 for (fp
= _IO_list_all
; fp
!= NULL
; fp
= fp
->_chain
)
693 if (fp
->_IO_write_ptr
> fp
->_IO_write_base
694 && _IO_OVERFLOW (fp
, EOF
) == EOF
)
700 _IO_flush_all_linebuffered ()
703 for (fp
= _IO_list_all
; fp
!= NULL
; fp
= fp
->_chain
)
704 if ((fp
->_flags
& _IO_NO_WRITES
) == 0 && fp
->_flags
& _IO_LINE_BUF
)
705 _IO_OVERFLOW (fp
, EOF
);
708 static void _IO_unbuffer_all
__P ((void));
714 for (fp
= _IO_list_all
; fp
!= NULL
; fp
= fp
->_chain
)
715 if (! (fp
->_flags
& _IO_UNBUFFERED
))
716 _IO_SETBUF (fp
, NULL
, 0);
722 int result
= _IO_flush_all ();
724 /* We currently don't have a reliable mechanism for making sure that
725 C++ static destructors are executed in the correct order.
726 So it is possible that other static destructors might want to
727 write to cout - and they're supposed to be able to do so.
729 The following will make the standard streambufs be unbuffered,
730 which forces any output from late destructors to be written out. */
738 _IO_init_marker (marker
, fp
)
739 struct _IO_marker
*marker
;
743 if (_IO_in_put_mode (fp
))
744 _IO_switch_to_get_mode (fp
);
745 if (_IO_in_backup (fp
))
746 marker
->_pos
= fp
->_IO_read_ptr
- fp
->_IO_read_end
;
748 marker
->_pos
= fp
->_IO_read_ptr
- fp
->_IO_read_base
;
750 /* Should perhaps sort the chain? */
751 marker
->_next
= fp
->_markers
;
752 fp
->_markers
= marker
;
756 _IO_remove_marker (marker
)
757 struct _IO_marker
*marker
;
759 /* Unlink from sb's chain. */
760 struct _IO_marker
**ptr
= &marker
->_sbuf
->_markers
;
761 for (; ; ptr
= &(*ptr
)->_next
)
765 else if (*ptr
== marker
)
767 *ptr
= marker
->_next
;
772 if _sbuf has a backup area that is no longer needed, should we delete
773 it now, or wait until the next underflow?
777 #define BAD_DELTA EOF
780 _IO_marker_difference (mark1
, mark2
)
781 struct _IO_marker
*mark1
;
782 struct _IO_marker
*mark2
;
784 return mark1
->_pos
- mark2
->_pos
;
787 /* Return difference between MARK and current position of MARK's stream. */
789 _IO_marker_delta (mark
)
790 struct _IO_marker
*mark
;
793 if (mark
->_sbuf
== NULL
)
795 if (_IO_in_backup (mark
->_sbuf
))
796 cur_pos
= mark
->_sbuf
->_IO_read_ptr
- mark
->_sbuf
->_IO_read_end
;
798 cur_pos
= mark
->_sbuf
->_IO_read_ptr
- mark
->_sbuf
->_IO_read_base
;
799 return mark
->_pos
- cur_pos
;
803 _IO_seekmark (fp
, mark
, delta
)
805 struct _IO_marker
*mark
;
808 if (mark
->_sbuf
!= fp
)
812 if (_IO_in_backup (fp
))
813 _IO_switch_to_main_get_area (fp
);
814 fp
->_IO_read_ptr
= fp
->_IO_read_base
+ mark
->_pos
;
818 if (!_IO_in_backup (fp
))
819 _IO_switch_to_backup_area (fp
);
820 fp
->_IO_read_ptr
= fp
->_IO_read_end
+ mark
->_pos
;
826 _IO_unsave_markers (fp
)
829 struct _IO_marker
*mark
= fp
->_markers
;
833 streampos offset
= seekoff (0, ios::cur
, ios::in
);
836 offset
+= eGptr () - Gbase ();
837 for ( ; mark
!= NULL
; mark
= mark
->_next
)
838 mark
->set_streampos (mark
->_pos
+ offset
);
842 for ( ; mark
!= NULL
; mark
= mark
->_next
)
843 mark
->set_streampos (EOF
);
849 if (_IO_have_backup (fp
))
850 _IO_free_backup_area (fp
);
854 /* Seems not to be needed. --drepper */
856 _IO_nobackup_pbackfail (fp
, c
)
860 if (fp
->_IO_read_ptr
> fp
->_IO_read_base
)
862 if (c
!= EOF
&& *fp
->_IO_read_ptr
!= c
)
863 *fp
->_IO_read_ptr
= c
;
864 return (unsigned char) c
;
869 _IO_default_pbackfail (fp
, c
)
873 if (fp
->_IO_read_ptr
> fp
->_IO_read_base
&& !_IO_in_backup (fp
)
874 && (unsigned char) fp
->_IO_read_ptr
[-1] == c
)
878 /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/
879 if (!_IO_in_backup (fp
))
881 /* We need to keep the invariant that the main get area
882 logically follows the backup area. */
883 if (fp
->_IO_read_ptr
> fp
->_IO_read_base
&& _IO_have_backup (fp
))
885 if (save_for_backup (fp
, fp
->_IO_read_ptr
))
888 else if (!_IO_have_backup (fp
))
890 /* No backup buffer: allocate one. */
891 /* Use nshort buffer, if unused? (probably not) FIXME */
892 int backup_size
= 128;
893 char *bbuf
= (char *) malloc (backup_size
);
896 fp
->_IO_save_base
= bbuf
;
897 fp
->_IO_save_end
= fp
->_IO_save_base
+ backup_size
;
898 fp
->_IO_backup_base
= fp
->_IO_save_end
;
900 fp
->_IO_read_base
= fp
->_IO_read_ptr
;
901 _IO_switch_to_backup_area (fp
);
903 else if (fp
->_IO_read_ptr
<= fp
->_IO_read_base
)
905 /* Increase size of existing backup buffer. */
907 _IO_size_t old_size
= fp
->_IO_read_end
- fp
->_IO_read_base
;
909 new_size
= 2 * old_size
;
910 new_buf
= (char *) malloc (new_size
);
913 memcpy (new_buf
+ (new_size
- old_size
), fp
->_IO_read_base
,
915 free (fp
->_IO_read_base
);
916 _IO_setg (fp
, new_buf
, new_buf
+ (new_size
- old_size
),
918 fp
->_IO_backup_base
= fp
->_IO_read_ptr
;
921 *--fp
->_IO_read_ptr
= c
;
923 return (unsigned char) c
;
927 _IO_default_seek (fp
, offset
, dir
)
936 _IO_default_stat (fp
, st
)
944 _IO_default_read (fp
, data
, n
)
953 _IO_default_write (fp
, data
, n
)
962 _IO_default_showmanyc (fp
)
969 _IO_default_imbue (fp
, locale
)
986 ~__io_defs() { _IO_cleanup (); }
994 weak_alias (_IO_cleanup
, _cleanup
)
997 #ifdef text_set_element
998 text_set_element(__libc_atexit
, _cleanup
);