1 /* Copyright (C) 1993-2019 Free Software Foundation, Inc.
2 This file is part of the GNU C Library.
3 Written by Per Bothner <bothner@cygnus.com>.
4
5 The GNU C Library is free software; you can redistribute it and/or
6 modify it under the terms of the GNU Lesser General Public
7 License as published by the Free Software Foundation; either
8 version 2.1 of the License, or (at your option) any later version.
9
10 The GNU C Library is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 Lesser General Public License for more details.
14
15 You should have received a copy of the GNU Lesser General Public
16 License along with the GNU C Library; if not, see
17 <http://www.gnu.org/licenses/>.
18
19 As a special exception, if you link the code in this file with
20 files compiled with a GNU compiler to produce an executable,
21 that does not cause the resulting executable to be covered by
22 the GNU Lesser General Public License. This exception does not
23 however invalidate any other reasons why the executable file
24 might be covered by the GNU Lesser General Public License.
25 This exception applies to code released by its copyright holders
26 in files containing the exception. */
27
28
29 #include "libioP.h"
30 #include <assert.h>
31 #include <fcntl.h>
32 #include <sys/mman.h>
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/stat.h>
36 #include <string.h>
37 #include <errno.h>
38 #include <unistd.h>
39 #include <stdlib.h>
40 #include "../wcsmbs/wcsmbsload.h"
41 #include "../iconv/gconv_charset.h"
42 #include "../iconv/gconv_int.h"
43 #include <shlib-compat.h>
44 #include <not-cancel.h>
45 #include <kernel-features.h>
46
47 extern struct __gconv_trans_data __libio_translit attribute_hidden;
48
49 /* An fstream can be in at most one of put mode, get mode, or putback mode.
50 Putback mode is a variant of get mode.
51
52 In a filebuf, there is only one current position, instead of two
53 separate get and put pointers. In get mode, the current position
54 is that of gptr(); in put mode that of pptr().
55
56 The position in the buffer that corresponds to the position
57      in the external file system is normally _IO_read_end, except in putback
58 mode, when it is _IO_save_end and also when the file is in append mode,
59 since switching from read to write mode automatically sends the position in
60 the external file system to the end of file.
61 If the field _fb._offset is >= 0, it gives the offset in
62 the file as a whole corresponding to eGptr(). (?)
63
64 PUT MODE:
65 If a filebuf is in put mode, then all of _IO_read_ptr, _IO_read_end,
66 and _IO_read_base are equal to each other. These are usually equal
67 to _IO_buf_base, though not necessarily if we have switched from
68 get mode to put mode. (The reason is to maintain the invariant
69 that _IO_read_end corresponds to the external file position.)
70 _IO_write_base is non-NULL and usually equal to _IO_buf_base.
71 We also have _IO_write_end == _IO_buf_end, but only in fully buffered mode.
72      The un-flushed characters are those between _IO_write_base and _IO_write_ptr.
73
74 GET MODE:
75 If a filebuf is in get or putback mode, eback() != egptr().
76 In get mode, the unread characters are between gptr() and egptr().
77 The OS file position corresponds to that of egptr().
78
79 PUTBACK MODE:
80 Putback mode is used to remember "excess" characters that have
81 been sputbackc'd in a separate putback buffer.
82 In putback mode, the get buffer points to the special putback buffer.
83 The unread characters are the characters between gptr() and egptr()
84 in the putback buffer, as well as the area between save_gptr()
85 and save_egptr(), which point into the original reserve buffer.
86 (The pointers save_gptr() and save_egptr() are the values
87 of gptr() and egptr() at the time putback mode was entered.)
88 The OS position corresponds to that of save_egptr().
89
90 LINE BUFFERED OUTPUT:
91 During line buffered output, _IO_write_base==base() && epptr()==base().
92 However, ptr() may be anywhere between base() and ebuf().
93 This forces a call to filebuf::overflow(int C) on every put.
94 If there is more space in the buffer, and C is not a '\n',
95 then C is inserted, and pptr() incremented.
96
97 UNBUFFERED STREAMS:
98      If a filebuf is unbuffered(), then _shortbuf[1] is used as the buffer.
99 */
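#if 0 /* Illustrative sketch, not part of libio: the user-visible effect of
         the LINE BUFFERED OUTPUT case described above.  With _IOLBF every
         put goes through the overflow path, and a '\n' pushes the buffered
         line out to the file descriptor.  */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  static char buf[BUFSIZ];
  setvbuf (stdout, buf, _IOLBF, sizeof buf);

  fputs ("no newline yet", stdout);     /* Stays in the stdio buffer.  */
  sleep (1);                            /* Nothing appears on the terminal.  */
  fputc ('\n', stdout);                 /* The newline flushes the line.  */
  return 0;
}
#endif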
100
101 #define CLOSED_FILEBUF_FLAGS \
102 (_IO_IS_FILEBUF+_IO_NO_READS+_IO_NO_WRITES+_IO_TIED_PUT_GET)
103
104
105 void
106 _IO_new_file_init_internal (struct _IO_FILE_plus *fp)
107 {
108 /* POSIX.1 allows another file handle to be used to change the position
109      of our file descriptor.  Hence we do not know the actual
110 position before we do the first fseek (and until a following fflush). */
111 fp->file._offset = _IO_pos_BAD;
112 fp->file._flags |= CLOSED_FILEBUF_FLAGS;
113
114 _IO_link_in (fp);
115 fp->file._fileno = -1;
116 }
117
118 /* External version of _IO_new_file_init_internal which switches off
119 vtable validation. */
120 void
121 _IO_new_file_init (struct _IO_FILE_plus *fp)
122 {
123 IO_set_accept_foreign_vtables (&_IO_vtable_check);
124 _IO_new_file_init_internal (fp);
125 }
126
127 int
128 _IO_new_file_close_it (FILE *fp)
129 {
130 int write_status;
131 if (!_IO_file_is_open (fp))
132 return EOF;
133
134 if ((fp->_flags & _IO_NO_WRITES) == 0
135 && (fp->_flags & _IO_CURRENTLY_PUTTING) != 0)
136 write_status = _IO_do_flush (fp);
137 else
138 write_status = 0;
139
140 _IO_unsave_markers (fp);
141
142 int close_status = ((fp->_flags2 & _IO_FLAGS2_NOCLOSE) == 0
143 ? _IO_SYSCLOSE (fp) : 0);
144
145 /* Free buffer. */
146 if (fp->_mode > 0)
147 {
148 if (_IO_have_wbackup (fp))
149 _IO_free_wbackup_area (fp);
150 _IO_wsetb (fp, NULL, NULL, 0);
151 _IO_wsetg (fp, NULL, NULL, NULL);
152 _IO_wsetp (fp, NULL, NULL);
153 }
154 _IO_setb (fp, NULL, NULL, 0);
155 _IO_setg (fp, NULL, NULL, NULL);
156 _IO_setp (fp, NULL, NULL);
157
158 _IO_un_link ((struct _IO_FILE_plus *) fp);
159 fp->_flags = _IO_MAGIC|CLOSED_FILEBUF_FLAGS;
160 fp->_fileno = -1;
161 fp->_offset = _IO_pos_BAD;
162
163 return close_status ? close_status : write_status;
164 }
165 libc_hidden_ver (_IO_new_file_close_it, _IO_file_close_it)
166
167 void
168 _IO_new_file_finish (FILE *fp, int dummy)
169 {
170 if (_IO_file_is_open (fp))
171 {
172 _IO_do_flush (fp);
173 if (!(fp->_flags & _IO_DELETE_DONT_CLOSE))
174 _IO_SYSCLOSE (fp);
175 }
176 _IO_default_finish (fp, 0);
177 }
178 libc_hidden_ver (_IO_new_file_finish, _IO_file_finish)
179
180 FILE *
181 _IO_file_open (FILE *fp, const char *filename, int posix_mode, int prot,
182 int read_write, int is32not64)
183 {
184 int fdesc;
185 if (__glibc_unlikely (fp->_flags2 & _IO_FLAGS2_NOTCANCEL))
186 fdesc = __open_nocancel (filename,
187 posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot);
188 else
189 fdesc = __open (filename, posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot);
190 if (fdesc < 0)
191 return NULL;
192 fp->_fileno = fdesc;
193 _IO_mask_flags (fp, read_write,_IO_NO_READS+_IO_NO_WRITES+_IO_IS_APPENDING);
194 /* For append mode, send the file offset to the end of the file. Don't
195 update the offset cache though, since the file handle is not active. */
196 if ((read_write & (_IO_IS_APPENDING | _IO_NO_READS))
197 == (_IO_IS_APPENDING | _IO_NO_READS))
198 {
199 off64_t new_pos = _IO_SYSSEEK (fp, 0, _IO_seek_end);
200 if (new_pos == _IO_pos_BAD && errno != ESPIPE)
201 {
202 __close_nocancel (fdesc);
203 return NULL;
204 }
205 }
206 _IO_link_in ((struct _IO_FILE_plus *) fp);
207 return fp;
208 }
209 libc_hidden_def (_IO_file_open)
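#if 0 /* Illustrative sketch, not part of libio: the append case handled by
         _IO_file_open above.  Opening with "a" seeks the descriptor to the
         end of the file (and O_APPEND keeps every write there), so new data
         always lands after the existing contents.  The path is an arbitrary
         example.  */
#include <stdio.h>

int
main (void)
{
  FILE *fp = fopen ("/tmp/example-append.txt", "a");
  if (fp == NULL)
    return 1;

  /* However large the file already is, this line is appended at the end.  */
  fputs ("another line\n", fp);
  fclose (fp);
  return 0;
}
#endif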
210
211 FILE *
212 _IO_new_file_fopen (FILE *fp, const char *filename, const char *mode,
213 int is32not64)
214 {
215 int oflags = 0, omode;
216 int read_write;
217 int oprot = 0666;
218 int i;
219 FILE *result;
220 const char *cs;
221 const char *last_recognized;
222
223 if (_IO_file_is_open (fp))
224 return 0;
225 switch (*mode)
226 {
227 case 'r':
228 omode = O_RDONLY;
229 read_write = _IO_NO_WRITES;
230 break;
231 case 'w':
232 omode = O_WRONLY;
233 oflags = O_CREAT|O_TRUNC;
234 read_write = _IO_NO_READS;
235 break;
236 case 'a':
237 omode = O_WRONLY;
238 oflags = O_CREAT|O_APPEND;
239 read_write = _IO_NO_READS|_IO_IS_APPENDING;
240 break;
241 default:
242 __set_errno (EINVAL);
243 return NULL;
244 }
245 last_recognized = mode;
246 for (i = 1; i < 7; ++i)
247 {
248 switch (*++mode)
249 {
250 case '\0':
251 break;
252 case '+':
253 omode = O_RDWR;
254 read_write &= _IO_IS_APPENDING;
255 last_recognized = mode;
256 continue;
257 case 'x':
258 oflags |= O_EXCL;
259 last_recognized = mode;
260 continue;
261 case 'b':
262 last_recognized = mode;
263 continue;
264 case 'm':
265 fp->_flags2 |= _IO_FLAGS2_MMAP;
266 continue;
267 case 'c':
268 fp->_flags2 |= _IO_FLAGS2_NOTCANCEL;
269 continue;
270 case 'e':
271 oflags |= O_CLOEXEC;
272 fp->_flags2 |= _IO_FLAGS2_CLOEXEC;
273 continue;
274 default:
275 /* Ignore. */
276 continue;
277 }
278 break;
279 }
280
281 result = _IO_file_open (fp, filename, omode|oflags, oprot, read_write,
282 is32not64);
283
284 if (result != NULL)
285 {
286 /* Test whether the mode string specifies the conversion. */
287 cs = strstr (last_recognized + 1, ",ccs=");
288 if (cs != NULL)
289 {
290 /* Yep. Load the appropriate conversions and set the orientation
291 to wide. */
292 struct gconv_fcts fcts;
293 struct _IO_codecvt *cc;
294 char *endp = __strchrnul (cs + 5, ',');
295 char *ccs = malloc (endp - (cs + 5) + 3);
296
297 if (ccs == NULL)
298 {
299 int malloc_err = errno; /* Whatever malloc failed with. */
300 (void) _IO_file_close_it (fp);
301 __set_errno (malloc_err);
302 return NULL;
303 }
304
305 *((char *) __mempcpy (ccs, cs + 5, endp - (cs + 5))) = '\0';
306 strip (ccs, ccs);
307
308 if (__wcsmbs_named_conv (&fcts, ccs[2] == '\0'
309 ? upstr (ccs, cs + 5) : ccs) != 0)
310 {
311 /* Something went wrong, we cannot load the conversion modules.
312 This means we cannot proceed since the user explicitly asked
313 for these. */
314 (void) _IO_file_close_it (fp);
315 free (ccs);
316 __set_errno (EINVAL);
317 return NULL;
318 }
319
320 free (ccs);
321
322 assert (fcts.towc_nsteps == 1);
323 assert (fcts.tomb_nsteps == 1);
324
325 fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_read_end;
326 fp->_wide_data->_IO_write_ptr = fp->_wide_data->_IO_write_base;
327
328 /* Clear the state. We start all over again. */
329 memset (&fp->_wide_data->_IO_state, '\0', sizeof (__mbstate_t));
330 memset (&fp->_wide_data->_IO_last_state, '\0', sizeof (__mbstate_t));
331
332 cc = fp->_codecvt = &fp->_wide_data->_codecvt;
333
334 cc->__cd_in.__cd.__nsteps = fcts.towc_nsteps;
335 cc->__cd_in.__cd.__steps = fcts.towc;
336
337 cc->__cd_in.__cd.__data[0].__invocation_counter = 0;
338 cc->__cd_in.__cd.__data[0].__internal_use = 1;
339 cc->__cd_in.__cd.__data[0].__flags = __GCONV_IS_LAST;
340 cc->__cd_in.__cd.__data[0].__statep = &result->_wide_data->_IO_state;
341
342 cc->__cd_out.__cd.__nsteps = fcts.tomb_nsteps;
343 cc->__cd_out.__cd.__steps = fcts.tomb;
344
345 cc->__cd_out.__cd.__data[0].__invocation_counter = 0;
346 cc->__cd_out.__cd.__data[0].__internal_use = 1;
347 cc->__cd_out.__cd.__data[0].__flags
348 = __GCONV_IS_LAST | __GCONV_TRANSLIT;
349 cc->__cd_out.__cd.__data[0].__statep =
350 &result->_wide_data->_IO_state;
351
352 /* From now on use the wide character callback functions. */
353 _IO_JUMPS_FILE_plus (fp) = fp->_wide_data->_wide_vtable;
354
355 /* Set the mode now. */
356 result->_mode = 1;
357 }
358 }
359
360 return result;
361 }
362 libc_hidden_ver (_IO_new_file_fopen, _IO_file_fopen)
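#if 0 /* Illustrative sketch, not part of libio: the ",ccs=" handling in
         _IO_new_file_fopen above.  Naming a coded character set in the mode
         string loads the matching gconv conversion and leaves the stream
         wide-oriented from the start, so only the wide character functions
         may be used on it.  The path is an arbitrary example.  */
#include <stdio.h>
#include <wchar.h>

int
main (void)
{
  FILE *fp = fopen ("/tmp/utf8-input.txt", "r,ccs=UTF-8");
  if (fp == NULL)
    return 1;

  wint_t wc;
  while ((wc = fgetwc (fp)) != WEOF)
    wprintf (L"U+%04X\n", (unsigned int) wc);

  fclose (fp);
  return 0;
}
#endif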
363
364 FILE *
365 _IO_new_file_attach (FILE *fp, int fd)
366 {
367 if (_IO_file_is_open (fp))
368 return NULL;
369 fp->_fileno = fd;
370 fp->_flags &= ~(_IO_NO_READS+_IO_NO_WRITES);
371 fp->_flags |= _IO_DELETE_DONT_CLOSE;
372 /* Get the current position of the file. */
373   /* We have to do this since the cached offset may be junk. */
374 fp->_offset = _IO_pos_BAD;
375 int save_errno = errno;
376 if (_IO_SEEKOFF (fp, (off64_t)0, _IO_seek_cur, _IOS_INPUT|_IOS_OUTPUT)
377 == _IO_pos_BAD && errno != ESPIPE)
378 return NULL;
379 __set_errno (save_errno);
380 return fp;
381 }
382 libc_hidden_ver (_IO_new_file_attach, _IO_file_attach)
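#if 0 /* Illustrative sketch, not part of libio: the attach operation above
         is what fdopen builds on.  The stream takes over an already-open
         descriptor and inherits its current file offset; unseekable
         descriptors (ESPIPE) are accepted too.  The file name is an
         arbitrary example.  */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("/etc/hostname", O_RDONLY);
  if (fd < 0)
    return 1;

  lseek (fd, 2, SEEK_SET);          /* Descriptor already positioned.  */

  FILE *fp = fdopen (fd, "r");      /* Stream continues from offset 2.  */
  if (fp == NULL)
    return 1;

  int c = fgetc (fp);               /* Byte at offset 2 of the file.  */
  if (c != EOF)
    printf ("byte at offset 2: %c\n", c);
  fclose (fp);                      /* Also closes the descriptor.  */
  return 0;
}
#endif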
383
384 FILE *
385 _IO_new_file_setbuf (FILE *fp, char *p, ssize_t len)
386 {
387 if (_IO_default_setbuf (fp, p, len) == NULL)
388 return NULL;
389
390 fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end
391 = fp->_IO_buf_base;
392 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
393
394 return fp;
395 }
396 libc_hidden_ver (_IO_new_file_setbuf, _IO_file_setbuf)
397
398
399 FILE *
400 _IO_file_setbuf_mmap (FILE *fp, char *p, ssize_t len)
401 {
402 FILE *result;
403
404 /* Change the function table. */
405 _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
406 fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
407
408 /* And perform the normal operation. */
409 result = _IO_new_file_setbuf (fp, p, len);
410
411 /* If the call failed, restore to using mmap. */
412 if (result == NULL)
413 {
414 _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
415 fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;
416 }
417
418 return result;
419 }
420
421 static size_t new_do_write (FILE *, const char *, size_t);
422
423 /* Write TO_DO bytes from DATA to FP.
424 Then mark FP as having empty buffers. */
425
426 int
427 _IO_new_do_write (FILE *fp, const char *data, size_t to_do)
428 {
429 return (to_do == 0
430 || (size_t) new_do_write (fp, data, to_do) == to_do) ? 0 : EOF;
431 }
432 libc_hidden_ver (_IO_new_do_write, _IO_do_write)
433
434 static size_t
435 new_do_write (FILE *fp, const char *data, size_t to_do)
436 {
437 size_t count;
438 if (fp->_flags & _IO_IS_APPENDING)
439 /* On a system without a proper O_APPEND implementation,
440        you would need to sys_seek(0, SEEK_END) here, but that is
441        neither needed nor desirable on Unix- or POSIX-like systems.
442 Instead, just indicate that offset (before and after) is
443 unpredictable. */
444 fp->_offset = _IO_pos_BAD;
445 else if (fp->_IO_read_end != fp->_IO_write_base)
446 {
447 off64_t new_pos
448 = _IO_SYSSEEK (fp, fp->_IO_write_base - fp->_IO_read_end, 1);
449 if (new_pos == _IO_pos_BAD)
450 return 0;
451 fp->_offset = new_pos;
452 }
453 count = _IO_SYSWRITE (fp, data, to_do);
454 if (fp->_cur_column && count)
455 fp->_cur_column = _IO_adjust_column (fp->_cur_column - 1, data, count) + 1;
456 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
457 fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_buf_base;
458 fp->_IO_write_end = (fp->_mode <= 0
459 && (fp->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED))
460 ? fp->_IO_buf_base : fp->_IO_buf_end);
461 return count;
462 }
463
464 int
465 _IO_new_file_underflow (FILE *fp)
466 {
467 ssize_t count;
468
469 /* C99 requires EOF to be "sticky". */
470 if (fp->_flags & _IO_EOF_SEEN)
471 return EOF;
472
473 if (fp->_flags & _IO_NO_READS)
474 {
475 fp->_flags |= _IO_ERR_SEEN;
476 __set_errno (EBADF);
477 return EOF;
478 }
479 if (fp->_IO_read_ptr < fp->_IO_read_end)
480 return *(unsigned char *) fp->_IO_read_ptr;
481
482 if (fp->_IO_buf_base == NULL)
483 {
484 /* Maybe we already have a push back pointer. */
485 if (fp->_IO_save_base != NULL)
486 {
487 free (fp->_IO_save_base);
488 fp->_flags &= ~_IO_IN_BACKUP;
489 }
490 _IO_doallocbuf (fp);
491 }
492
493 /* FIXME This can/should be moved to genops ?? */
494 if (fp->_flags & (_IO_LINE_BUF|_IO_UNBUFFERED))
495 {
496       /* We used to flush all line-buffered streams.  This really isn't
497 required by any standard. My recollection is that
498 traditional Unix systems did this for stdout. stderr better
499 not be line buffered. So we do just that here
500 explicitly. --drepper */
501 _IO_acquire_lock (stdout);
502
503 if ((stdout->_flags & (_IO_LINKED | _IO_NO_WRITES | _IO_LINE_BUF))
504 == (_IO_LINKED | _IO_LINE_BUF))
505 _IO_OVERFLOW (stdout, EOF);
506
507 _IO_release_lock (stdout);
508 }
509
510 _IO_switch_to_get_mode (fp);
511
512 /* This is very tricky. We have to adjust those
513 pointers before we call _IO_SYSREAD () since
514      we may longjmp () out while waiting for
515 input. Those pointers may be screwed up. H.J. */
516 fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_buf_base;
517 fp->_IO_read_end = fp->_IO_buf_base;
518 fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end
519 = fp->_IO_buf_base;
520
521 count = _IO_SYSREAD (fp, fp->_IO_buf_base,
522 fp->_IO_buf_end - fp->_IO_buf_base);
523 if (count <= 0)
524 {
525 if (count == 0)
526 fp->_flags |= _IO_EOF_SEEN;
527 else
528 fp->_flags |= _IO_ERR_SEEN, count = 0;
529 }
530 fp->_IO_read_end += count;
531 if (count == 0)
532 {
533 /* If a stream is read to EOF, the calling application may switch active
534 handles. As a result, our offset cache would no longer be valid, so
535 unset it. */
536 fp->_offset = _IO_pos_BAD;
537 return EOF;
538 }
539 if (fp->_offset != _IO_pos_BAD)
540 _IO_pos_adjust (fp->_offset, count);
541 return *(unsigned char *) fp->_IO_read_ptr;
542 }
543 libc_hidden_ver (_IO_new_file_underflow, _IO_file_underflow)
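#if 0 /* Illustrative sketch, not part of libio: the "sticky" EOF check at
         the top of _IO_new_file_underflow.  Once the EOF indicator is set,
         further reads keep returning EOF even if another writer appends to
         the file; clearerr resets the indicator and a real read is attempted
         again.  */
#include <stdio.h>

int
read_rest (FILE *fp)
{
  int c;
  while ((c = fgetc (fp)) != EOF)
    putchar (c);

  /* feof (fp) is now set; fgetc would keep returning EOF even if the file
     has grown in the meantime.  */
  clearerr (fp);
  return fgetc (fp);    /* This performs a fresh read attempt.  */
}
#endif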
544
545 /* Guts of underflow callback if we mmap the file. This stats the file and
546 updates the stream state to match. In the normal case we return zero.
547 If the file is no longer eligible for mmap, its jump tables are reset to
548 the vanilla ones and we return nonzero. */
549 static int
550 mmap_remap_check (FILE *fp)
551 {
552 struct stat64 st;
553
554 if (_IO_SYSSTAT (fp, &st) == 0
555 && S_ISREG (st.st_mode) && st.st_size != 0
556 /* Limit the file size to 1MB for 32-bit machines. */
557 && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024))
558 {
559 const size_t pagesize = __getpagesize ();
560 # define ROUNDED(x) (((x) + pagesize - 1) & ~(pagesize - 1))
561 if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end
562 - fp->_IO_buf_base))
563 {
564 /* We can trim off some pages past the end of the file. */
565 (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size),
566 ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base)
567 - ROUNDED (st.st_size));
568 fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
569 }
570 else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end
571 - fp->_IO_buf_base))
572 {
573 /* The file added some pages. We need to remap it. */
574 void *p;
575 #if _G_HAVE_MREMAP
576 p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end
577 - fp->_IO_buf_base),
578 ROUNDED (st.st_size), MREMAP_MAYMOVE);
579 if (p == MAP_FAILED)
580 {
581 (void) __munmap (fp->_IO_buf_base,
582 fp->_IO_buf_end - fp->_IO_buf_base);
583 goto punt;
584 }
585 #else
586 (void) __munmap (fp->_IO_buf_base,
587 fp->_IO_buf_end - fp->_IO_buf_base);
588 p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED,
589 fp->_fileno, 0);
590 if (p == MAP_FAILED)
591 goto punt;
592 #endif
593 fp->_IO_buf_base = p;
594 fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
595 }
596 else
597 {
598 /* The number of pages didn't change. */
599 fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
600 }
601 # undef ROUNDED
602
603 fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr;
604 _IO_setg (fp, fp->_IO_buf_base,
605 fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base
606 ? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end,
607 fp->_IO_buf_end);
608
609 /* If we are already positioned at or past the end of the file, don't
610 change the current offset. If not, seek past what we have mapped,
611 mimicking the position left by a normal underflow reading into its
612 buffer until EOF. */
613
614 if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base)
615 {
616 if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base,
617 SEEK_SET)
618 != fp->_IO_buf_end - fp->_IO_buf_base)
619 fp->_flags |= _IO_ERR_SEEN;
620 else
621 fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base;
622 }
623
624 return 0;
625 }
626 else
627 {
628 /* Life is no longer good for mmap. Punt it. */
629 (void) __munmap (fp->_IO_buf_base,
630 fp->_IO_buf_end - fp->_IO_buf_base);
631 punt:
632 fp->_IO_buf_base = fp->_IO_buf_end = NULL;
633 _IO_setg (fp, NULL, NULL, NULL);
634 if (fp->_mode <= 0)
635 _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
636 else
637 _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
638 fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
639
640 return 1;
641 }
642 }
643
644 /* Special callback replacing the underflow callbacks if we mmap the file. */
645 int
646 _IO_file_underflow_mmap (FILE *fp)
647 {
648 if (fp->_IO_read_ptr < fp->_IO_read_end)
649 return *(unsigned char *) fp->_IO_read_ptr;
650
651 if (__glibc_unlikely (mmap_remap_check (fp)))
652 /* We punted to the regular file functions. */
653 return _IO_UNDERFLOW (fp);
654
655 if (fp->_IO_read_ptr < fp->_IO_read_end)
656 return *(unsigned char *) fp->_IO_read_ptr;
657
658 fp->_flags |= _IO_EOF_SEEN;
659 return EOF;
660 }
661
662 static void
663 decide_maybe_mmap (FILE *fp)
664 {
665 /* We use the file in read-only mode. This could mean we can
666 mmap the file and use it without any copying. But not all
667 file descriptors are for mmap-able objects and on 32-bit
668 machines we don't want to map files which are too large since
669 this would require too much virtual memory. */
670 struct stat64 st;
671
672 if (_IO_SYSSTAT (fp, &st) == 0
673 && S_ISREG (st.st_mode) && st.st_size != 0
674 /* Limit the file size to 1MB for 32-bit machines. */
675 && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024)
676 /* Sanity check. */
677 && (fp->_offset == _IO_pos_BAD || fp->_offset <= st.st_size))
678 {
679 /* Try to map the file. */
680 void *p;
681
682 p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, fp->_fileno, 0);
683 if (p != MAP_FAILED)
684 {
685 /* OK, we managed to map the file. Set the buffer up and use a
686 special jump table with simplified underflow functions which
687              never try to read anything from the file. */
688
689 if (__lseek64 (fp->_fileno, st.st_size, SEEK_SET) != st.st_size)
690 {
691 (void) __munmap (p, st.st_size);
692 fp->_offset = _IO_pos_BAD;
693 }
694 else
695 {
696 _IO_setb (fp, p, (char *) p + st.st_size, 0);
697
698 if (fp->_offset == _IO_pos_BAD)
699 fp->_offset = 0;
700
701 _IO_setg (fp, p, p + fp->_offset, p + st.st_size);
702 fp->_offset = st.st_size;
703
704 if (fp->_mode <= 0)
705 _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
706 else
707 _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps_mmap;
708 fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;
709
710 return;
711 }
712 }
713 }
714
715 /* We couldn't use mmap, so revert to the vanilla file operations. */
716
717 if (fp->_mode <= 0)
718 _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
719 else
720 _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
721 fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
722 }
723
724 int
725 _IO_file_underflow_maybe_mmap (FILE *fp)
726 {
727 /* This is the first read attempt. Choose mmap or vanilla operations
728 and then punt to the chosen underflow routine. */
729 decide_maybe_mmap (fp);
730 return _IO_UNDERFLOW (fp);
731 }
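#if 0 /* Illustrative sketch, not part of libio: the "maybe mmap" machinery
         above backs fopen's glibc-specific 'm' mode flag.  For a read-only
         stream the flag is only a hint; if the descriptor cannot be mapped,
         decide_maybe_mmap silently falls back to ordinary reads.  The file
         is an arbitrary example of a regular, read-only file.  */
#include <stdio.h>

int
main (void)
{
  FILE *fp = fopen ("/etc/services", "rm");
  if (fp == NULL)
    return 1;

  char line[256];
  while (fgets (line, sizeof line, fp) != NULL)
    fputs (line, stdout);

  fclose (fp);
  return 0;
}
#endif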
732
733
734 int
735 _IO_new_file_overflow (FILE *f, int ch)
736 {
737 if (f->_flags & _IO_NO_WRITES) /* SET ERROR */
738 {
739 f->_flags |= _IO_ERR_SEEN;
740 __set_errno (EBADF);
741 return EOF;
742 }
743 /* If currently reading or no buffer allocated. */
744 if ((f->_flags & _IO_CURRENTLY_PUTTING) == 0 || f->_IO_write_base == NULL)
745 {
746 /* Allocate a buffer if needed. */
747 if (f->_IO_write_base == NULL)
748 {
749 _IO_doallocbuf (f);
750 _IO_setg (f, f->_IO_buf_base, f->_IO_buf_base, f->_IO_buf_base);
751 }
752 /* Otherwise must be currently reading.
753 If _IO_read_ptr (and hence also _IO_read_end) is at the buffer end,
754 logically slide the buffer forwards one block (by setting the
755 read pointers to all point at the beginning of the block). This
756 makes room for subsequent output.
757 Otherwise, set the read pointers to _IO_read_end (leaving that
758 alone, so it can continue to correspond to the external position). */
759 if (__glibc_unlikely (_IO_in_backup (f)))
760 {
761 size_t nbackup = f->_IO_read_end - f->_IO_read_ptr;
762 _IO_free_backup_area (f);
763 f->_IO_read_base -= MIN (nbackup,
764 f->_IO_read_base - f->_IO_buf_base);
765 f->_IO_read_ptr = f->_IO_read_base;
766 }
767
768 if (f->_IO_read_ptr == f->_IO_buf_end)
769 f->_IO_read_end = f->_IO_read_ptr = f->_IO_buf_base;
770 f->_IO_write_ptr = f->_IO_read_ptr;
771 f->_IO_write_base = f->_IO_write_ptr;
772 f->_IO_write_end = f->_IO_buf_end;
773 f->_IO_read_base = f->_IO_read_ptr = f->_IO_read_end;
774
775 f->_flags |= _IO_CURRENTLY_PUTTING;
776 if (f->_mode <= 0 && f->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED))
777 f->_IO_write_end = f->_IO_write_ptr;
778 }
779 if (ch == EOF)
780 return _IO_do_write (f, f->_IO_write_base,
781 f->_IO_write_ptr - f->_IO_write_base);
782 if (f->_IO_write_ptr == f->_IO_buf_end ) /* Buffer is really full */
783 if (_IO_do_flush (f) == EOF)
784 return EOF;
785 *f->_IO_write_ptr++ = ch;
786 if ((f->_flags & _IO_UNBUFFERED)
787 || ((f->_flags & _IO_LINE_BUF) && ch == '\n'))
788 if (_IO_do_write (f, f->_IO_write_base,
789 f->_IO_write_ptr - f->_IO_write_base) == EOF)
790 return EOF;
791 return (unsigned char) ch;
792 }
793 libc_hidden_ver (_IO_new_file_overflow, _IO_file_overflow)
794
795 int
796 _IO_new_file_sync (FILE *fp)
797 {
798 ssize_t delta;
799 int retval = 0;
800
801 /* char* ptr = cur_ptr(); */
802 if (fp->_IO_write_ptr > fp->_IO_write_base)
803 if (_IO_do_flush(fp)) return EOF;
804 delta = fp->_IO_read_ptr - fp->_IO_read_end;
805 if (delta != 0)
806 {
807 off64_t new_pos = _IO_SYSSEEK (fp, delta, 1);
808 if (new_pos != (off64_t) EOF)
809 fp->_IO_read_end = fp->_IO_read_ptr;
810 else if (errno == ESPIPE)
811 ; /* Ignore error from unseekable devices. */
812 else
813 retval = EOF;
814 }
815 if (retval != EOF)
816 fp->_offset = _IO_pos_BAD;
817 /* FIXME: Cleanup - can this be shared? */
818 /* setg(base(), ptr, ptr); */
819 return retval;
820 }
821 libc_hidden_ver (_IO_new_file_sync, _IO_file_sync)
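#if 0 /* Illustrative sketch, not part of libio: the seek-back done by
         _IO_new_file_sync is what makes fflush on a seekable input stream
         leave the descriptor at the logical read position, so raw read(2)
         calls can continue where stdio stopped.  The file is an arbitrary
         example.  */
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  FILE *fp = fopen ("/etc/passwd", "r");
  if (fp == NULL)
    return 1;

  char line[256];
  if (fgets (line, sizeof line, fp) == NULL)   /* stdio read ahead a block.  */
    return 1;
  fflush (fp);                  /* Descriptor repositioned after line 1.  */

  char byte;
  if (read (fileno (fp), &byte, 1) == 1)
    printf ("next raw byte: %c\n", byte);

  fclose (fp);
  return 0;
}
#endif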
822
823 static int
824 _IO_file_sync_mmap (FILE *fp)
825 {
826 if (fp->_IO_read_ptr != fp->_IO_read_end)
827 {
828 if (__lseek64 (fp->_fileno, fp->_IO_read_ptr - fp->_IO_buf_base,
829 SEEK_SET)
830 != fp->_IO_read_ptr - fp->_IO_buf_base)
831 {
832 fp->_flags |= _IO_ERR_SEEN;
833 return EOF;
834 }
835 }
836 fp->_offset = fp->_IO_read_ptr - fp->_IO_buf_base;
837 fp->_IO_read_end = fp->_IO_read_ptr = fp->_IO_read_base;
838 return 0;
839 }
840
841 /* ftell{,o} implementation. The only time we modify the state of the stream
842 is when we have unflushed writes. In that case we seek to the end and
843 record that offset in the stream object. */
844 static off64_t
845 do_ftell (FILE *fp)
846 {
847 off64_t result, offset = 0;
848
849 /* No point looking at unflushed data if we haven't allocated buffers
850 yet. */
851 if (fp->_IO_buf_base != NULL)
852 {
853 bool unflushed_writes = fp->_IO_write_ptr > fp->_IO_write_base;
854
855 bool append_mode = (fp->_flags & _IO_IS_APPENDING) == _IO_IS_APPENDING;
856
857 /* When we have unflushed writes in append mode, seek to the end of the
858 file and record that offset. This is the only time we change the file
859 stream state and it is safe since the file handle is active. */
860 if (unflushed_writes && append_mode)
861 {
862 result = _IO_SYSSEEK (fp, 0, _IO_seek_end);
863 if (result == _IO_pos_BAD)
864 return EOF;
865 else
866 fp->_offset = result;
867 }
868
869 /* Adjust for unflushed data. */
870 if (!unflushed_writes)
871 offset -= fp->_IO_read_end - fp->_IO_read_ptr;
872 /* We don't trust _IO_read_end to represent the current file offset when
873 writing in append mode because the value would have to be shifted to
874 the end of the file during a flush. Use the write base instead, along
875 with the new offset we got above when we did a seek to the end of the
876 file. */
877 else if (append_mode)
878 offset += fp->_IO_write_ptr - fp->_IO_write_base;
879 /* For all other modes, _IO_read_end represents the file offset. */
880 else
881 offset += fp->_IO_write_ptr - fp->_IO_read_end;
882 }
883
884 if (fp->_offset != _IO_pos_BAD)
885 result = fp->_offset;
886 else
887 result = _IO_SYSSEEK (fp, 0, _IO_seek_cur);
888
889 if (result == EOF)
890 return result;
891
892 result += offset;
893
894 if (result < 0)
895 {
896 __set_errno (EINVAL);
897 return EOF;
898 }
899
900 return result;
901 }
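#if 0 /* Illustrative sketch, not part of libio: the append-mode branch of
         do_ftell above.  With unflushed output on an "a" stream, ftello
         reports the end of the file on disk plus the bytes still sitting in
         the stdio buffer, without flushing them.  The path is an arbitrary
         example.  */
#include <stdio.h>

int
main (void)
{
  FILE *fp = fopen ("/tmp/example-append.txt", "a");
  if (fp == NULL)
    return 1;

  fputs ("pending", fp);        /* 7 bytes, still buffered.  */
  /* Reported position: current file size plus the 7 buffered bytes.  */
  printf ("ftello: %lld\n", (long long) ftello (fp));

  fclose (fp);                  /* The flush happens here.  */
  return 0;
}
#endif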
902
903 off64_t
904 _IO_new_file_seekoff (FILE *fp, off64_t offset, int dir, int mode)
905 {
906 off64_t result;
907 off64_t delta, new_offset;
908 long count;
909
910 /* Short-circuit into a separate function. We don't want to mix any
911 functionality and we don't want to touch anything inside the FILE
912 object. */
913 if (mode == 0)
914 return do_ftell (fp);
915
916   /* POSIX.1 8.2.3.7 says that after a call to fflush() the file
917 offset of the underlying file must be exact. */
918 int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
919 && fp->_IO_write_base == fp->_IO_write_ptr);
920
921 bool was_writing = (fp->_IO_write_ptr > fp->_IO_write_base
922 || _IO_in_put_mode (fp));
923
924 /* Flush unwritten characters.
925 (This may do an unneeded write if we seek within the buffer.
926 But to be able to switch to reading, we would need to set
927 egptr to pptr. That can't be done in the current design,
928 which assumes file_ptr() is eGptr. Anyway, since we probably
929 end up flushing when we close(), it doesn't make much difference.)
930 FIXME: simulate mem-mapped files. */
931 if (was_writing && _IO_switch_to_get_mode (fp))
932 return EOF;
933
934 if (fp->_IO_buf_base == NULL)
935 {
936 /* It could be that we already have a pushback buffer. */
937 if (fp->_IO_read_base != NULL)
938 {
939 free (fp->_IO_read_base);
940 fp->_flags &= ~_IO_IN_BACKUP;
941 }
942 _IO_doallocbuf (fp);
943 _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
944 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
945 }
946
947 switch (dir)
948 {
949 case _IO_seek_cur:
950       /* Adjust for read-ahead (bytes in buffer). */
951 offset -= fp->_IO_read_end - fp->_IO_read_ptr;
952
953 if (fp->_offset == _IO_pos_BAD)
954 goto dumb;
955 /* Make offset absolute, assuming current pointer is file_ptr(). */
956 offset += fp->_offset;
957 if (offset < 0)
958 {
959 __set_errno (EINVAL);
960 return EOF;
961 }
962
963 dir = _IO_seek_set;
964 break;
965 case _IO_seek_set:
966 break;
967 case _IO_seek_end:
968 {
969 struct stat64 st;
970 if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
971 {
972 offset += st.st_size;
973 dir = _IO_seek_set;
974 }
975 else
976 goto dumb;
977 }
978 }
979
980 _IO_free_backup_area (fp);
981
982 /* At this point, dir==_IO_seek_set. */
983
984 /* If destination is within current buffer, optimize: */
985 if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
986 && !_IO_in_backup (fp))
987 {
988 off64_t start_offset = (fp->_offset
989 - (fp->_IO_read_end - fp->_IO_buf_base));
990 if (offset >= start_offset && offset < fp->_offset)
991 {
992 _IO_setg (fp, fp->_IO_buf_base,
993 fp->_IO_buf_base + (offset - start_offset),
994 fp->_IO_read_end);
995 _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
996
997 _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
998 goto resync;
999 }
1000 }
1001
1002 if (fp->_flags & _IO_NO_READS)
1003 goto dumb;
1004
1005 /* Try to seek to a block boundary, to improve kernel page management. */
1006 new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
1007 delta = offset - new_offset;
1008 if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
1009 {
1010 new_offset = offset;
1011 delta = 0;
1012 }
1013 result = _IO_SYSSEEK (fp, new_offset, 0);
1014 if (result < 0)
1015 return EOF;
1016 if (delta == 0)
1017 count = 0;
1018 else
1019 {
1020 count = _IO_SYSREAD (fp, fp->_IO_buf_base,
1021 (must_be_exact
1022 ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
1023 if (count < delta)
1024 {
1025 /* We weren't allowed to read, but try to seek the remainder. */
1026 offset = count == EOF ? delta : delta-count;
1027 dir = _IO_seek_cur;
1028 goto dumb;
1029 }
1030 }
1031 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
1032 fp->_IO_buf_base + count);
1033 _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
1034 fp->_offset = result + count;
1035 _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
1036 return offset;
1037 dumb:
1038
1039 _IO_unsave_markers (fp);
1040 result = _IO_SYSSEEK (fp, offset, dir);
1041 if (result != EOF)
1042 {
1043 _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
1044 fp->_offset = result;
1045 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
1046 _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
1047 }
1048 return result;
1049
1050 resync:
1051   /* We need to do this since the kernel file offset may have been
1052      changed behind our back.  This can happen when we fopen a file
1053      and then fork: another process may access the file and move
1054      the kernel file offset. */
1055 if (fp->_offset >= 0)
1056 _IO_SYSSEEK (fp, fp->_offset, 0);
1057
1058 return offset;
1059 }
1060 libc_hidden_ver (_IO_new_file_seekoff, _IO_file_seekoff)
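#if 0 /* Illustrative sketch, not part of libio: one reason the seek path
         above flushes pending output and switches get/put modes.  On an
         update ("r+") stream ISO C requires a positioning call between a
         read and the next write, and a flush or positioning call between a
         write and the next read.  The path is an arbitrary example.  */
#include <stdio.h>

int
main (void)
{
  FILE *fp = fopen ("/tmp/example-update.txt", "r+");
  if (fp == NULL)
    return 1;

  int c = fgetc (fp);                 /* Read the first byte.  */
  fseek (fp, 0L, SEEK_CUR);           /* Required before switching to output.  */
  fputc (c == 'a' ? 'A' : 'a', fp);   /* Overwrites the second byte.  */

  fflush (fp);                        /* Required before reading again.  */
  fclose (fp);
  return 0;
}
#endif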
1061
1062 off64_t
1063 _IO_file_seekoff_mmap (FILE *fp, off64_t offset, int dir, int mode)
1064 {
1065 off64_t result;
1066
1067 /* If we are only interested in the current position, calculate it and
1068 return right now. This calculation does the right thing when we are
1069 using a pushback buffer, but in the usual case has the same value as
1070 (fp->_IO_read_ptr - fp->_IO_buf_base). */
1071 if (mode == 0)
1072 return fp->_offset - (fp->_IO_read_end - fp->_IO_read_ptr);
1073
1074 switch (dir)
1075 {
1076 case _IO_seek_cur:
1077       /* Adjust for read-ahead (bytes in buffer). */
1078 offset += fp->_IO_read_ptr - fp->_IO_read_base;
1079 break;
1080 case _IO_seek_set:
1081 break;
1082 case _IO_seek_end:
1083 offset += fp->_IO_buf_end - fp->_IO_buf_base;
1084 break;
1085 }
1086 /* At this point, dir==_IO_seek_set. */
1087
1088 if (offset < 0)
1089 {
1090 /* No negative offsets are valid. */
1091 __set_errno (EINVAL);
1092 return EOF;
1093 }
1094
1095 result = _IO_SYSSEEK (fp, offset, 0);
1096 if (result < 0)
1097 return EOF;
1098
1099 if (offset > fp->_IO_buf_end - fp->_IO_buf_base)
1100 /* One can fseek arbitrarily past the end of the file
1101 and it is meaningless until one attempts to read.
1102 Leave the buffer pointers in EOF state until underflow. */
1103 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_end, fp->_IO_buf_end);
1104 else
1105 /* Adjust the read pointers to match the file position,
1106 but so the next read attempt will call underflow. */
1107 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + offset,
1108 fp->_IO_buf_base + offset);
1109
1110 fp->_offset = result;
1111
1112 _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
1113
1114 return offset;
1115 }
1116
1117 static off64_t
1118 _IO_file_seekoff_maybe_mmap (FILE *fp, off64_t offset, int dir,
1119 int mode)
1120 {
1121 /* We only get here when we haven't tried to read anything yet.
1122 So there is nothing more useful for us to do here than just
1123 the underlying lseek call. */
1124
1125 off64_t result = _IO_SYSSEEK (fp, offset, dir);
1126 if (result < 0)
1127 return EOF;
1128
1129 fp->_offset = result;
1130 return result;
1131 }
1132
1133 ssize_t
1134 _IO_file_read (FILE *fp, void *buf, ssize_t size)
1135 {
1136 return (__builtin_expect (fp->_flags2 & _IO_FLAGS2_NOTCANCEL, 0)
1137 ? __read_nocancel (fp->_fileno, buf, size)
1138 : __read (fp->_fileno, buf, size));
1139 }
1140 libc_hidden_def (_IO_file_read)
1141
1142 off64_t
1143 _IO_file_seek (FILE *fp, off64_t offset, int dir)
1144 {
1145 return __lseek64 (fp->_fileno, offset, dir);
1146 }
1147 libc_hidden_def (_IO_file_seek)
1148
1149 int
1150 _IO_file_stat (FILE *fp, void *st)
1151 {
1152 return __fxstat64 (_STAT_VER, fp->_fileno, (struct stat64 *) st);
1153 }
1154 libc_hidden_def (_IO_file_stat)
1155
1156 int
1157 _IO_file_close_mmap (FILE *fp)
1158 {
1159 /* In addition to closing the file descriptor we have to unmap the file. */
1160 (void) __munmap (fp->_IO_buf_base, fp->_IO_buf_end - fp->_IO_buf_base);
1161 fp->_IO_buf_base = fp->_IO_buf_end = NULL;
1162 /* Cancelling close should be avoided if possible since it leaves an
1163 unrecoverable state behind. */
1164 return __close_nocancel (fp->_fileno);
1165 }
1166
1167 int
1168 _IO_file_close (FILE *fp)
1169 {
1170 /* Cancelling close should be avoided if possible since it leaves an
1171 unrecoverable state behind. */
1172 return __close_nocancel (fp->_fileno);
1173 }
1174 libc_hidden_def (_IO_file_close)
1175
1176 ssize_t
1177 _IO_new_file_write (FILE *f, const void *data, ssize_t n)
1178 {
1179 ssize_t to_do = n;
1180 while (to_do > 0)
1181 {
1182 ssize_t count = (__builtin_expect (f->_flags2
1183 & _IO_FLAGS2_NOTCANCEL, 0)
1184 ? __write_nocancel (f->_fileno, data, to_do)
1185 : __write (f->_fileno, data, to_do));
1186 if (count < 0)
1187 {
1188 f->_flags |= _IO_ERR_SEEN;
1189 break;
1190 }
1191 to_do -= count;
1192 data = (void *) ((char *) data + count);
1193 }
1194 n -= to_do;
1195 if (f->_offset >= 0)
1196 f->_offset += n;
1197 return n;
1198 }
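#if 0 /* Illustrative sketch, not part of libio: a user-level analogue of the
         partial-write loop in _IO_new_file_write above, expressed over plain
         write(2) and with EINTR handling added.  write may return a short
         count, so the remainder has to be retried.  */
#include <errno.h>
#include <unistd.h>

ssize_t
write_all (int fd, const void *buf, size_t len)
{
  const char *p = buf;
  size_t left = len;

  while (left > 0)
    {
      ssize_t n = write (fd, p, left);
      if (n < 0)
        {
          if (errno == EINTR)
            continue;             /* Interrupted before writing: retry.  */
          return -1;              /* Real error.  */
        }
      p += n;
      left -= n;
    }
  return (ssize_t) len;
}
#endif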
1199
1200 size_t
1201 _IO_new_file_xsputn (FILE *f, const void *data, size_t n)
1202 {
1203 const char *s = (const char *) data;
1204 size_t to_do = n;
1205 int must_flush = 0;
1206 size_t count = 0;
1207
1208 if (n <= 0)
1209 return 0;
1210 /* This is an optimized implementation.
1211 If the amount to be written straddles a block boundary
1212 (or the filebuf is unbuffered), use sys_write directly. */
1213
1214 /* First figure out how much space is available in the buffer. */
1215 if ((f->_flags & _IO_LINE_BUF) && (f->_flags & _IO_CURRENTLY_PUTTING))
1216 {
1217 count = f->_IO_buf_end - f->_IO_write_ptr;
1218 if (count >= n)
1219 {
1220 const char *p;
1221 for (p = s + n; p > s; )
1222 {
1223 if (*--p == '\n')
1224 {
1225 count = p - s + 1;
1226 must_flush = 1;
1227 break;
1228 }
1229 }
1230 }
1231 }
1232 else if (f->_IO_write_end > f->_IO_write_ptr)
1233 count = f->_IO_write_end - f->_IO_write_ptr; /* Space available. */
1234
1235 /* Then fill the buffer. */
1236 if (count > 0)
1237 {
1238 if (count > to_do)
1239 count = to_do;
1240 f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
1241 s += count;
1242 to_do -= count;
1243 }
1244 if (to_do + must_flush > 0)
1245 {
1246 size_t block_size, do_write;
1247 /* Next flush the (full) buffer. */
1248 if (_IO_OVERFLOW (f, EOF) == EOF)
1249 /* If nothing else has to be written we must not signal the
1250 caller that everything has been written. */
1251 return to_do == 0 ? EOF : n - to_do;
1252
1253 /* Try to maintain alignment: write a whole number of blocks. */
1254 block_size = f->_IO_buf_end - f->_IO_buf_base;
1255 do_write = to_do - (block_size >= 128 ? to_do % block_size : 0);
1256
1257 if (do_write)
1258 {
1259 count = new_do_write (f, s, do_write);
1260 to_do -= count;
1261 if (count < do_write)
1262 return n - to_do;
1263 }
1264
1265 /* Now write out the remainder. Normally, this will fit in the
1266 buffer, but it's somewhat messier for line-buffered files,
1267 so we let _IO_default_xsputn handle the general case. */
1268 if (to_do)
1269 to_do -= _IO_default_xsputn (f, s+do_write, to_do);
1270 }
1271 return n - to_do;
1272 }
1273 libc_hidden_ver (_IO_new_file_xsputn, _IO_file_xsputn)
1274
1275 size_t
1276 _IO_file_xsgetn (FILE *fp, void *data, size_t n)
1277 {
1278 size_t want, have;
1279 ssize_t count;
1280 char *s = data;
1281
1282 want = n;
1283
1284 if (fp->_IO_buf_base == NULL)
1285 {
1286 /* Maybe we already have a push back pointer. */
1287 if (fp->_IO_save_base != NULL)
1288 {
1289 free (fp->_IO_save_base);
1290 fp->_flags &= ~_IO_IN_BACKUP;
1291 }
1292 _IO_doallocbuf (fp);
1293 }
1294
1295 while (want > 0)
1296 {
1297 have = fp->_IO_read_end - fp->_IO_read_ptr;
1298 if (want <= have)
1299 {
1300 memcpy (s, fp->_IO_read_ptr, want);
1301 fp->_IO_read_ptr += want;
1302 want = 0;
1303 }
1304 else
1305 {
1306 if (have > 0)
1307 {
1308 s = __mempcpy (s, fp->_IO_read_ptr, have);
1309 want -= have;
1310 fp->_IO_read_ptr += have;
1311 }
1312
1313 /* Check for backup and repeat */
1314 if (_IO_in_backup (fp))
1315 {
1316 _IO_switch_to_main_get_area (fp);
1317 continue;
1318 }
1319
1320 /* If we now want less than a buffer, underflow and repeat
1321 the copy. Otherwise, _IO_SYSREAD directly to
1322 the user buffer. */
1323 if (fp->_IO_buf_base
1324 && want < (size_t) (fp->_IO_buf_end - fp->_IO_buf_base))
1325 {
1326 if (__underflow (fp) == EOF)
1327 break;
1328
1329 continue;
1330 }
1331
1332 /* These must be set before the sysread as we might longjmp out
1333 waiting for input. */
1334 _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
1335 _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
1336
1337 /* Try to maintain alignment: read a whole number of blocks. */
1338 count = want;
1339 if (fp->_IO_buf_base)
1340 {
1341 size_t block_size = fp->_IO_buf_end - fp->_IO_buf_base;
1342 if (block_size >= 128)
1343 count -= want % block_size;
1344 }
1345
1346 count = _IO_SYSREAD (fp, s, count);
1347 if (count <= 0)
1348 {
1349 if (count == 0)
1350 fp->_flags |= _IO_EOF_SEEN;
1351 else
1352 fp->_flags |= _IO_ERR_SEEN;
1353
1354 break;
1355 }
1356
1357 s += count;
1358 want -= count;
1359 if (fp->_offset != _IO_pos_BAD)
1360 _IO_pos_adjust (fp->_offset, count);
1361 }
1362 }
1363
1364 return n - want;
1365 }
1366 libc_hidden_def (_IO_file_xsgetn)
1367
1368 static size_t
1369 _IO_file_xsgetn_mmap (FILE *fp, void *data, size_t n)
1370 {
1371 size_t have;
1372 char *read_ptr = fp->_IO_read_ptr;
1373 char *s = (char *) data;
1374
1375 have = fp->_IO_read_end - fp->_IO_read_ptr;
1376
1377 if (have < n)
1378 {
1379 if (__glibc_unlikely (_IO_in_backup (fp)))
1380 {
1381 s = __mempcpy (s, read_ptr, have);
1382 n -= have;
1383 _IO_switch_to_main_get_area (fp);
1384 read_ptr = fp->_IO_read_ptr;
1385 have = fp->_IO_read_end - fp->_IO_read_ptr;
1386 }
1387
1388 if (have < n)
1389 {
1390 /* Check that we are mapping all of the file, in case it grew. */
1391 if (__glibc_unlikely (mmap_remap_check (fp)))
1392 /* We punted mmap, so complete with the vanilla code. */
1393 return s - (char *) data + _IO_XSGETN (fp, data, n);
1394
1395 read_ptr = fp->_IO_read_ptr;
1396 have = fp->_IO_read_end - read_ptr;
1397 }
1398 }
1399
1400 if (have < n)
1401 fp->_flags |= _IO_EOF_SEEN;
1402
1403 if (have != 0)
1404 {
1405 have = MIN (have, n);
1406 s = __mempcpy (s, read_ptr, have);
1407 fp->_IO_read_ptr = read_ptr + have;
1408 }
1409
1410 return s - (char *) data;
1411 }
1412
1413 static size_t
1414 _IO_file_xsgetn_maybe_mmap (FILE *fp, void *data, size_t n)
1415 {
1416 /* We only get here if this is the first attempt to read something.
1417 Decide which operations to use and then punt to the chosen one. */
1418
1419 decide_maybe_mmap (fp);
1420 return _IO_XSGETN (fp, data, n);
1421 }
1422
1423 versioned_symbol (libc, _IO_new_do_write, _IO_do_write, GLIBC_2_1);
1424 versioned_symbol (libc, _IO_new_file_attach, _IO_file_attach, GLIBC_2_1);
1425 versioned_symbol (libc, _IO_new_file_close_it, _IO_file_close_it, GLIBC_2_1);
1426 versioned_symbol (libc, _IO_new_file_finish, _IO_file_finish, GLIBC_2_1);
1427 versioned_symbol (libc, _IO_new_file_fopen, _IO_file_fopen, GLIBC_2_1);
1428 versioned_symbol (libc, _IO_new_file_init, _IO_file_init, GLIBC_2_1);
1429 versioned_symbol (libc, _IO_new_file_setbuf, _IO_file_setbuf, GLIBC_2_1);
1430 versioned_symbol (libc, _IO_new_file_sync, _IO_file_sync, GLIBC_2_1);
1431 versioned_symbol (libc, _IO_new_file_overflow, _IO_file_overflow, GLIBC_2_1);
1432 versioned_symbol (libc, _IO_new_file_seekoff, _IO_file_seekoff, GLIBC_2_1);
1433 versioned_symbol (libc, _IO_new_file_underflow, _IO_file_underflow, GLIBC_2_1);
1434 versioned_symbol (libc, _IO_new_file_write, _IO_file_write, GLIBC_2_1);
1435 versioned_symbol (libc, _IO_new_file_xsputn, _IO_file_xsputn, GLIBC_2_1);
1436
1437 const struct _IO_jump_t _IO_file_jumps libio_vtable =
1438 {
1439 JUMP_INIT_DUMMY,
1440 JUMP_INIT(finish, _IO_file_finish),
1441 JUMP_INIT(overflow, _IO_file_overflow),
1442 JUMP_INIT(underflow, _IO_file_underflow),
1443 JUMP_INIT(uflow, _IO_default_uflow),
1444 JUMP_INIT(pbackfail, _IO_default_pbackfail),
1445 JUMP_INIT(xsputn, _IO_file_xsputn),
1446 JUMP_INIT(xsgetn, _IO_file_xsgetn),
1447 JUMP_INIT(seekoff, _IO_new_file_seekoff),
1448 JUMP_INIT(seekpos, _IO_default_seekpos),
1449 JUMP_INIT(setbuf, _IO_new_file_setbuf),
1450 JUMP_INIT(sync, _IO_new_file_sync),
1451 JUMP_INIT(doallocate, _IO_file_doallocate),
1452 JUMP_INIT(read, _IO_file_read),
1453 JUMP_INIT(write, _IO_new_file_write),
1454 JUMP_INIT(seek, _IO_file_seek),
1455 JUMP_INIT(close, _IO_file_close),
1456 JUMP_INIT(stat, _IO_file_stat),
1457 JUMP_INIT(showmanyc, _IO_default_showmanyc),
1458 JUMP_INIT(imbue, _IO_default_imbue)
1459 };
1460 libc_hidden_data_def (_IO_file_jumps)
1461
1462 const struct _IO_jump_t _IO_file_jumps_mmap libio_vtable =
1463 {
1464 JUMP_INIT_DUMMY,
1465 JUMP_INIT(finish, _IO_file_finish),
1466 JUMP_INIT(overflow, _IO_file_overflow),
1467 JUMP_INIT(underflow, _IO_file_underflow_mmap),
1468 JUMP_INIT(uflow, _IO_default_uflow),
1469 JUMP_INIT(pbackfail, _IO_default_pbackfail),
1470 JUMP_INIT(xsputn, _IO_new_file_xsputn),
1471 JUMP_INIT(xsgetn, _IO_file_xsgetn_mmap),
1472 JUMP_INIT(seekoff, _IO_file_seekoff_mmap),
1473 JUMP_INIT(seekpos, _IO_default_seekpos),
1474 JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap),
1475 JUMP_INIT(sync, _IO_file_sync_mmap),
1476 JUMP_INIT(doallocate, _IO_file_doallocate),
1477 JUMP_INIT(read, _IO_file_read),
1478 JUMP_INIT(write, _IO_new_file_write),
1479 JUMP_INIT(seek, _IO_file_seek),
1480 JUMP_INIT(close, _IO_file_close_mmap),
1481 JUMP_INIT(stat, _IO_file_stat),
1482 JUMP_INIT(showmanyc, _IO_default_showmanyc),
1483 JUMP_INIT(imbue, _IO_default_imbue)
1484 };
1485
1486 const struct _IO_jump_t _IO_file_jumps_maybe_mmap libio_vtable =
1487 {
1488 JUMP_INIT_DUMMY,
1489 JUMP_INIT(finish, _IO_file_finish),
1490 JUMP_INIT(overflow, _IO_file_overflow),
1491 JUMP_INIT(underflow, _IO_file_underflow_maybe_mmap),
1492 JUMP_INIT(uflow, _IO_default_uflow),
1493 JUMP_INIT(pbackfail, _IO_default_pbackfail),
1494 JUMP_INIT(xsputn, _IO_new_file_xsputn),
1495 JUMP_INIT(xsgetn, _IO_file_xsgetn_maybe_mmap),
1496 JUMP_INIT(seekoff, _IO_file_seekoff_maybe_mmap),
1497 JUMP_INIT(seekpos, _IO_default_seekpos),
1498 JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap),
1499 JUMP_INIT(sync, _IO_new_file_sync),
1500 JUMP_INIT(doallocate, _IO_file_doallocate),
1501 JUMP_INIT(read, _IO_file_read),
1502 JUMP_INIT(write, _IO_new_file_write),
1503 JUMP_INIT(seek, _IO_file_seek),
1504 JUMP_INIT(close, _IO_file_close),
1505 JUMP_INIT(stat, _IO_file_stat),
1506 JUMP_INIT(showmanyc, _IO_default_showmanyc),
1507 JUMP_INIT(imbue, _IO_default_imbue)
1508 };