/*
 * unix_io.c --- This is the Unix (well, really POSIX) implementation
 * of the I/O manager.
 *
 * Implements a one-block write-through cache.
 *
 * Includes support for Windows NT under Cygwin.
 *
 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
 * 2002 by Theodore Ts'o.
 *
 * %Begin-Header%
 * This file may be redistributed under the terms of the GNU Library
 * General Public License, version 2.
 * %End-Header%
 */

#if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
#define _XOPEN_SOURCE 600
#define _DARWIN_C_SOURCE
#define _FILE_OFFSET_BITS 64
#ifndef _LARGEFILE_SOURCE
#define _LARGEFILE_SOURCE
#endif
#ifndef _LARGEFILE64_SOURCE
#define _LARGEFILE64_SOURCE
#endif
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#endif

#include "config.h"
#include <stdio.h>
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#if HAVE_ERRNO_H
#include <errno.h>
#endif
#include <fcntl.h>
#include <time.h>
#ifdef __linux__
#include <sys/utsname.h>
#endif
#if HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#else
#define PR_GET_DUMPABLE 3
#endif
#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
#if HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#endif
#if HAVE_LINUX_FALLOC_H
#include <linux/falloc.h>
#endif
#ifdef HAVE_PTHREAD
#include <pthread.h>
#endif

#if defined(__linux__) && defined(_IO) && !defined(BLKROGET)
#define BLKROGET _IO(0x12, 94) /* Get read-only status (0 = read_write). */
#endif

#undef ALIGN_DEBUG

#include "ext2_fs.h"
#include "ext2fs.h"
#include "ext2fsP.h"

/*
 * For checking structure magic numbers...
 */

#define EXT2_CHECK_MAGIC(struct, code) \
	if ((struct)->magic != (code)) return (code)

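/*
 * Each cache entry holds a single block.  Entries live in a small,
 * fully associative array and are replaced in least-recently-used
 * order, using the access_time counter in unix_private_data as a
 * logical clock.
 */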
struct unix_cache {
	char *buf;
	unsigned long long block;
	int access_time;
	unsigned dirty:1;
	unsigned in_use:1;
};

#define CACHE_SIZE 8
#define WRITE_DIRECT_SIZE 4	/* Must be smaller than CACHE_SIZE */
#define READ_DIRECT_SIZE 4	/* Should be smaller than CACHE_SIZE */

struct unix_private_data {
	int magic;
	int dev;
	int flags;
	int align;
	int access_time;
	ext2_loff_t offset;
	struct unix_cache cache[CACHE_SIZE];
	void *bounce;
	struct struct_io_stats io_stats;
#ifdef HAVE_PTHREAD
	pthread_mutex_t cache_mutex;
	pthread_mutex_t bounce_mutex;
	pthread_mutex_t stats_mutex;
#endif
};

#define IS_ALIGNED(n, align) ((((uintptr_t) n) & \
			       ((uintptr_t) ((align)-1))) == 0)

typedef enum lock_kind {
	CACHE_MTX, BOUNCE_MTX, STATS_MTX
} kind_t;

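/*
 * The mutexes below are only taken when the channel was opened with
 * IO_FLAG_THREADS; otherwise get_mutex() returns NULL and the
 * mutex_lock()/mutex_unlock() helpers are no-ops.
 */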
#ifdef HAVE_PTHREAD
static inline pthread_mutex_t *get_mutex(struct unix_private_data *data,
					 kind_t kind)
{
	if (data->flags & IO_FLAG_THREADS) {
		switch (kind) {
		case CACHE_MTX:
			return &data->cache_mutex;
		case BOUNCE_MTX:
			return &data->bounce_mutex;
		case STATS_MTX:
			return &data->stats_mutex;
		}
	}
	return NULL;
}
#endif

static inline void mutex_lock(struct unix_private_data *data, kind_t kind)
{
#ifdef HAVE_PTHREAD
	pthread_mutex_t *mtx = get_mutex(data, kind);

	if (mtx)
		pthread_mutex_lock(mtx);
#endif
}

static inline void mutex_unlock(struct unix_private_data *data, kind_t kind)
{
#ifdef HAVE_PTHREAD
	pthread_mutex_t *mtx = get_mutex(data, kind);

	if (mtx)
		pthread_mutex_unlock(mtx);
#endif
}

static errcode_t unix_get_stats(io_channel channel, io_stats *stats)
{
	errcode_t retval = 0;

	struct unix_private_data *data;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (stats) {
		mutex_lock(data, STATS_MTX);
		*stats = &data->io_stats;
		mutex_unlock(data, STATS_MTX);
	}

	return retval;
}

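/*
 * Like getenv(), but returns NULL when the process may be running with
 * elevated privileges (setuid/setgid or marked non-dumpable), so that
 * environment overrides such as UNIX_IO_FORCE_BOUNCE cannot be abused.
 */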
static char *safe_getenv(const char *arg)
{
	if ((getuid() != geteuid()) || (getgid() != getegid()))
		return NULL;
#ifdef HAVE_PRCTL
	if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#else
#if (defined(linux) && defined(SYS_prctl))
	if (syscall(SYS_prctl, PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
		return NULL;
#endif
#endif

#if defined(HAVE_SECURE_GETENV)
	return secure_getenv(arg);
#elif defined(HAVE___SECURE_GETENV)
	return __secure_getenv(arg);
#else
	return getenv(arg);
#endif
}

/*
 * Here are the raw I/O functions
 */
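/*
 * Both raw_read_blk() and raw_write_blk() use the same fallback chain:
 * try an aligned pread()/pwrite() first, then a plain read()/write()
 * after an explicit llseek(), and finally copy one block at a time
 * through the aligned bounce buffer when the caller's buffer or size
 * does not satisfy the O_DIRECT alignment rules (or when
 * IO_FLAG_FORCE_BOUNCE is set).
 */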
static errcode_t raw_read_blk(io_channel channel,
			      struct unix_private_data *data,
			      unsigned long long block,
			      int count, void *bufv)
{
	errcode_t retval;
	ssize_t size;
	ext2_loff_t location;
	int actual = 0;
	unsigned char *buf = bufv;
	ssize_t really_read = 0;

	size = (count < 0) ? -count : (ext2_loff_t) count * channel->block_size;
	mutex_lock(data, STATS_MTX);
	data->io_stats.bytes_read += size;
	mutex_unlock(data, STATS_MTX);
	location = ((ext2_loff_t) block * channel->block_size) + data->offset;

	if (data->flags & IO_FLAG_FORCE_BOUNCE) {
		if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_out;
		}
		goto bounce_read;
	}

#ifdef HAVE_PREAD64
	/* Try an aligned pread */
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = pread64(data->dev, buf, size, location);
		if (actual == size)
			return 0;
		actual = 0;
	}
#elif HAVE_PREAD
	/* Try an aligned pread */
	if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
	    ((channel->align == 0) ||
	     (IS_ALIGNED(buf, channel->align) &&
	      IS_ALIGNED(size, channel->align)))) {
		actual = pread(data->dev, buf, size, location);
		if (actual == size)
			return 0;
		actual = 0;
	}
#endif /* HAVE_PREAD */

	if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
		retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
		goto error_out;
	}
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = read(data->dev, buf, size);
		if (actual != size) {
		short_read:
			if (actual < 0) {
				retval = errno;
				actual = 0;
			} else
				retval = EXT2_ET_SHORT_READ;
			goto error_out;
		}
		return 0;
	}

#ifdef ALIGN_DEBUG
	printf("raw_read_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif

	/*
	 * The buffer or size which we're trying to read isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
bounce_read:
	while (size > 0) {
		mutex_lock(data, BOUNCE_MTX);
		actual = read(data->dev, data->bounce, channel->block_size);
		if (actual != channel->block_size) {
			mutex_unlock(data, BOUNCE_MTX);
			actual = really_read;
			buf -= really_read;
			size += really_read;
			goto short_read;
		}
		actual = size;
		if (size > channel->block_size)
			actual = channel->block_size;
		memcpy(buf, data->bounce, actual);
		really_read += actual;
		size -= actual;
		buf += actual;
		mutex_unlock(data, BOUNCE_MTX);
	}
	return 0;

error_out:
	if (actual >= 0 && actual < size)
		memset((char *) buf+actual, 0, size-actual);
	if (channel->read_error)
		retval = (channel->read_error)(channel, block, count, buf,
					       size, actual, retval);
	return retval;
}

static errcode_t raw_write_blk(io_channel channel,
			       struct unix_private_data *data,
			       unsigned long long block,
			       int count, const void *bufv)
{
	ssize_t size;
	ext2_loff_t location;
	int actual = 0;
	errcode_t retval;
	const unsigned char *buf = bufv;

	if (count == 1)
		size = channel->block_size;
	else {
		if (count < 0)
			size = -count;
		else
			size = (ext2_loff_t) count * channel->block_size;
	}
	mutex_lock(data, STATS_MTX);
	data->io_stats.bytes_written += size;
	mutex_unlock(data, STATS_MTX);

	location = ((ext2_loff_t) block * channel->block_size) + data->offset;

	if (data->flags & IO_FLAG_FORCE_BOUNCE) {
		if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_out;
		}
		goto bounce_write;
	}

#ifdef HAVE_PWRITE64
	/* Try an aligned pwrite */
	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = pwrite64(data->dev, buf, size, location);
		if (actual == size)
			return 0;
	}
#elif HAVE_PWRITE
	/* Try an aligned pwrite */
	if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
	    ((channel->align == 0) ||
	     (IS_ALIGNED(buf, channel->align) &&
	      IS_ALIGNED(size, channel->align)))) {
		actual = pwrite(data->dev, buf, size, location);
		if (actual == size)
			return 0;
	}
#endif /* HAVE_PWRITE */

	if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
		retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
		goto error_out;
	}

	if ((channel->align == 0) ||
	    (IS_ALIGNED(buf, channel->align) &&
	     IS_ALIGNED(size, channel->align))) {
		actual = write(data->dev, buf, size);
		if (actual < 0) {
			retval = errno;
			goto error_out;
		}
		if (actual != size) {
		short_write:
			retval = EXT2_ET_SHORT_WRITE;
			goto error_out;
		}
		return 0;
	}

#ifdef ALIGN_DEBUG
	printf("raw_write_blk: O_DIRECT fallback: %p %lu\n", buf,
	       (unsigned long) size);
#endif
	/*
	 * The buffer or size which we're trying to write isn't aligned
	 * to the O_DIRECT rules, so we need to do this the hard way...
	 */
bounce_write:
	while (size > 0) {
		mutex_lock(data, BOUNCE_MTX);
		if (size < channel->block_size) {
			actual = read(data->dev, data->bounce,
				      channel->block_size);
			if (actual != channel->block_size) {
				if (actual < 0) {
					mutex_unlock(data, BOUNCE_MTX);
					retval = errno;
					goto error_out;
				}
				memset((char *) data->bounce + actual, 0,
				       channel->block_size - actual);
			}
		}
		actual = size;
		if (size > channel->block_size)
			actual = channel->block_size;
		memcpy(data->bounce, buf, actual);
		if (ext2fs_llseek(data->dev, location, SEEK_SET) != location) {
			retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
			goto error_out;
		}
		actual = write(data->dev, data->bounce, channel->block_size);
		mutex_unlock(data, BOUNCE_MTX);
		if (actual < 0) {
			retval = errno;
			goto error_out;
		}
		if (actual != channel->block_size)
			goto short_write;
		size -= actual;
		buf += actual;
		location += actual;
	}
	return 0;

error_out:
	if (channel->write_error)
		retval = (channel->write_error)(channel, block, count, buf,
						size, actual, retval);
	return retval;
}


/*
 * Here we implement the cache functions
 */

/* Allocate the cache buffers */
static errcode_t alloc_cache(io_channel channel,
			     struct unix_private_data *data)
{
	errcode_t retval;
	struct unix_cache *cache;
	int i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
		retval = io_channel_alloc_buf(channel, 0, &cache->buf);
		if (retval)
			return retval;
	}
	if (channel->align || data->flags & IO_FLAG_FORCE_BOUNCE) {
		if (data->bounce)
			ext2fs_free_mem(&data->bounce);
		retval = io_channel_alloc_buf(channel, 0, &data->bounce);
	}
	return retval;
}

/* Free the cache buffers */
static void free_cache(struct unix_private_data *data)
{
	struct unix_cache *cache;
	int i;

	data->access_time = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		cache->block = 0;
		cache->access_time = 0;
		cache->dirty = 0;
		cache->in_use = 0;
		if (cache->buf)
			ext2fs_free_mem(&cache->buf);
	}
	if (data->bounce)
		ext2fs_free_mem(&data->bounce);
}

#ifndef NO_IO_CACHE
/*
 * Try to find a block in the cache.  If the block is not found, and
 * eldest is a non-zero pointer, then fill in eldest with the cache
 * entry that should be reused.
 */
static struct unix_cache *find_cached_block(struct unix_private_data *data,
					    unsigned long long block,
					    struct unix_cache **eldest)
{
	struct unix_cache *cache, *unused_cache, *oldest_cache;
	int i;

	unused_cache = oldest_cache = 0;
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use) {
			if (!unused_cache)
				unused_cache = cache;
			continue;
		}
		if (cache->block == block) {
			cache->access_time = ++data->access_time;
			return cache;
		}
		if (!oldest_cache ||
		    (cache->access_time < oldest_cache->access_time))
			oldest_cache = cache;
	}
	if (eldest)
		*eldest = (unused_cache) ? unused_cache : oldest_cache;
	return 0;
}

/*
 * Reuse a particular cache entry for another block.
 */
static void reuse_cache(io_channel channel, struct unix_private_data *data,
			struct unix_cache *cache, unsigned long long block)
{
	if (cache->dirty && cache->in_use)
		raw_write_blk(channel, data, cache->block, 1, cache->buf);

	cache->in_use = 1;
	cache->dirty = 0;
	cache->block = block;
	cache->access_time = ++data->access_time;
}

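/*
 * FLUSH_INVALIDATE drops every cache entry after (trying to) write back
 * the dirty ones; FLUSH_NOLOCK skips taking the cache mutex, for callers
 * such as unix_set_blksize() that already hold it.
 */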
#define FLUSH_INVALIDATE	0x01
#define FLUSH_NOLOCK		0x02

/*
 * Flush all of the blocks in the cache
 */
static errcode_t flush_cached_blocks(io_channel channel,
				     struct unix_private_data *data,
				     int flags)
{
	struct unix_cache *cache;
	errcode_t retval, retval2;
	int i;

	retval2 = 0;
	if ((flags & FLUSH_NOLOCK) == 0)
		mutex_lock(data, CACHE_MTX);
	for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
		if (!cache->in_use)
			continue;

		if (flags & FLUSH_INVALIDATE)
			cache->in_use = 0;

		if (!cache->dirty)
			continue;

		retval = raw_write_blk(channel, data,
				       cache->block, 1, cache->buf);
		if (retval)
			retval2 = retval;
		else
			cache->dirty = 0;
	}
	if ((flags & FLUSH_NOLOCK) == 0)
		mutex_unlock(data, CACHE_MTX);
	return retval2;
}
#endif /* NO_IO_CACHE */

#ifdef __linux__
#ifndef BLKDISCARDZEROES
#define BLKDISCARDZEROES _IO(0x12,124)
#endif
#endif

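/*
 * Wrappers that use the explicit 64-bit (LFS) interfaces where they are
 * available, so that large files and devices can be handled even when
 * off_t would otherwise be 32 bits.
 */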
int ext2fs_open_file(const char *pathname, int flags, mode_t mode)
{
	if (mode)
#if defined(HAVE_OPEN64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
		return open64(pathname, flags, mode);
	else
		return open64(pathname, flags);
#else
		return open(pathname, flags, mode);
	else
		return open(pathname, flags);
#endif
}

int ext2fs_stat(const char *path, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return stat64(path, buf);
#else
	return stat(path, buf);
#endif
}

int ext2fs_fstat(int fd, ext2fs_struct_stat *buf)
{
#if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
	return fstat64(fd, buf);
#else
	return fstat(fd, buf);
#endif
}


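/*
 * Common channel setup shared by unix_open() and unixfd_open(): allocate
 * the io_channel and its private data, detect direct-I/O alignment
 * requirements, allocate the block cache (and, if needed, the bounce
 * buffer), and apply assorted per-platform workarounds.
 */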
static errcode_t unix_open_channel(const char *name, int fd,
				   int flags, io_channel *channel,
				   io_manager io_mgr)
{
	io_channel io = NULL;
	struct unix_private_data *data = NULL;
	errcode_t retval;
	ext2fs_struct_stat st;
#ifdef __linux__
	struct utsname ut;
#endif

	if (safe_getenv("UNIX_IO_FORCE_BOUNCE"))
		flags |= IO_FLAG_FORCE_BOUNCE;

#ifdef __linux__
	/*
	 * We need to make sure any previous errors in the block
	 * device are thrown away, sigh.
	 */
	(void) fsync(fd);
#endif

	retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
	if (retval)
		goto cleanup;
	memset(io, 0, sizeof(struct struct_io_channel));
	io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
	retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
	if (retval)
		goto cleanup;

	io->manager = io_mgr;
	retval = ext2fs_get_mem(strlen(name)+1, &io->name);
	if (retval)
		goto cleanup;

	strcpy(io->name, name);
	io->private_data = data;
	io->block_size = 1024;
	io->read_error = 0;
	io->write_error = 0;
	io->refcount = 1;
	io->flags = 0;

	memset(data, 0, sizeof(struct unix_private_data));
	data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
	data->io_stats.num_fields = 2;
	data->flags = flags;
	data->dev = fd;

#if defined(O_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO)
		io->align = ext2fs_get_dio_alignment(data->dev);
#elif defined(F_NOCACHE)
	if (flags & IO_FLAG_DIRECT_IO)
		io->align = 4096;
#endif

	/*
	 * If the device is really a block device, then set the
	 * appropriate flag; otherwise we can set the DISCARD_ZEROES
	 * flag, because we are going to use punch hole instead of
	 * discard, and if that succeeds, subsequent reads from the
	 * sparse area return zeroes.
	 */
	if (ext2fs_fstat(data->dev, &st) == 0) {
		if (ext2fsP_is_disk_device(st.st_mode))
			io->flags |= CHANNEL_FLAGS_BLOCK_DEVICE;
		else
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}

#ifdef BLKDISCARDZEROES
	{
		int zeroes = 0;
		if (ioctl(data->dev, BLKDISCARDZEROES, &zeroes) == 0 &&
		    zeroes)
			io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
	}
#endif

#if defined(__CYGWIN__)
	/*
	 * Some operating systems require that the buffers be aligned,
	 * regardless of O_DIRECT
	 */
	if (!io->align)
		io->align = 512;
#endif

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
	if (io->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
		int dio_align = ext2fs_get_dio_alignment(fd);

		if (io->align < dio_align)
			io->align = dio_align;
	}
#endif

	if ((retval = alloc_cache(io, data)))
		goto cleanup;

#ifdef BLKROGET
	if (flags & IO_FLAG_RW) {
		int error;
		int readonly = 0;

		/* Is the block device actually writable? */
		error = ioctl(data->dev, BLKROGET, &readonly);
		if (!error && readonly) {
			retval = EPERM;
			goto cleanup;
		}
	}
#endif

#ifdef __linux__
#undef RLIM_INFINITY
#if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
#define RLIM_INFINITY ((unsigned long)(~0UL>>1))
#else
#define RLIM_INFINITY (~0UL)
#endif
	/*
	 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
	 * block devices are wrongly getting hit by the filesize
	 * limit.  This workaround isn't perfect, since it won't work
	 * if glibc wasn't built against 2.2 header files.  (Sigh.)
	 *
	 */
	if ((flags & IO_FLAG_RW) &&
	    (uname(&ut) == 0) &&
	    ((ut.release[0] == '2') && (ut.release[1] == '.') &&
	     (ut.release[2] == '4') && (ut.release[3] == '.') &&
	     (ut.release[4] == '1') && (ut.release[5] >= '0') &&
	     (ut.release[5] < '8')) &&
	    (ext2fs_fstat(data->dev, &st) == 0) &&
	    (ext2fsP_is_disk_device(st.st_mode))) {
		struct rlimit rlim;

		rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
		setrlimit(RLIMIT_FSIZE, &rlim);
		getrlimit(RLIMIT_FSIZE, &rlim);
		if (((unsigned long) rlim.rlim_cur) <
		    ((unsigned long) rlim.rlim_max)) {
			rlim.rlim_cur = rlim.rlim_max;
			setrlimit(RLIMIT_FSIZE, &rlim);
		}
	}
#endif
#ifdef HAVE_PTHREAD
	if (flags & IO_FLAG_THREADS) {
		io->flags |= CHANNEL_FLAGS_THREADS;
		retval = pthread_mutex_init(&data->cache_mutex, NULL);
		if (retval)
			goto cleanup;
		retval = pthread_mutex_init(&data->bounce_mutex, NULL);
		if (retval) {
			pthread_mutex_destroy(&data->cache_mutex);
			goto cleanup;
		}
		retval = pthread_mutex_init(&data->stats_mutex, NULL);
		if (retval) {
			pthread_mutex_destroy(&data->cache_mutex);
			pthread_mutex_destroy(&data->bounce_mutex);
			goto cleanup;
		}
	}
#endif
	*channel = io;
	return 0;

cleanup:
	if (data) {
		if (data->dev >= 0)
			close(data->dev);
		free_cache(data);
		ext2fs_free_mem(&data);
	}
	if (io) {
		if (io->name) {
			ext2fs_free_mem(&io->name);
		}
		ext2fs_free_mem(&io);
	}
	return retval;
}

static errcode_t unixfd_open(const char *str_fd, int flags,
			     io_channel *channel)
{
	int fd;
	int fd_flags;

	fd = atoi(str_fd);
#if defined(HAVE_FCNTL)
	fd_flags = fcntl(fd, F_GETFD);
	if (fd_flags == -1)
		return EBADF;

	flags = 0;
	if (fd_flags & O_RDWR)
		flags |= IO_FLAG_RW;
	if (fd_flags & O_EXCL)
		flags |= IO_FLAG_EXCLUSIVE;
#if defined(O_DIRECT)
	if (fd_flags & O_DIRECT)
		flags |= IO_FLAG_DIRECT_IO;
#endif
#endif /* HAVE_FCNTL */

	return unix_open_channel(str_fd, fd, flags, channel, unixfd_io_manager);
}

static errcode_t unix_open(const char *name, int flags,
			   io_channel *channel)
{
	int fd = -1;
	int open_flags;

	if (name == 0)
		return EXT2_ET_BAD_DEVICE_NAME;

	open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
	if (flags & IO_FLAG_EXCLUSIVE)
		open_flags |= O_EXCL;
#if defined(O_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO)
		open_flags |= O_DIRECT;
#endif
	fd = ext2fs_open_file(name, open_flags, 0);
	if (fd < 0)
		return errno;
#if defined(F_NOCACHE) && !defined(IO_DIRECT)
	if (flags & IO_FLAG_DIRECT_IO) {
		if (fcntl(fd, F_NOCACHE, 1) < 0)
			return errno;
	}
#endif
	return unix_open_channel(name, fd, flags, channel, unix_io_manager);
}

static errcode_t unix_close(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (--channel->refcount > 0)
		return 0;

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif

	if (close(data->dev) < 0)
		retval = errno;
	free_cache(data);
#ifdef HAVE_PTHREAD
	if (data->flags & IO_FLAG_THREADS) {
		pthread_mutex_destroy(&data->cache_mutex);
		pthread_mutex_destroy(&data->bounce_mutex);
		pthread_mutex_destroy(&data->stats_mutex);
	}
#endif

	ext2fs_free_mem(&channel->private_data);
	if (channel->name)
		ext2fs_free_mem(&channel->name);
	ext2fs_free_mem(&channel);
	return retval;
}

static errcode_t unix_set_blksize(io_channel channel, int blksize)
{
	struct unix_private_data *data;
	errcode_t retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->block_size != blksize) {
		mutex_lock(data, CACHE_MTX);
		mutex_lock(data, BOUNCE_MTX);
#ifndef NO_IO_CACHE
		if ((retval = flush_cached_blocks(channel, data, FLUSH_NOLOCK)))
			return retval;
#endif

		channel->block_size = blksize;
		free_cache(data);
		retval = alloc_cache(channel, data);
		mutex_unlock(data, BOUNCE_MTX);
		mutex_unlock(data, CACHE_MTX);
	}
	return retval;
}

static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
				 int count, void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse[READ_DIRECT_SIZE];
	errcode_t retval = 0;
	char *cp;
	int i, j;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_read_blk(channel, data, block, count, buf);
#else
	if (data->flags & IO_FLAG_NOCACHE)
		return raw_read_blk(channel, data, block, count, buf);
	/*
	 * If we're doing an odd-sized read or a very large read,
	 * flush out the cache and then do a direct read.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data, 0)))
			return retval;
		return raw_read_blk(channel, data, block, count, buf);
	}

	cp = buf;
	mutex_lock(data, CACHE_MTX);
	while (count > 0) {
		/* If it's in the cache, use it! */
		if ((cache = find_cached_block(data, block, &reuse[0]))) {
#ifdef DEBUG
			printf("Using cached block %llu\n", block);
#endif
			memcpy(cp, cache->buf, channel->block_size);
			count--;
			block++;
			cp += channel->block_size;
			continue;
		}
		if (count == 1) {
			/*
			 * Special case where we read directly into the
			 * cache buffer; important in the O_DIRECT case
			 */
			cache = reuse[0];
			reuse_cache(channel, data, cache, block);
			if ((retval = raw_read_blk(channel, data, block, 1,
						   cache->buf))) {
				cache->in_use = 0;
				break;
			}
			memcpy(cp, cache->buf, channel->block_size);
			retval = 0;
			break;
		}

		/*
		 * Find the number of uncached blocks so we can do a
		 * single read request
		 */
		for (i=1; i < count; i++)
			if (find_cached_block(data, block+i, &reuse[i]))
				break;
#ifdef DEBUG
		printf("Reading %d blocks starting at %llu\n", i, block);
#endif
		if ((retval = raw_read_blk(channel, data, block, i, cp)))
			break;

		/* Save the results in the cache */
		for (j=0; j < i; j++) {
			count--;
			cache = reuse[j];
			reuse_cache(channel, data, cache, block++);
			memcpy(cache->buf, cp, channel->block_size);
			cp += channel->block_size;
		}
	}
	mutex_unlock(data, CACHE_MTX);
	return retval;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_read_blk(io_channel channel, unsigned long block,
			       int count, void *buf)
{
	return unix_read_blk64(channel, block, count, buf);
}

static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
				  int count, const void *buf)
{
	struct unix_private_data *data;
	struct unix_cache *cache, *reuse;
	errcode_t retval = 0;
	const char *cp;
	int writethrough;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifdef NO_IO_CACHE
	return raw_write_blk(channel, data, block, count, buf);
#else
	if (data->flags & IO_FLAG_NOCACHE)
		return raw_write_blk(channel, data, block, count, buf);
	/*
	 * If we're doing an odd-sized write or a very large write,
	 * flush out the cache completely and then do a direct write.
	 */
	if (count < 0 || count > WRITE_DIRECT_SIZE) {
		if ((retval = flush_cached_blocks(channel, data,
						  FLUSH_INVALIDATE)))
			return retval;
		return raw_write_blk(channel, data, block, count, buf);
	}

	/*
	 * For a moderate-sized multi-block write, first force a write
	 * if we're in write-through cache mode, and then fill the
	 * cache with the blocks.
	 */
	writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
	if (writethrough)
		retval = raw_write_blk(channel, data, block, count, buf);

	cp = buf;
	mutex_lock(data, CACHE_MTX);
	while (count > 0) {
		cache = find_cached_block(data, block, &reuse);
		if (!cache) {
			cache = reuse;
			reuse_cache(channel, data, cache, block);
		}
		if (cache->buf != cp)
			memcpy(cache->buf, cp, channel->block_size);
		cache->dirty = !writethrough;
		count--;
		block++;
		cp += channel->block_size;
	}
	mutex_unlock(data, CACHE_MTX);
	return retval;
#endif /* NO_IO_CACHE */
}

static errcode_t unix_cache_readahead(io_channel channel,
				      unsigned long long block,
				      unsigned long long count)
{
#ifdef POSIX_FADV_WILLNEED
	struct unix_private_data *data;

	data = (struct unix_private_data *)channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
	return posix_fadvise(data->dev,
			     (ext2_loff_t)block * channel->block_size + data->offset,
			     (ext2_loff_t)count * channel->block_size,
			     POSIX_FADV_WILLNEED);
#else
	return EXT2_ET_OP_NOT_SUPPORTED;
#endif
}

static errcode_t unix_write_blk(io_channel channel, unsigned long block,
				int count, const void *buf)
{
	return unix_write_blk64(channel, block, count, buf);
}

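/*
 * Write raw bytes at an arbitrary byte offset, bypassing the block cache
 * (which is flushed and invalidated first).  Not supported when O_DIRECT
 * alignment is in effect.
 */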
static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
				 int size, const void *buf)
{
	struct unix_private_data *data;
	errcode_t retval = 0;
	ssize_t actual;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->align != 0) {
#ifdef ALIGN_DEBUG
		printf("unix_write_byte: O_DIRECT fallback\n");
#endif
		return EXT2_ET_UNIMPLEMENTED;
	}

#ifndef NO_IO_CACHE
	/*
	 * Flush out the cache completely
	 */
	if ((retval = flush_cached_blocks(channel, data, FLUSH_INVALIDATE)))
		return retval;
#endif

	if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
		return errno;

	actual = write(data->dev, buf, size);
	if (actual < 0)
		return errno;
	if (actual != size)
		return EXT2_ET_SHORT_WRITE;

	return 0;
}

/*
 * Flush data buffers to disk.
 */
static errcode_t unix_flush(io_channel channel)
{
	struct unix_private_data *data;
	errcode_t retval = 0;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

#ifndef NO_IO_CACHE
	retval = flush_cached_blocks(channel, data, 0);
#endif
#ifdef HAVE_FSYNC
	if (!retval && fsync(data->dev) != 0)
		return errno;
#endif
	return retval;
}

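/*
 * Supported options: "offset=<bytes>" shifts all subsequent I/O by a fixed
 * byte offset (useful for filesystem images embedded in a larger file),
 * and "cache=on"/"cache=off" toggles the internal block cache.
 */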
static errcode_t unix_set_option(io_channel channel, const char *option,
				 const char *arg)
{
	struct unix_private_data *data;
	unsigned long long tmp;
	errcode_t retval;
	char *end;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (!strcmp(option, "offset")) {
		if (!arg)
			return EXT2_ET_INVALID_ARGUMENT;

		tmp = strtoull(arg, &end, 0);
		if (*end)
			return EXT2_ET_INVALID_ARGUMENT;
		data->offset = tmp;
		if (data->offset < 0)
			return EXT2_ET_INVALID_ARGUMENT;
		return 0;
	}
	if (!strcmp(option, "cache")) {
		if (!arg)
			return EXT2_ET_INVALID_ARGUMENT;
		if (!strcmp(arg, "on")) {
			data->flags &= ~IO_FLAG_NOCACHE;
			return 0;
		}
		if (!strcmp(arg, "off")) {
			retval = flush_cached_blocks(channel, data, 0);
			data->flags |= IO_FLAG_NOCACHE;
			return retval;
		}
		return EXT2_ET_INVALID_ARGUMENT;
	}
	return EXT2_ET_INVALID_ARGUMENT;
}

#if defined(__linux__) && !defined(BLKDISCARD)
#define BLKDISCARD	_IO(0x12,119)
#endif

static errcode_t unix_discard(io_channel channel, unsigned long long block,
			      unsigned long long count)
{
	struct unix_private_data *data;
	int ret;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
#ifdef BLKDISCARD
		__u64 range[2];

		range[0] = (__u64)(block) * channel->block_size + data->offset;
		range[1] = (__u64)(count) * channel->block_size;

		ret = ioctl(data->dev, BLKDISCARD, &range);
#else
		goto unimplemented;
#endif
	} else {
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE)
		/*
		 * If we are not on a block device, try to use punch hole
		 * to reclaim free space.
		 */
		ret = fallocate(data->dev,
				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				(off_t)(block) * channel->block_size + data->offset,
				(off_t)(count) * channel->block_size);
#else
		goto unimplemented;
#endif
	}
	if (ret < 0) {
		if (errno == EOPNOTSUPP)
			goto unimplemented;
		return errno;
	}
	return 0;
unimplemented:
	return EXT2_ET_UNIMPLEMENTED;
}

/*
 * If we know about ZERO_RANGE, try that before we try PUNCH_HOLE because
 * ZERO_RANGE doesn't unmap preallocated blocks.  We prefer fallocate because
 * it always invalidates page cache, and libext2fs requires that reads after
 * ZERO_RANGE return zeroes.
 */
static int __unix_zeroout(int fd, off_t offset, off_t len)
{
	int ret = -1;

#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_ZERO_RANGE)
	ret = fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
	if (ret == 0)
		return 0;
#endif
#if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			offset, len);
	if (ret == 0)
		return 0;
#endif
	errno = EOPNOTSUPP;
	return ret;
}

/* parameters might not be used if the OS doesn't support zeroout */
#if __GNUC_PREREQ (4, 6)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
static errcode_t unix_zeroout(io_channel channel, unsigned long long block,
			      unsigned long long count)
{
	struct unix_private_data *data;
	int ret;

	EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
	data = (struct unix_private_data *) channel->private_data;
	EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);

	if (safe_getenv("UNIX_IO_NOZEROOUT"))
		goto unimplemented;

	if (!(channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE)) {
		/* Regular file, try to use truncate/punch/zero. */
		struct stat statbuf;

		if (count == 0)
			return 0;
		/*
		 * If we're trying to zero a range past the end of the file,
		 * extend the file size, then truncate everything.
		 */
		ret = fstat(data->dev, &statbuf);
		if (ret)
			goto err;
		if ((unsigned long long) statbuf.st_size <
		    (block + count) * channel->block_size + data->offset) {
			ret = ftruncate(data->dev,
					(block + count) * channel->block_size + data->offset);
			if (ret)
				goto err;
		}
	}

	ret = __unix_zeroout(data->dev,
			     (off_t)(block) * channel->block_size + data->offset,
			     (off_t)(count) * channel->block_size);
err:
	if (ret < 0) {
		if (errno == EOPNOTSUPP)
			goto unimplemented;
		return errno;
	}
	return 0;
unimplemented:
	return EXT2_ET_UNIMPLEMENTED;
}
#if __GNUC_PREREQ (4, 6)
#pragma GCC diagnostic pop
#endif

static struct struct_io_manager struct_unix_manager = {
	.magic		= EXT2_ET_MAGIC_IO_MANAGER,
	.name		= "Unix I/O Manager",
	.open		= unix_open,
	.close		= unix_close,
	.set_blksize	= unix_set_blksize,
	.read_blk	= unix_read_blk,
	.write_blk	= unix_write_blk,
	.flush		= unix_flush,
	.write_byte	= unix_write_byte,
	.set_option	= unix_set_option,
	.get_stats	= unix_get_stats,
	.read_blk64	= unix_read_blk64,
	.write_blk64	= unix_write_blk64,
	.discard	= unix_discard,
	.cache_readahead	= unix_cache_readahead,
	.zeroout	= unix_zeroout,
};

io_manager unix_io_manager = &struct_unix_manager;

static struct struct_io_manager struct_unixfd_manager = {
	.magic		= EXT2_ET_MAGIC_IO_MANAGER,
	.name		= "Unix fd I/O Manager",
	.open		= unixfd_open,
	.close		= unix_close,
	.set_blksize	= unix_set_blksize,
	.read_blk	= unix_read_blk,
	.write_blk	= unix_write_blk,
	.flush		= unix_flush,
	.write_byte	= unix_write_byte,
	.set_option	= unix_set_option,
	.get_stats	= unix_get_stats,
	.read_blk64	= unix_read_blk64,
	.write_blk64	= unix_write_blk64,
	.discard	= unix_discard,
	.cache_readahead	= unix_cache_readahead,
	.zeroout	= unix_zeroout,
};

io_manager unixfd_io_manager = &struct_unixfd_manager;
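
/*
 * Typical usage of this manager (an illustrative sketch, not part of the
 * library itself): callers normally pass unix_io_manager to ext2fs_open(),
 * but the io_channel interface defined in ext2_io.h can also be used
 * directly, roughly like this:
 *
 *	io_channel chan;
 *	char *buf;
 *	errcode_t err;
 *
 *	err = unix_io_manager->open("/dev/sdXN", IO_FLAG_RW, &chan);
 *	if (!err)
 *		err = io_channel_set_blksize(chan, 4096);
 *	if (!err)
 *		err = io_channel_alloc_buf(chan, 0, &buf);
 *	if (!err)
 *		err = io_channel_read_blk64(chan, 0, 1, buf);
 *	...
 *	io_channel_close(chan);
 *
 * "/dev/sdXN" is a placeholder device name; the exact calling conventions
 * are defined by ext2_io.h, not by this file.
 */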