1 /*
2 * unix_io.c --- This is the Unix (well, really POSIX) implementation
3 * of the I/O manager.
4 *
5  * Implements a small write-back cache of blocks, with an optional write-through mode.
6 *
7  * Includes support for Windows NT under Cygwin.
8 *
9 * Copyright (C) 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
10 * 2002 by Theodore Ts'o.
11 *
12 * %Begin-Header%
13 * This file may be redistributed under the terms of the GNU Library
14 * General Public License, version 2.
15 * %End-Header%
16 */
17
18 #if !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
19 #define _XOPEN_SOURCE 600
20 #define _DARWIN_C_SOURCE
21 #define _FILE_OFFSET_BITS 64
22 #ifndef _LARGEFILE_SOURCE
23 #define _LARGEFILE_SOURCE
24 #endif
25 #ifndef _LARGEFILE64_SOURCE
26 #define _LARGEFILE64_SOURCE
27 #endif
28 #ifndef _GNU_SOURCE
29 #define _GNU_SOURCE
30 #endif
31 #endif
32
33 #include "config.h"
34 #include <stdio.h>
35 #include <string.h>
36 #if HAVE_UNISTD_H
37 #include <unistd.h>
38 #endif
39 #if HAVE_ERRNO_H
40 #include <errno.h>
41 #endif
42 #include <fcntl.h>
43 #include <time.h>
44 #ifdef __linux__
45 #include <sys/utsname.h>
46 #endif
47 #if HAVE_SYS_TYPES_H
48 #include <sys/types.h>
49 #endif
50 #ifdef HAVE_SYS_IOCTL_H
51 #include <sys/ioctl.h>
52 #endif
53 #ifdef HAVE_SYS_MOUNT_H
54 #include <sys/mount.h>
55 #endif
56 #ifdef HAVE_SYS_PRCTL_H
57 #include <sys/prctl.h>
58 #else
59 #define PR_GET_DUMPABLE 3
60 #endif
61 #if HAVE_SYS_STAT_H
62 #include <sys/stat.h>
63 #endif
64 #if HAVE_SYS_RESOURCE_H
65 #include <sys/resource.h>
66 #endif
67 #if HAVE_LINUX_FALLOC_H
68 #include <linux/falloc.h>
69 #endif
70 #ifdef HAVE_PTHREAD
71 #include <pthread.h>
72 #endif
73
74 #if defined(__linux__) && defined(_IO) && !defined(BLKROGET)
75 #define BLKROGET _IO(0x12, 94) /* Get read-only status (0 = read_write). */
76 #endif
77
78 #undef ALIGN_DEBUG
79
80 #include "ext2_fs.h"
81 #include "ext2fs.h"
82 #include "ext2fsP.h"
83
84 /*
85 * For checking structure magic numbers...
86 */
87
88 #define EXT2_CHECK_MAGIC(struct, code) \
89 if ((struct)->magic != (code)) return (code)
90
91 struct unix_cache {
92 char *buf;
93 unsigned long long block;
94 int access_time;
95 unsigned dirty:1;
96 unsigned in_use:1;
97 unsigned write_err:1;
98 };
99
100 #define CACHE_SIZE 8
101 #define WRITE_DIRECT_SIZE 4 /* Must be smaller than CACHE_SIZE */
102 #define READ_DIRECT_SIZE 4 /* Should be smaller than CACHE_SIZE */
103
104 struct unix_private_data {
105 int magic;
106 int dev;
107 int flags;
108 int align;
109 int access_time;
110 ext2_loff_t offset;
111 struct unix_cache cache[CACHE_SIZE];
112 void *bounce;
113 struct struct_io_stats io_stats;
114 #ifdef HAVE_PTHREAD
115 pthread_mutex_t cache_mutex;
116 pthread_mutex_t bounce_mutex;
117 pthread_mutex_t stats_mutex;
118 #endif
119 };
120
121 #define IS_ALIGNED(n, align) ((((uintptr_t) n) & \
122 ((uintptr_t) ((align)-1))) == 0)
123
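/*
 * Locking helpers.  Each channel carries three mutexes (cache, bounce
 * buffer, statistics); get_mutex() returns the one matching the requested
 * kind.  When the channel was not opened with IO_FLAG_THREADS, or when
 * pthreads are unavailable, mutex_lock() and mutex_unlock() are no-ops.
 */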
124 typedef enum lock_kind {
125 CACHE_MTX, BOUNCE_MTX, STATS_MTX
126 } kind_t;
127
128 #ifdef HAVE_PTHREAD
129 static inline pthread_mutex_t *get_mutex(struct unix_private_data *data,
130 kind_t kind)
131 {
132 if (data->flags & IO_FLAG_THREADS) {
133 switch (kind) {
134 case CACHE_MTX:
135 return &data->cache_mutex;
136 case BOUNCE_MTX:
137 return &data->bounce_mutex;
138 case STATS_MTX:
139 return &data->stats_mutex;
140 }
141 }
142 return NULL;
143 }
144 #endif
145
146 static inline void mutex_lock(struct unix_private_data *data, kind_t kind)
147 {
148 #ifdef HAVE_PTHREAD
149 pthread_mutex_t *mtx = get_mutex(data,kind);
150
151 if (mtx)
152 pthread_mutex_lock(mtx);
153 #endif
154 }
155
156 static inline void mutex_unlock(struct unix_private_data *data, kind_t kind)
157 {
158 #ifdef HAVE_PTHREAD
159 pthread_mutex_t *mtx = get_mutex(data,kind);
160
161 if (mtx)
162 pthread_mutex_unlock(mtx);
163 #endif
164 }
165
166 static errcode_t unix_get_stats(io_channel channel, io_stats *stats)
167 {
168 errcode_t retval = 0;
169
170 struct unix_private_data *data;
171
172 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
173 data = (struct unix_private_data *) channel->private_data;
174 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
175
176 if (stats) {
177 mutex_lock(data, STATS_MTX);
178 *stats = &data->io_stats;
179 mutex_unlock(data, STATS_MTX);
180 }
181
182 return retval;
183 }
184
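/*
 * Fetch an environment variable only when it is safe to trust the
 * environment: refuse if the process runs setuid/setgid or is otherwise
 * non-dumpable, and prefer secure_getenv() where the C library has it.
 */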
185 static char *safe_getenv(const char *arg)
186 {
187 if ((getuid() != geteuid()) || (getgid() != getegid()))
188 return NULL;
189 #ifdef HAVE_PRCTL
190 if (prctl(PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
191 return NULL;
192 #else
193 #if (defined(linux) && defined(SYS_prctl))
194 if (syscall(SYS_prctl, PR_GET_DUMPABLE, 0, 0, 0, 0) == 0)
195 return NULL;
196 #endif
197 #endif
198
199 #if defined(HAVE_SECURE_GETENV)
200 return secure_getenv(arg);
201 #elif defined(HAVE___SECURE_GETENV)
202 return __secure_getenv(arg);
203 #else
204 return getenv(arg);
205 #endif
206 }
207
208 /*
209 * Here are the raw I/O functions
210 */
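/*
 * Read 'count' blocks (or, if count is negative, -count bytes) starting
 * at 'block' into 'bufv'.  Fast path: a single pread()/pread64() or
 * llseek()+read() when the buffer, offset and size all satisfy the
 * channel's alignment requirement; otherwise the data is staged through
 * the aligned bounce buffer one chunk at a time.
 */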
211 static errcode_t raw_read_blk(io_channel channel,
212 struct unix_private_data *data,
213 unsigned long long block,
214 int count, void *bufv)
215 {
216 errcode_t retval;
217 ssize_t size;
218 ext2_loff_t location;
219 int actual = 0;
220 unsigned char *buf = bufv;
221 ssize_t really_read = 0;
222 unsigned long long aligned_blk;
223 int align_size, offset;
224
225 size = (count < 0) ? -count : (ext2_loff_t) count * channel->block_size;
226 mutex_lock(data, STATS_MTX);
227 data->io_stats.bytes_read += size;
228 mutex_unlock(data, STATS_MTX);
229 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
230
231 if (data->flags & IO_FLAG_FORCE_BOUNCE)
232 goto bounce_read;
233
234 #ifdef HAVE_PREAD64
235 /* Try an aligned pread */
236 if ((channel->align == 0) ||
237 (IS_ALIGNED(buf, channel->align) &&
238 IS_ALIGNED(location, channel->align) &&
239 IS_ALIGNED(size, channel->align))) {
240 actual = pread64(data->dev, buf, size, location);
241 if (actual == size)
242 return 0;
243 actual = 0;
244 }
245 #elif HAVE_PREAD
246 /* Try an aligned pread */
247 if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
248 ((channel->align == 0) ||
249 (IS_ALIGNED(buf, channel->align) &&
250 IS_ALIGNED(location, channel->align) &&
251 IS_ALIGNED(size, channel->align)))) {
252 actual = pread(data->dev, buf, size, location);
253 if (actual == size)
254 return 0;
255 actual = 0;
256 }
257 #endif /* HAVE_PREAD */
258
259 if ((channel->align == 0) ||
260 (IS_ALIGNED(buf, channel->align) &&
261 IS_ALIGNED(location, channel->align) &&
262 IS_ALIGNED(size, channel->align))) {
263 mutex_lock(data, BOUNCE_MTX);
264 if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
265 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
266 goto error_unlock;
267 }
268 actual = read(data->dev, buf, size);
269 if (actual != size) {
270 short_read:
271 if (actual < 0) {
272 retval = errno;
273 actual = 0;
274 } else
275 retval = EXT2_ET_SHORT_READ;
276 goto error_unlock;
277 }
278 goto success_unlock;
279 }
280
281 #ifdef ALIGN_DEBUG
282 printf("raw_read_blk: O_DIRECT fallback: %p %lu\n", buf,
283 (unsigned long) size);
284 #endif
285
286 /*
287 * The buffer or size which we're trying to read isn't aligned
288 * to the O_DIRECT rules, so we need to do this the hard way...
289 */
290 bounce_read:
291 if (channel->align == 0)
292 channel->align = 1;
293 if ((channel->block_size > channel->align) &&
294 (channel->block_size % channel->align) == 0)
295 align_size = channel->block_size;
296 else
297 align_size = channel->align;
298 aligned_blk = location / align_size;
299 offset = location % align_size;
300
301 mutex_lock(data, BOUNCE_MTX);
302 if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
303 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
304 goto error_unlock;
305 }
306 while (size > 0) {
307 actual = read(data->dev, data->bounce, align_size);
308 if (actual != align_size) {
309 actual = really_read;
310 buf -= really_read;
311 size += really_read;
312 goto short_read;
313 }
314 if ((actual + offset) > align_size)
315 actual = align_size - offset;
316 if (actual > size)
317 actual = size;
318 memcpy(buf, (char *)data->bounce + offset, actual);
319
320 really_read += actual;
321 size -= actual;
322 buf += actual;
323 offset = 0;
324 aligned_blk++;
325 }
326 success_unlock:
327 mutex_unlock(data, BOUNCE_MTX);
328 return 0;
329
330 error_unlock:
331 mutex_unlock(data, BOUNCE_MTX);
332 if (actual >= 0 && actual < size)
333 memset((char *) buf+actual, 0, size-actual);
334 if (channel->read_error)
335 retval = (channel->read_error)(channel, block, count, buf,
336 size, actual, retval);
337 return retval;
338 }
339
340 #define RAW_WRITE_NO_HANDLER 1
341
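/*
 * Write 'count' blocks (or -count bytes) from 'bufv' starting at 'block'.
 * Like raw_read_blk(), this prefers an aligned pwrite()/pwrite64() or
 * llseek()+write(), and falls back to a read-modify-write cycle through
 * the bounce buffer for unaligned requests.  RAW_WRITE_NO_HANDLER in
 * 'flags' suppresses the channel's write_error handler so the caching
 * code can report the failure itself.
 */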
342 static errcode_t raw_write_blk(io_channel channel,
343 struct unix_private_data *data,
344 unsigned long long block,
345 int count, const void *bufv,
346 int flags)
347 {
348 ssize_t size;
349 ext2_loff_t location;
350 int actual = 0;
351 errcode_t retval;
352 const unsigned char *buf = bufv;
353 unsigned long long aligned_blk;
354 int align_size, offset;
355
356 if (count == 1)
357 size = channel->block_size;
358 else {
359 if (count < 0)
360 size = -count;
361 else
362 size = (ext2_loff_t) count * channel->block_size;
363 }
364 mutex_lock(data, STATS_MTX);
365 data->io_stats.bytes_written += size;
366 mutex_unlock(data, STATS_MTX);
367
368 location = ((ext2_loff_t) block * channel->block_size) + data->offset;
369
370 if (data->flags & IO_FLAG_FORCE_BOUNCE)
371 goto bounce_write;
372
373 #ifdef HAVE_PWRITE64
374 /* Try an aligned pwrite */
375 if ((channel->align == 0) ||
376 (IS_ALIGNED(buf, channel->align) &&
377 IS_ALIGNED(location, channel->align) &&
378 IS_ALIGNED(size, channel->align))) {
379 actual = pwrite64(data->dev, buf, size, location);
380 if (actual == size)
381 return 0;
382 }
383 #elif HAVE_PWRITE
384 /* Try an aligned pwrite */
385 if ((sizeof(off_t) >= sizeof(ext2_loff_t)) &&
386 ((channel->align == 0) ||
387 (IS_ALIGNED(buf, channel->align) &&
388 IS_ALIGNED(location, channel->align) &&
389 IS_ALIGNED(size, channel->align)))) {
390 actual = pwrite(data->dev, buf, size, location);
391 if (actual == size)
392 return 0;
393 }
394 #endif /* HAVE_PWRITE */
395
396 if ((channel->align == 0) ||
397 (IS_ALIGNED(buf, channel->align) &&
398 IS_ALIGNED(location, channel->align) &&
399 IS_ALIGNED(size, channel->align))) {
400 mutex_lock(data, BOUNCE_MTX);
401 if (ext2fs_llseek(data->dev, location, SEEK_SET) < 0) {
402 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
403 goto error_unlock;
404 }
405 actual = write(data->dev, buf, size);
406 mutex_unlock(data, BOUNCE_MTX);
407 if (actual < 0) {
408 retval = errno;
409 goto error_out;
410 }
411 if (actual != size) {
412 short_write:
413 retval = EXT2_ET_SHORT_WRITE;
414 goto error_out;
415 }
416 return 0;
417 }
418
419 #ifdef ALIGN_DEBUG
420 printf("raw_write_blk: O_DIRECT fallback: %p %lu\n", buf,
421 (unsigned long) size);
422 #endif
423 /*
424 * The buffer or size which we're trying to write isn't aligned
425 * to the O_DIRECT rules, so we need to do this the hard way...
426 */
427 bounce_write:
428 if (channel->align == 0)
429 channel->align = 1;
430 if ((channel->block_size > channel->align) &&
431 (channel->block_size % channel->align) == 0)
432 align_size = channel->block_size;
433 else
434 align_size = channel->align;
435 aligned_blk = location / align_size;
436 offset = location % align_size;
437
438 while (size > 0) {
439 int actual_w;
440
441 mutex_lock(data, BOUNCE_MTX);
442 if (size < align_size || offset) {
443 if (ext2fs_llseek(data->dev, aligned_blk * align_size,
444 SEEK_SET) < 0) {
445 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
446 goto error_unlock;
447 }
448 actual = read(data->dev, data->bounce,
449 align_size);
450 if (actual != align_size) {
451 if (actual < 0) {
452 retval = errno;
453 goto error_unlock;
454 }
455 memset((char *) data->bounce + actual, 0,
456 align_size - actual);
457 }
458 }
459 actual = size;
460 if ((actual + offset) > align_size)
461 actual = align_size - offset;
462 if (actual > size)
463 actual = size;
464 memcpy(((char *)data->bounce) + offset, buf, actual);
465 if (ext2fs_llseek(data->dev, aligned_blk * align_size, SEEK_SET) < 0) {
466 retval = errno ? errno : EXT2_ET_LLSEEK_FAILED;
467 goto error_unlock;
468 }
469 actual_w = write(data->dev, data->bounce, align_size);
470 mutex_unlock(data, BOUNCE_MTX);
471 if (actual_w < 0) {
472 retval = errno;
473 goto error_out;
474 }
475 if (actual_w != align_size)
476 goto short_write;
477 size -= actual;
478 buf += actual;
479 location += actual;
480 aligned_blk++;
481 offset = 0;
482 }
483 return 0;
484
485 error_unlock:
486 mutex_unlock(data, BOUNCE_MTX);
487 error_out:
488 if (((flags & RAW_WRITE_NO_HANDLER) == 0) && channel->write_error)
489 retval = (channel->write_error)(channel, block, count, buf,
490 size, actual, retval);
491 return retval;
492 }
493
494
495 /*
496 * Here we implement the cache functions
497 */
498
499 /* Allocate the cache buffers */
500 static errcode_t alloc_cache(io_channel channel,
501 struct unix_private_data *data)
502 {
503 errcode_t retval;
504 struct unix_cache *cache;
505 int i;
506
507 data->access_time = 0;
508 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
509 cache->block = 0;
510 cache->access_time = 0;
511 cache->dirty = 0;
512 cache->in_use = 0;
513 if (cache->buf)
514 ext2fs_free_mem(&cache->buf);
515 retval = io_channel_alloc_buf(channel, 0, &cache->buf);
516 if (retval)
517 return retval;
518 }
519 if (channel->align || data->flags & IO_FLAG_FORCE_BOUNCE) {
520 if (data->bounce)
521 ext2fs_free_mem(&data->bounce);
522 retval = io_channel_alloc_buf(channel, 0, &data->bounce);
523 }
524 return retval;
525 }
526
527 /* Free the cache buffers */
528 static void free_cache(struct unix_private_data *data)
529 {
530 struct unix_cache *cache;
531 int i;
532
533 data->access_time = 0;
534 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
535 cache->block = 0;
536 cache->access_time = 0;
537 cache->dirty = 0;
538 cache->in_use = 0;
539 if (cache->buf)
540 ext2fs_free_mem(&cache->buf);
541 }
542 if (data->bounce)
543 ext2fs_free_mem(&data->bounce);
544 }
545
546 #ifndef NO_IO_CACHE
547 /*
548 * Try to find a block in the cache. If the block is not found, and
549 * eldest is a non-zero pointer, then fill in eldest with the cache
550  * entry that should be reused.
551 */
552 static struct unix_cache *find_cached_block(struct unix_private_data *data,
553 unsigned long long block,
554 struct unix_cache **eldest)
555 {
556 struct unix_cache *cache, *unused_cache, *oldest_cache;
557 int i;
558
559 unused_cache = oldest_cache = 0;
560 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
561 if (!cache->in_use) {
562 if (!unused_cache)
563 unused_cache = cache;
564 continue;
565 }
566 if (cache->block == block) {
567 cache->access_time = ++data->access_time;
568 return cache;
569 }
570 if (!oldest_cache ||
571 (cache->access_time < oldest_cache->access_time))
572 oldest_cache = cache;
573 }
574 if (eldest)
575 *eldest = (unused_cache) ? unused_cache : oldest_cache;
576 return 0;
577 }
578
579 /*
580 * Reuse a particular cache entry for another block.
581 */
582 static errcode_t reuse_cache(io_channel channel,
583 struct unix_private_data *data, struct unix_cache *cache,
584 unsigned long long block)
585 {
586 if (cache->dirty && cache->in_use) {
587 errcode_t retval;
588
589 retval = raw_write_blk(channel, data, cache->block, 1,
590 cache->buf, RAW_WRITE_NO_HANDLER);
591 if (retval) {
592 cache->write_err = 1;
593 return retval;
594 }
595 }
596
597 cache->in_use = 1;
598 cache->dirty = 0;
599 cache->write_err = 0;
600 cache->block = block;
601 cache->access_time = ++data->access_time;
602 return 0;
603 }
604
605 #define FLUSH_INVALIDATE 0x01
606 #define FLUSH_NOLOCK 0x02
607
608 /*
609 * Flush all of the blocks in the cache
610 */
611 static errcode_t flush_cached_blocks(io_channel channel,
612 struct unix_private_data *data,
613 int flags)
614 {
615 struct unix_cache *cache;
616 errcode_t retval, retval2 = 0;
617 int i;
618 int errors_found = 0;
619
620 if ((flags & FLUSH_NOLOCK) == 0)
621 mutex_lock(data, CACHE_MTX);
622 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
623 if (!cache->in_use || !cache->dirty)
624 continue;
625 retval = raw_write_blk(channel, data,
626 cache->block, 1, cache->buf,
627 RAW_WRITE_NO_HANDLER);
628 if (retval) {
629 cache->write_err = 1;
630 errors_found = 1;
631 retval2 = retval;
632 } else {
633 cache->dirty = 0;
634 cache->write_err = 0;
635 if (flags & FLUSH_INVALIDATE)
636 cache->in_use = 0;
637 }
638 }
639 if ((flags & FLUSH_NOLOCK) == 0)
640 mutex_unlock(data, CACHE_MTX);
641 retry:
642 while (errors_found) {
643 if ((flags & FLUSH_NOLOCK) == 0)
644 mutex_lock(data, CACHE_MTX);
645 errors_found = 0;
646 for (i=0, cache = data->cache; i < CACHE_SIZE; i++, cache++) {
647 if (!cache->in_use || !cache->write_err)
648 continue;
649 errors_found = 1;
650 if (cache->write_err && channel->write_error) {
651 char *err_buf = NULL;
652 unsigned long long err_block = cache->block;
653
654 cache->dirty = 0;
655 cache->in_use = 0;
656 cache->write_err = 0;
657 if (io_channel_alloc_buf(channel, 0,
658 &err_buf))
659 err_buf = NULL;
660 else
661 memcpy(err_buf, cache->buf,
662 channel->block_size);
663 mutex_unlock(data, CACHE_MTX);
664 (channel->write_error)(channel, err_block,
665 1, err_buf, channel->block_size, -1,
666 retval2);
667 if (err_buf)
668 ext2fs_free_mem(&err_buf);
669 goto retry;
670 } else
671 cache->write_err = 0;
672 }
673 if ((flags & FLUSH_NOLOCK) == 0)
674 mutex_unlock(data, CACHE_MTX);
675 }
676 return retval2;
677 }
678 #endif /* NO_IO_CACHE */
679
680 #ifdef __linux__
681 #ifndef BLKDISCARDZEROES
682 #define BLKDISCARDZEROES _IO(0x12,124)
683 #endif
684 #endif
685
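/*
 * Thin wrappers that use the 64-bit variants (open64/stat64/fstat64)
 * where available, so large devices and image files work on 32-bit
 * systems.
 */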
686 int ext2fs_open_file(const char *pathname, int flags, mode_t mode)
687 {
688 if (mode)
689 #if defined(HAVE_OPEN64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
690 return open64(pathname, flags, mode);
691 else
692 return open64(pathname, flags);
693 #else
694 return open(pathname, flags, mode);
695 else
696 return open(pathname, flags);
697 #endif
698 }
699
700 int ext2fs_stat(const char *path, ext2fs_struct_stat *buf)
701 {
702 #if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
703 return stat64(path, buf);
704 #else
705 return stat(path, buf);
706 #endif
707 }
708
709 int ext2fs_fstat(int fd, ext2fs_struct_stat *buf)
710 {
711 #if defined(HAVE_FSTAT64) && !defined(__OSX_AVAILABLE_BUT_DEPRECATED)
712 return fstat64(fd, buf);
713 #else
714 return fstat(fd, buf);
715 #endif
716 }
717
718
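/*
 * Common back end for unix_open() and unixfd_open(): build the channel
 * and its private data around an already-open file descriptor, detect
 * the required I/O alignment, allocate the block cache and bounce
 * buffer, and apply the platform-specific workarounds below.
 */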
719 static errcode_t unix_open_channel(const char *name, int fd,
720 int flags, io_channel *channel,
721 io_manager io_mgr)
722 {
723 io_channel io = NULL;
724 struct unix_private_data *data = NULL;
725 errcode_t retval;
726 ext2fs_struct_stat st;
727 #ifdef __linux__
728 struct utsname ut;
729 #endif
730
731 if (safe_getenv("UNIX_IO_FORCE_BOUNCE"))
732 flags |= IO_FLAG_FORCE_BOUNCE;
733
734 #ifdef __linux__
735 /*
736 * We need to make sure any previous errors in the block
737 * device are thrown away, sigh.
738 */
739 (void) fsync(fd);
740 #endif
741
742 retval = ext2fs_get_mem(sizeof(struct struct_io_channel), &io);
743 if (retval)
744 goto cleanup;
745 memset(io, 0, sizeof(struct struct_io_channel));
746 io->magic = EXT2_ET_MAGIC_IO_CHANNEL;
747 retval = ext2fs_get_mem(sizeof(struct unix_private_data), &data);
748 if (retval)
749 goto cleanup;
750
751 io->manager = io_mgr;
752 retval = ext2fs_get_mem(strlen(name)+1, &io->name);
753 if (retval)
754 goto cleanup;
755
756 strcpy(io->name, name);
757 io->private_data = data;
758 io->block_size = 1024;
759 io->read_error = 0;
760 io->write_error = 0;
761 io->refcount = 1;
762 io->flags = 0;
763
764 memset(data, 0, sizeof(struct unix_private_data));
765 data->magic = EXT2_ET_MAGIC_UNIX_IO_CHANNEL;
766 data->io_stats.num_fields = 2;
767 data->flags = flags;
768 data->dev = fd;
769
770 #if defined(O_DIRECT)
771 if (flags & IO_FLAG_DIRECT_IO)
772 io->align = ext2fs_get_dio_alignment(data->dev);
773 #elif defined(F_NOCACHE)
774 if (flags & IO_FLAG_DIRECT_IO)
775 io->align = 4096;
776 #endif
777
778 /*
779 * If the device is really a block device, then set the
780  * appropriate flag, otherwise we can set the DISCARD_ZEROES flag
781  * because we are going to use punch hole instead of discard,
782  * and if that succeeds, subsequent reads from the sparse area
783  * return zero.
784 */
785 if (ext2fs_fstat(data->dev, &st) == 0) {
786 if (ext2fsP_is_disk_device(st.st_mode))
787 io->flags |= CHANNEL_FLAGS_BLOCK_DEVICE;
788 else
789 io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
790 }
791
792 #ifdef BLKDISCARDZEROES
793 {
794 int zeroes = 0;
795 if (ioctl(data->dev, BLKDISCARDZEROES, &zeroes) == 0 &&
796 zeroes)
797 io->flags |= CHANNEL_FLAGS_DISCARD_ZEROES;
798 }
799 #endif
800
801 #if defined(__CYGWIN__)
802 /*
803 * Some operating systems require that the buffers be aligned,
804 * regardless of O_DIRECT
805 */
806 if (!io->align)
807 io->align = 512;
808 #endif
809
810 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
811 if (io->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
812 int dio_align = ext2fs_get_dio_alignment(fd);
813
814 if (io->align < dio_align)
815 io->align = dio_align;
816 }
817 #endif
818
819 if ((retval = alloc_cache(io, data)))
820 goto cleanup;
821
822 #ifdef BLKROGET
823 if (flags & IO_FLAG_RW) {
824 int error;
825 int readonly = 0;
826
827 /* Is the block device actually writable? */
828 error = ioctl(data->dev, BLKROGET, &readonly);
829 if (!error && readonly) {
830 retval = EPERM;
831 goto cleanup;
832 }
833 }
834 #endif
835
836 #ifdef __linux__
837 #undef RLIM_INFINITY
838 #if (defined(__alpha__) || ((defined(__sparc__) || defined(__mips__)) && (SIZEOF_LONG == 4)))
839 #define RLIM_INFINITY ((unsigned long)(~0UL>>1))
840 #else
841 #define RLIM_INFINITY (~0UL)
842 #endif
843 /*
844 * Work around a bug in 2.4.10-2.4.18 kernels where writes to
845 * block devices are wrongly getting hit by the filesize
846 * limit. This workaround isn't perfect, since it won't work
847 * if glibc wasn't built against 2.2 header files. (Sigh.)
848 *
849 */
850 if ((flags & IO_FLAG_RW) &&
851 (uname(&ut) == 0) &&
852 ((ut.release[0] == '2') && (ut.release[1] == '.') &&
853 (ut.release[2] == '4') && (ut.release[3] == '.') &&
854 (ut.release[4] == '1') && (ut.release[5] >= '0') &&
855 (ut.release[5] < '8')) &&
856 (ext2fs_fstat(data->dev, &st) == 0) &&
857 (ext2fsP_is_disk_device(st.st_mode))) {
858 struct rlimit rlim;
859
860 rlim.rlim_cur = rlim.rlim_max = (unsigned long) RLIM_INFINITY;
861 setrlimit(RLIMIT_FSIZE, &rlim);
862 getrlimit(RLIMIT_FSIZE, &rlim);
863 if (((unsigned long) rlim.rlim_cur) <
864 ((unsigned long) rlim.rlim_max)) {
865 rlim.rlim_cur = rlim.rlim_max;
866 setrlimit(RLIMIT_FSIZE, &rlim);
867 }
868 }
869 #endif
870 #ifdef HAVE_PTHREAD
871 if (flags & IO_FLAG_THREADS) {
872 io->flags |= CHANNEL_FLAGS_THREADS;
873 retval = pthread_mutex_init(&data->cache_mutex, NULL);
874 if (retval)
875 goto cleanup;
876 retval = pthread_mutex_init(&data->bounce_mutex, NULL);
877 if (retval) {
878 pthread_mutex_destroy(&data->cache_mutex);
879 goto cleanup;
880 }
881 retval = pthread_mutex_init(&data->stats_mutex, NULL);
882 if (retval) {
883 pthread_mutex_destroy(&data->cache_mutex);
884 pthread_mutex_destroy(&data->bounce_mutex);
885 goto cleanup;
886 }
887 }
888 #endif
889 *channel = io;
890 return 0;
891
892 cleanup:
893 if (data) {
894 if (data->dev >= 0)
895 close(data->dev);
896 free_cache(data);
897 ext2fs_free_mem(&data);
898 }
899 if (io) {
900 if (io->name) {
901 ext2fs_free_mem(&io->name);
902 }
903 ext2fs_free_mem(&io);
904 }
905 return retval;
906 }
907
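/*
 * Open a channel on an already-open file descriptor; 'str_fd' is the
 * descriptor number in decimal.  The IO_FLAG_* values are derived from
 * the descriptor's own open flags rather than taken from the caller.
 */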
908 static errcode_t unixfd_open(const char *str_fd, int flags,
909 io_channel *channel)
910 {
911 int fd;
912 int fd_flags;
913
914 fd = atoi(str_fd);
915 #if defined(HAVE_FCNTL)
916 fd_flags = fcntl(fd, F_GETFL); /* file status flags, not FD flags */
917 if (fd_flags == -1)
918 return EBADF;
919 
920 flags = 0;
921 if ((fd_flags & O_ACCMODE) != O_RDONLY)
922 flags |= IO_FLAG_RW;
923 if (fd_flags & O_EXCL)
924 flags |= IO_FLAG_EXCLUSIVE;
925 #if defined(O_DIRECT)
926 if (fd_flags & O_DIRECT)
927 flags |= IO_FLAG_DIRECT_IO;
928 #endif
929 #endif /* HAVE_FCNTL */
930
931 return unix_open_channel(str_fd, fd, flags, channel, unixfd_io_manager);
932 }
933
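/*
 * Open the named device or image file and build a unix_io channel on it,
 * honoring IO_FLAG_RW, IO_FLAG_EXCLUSIVE and IO_FLAG_DIRECT_IO.
 */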
934 static errcode_t unix_open(const char *name, int flags,
935 io_channel *channel)
936 {
937 int fd = -1;
938 int open_flags;
939
940 if (name == 0)
941 return EXT2_ET_BAD_DEVICE_NAME;
942
943 open_flags = (flags & IO_FLAG_RW) ? O_RDWR : O_RDONLY;
944 if (flags & IO_FLAG_EXCLUSIVE)
945 open_flags |= O_EXCL;
946 #if defined(O_DIRECT)
947 if (flags & IO_FLAG_DIRECT_IO)
948 open_flags |= O_DIRECT;
949 #endif
950 fd = ext2fs_open_file(name, open_flags, 0);
951 if (fd < 0)
952 return errno;
953 #if defined(F_NOCACHE) && !defined(IO_DIRECT)
954 if (flags & IO_FLAG_DIRECT_IO) {
955 if (fcntl(fd, F_NOCACHE, 1) < 0)
956 return errno;
957 }
958 #endif
959 return unix_open_channel(name, fd, flags, channel, unix_io_manager);
960 }
961
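/*
 * Drop a reference to the channel; when the last reference goes away,
 * flush the cache, close the file descriptor and free everything.
 */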
962 static errcode_t unix_close(io_channel channel)
963 {
964 struct unix_private_data *data;
965 errcode_t retval = 0;
966
967 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
968 data = (struct unix_private_data *) channel->private_data;
969 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
970
971 if (--channel->refcount > 0)
972 return 0;
973
974 #ifndef NO_IO_CACHE
975 retval = flush_cached_blocks(channel, data, 0);
976 #endif
977
978 if (close(data->dev) < 0)
979 retval = errno;
980 free_cache(data);
981 #ifdef HAVE_PTHREAD
982 if (data->flags & IO_FLAG_THREADS) {
983 pthread_mutex_destroy(&data->cache_mutex);
984 pthread_mutex_destroy(&data->bounce_mutex);
985 pthread_mutex_destroy(&data->stats_mutex);
986 }
987 #endif
988
989 ext2fs_free_mem(&channel->private_data);
990 if (channel->name)
991 ext2fs_free_mem(&channel->name);
992 ext2fs_free_mem(&channel);
993 return retval;
994 }
995
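/*
 * Change the channel block size.  Cached blocks are flushed first, and
 * the cache and bounce buffer are reallocated at the new size.
 */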
996 static errcode_t unix_set_blksize(io_channel channel, int blksize)
997 {
998 struct unix_private_data *data;
999 errcode_t retval = 0;
1000
1001 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1002 data = (struct unix_private_data *) channel->private_data;
1003 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1004
1005 if (channel->block_size != blksize) {
1006 mutex_lock(data, CACHE_MTX);
1007 mutex_lock(data, BOUNCE_MTX);
1008 #ifndef NO_IO_CACHE
1009 if ((retval = flush_cached_blocks(channel, data, FLUSH_NOLOCK))){
1010 mutex_unlock(data, BOUNCE_MTX);
1011 mutex_unlock(data, CACHE_MTX);
1012 return retval;
1013 }
1014 #endif
1015
1016 channel->block_size = blksize;
1017 free_cache(data);
1018 retval = alloc_cache(channel, data);
1019 mutex_unlock(data, BOUNCE_MTX);
1020 mutex_unlock(data, CACHE_MTX);
1021 }
1022 return retval;
1023 }
1024
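/*
 * Read blocks through the cache.  Odd-sized or large requests bypass the
 * cache (after flushing it); otherwise cached blocks are copied out
 * directly and the gaps are filled with raw reads, whose results are then
 * inserted into the cache.
 */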
1025 static errcode_t unix_read_blk64(io_channel channel, unsigned long long block,
1026 int count, void *buf)
1027 {
1028 struct unix_private_data *data;
1029 struct unix_cache *cache;
1030 errcode_t retval;
1031 char *cp;
1032 int i, j;
1033
1034 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1035 data = (struct unix_private_data *) channel->private_data;
1036 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1037
1038 #ifdef NO_IO_CACHE
1039 return raw_read_blk(channel, data, block, count, buf);
1040 #else
1041 if (data->flags & IO_FLAG_NOCACHE)
1042 return raw_read_blk(channel, data, block, count, buf);
1043 /*
1044 * If we're doing an odd-sized read or a very large read,
1045 * flush out the cache and then do a direct read.
1046 */
1047 if (count < 0 || count > WRITE_DIRECT_SIZE) {
1048 if ((retval = flush_cached_blocks(channel, data, 0)))
1049 return retval;
1050 return raw_read_blk(channel, data, block, count, buf);
1051 }
1052
1053 cp = buf;
1054 mutex_lock(data, CACHE_MTX);
1055 while (count > 0) {
1056 /* If it's in the cache, use it! */
1057 if ((cache = find_cached_block(data, block, NULL))) {
1058 #ifdef DEBUG
1059 printf("Using cached block %llu\n", block);
1060 #endif
1061 memcpy(cp, cache->buf, channel->block_size);
1062 count--;
1063 block++;
1064 cp += channel->block_size;
1065 continue;
1066 }
1067
1068 /*
1069 * Find the number of uncached blocks so we can do a
1070 * single read request
1071 */
1072 for (i=1; i < count; i++)
1073 if (find_cached_block(data, block+i, NULL))
1074 break;
1075 #ifdef DEBUG
1076 printf("Reading %d blocks starting at %llu\n", i, block);
1077 #endif
1078 mutex_unlock(data, CACHE_MTX);
1079 if ((retval = raw_read_blk(channel, data, block, i, cp)))
1080 return retval;
1081 mutex_lock(data, CACHE_MTX);
1082
1083 /* Save the results in the cache */
1084 for (j=0; j < i; j++) {
1085 if (!find_cached_block(data, block, &cache)) {
1086 retval = reuse_cache(channel, data,
1087 cache, block);
1088 if (retval)
1089 goto call_write_handler;
1090 memcpy(cache->buf, cp, channel->block_size);
1091 }
1092 count--;
1093 block++;
1094 cp += channel->block_size;
1095 }
1096 }
1097 mutex_unlock(data, CACHE_MTX);
1098 return 0;
1099
1100 call_write_handler:
1101 if (cache->write_err && channel->write_error) {
1102 char *err_buf = NULL;
1103 unsigned long long err_block = cache->block;
1104
1105 cache->dirty = 0;
1106 cache->in_use = 0;
1107 cache->write_err = 0;
1108 if (io_channel_alloc_buf(channel, 0, &err_buf))
1109 err_buf = NULL;
1110 else
1111 memcpy(err_buf, cache->buf, channel->block_size);
1112 mutex_unlock(data, CACHE_MTX);
1113 (channel->write_error)(channel, err_block, 1, err_buf,
1114 channel->block_size, -1,
1115 retval);
1116 if (err_buf)
1117 ext2fs_free_mem(&err_buf);
1118 } else
1119 mutex_unlock(data, CACHE_MTX);
1120 return retval;
1121 #endif /* NO_IO_CACHE */
1122 }
1123
1124 static errcode_t unix_read_blk(io_channel channel, unsigned long block,
1125 int count, void *buf)
1126 {
1127 return unix_read_blk64(channel, block, count, buf);
1128 }
1129
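/*
 * Write blocks through the cache.  Odd-sized or large requests invalidate
 * the cache and go straight to raw_write_blk(); otherwise the data lands
 * in the cache, and is also written out immediately when the channel is
 * in write-through mode.
 */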
1130 static errcode_t unix_write_blk64(io_channel channel, unsigned long long block,
1131 int count, const void *buf)
1132 {
1133 struct unix_private_data *data;
1134 struct unix_cache *cache, *reuse;
1135 errcode_t retval = 0;
1136 const char *cp;
1137 int writethrough;
1138
1139 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1140 data = (struct unix_private_data *) channel->private_data;
1141 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1142
1143 #ifdef NO_IO_CACHE
1144 return raw_write_blk(channel, data, block, count, buf, 0);
1145 #else
1146 if (data->flags & IO_FLAG_NOCACHE)
1147 return raw_write_blk(channel, data, block, count, buf, 0);
1148 /*
1149 * If we're doing an odd-sized write or a very large write,
1150 * flush out the cache completely and then do a direct write.
1151 */
1152 if (count < 0 || count > WRITE_DIRECT_SIZE) {
1153 if ((retval = flush_cached_blocks(channel, data,
1154 FLUSH_INVALIDATE)))
1155 return retval;
1156 return raw_write_blk(channel, data, block, count, buf, 0);
1157 }
1158
1159 /*
1160 * For a moderate-sized multi-block write, first force a write
1161 * if we're in write-through cache mode, and then fill the
1162 * cache with the blocks.
1163 */
1164 writethrough = channel->flags & CHANNEL_FLAGS_WRITETHROUGH;
1165 if (writethrough)
1166 retval = raw_write_blk(channel, data, block, count, buf, 0);
1167
1168 cp = buf;
1169 mutex_lock(data, CACHE_MTX);
1170 while (count > 0) {
1171 cache = find_cached_block(data, block, &reuse);
1172 if (!cache) {
1173 errcode_t err;
1174
1175 cache = reuse;
1176 err = reuse_cache(channel, data, cache, block);
1177 if (err)
1178 goto call_write_handler;
1179 }
1180 if (cache->buf != cp)
1181 memcpy(cache->buf, cp, channel->block_size);
1182 cache->dirty = !writethrough;
1183 count--;
1184 block++;
1185 cp += channel->block_size;
1186 }
1187 mutex_unlock(data, CACHE_MTX);
1188 return retval;
1189
1190 call_write_handler:
1191 if (cache->write_err && channel->write_error) {
1192 char *err_buf = NULL;
1193 unsigned long long err_block = cache->block;
1194
1195 cache->dirty = 0;
1196 cache->in_use = 0;
1197 cache->write_err = 0;
1198 if (io_channel_alloc_buf(channel, 0, &err_buf))
1199 err_buf = NULL;
1200 else
1201 memcpy(err_buf, cache->buf, channel->block_size);
1202 mutex_unlock(data, CACHE_MTX);
1203 (channel->write_error)(channel, err_block, 1, err_buf,
1204 channel->block_size, -1,
1205 retval);
1206 if (err_buf)
1207 ext2fs_free_mem(&err_buf);
1208 } else
1209 mutex_unlock(data, CACHE_MTX);
1210 return retval;
1211 #endif /* NO_IO_CACHE */
1212 }
1213
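/*
 * Hint to the kernel that the given block range will be needed soon,
 * using posix_fadvise(POSIX_FADV_WILLNEED) where available.
 */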
1214 static errcode_t unix_cache_readahead(io_channel channel,
1215 unsigned long long block,
1216 unsigned long long count)
1217 {
1218 #ifdef POSIX_FADV_WILLNEED
1219 struct unix_private_data *data;
1220
1221 data = (struct unix_private_data *)channel->private_data;
1222 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1223 return posix_fadvise(data->dev,
1224 (ext2_loff_t)block * channel->block_size + data->offset,
1225 (ext2_loff_t)count * channel->block_size,
1226 POSIX_FADV_WILLNEED);
1227 #else
1228 return EXT2_ET_OP_NOT_SUPPORTED;
1229 #endif
1230 }
1231
1232 static errcode_t unix_write_blk(io_channel channel, unsigned long block,
1233 int count, const void *buf)
1234 {
1235 return unix_write_blk64(channel, block, count, buf);
1236 }
1237
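/*
 * Write an arbitrary byte range, bypassing the block cache (which is
 * flushed and invalidated first).  Not supported on channels that require
 * aligned (O_DIRECT-style) I/O.
 */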
1238 static errcode_t unix_write_byte(io_channel channel, unsigned long offset,
1239 int size, const void *buf)
1240 {
1241 struct unix_private_data *data;
1242 errcode_t retval = 0;
1243 ssize_t actual;
1244
1245 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1246 data = (struct unix_private_data *) channel->private_data;
1247 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1248
1249 if (channel->align != 0) {
1250 #ifdef ALIGN_DEBUG
1251 printf("unix_write_byte: O_DIRECT fallback\n");
1252 #endif
1253 return EXT2_ET_UNIMPLEMENTED;
1254 }
1255
1256 #ifndef NO_IO_CACHE
1257 /*
1258 * Flush out the cache completely
1259 */
1260 if ((retval = flush_cached_blocks(channel, data, FLUSH_INVALIDATE)))
1261 return retval;
1262 #endif
1263
1264 if (lseek(data->dev, offset + data->offset, SEEK_SET) < 0)
1265 return errno;
1266
1267 actual = write(data->dev, buf, size);
1268 if (actual < 0)
1269 return errno;
1270 if (actual != size)
1271 return EXT2_ET_SHORT_WRITE;
1272
1273 return 0;
1274 }
1275
1276 /*
1277 * Flush data buffers to disk.
1278 */
1279 static errcode_t unix_flush(io_channel channel)
1280 {
1281 struct unix_private_data *data;
1282 errcode_t retval = 0;
1283
1284 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1285 data = (struct unix_private_data *) channel->private_data;
1286 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1287
1288 #ifndef NO_IO_CACHE
1289 retval = flush_cached_blocks(channel, data, 0);
1290 #endif
1291 #ifdef HAVE_FSYNC
1292 if (!retval && fsync(data->dev) != 0)
1293 return errno;
1294 #endif
1295 return retval;
1296 }
1297
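/*
 * Handle channel options.  Two options are recognized: "offset", the byte
 * offset of the filesystem within the device or image (for example
 * "offset=1048576" for a filesystem starting 1 MiB in), and "cache",
 * which may be set to "on" or "off".  These normally arrive here via the
 * io_options string handed to ext2fs_open2().
 */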
1298 static errcode_t unix_set_option(io_channel channel, const char *option,
1299 const char *arg)
1300 {
1301 struct unix_private_data *data;
1302 unsigned long long tmp;
1303 errcode_t retval;
1304 char *end;
1305
1306 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1307 data = (struct unix_private_data *) channel->private_data;
1308 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1309
1310 if (!strcmp(option, "offset")) {
1311 if (!arg)
1312 return EXT2_ET_INVALID_ARGUMENT;
1313
1314 tmp = strtoull(arg, &end, 0);
1315 if (*end)
1316 return EXT2_ET_INVALID_ARGUMENT;
1317 data->offset = tmp;
1318 if (data->offset < 0)
1319 return EXT2_ET_INVALID_ARGUMENT;
1320 return 0;
1321 }
1322 if (!strcmp(option, "cache")) {
1323 if (!arg)
1324 return EXT2_ET_INVALID_ARGUMENT;
1325 if (!strcmp(arg, "on")) {
1326 data->flags &= ~IO_FLAG_NOCACHE;
1327 return 0;
1328 }
1329 if (!strcmp(arg, "off")) {
1330 retval = flush_cached_blocks(channel, data, 0);
1331 data->flags |= IO_FLAG_NOCACHE;
1332 return retval;
1333 }
1334 return EXT2_ET_INVALID_ARGUMENT;
1335 }
1336 return EXT2_ET_INVALID_ARGUMENT;
1337 }
1338
1339 #if defined(__linux__) && !defined(BLKDISCARD)
1340 #define BLKDISCARD _IO(0x12,119)
1341 #endif
1342
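/*
 * Discard a range of blocks: BLKDISCARD on block devices, a punch-hole
 * fallocate() on regular files.  Returns EXT2_ET_UNIMPLEMENTED when
 * neither mechanism is available or supported by the underlying storage.
 */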
1343 static errcode_t unix_discard(io_channel channel, unsigned long long block,
1344 unsigned long long count)
1345 {
1346 struct unix_private_data *data;
1347 int ret;
1348
1349 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1350 data = (struct unix_private_data *) channel->private_data;
1351 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1352
1353 if (channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE) {
1354 #ifdef BLKDISCARD
1355 __u64 range[2];
1356
1357 range[0] = (__u64)(block) * channel->block_size + data->offset;
1358 range[1] = (__u64)(count) * channel->block_size;
1359
1360 ret = ioctl(data->dev, BLKDISCARD, &range);
1361 #else
1362 goto unimplemented;
1363 #endif
1364 } else {
1365 #if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE)
1366 /*
1367  * If we are not on a block device, try to use punch hole
1368 * to reclaim free space.
1369 */
1370 ret = fallocate(data->dev,
1371 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1372 (off_t)(block) * channel->block_size + data->offset,
1373 (off_t)(count) * channel->block_size);
1374 #else
1375 goto unimplemented;
1376 #endif
1377 }
1378 if (ret < 0) {
1379 if (errno == EOPNOTSUPP)
1380 goto unimplemented;
1381 return errno;
1382 }
1383 return 0;
1384 unimplemented:
1385 return EXT2_ET_UNIMPLEMENTED;
1386 }
1387
1388 /*
1389 * If we know about ZERO_RANGE, try that before we try PUNCH_HOLE because
1390 * ZERO_RANGE doesn't unmap preallocated blocks. We prefer fallocate because
1391 * it always invalidates page cache, and libext2fs requires that reads after
1392 * ZERO_RANGE return zeroes.
1393 */
1394 static int __unix_zeroout(int fd, off_t offset, off_t len)
1395 {
1396 int ret = -1;
1397
1398 #if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_ZERO_RANGE)
1399 ret = fallocate(fd, FALLOC_FL_ZERO_RANGE, offset, len);
1400 if (ret == 0)
1401 return 0;
1402 #endif
1403 #if defined(HAVE_FALLOCATE) && defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
1404 ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
1405 offset, len);
1406 if (ret == 0)
1407 return 0;
1408 #endif
1409 errno = EOPNOTSUPP;
1410 return ret;
1411 }
1412
1413 /* parameters might not be used if OS doesn't support zeroout */
1414 #if __GNUC_PREREQ (4, 6)
1415 #pragma GCC diagnostic push
1416 #pragma GCC diagnostic ignored "-Wunused-parameter"
1417 #endif
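/*
 * Zero a range of blocks without writing zero-filled buffers: on regular
 * files, extend the file first if the range reaches past EOF, then let
 * __unix_zeroout() try FALLOC_FL_ZERO_RANGE and punch-hole in turn.
 * Setting UNIX_IO_NOZEROOUT in the environment disables this path.
 */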
1418 static errcode_t unix_zeroout(io_channel channel, unsigned long long block,
1419 unsigned long long count)
1420 {
1421 struct unix_private_data *data;
1422 int ret;
1423
1424 EXT2_CHECK_MAGIC(channel, EXT2_ET_MAGIC_IO_CHANNEL);
1425 data = (struct unix_private_data *) channel->private_data;
1426 EXT2_CHECK_MAGIC(data, EXT2_ET_MAGIC_UNIX_IO_CHANNEL);
1427
1428 if (safe_getenv("UNIX_IO_NOZEROOUT"))
1429 goto unimplemented;
1430
1431 if (!(channel->flags & CHANNEL_FLAGS_BLOCK_DEVICE)) {
1432 /* Regular file, try to use truncate/punch/zero. */
1433 struct stat statbuf;
1434
1435 if (count == 0)
1436 return 0;
1437 /*
1438 * If we're trying to zero a range past the end of the file,
1439 * extend the file size, then truncate everything.
1440 */
1441 ret = fstat(data->dev, &statbuf);
1442 if (ret)
1443 goto err;
1444 if ((unsigned long long) statbuf.st_size <
1445 (block + count) * channel->block_size + data->offset) {
1446 ret = ftruncate(data->dev,
1447 (block + count) * channel->block_size + data->offset);
1448 if (ret)
1449 goto err;
1450 }
1451 }
1452
1453 ret = __unix_zeroout(data->dev,
1454 (off_t)(block) * channel->block_size + data->offset,
1455 (off_t)(count) * channel->block_size);
1456 err:
1457 if (ret < 0) {
1458 if (errno == EOPNOTSUPP)
1459 goto unimplemented;
1460 return errno;
1461 }
1462 return 0;
1463 unimplemented:
1464 return EXT2_ET_UNIMPLEMENTED;
1465 }
1466 #if __GNUC_PREREQ (4, 6)
1467 #pragma GCC diagnostic pop
1468 #endif
1469
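/*
 * The I/O manager dispatch tables.  Callers normally do not invoke the
 * functions above directly; they pass unix_io_manager (or, for an
 * already-open descriptor, unixfd_io_manager) to ext2fs_open2() and go
 * through the io_channel_*() wrappers, which dispatch via these tables.
 */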
1470 static struct struct_io_manager struct_unix_manager = {
1471 .magic = EXT2_ET_MAGIC_IO_MANAGER,
1472 .name = "Unix I/O Manager",
1473 .open = unix_open,
1474 .close = unix_close,
1475 .set_blksize = unix_set_blksize,
1476 .read_blk = unix_read_blk,
1477 .write_blk = unix_write_blk,
1478 .flush = unix_flush,
1479 .write_byte = unix_write_byte,
1480 .set_option = unix_set_option,
1481 .get_stats = unix_get_stats,
1482 .read_blk64 = unix_read_blk64,
1483 .write_blk64 = unix_write_blk64,
1484 .discard = unix_discard,
1485 .cache_readahead = unix_cache_readahead,
1486 .zeroout = unix_zeroout,
1487 };
1488
1489 io_manager unix_io_manager = &struct_unix_manager;
1490
1491 static struct struct_io_manager struct_unixfd_manager = {
1492 .magic = EXT2_ET_MAGIC_IO_MANAGER,
1493 .name = "Unix fd I/O Manager",
1494 .open = unixfd_open,
1495 .close = unix_close,
1496 .set_blksize = unix_set_blksize,
1497 .read_blk = unix_read_blk,
1498 .write_blk = unix_write_blk,
1499 .flush = unix_flush,
1500 .write_byte = unix_write_byte,
1501 .set_option = unix_set_option,
1502 .get_stats = unix_get_stats,
1503 .read_blk64 = unix_read_blk64,
1504 .write_blk64 = unix_write_blk64,
1505 .discard = unix_discard,
1506 .cache_readahead = unix_cache_readahead,
1507 .zeroout = unix_zeroout,
1508 };
1509
1510 io_manager unixfd_io_manager = &struct_unixfd_manager;