]> git.ipfire.org Git - thirdparty/xfsprogs-dev.git/blob - libxfs/rdwr.c
libxfs: remove unused argument in trans_iput
[thirdparty/xfsprogs-dev.git] / libxfs / rdwr.c
1 /*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <xfs/libxfs.h>
20 #include "init.h"
21
22 #define BDSTRAT_SIZE (256 * 1024)
23
24 #define IO_BCOMPARE_CHECK
25
/*
 * Zero "len" basic blocks of the device behind "btp", starting at daddr
 * "start".  A single aligned zero buffer of at most BDSTRAT_SIZE bytes is
 * written repeatedly; any allocation, seek or write failure is fatal
 * (exit(1)), matching libxfs convention for unrecoverable device errors.
 */
void
libxfs_device_zero(struct xfs_buftarg *btp, xfs_daddr_t start, uint len)
{
	xfs_off_t	start_offset, end_offset, offset;
	ssize_t		zsize, bytes;
	char		*z;
	int		fd;

	/* the zero buffer must be aligned for (possibly direct) device I/O */
	zsize = min(BDSTRAT_SIZE, BBTOB(len));
	if ((z = memalign(libxfs_device_alignment(), zsize)) == NULL) {
		fprintf(stderr,
			_("%s: %s can't memalign %d bytes: %s\n"),
			progname, __FUNCTION__, (int)zsize, strerror(errno));
		exit(1);
	}
	memset(z, 0, zsize);

	fd = libxfs_device_to_fd(btp->dev);
	start_offset = LIBXFS_BBTOOFF64(start);

	if ((lseek64(fd, start_offset, SEEK_SET)) < 0) {
		fprintf(stderr, _("%s: %s seek to offset %llu failed: %s\n"),
			progname, __FUNCTION__,
			(unsigned long long)start_offset, strerror(errno));
		exit(1);
	}

	/* end_offset is the number of bytes to zero, relative to start_offset */
	end_offset = LIBXFS_BBTOOFF64(start + len) - start_offset;
	for (offset = 0; offset < end_offset; ) {
		bytes = min((ssize_t)(end_offset - offset), zsize);
		if ((bytes = write(fd, z, bytes)) < 0) {
			fprintf(stderr, _("%s: %s write failed: %s\n"),
				progname, __FUNCTION__, strerror(errno));
			exit(1);
		} else if (bytes == 0) {
			/* zero-length write: bail rather than spin forever */
			fprintf(stderr, _("%s: %s not progressing?\n"),
				progname, __FUNCTION__);
			exit(1);
		}
		offset += bytes;
	}
	free(z);
}
69
70 static void unmount_record(void *p)
71 {
72 xlog_op_header_t *op = (xlog_op_header_t *)p;
73 /* the data section must be 32 bit size aligned */
74 struct {
75 __uint16_t magic;
76 __uint16_t pad1;
77 __uint32_t pad2; /* may as well make it 64 bits */
78 } magic = { XLOG_UNMOUNT_TYPE, 0, 0 };
79
80 memset(p, 0, BBSIZE);
81 op->oh_tid = cpu_to_be32(1);
82 op->oh_len = cpu_to_be32(sizeof(magic));
83 op->oh_clientid = XFS_LOG;
84 op->oh_flags = XLOG_UNMOUNT_TRANS;
85 op->oh_res2 = 0;
86
87 /* and the data for this op */
88 memcpy((char *)p + sizeof(xlog_op_header_t), &magic, sizeof(magic));
89 }
90
91 static xfs_caddr_t next(xfs_caddr_t ptr, int offset, void *private)
92 {
93 xfs_buf_t *buf = (xfs_buf_t *)private;
94
95 if (XFS_BUF_COUNT(buf) < (int)(ptr - XFS_BUF_PTR(buf)) + offset)
96 abort();
97 return ptr + offset;
98 }
99
100 int
101 libxfs_log_clear(
102 struct xfs_buftarg *btp,
103 xfs_daddr_t start,
104 uint length,
105 uuid_t *fs_uuid,
106 int version,
107 int sunit,
108 int fmt)
109 {
110 xfs_buf_t *bp;
111 int len;
112
113 if (!btp->dev || !fs_uuid)
114 return -EINVAL;
115
116 /* first zero the log */
117 libxfs_device_zero(btp, start, length);
118
119 /* then write a log record header */
120 len = ((version == 2) && sunit) ? BTOBB(sunit) : 2;
121 len = MAX(len, 2);
122 bp = libxfs_getbufr(btp, start, len);
123 libxfs_log_header(XFS_BUF_PTR(bp),
124 fs_uuid, version, sunit, fmt, next, bp);
125 bp->b_flags |= LIBXFS_B_DIRTY;
126 libxfs_putbufr(bp);
127 return 0;
128 }
129
/*
 * Format a log record header at "caddr", followed by an unmount record
 * and, for v2 logs with a stripe unit, zeroed padding blocks stamped
 * with the record's cycle number.  "nextfunc" yields successive BBSIZE
 * blocks of the output buffer ("private" is passed through to it).
 * Returns the number of bytes formatted.
 */
int
libxfs_log_header(
	xfs_caddr_t		caddr,
	uuid_t			*fs_uuid,
	int			version,
	int			sunit,
	int			fmt,
	libxfs_get_block_t	*nextfunc,
	void			*private)
{
	xlog_rec_header_t	*head = (xlog_rec_header_t *)caddr;
	xfs_caddr_t		p = caddr;
	__be32			cycle_lsn;
	int			i, len;

	/* v2 logs with a stripe unit pad the record out to sunit bytes */
	len = ((version == 2) && sunit) ? BTOBB(sunit) : 1;

	/* note that oh_tid actually contains the cycle number
	 * and the tid is stored in h_cycle_data[0] - that's the
	 * way things end up on disk.
	 */
	memset(p, 0, BBSIZE);
	head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
	head->h_cycle = cpu_to_be32(1);
	head->h_version = cpu_to_be32(version);
	if (len != 1)
		head->h_len = cpu_to_be32(sunit - BBSIZE);
	else
		head->h_len = cpu_to_be32(20);
	head->h_crc = cpu_to_be32(0);
	head->h_prev_block = cpu_to_be32(-1);
	head->h_num_logops = cpu_to_be32(1);
	head->h_cycle_data[0] = cpu_to_be32(0xb0c0d0d0);
	head->h_fmt = cpu_to_be32(fmt);
	head->h_size = cpu_to_be32(XLOG_HEADER_CYCLE_SIZE);

	head->h_lsn = cpu_to_be64(xlog_assign_lsn(1, 0));
	head->h_tail_lsn = cpu_to_be64(xlog_assign_lsn(1, 0));

	memcpy(&head->h_fs_uuid, fs_uuid, sizeof(uuid_t));

	/* second block holds the unmount record */
	len = MAX(len, 2);
	p = nextfunc(p, BBSIZE, private);
	unmount_record(p);

	/* any remaining blocks are zeroed and stamped with the cycle number */
	cycle_lsn = CYCLE_LSN_DISK(head->h_lsn);
	for (i = 2; i < len; i++) {
		p = nextfunc(p, BBSIZE, private);
		memset(p, 0, BBSIZE);
		*(__be32 *)p = cycle_lsn;
	}

	return BBTOB(len);
}
184
185 /*
186 * Simple I/O (buffer cache) interface
187 */
188
189
#ifdef XFS_BUF_TRACING

/*
 * Buffer tracing: wrap each buffer interface so the caller's function,
 * file and line are recorded in the buffer itself.  The libxfs headers
 * normally #define these names to the trace variants below, so undefine
 * them here to prototype and call the real implementations.
 */
#undef libxfs_readbuf
#undef libxfs_readbuf_map
#undef libxfs_writebuf
#undef libxfs_getbuf
#undef libxfs_getbuf_map
#undef libxfs_getbuf_flags
#undef libxfs_putbuf

xfs_buf_t	*libxfs_readbuf(struct xfs_buftarg *, xfs_daddr_t, int, int,
				const struct xfs_buf_ops *);
xfs_buf_t	*libxfs_readbuf_map(struct xfs_buftarg *, struct xfs_buf_map *,
				int, int, const struct xfs_buf_ops *);
int		libxfs_writebuf(xfs_buf_t *, int);
xfs_buf_t	*libxfs_getbuf(struct xfs_buftarg *, xfs_daddr_t, int);
xfs_buf_t	*libxfs_getbuf_map(struct xfs_buftarg *, struct xfs_buf_map *,
				int, int);
xfs_buf_t	*libxfs_getbuf_flags(struct xfs_buftarg *, xfs_daddr_t, int,
				unsigned int);
void		libxfs_putbuf (xfs_buf_t *);

/* record the caller's location in the buffer; no-op on a NULL buffer */
#define	__add_trace(bp, func, file, line)	\
do {						\
	if (bp) {				\
		(bp)->b_func = (func);		\
		(bp)->b_file = (file);		\
		(bp)->b_line = (line);		\
	}					\
} while (0)

/*
 * Each trace wrapper below simply forwards to the real implementation
 * and then tags the resulting buffer with the caller's location.
 */
xfs_buf_t *
libxfs_trace_readbuf(const char *func, const char *file, int line,
		struct xfs_buftarg *btp, xfs_daddr_t blkno, int len, int flags,
		const struct xfs_buf_ops *ops)
{
	xfs_buf_t	*bp = libxfs_readbuf(btp, blkno, len, flags, ops);
	__add_trace(bp, func, file, line);
	return bp;
}

xfs_buf_t *
libxfs_trace_readbuf_map(const char *func, const char *file, int line,
		struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps, int flags,
		const struct xfs_buf_ops *ops)
{
	xfs_buf_t	*bp = libxfs_readbuf_map(btp, map, nmaps, flags, ops);
	__add_trace(bp, func, file, line);
	return bp;
}

int
libxfs_trace_writebuf(const char *func, const char *file, int line, xfs_buf_t *bp, int flags)
{
	__add_trace(bp, func, file, line);
	return libxfs_writebuf(bp, flags);
}

xfs_buf_t *
libxfs_trace_getbuf(const char *func, const char *file, int line,
		struct xfs_buftarg *btp, xfs_daddr_t blkno, int len)
{
	xfs_buf_t	*bp = libxfs_getbuf(btp, blkno, len);
	__add_trace(bp, func, file, line);
	return bp;
}

xfs_buf_t *
libxfs_trace_getbuf_map(const char *func, const char *file, int line,
		struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps,
		int flags)
{
	xfs_buf_t	*bp = libxfs_getbuf_map(btp, map, nmaps, flags);
	__add_trace(bp, func, file, line);
	return bp;
}

xfs_buf_t *
libxfs_trace_getbuf_flags(const char *func, const char *file, int line,
		struct xfs_buftarg *btp, xfs_daddr_t blkno, int len, unsigned int flags)
{
	xfs_buf_t	*bp = libxfs_getbuf_flags(btp, blkno, len, flags);
	__add_trace(bp, func, file, line);
	return bp;
}

void
libxfs_trace_putbuf(const char *func, const char *file, int line, xfs_buf_t *bp)
{
	__add_trace(bp, func, file, line);
	libxfs_putbuf(bp);
}


#endif
285
286
287 xfs_buf_t *
288 libxfs_getsb(xfs_mount_t *mp, int flags)
289 {
290 return libxfs_readbuf(mp->m_ddev_targp, XFS_SB_DADDR,
291 XFS_FSS_TO_BB(mp, 1), flags, &xfs_sb_buf_ops);
292 }
293
kmem_zone_t			*xfs_buf_zone;

/* MRU list of freed buffers, recycled by __libxfs_getbufr() */
static struct cache_mru		xfs_buf_freelist =
	{{&xfs_buf_freelist.cm_list, &xfs_buf_freelist.cm_list},
	 0, PTHREAD_MUTEX_INITIALIZER };

/*
 * The bufkey is used to pass the new buffer information to the cache object
 * allocation routine. Because discontiguous buffers need to pass different
 * information, we need fields to pass that information. However, because the
 * blkno and bblen is needed for the initial cache entry lookup (i.e. for
 * bcompare) the fact that the map/nmaps is non-null to switch to discontiguous
 * buffer initialisation instead of a contiguous buffer.
 */
struct xfs_bufkey {
	struct xfs_buftarg	*buftarg;	/* device the buffer belongs to */
	xfs_daddr_t		blkno;		/* starting block number */
	unsigned int		bblen;		/* total length in basic blocks */
	struct xfs_buf_map	*map;		/* non-NULL => discontiguous */
	int			nmaps;		/* number of entries in map */
};
315
316 /* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
317 #define GOLDEN_RATIO_PRIME 0x9e37fffffffc0001UL
318 #define CACHE_LINE_SIZE 64
319 static unsigned int
320 libxfs_bhash(cache_key_t key, unsigned int hashsize, unsigned int hashshift)
321 {
322 uint64_t hashval = ((struct xfs_bufkey *)key)->blkno;
323 uint64_t tmp;
324
325 tmp = hashval ^ (GOLDEN_RATIO_PRIME + hashval) / CACHE_LINE_SIZE;
326 tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> hashshift);
327 return tmp % hashsize;
328 }
329
/*
 * Cache comparison callback: match a cached buffer against a lookup key.
 * Same device and block number with the same byte length is a hit; same
 * location with a different length purges the stale entry (the caller
 * will then allocate a fresh buffer of the right size).
 */
static int
libxfs_bcompare(struct cache_node *node, cache_key_t key)
{
	struct xfs_buf	*bp = (struct xfs_buf *)node;
	struct xfs_bufkey *bkey = (struct xfs_bufkey *)key;

	if (bp->b_target->dev == bkey->buftarg->dev &&
	    bp->b_bn == bkey->blkno) {
		if (bp->b_bcount == BBTOB(bkey->bblen))
			return CACHE_HIT;
#ifdef IO_BCOMPARE_CHECK
		/* diagnostic: a length mismatch usually means a lookup bug */
		if (!(libxfs_bcache->c_flags & CACHE_MISCOMPARE_PURGE)) {
			fprintf(stderr,
	"%lx: Badness in key lookup (length)\n"
	"bp=(bno 0x%llx, len %u bytes) key=(bno 0x%llx, len %u bytes)\n",
				pthread_self(),
				(unsigned long long)bp->b_bn, (int)bp->b_bcount,
				(unsigned long long)bkey->blkno,
				BBTOB(bkey->bblen));
		}
#endif
		return CACHE_PURGE;
	}
	return CACHE_MISS;
}
355
/*
 * Debug helper: dump a buffer's identity, size, flags and reference count
 * to stderr.
 */
void
libxfs_bprint(xfs_buf_t *bp)
{
	fprintf(stderr, "Buffer 0x%p blkno=%llu bytes=%u flags=0x%x count=%u\n",
		bp, (unsigned long long)bp->b_bn, (unsigned)bp->b_bcount,
		bp->b_flags, bp->b_node.cn_count);
}
363
/*
 * (Re)initialise a buffer for a new identity: reset flags/error, set the
 * location and size, and allocate an aligned data area if the buffer does
 * not already carry one (recycled buffers of matching size keep theirs).
 * Allocation failure is fatal.
 */
static void
__initbuf(xfs_buf_t *bp, struct xfs_buftarg *btp, xfs_daddr_t bno,
		unsigned int bytes)
{
	bp->b_flags = 0;
	bp->b_bn = bno;
	bp->b_bcount = bytes;
	bp->b_length = BTOBB(bytes);
	bp->b_target = btp;
	bp->b_error = 0;
	if (!bp->b_addr)
		bp->b_addr = memalign(libxfs_device_alignment(), bytes);
	if (!bp->b_addr) {
		fprintf(stderr,
			_("%s: %s can't memalign %u bytes: %s\n"),
			progname, __FUNCTION__, bytes,
			strerror(errno));
		exit(1);
	}
#ifdef XFS_BUF_TRACING
	list_head_init(&bp->b_lock_list);
#endif
	pthread_mutex_init(&bp->b_lock, NULL);
	bp->b_holder = 0;
	bp->b_recur = 0;
	bp->b_ops = NULL;
}
391
/* Initialise a contiguous buffer; thin wrapper around __initbuf(). */
static void
libxfs_initbuf(xfs_buf_t *bp, struct xfs_buftarg *btp, xfs_daddr_t bno,
		unsigned int bytes)
{
	__initbuf(bp, btp, bno, bytes);
}
398
399 static void
400 libxfs_initbuf_map(xfs_buf_t *bp, struct xfs_buftarg *btp,
401 struct xfs_buf_map *map, int nmaps)
402 {
403 unsigned int bytes = 0;
404 int i;
405
406 bytes = sizeof(struct xfs_buf_map) * nmaps;
407 bp->b_map = malloc(bytes);
408 if (!bp->b_map) {
409 fprintf(stderr,
410 _("%s: %s can't malloc %u bytes: %s\n"),
411 progname, __FUNCTION__, bytes,
412 strerror(errno));
413 exit(1);
414 }
415 bp->b_nmaps = nmaps;
416
417 bytes = 0;
418 for ( i = 0; i < nmaps; i++) {
419 bp->b_map[i].bm_bn = map[i].bm_bn;
420 bp->b_map[i].bm_len = map[i].bm_len;
421 bytes += BBTOB(map[i].bm_len);
422 }
423
424 __initbuf(bp, btp, map[0].bm_bn, bytes);
425 bp->b_flags |= LIBXFS_B_DISCONTIG;
426 }
427
/*
 * Get a raw buffer structure of "blen" data bytes, preferring recycled
 * buffers from the freelist.  An exact-size match keeps its data area;
 * otherwise the oldest freelist buffer is stripped of its data/map so
 * __initbuf() will allocate fresh storage.  Falls back to zone allocation
 * when the freelist is empty.
 */
xfs_buf_t *
__libxfs_getbufr(int blen)
{
	xfs_buf_t	*bp;

	/*
	 * first look for a buffer that can be used as-is,
	 * if one cannot be found, see if there is a buffer,
	 * and if so, free its buffer and set b_addr to NULL
	 * before calling libxfs_initbuf.
	 */
	pthread_mutex_lock(&xfs_buf_freelist.cm_mutex);
	if (!list_empty(&xfs_buf_freelist.cm_list)) {
		list_for_each_entry(bp, &xfs_buf_freelist.cm_list, b_node.cn_mru) {
			if (bp->b_bcount == blen) {
				list_del_init(&bp->b_node.cn_mru);
				break;
			}
		}
		/*
		 * list_for_each_entry leaves bp pointing at the list head
		 * when no entry matched; detect that and recycle the first
		 * buffer, discarding its (wrong-sized) data and map.
		 */
		if (&bp->b_node.cn_mru == &xfs_buf_freelist.cm_list) {
			bp = list_entry(xfs_buf_freelist.cm_list.next,
					xfs_buf_t, b_node.cn_mru);
			list_del_init(&bp->b_node.cn_mru);
			free(bp->b_addr);
			bp->b_addr = NULL;
			free(bp->b_map);
			bp->b_map = NULL;
		}
	} else
		bp = kmem_zone_zalloc(xfs_buf_zone, 0);
	pthread_mutex_unlock(&xfs_buf_freelist.cm_mutex);
	bp->b_ops = NULL;

	return bp;
}
463
464 xfs_buf_t *
465 libxfs_getbufr(struct xfs_buftarg *btp, xfs_daddr_t blkno, int bblen)
466 {
467 xfs_buf_t *bp;
468 int blen = BBTOB(bblen);
469
470 bp =__libxfs_getbufr(blen);
471 if (bp)
472 libxfs_initbuf(bp, btp, blkno, blen);
473 #ifdef IO_DEBUG
474 printf("%lx: %s: allocated %u bytes buffer, key=0x%llx(0x%llx), %p\n",
475 pthread_self(), __FUNCTION__, blen,
476 (long long)LIBXFS_BBTOOFF64(blkno), (long long)blkno, bp);
477 #endif
478
479 return bp;
480 }
481
/*
 * Allocate (or recycle) a raw, uncached discontiguous buffer described
 * by "map"/"nmaps".  "blkno" must equal the first map entry's block
 * number and "bblen" the total length; inconsistent arguments are fatal
 * since they indicate a caller bug.
 */
xfs_buf_t *
libxfs_getbufr_map(struct xfs_buftarg *btp, xfs_daddr_t blkno, int bblen,
		struct xfs_buf_map *map, int nmaps)
{
	xfs_buf_t	*bp;
	int		blen = BBTOB(bblen);

	if (!map || !nmaps) {
		fprintf(stderr,
			_("%s: %s invalid map %p or nmaps %d\n"),
			progname, __FUNCTION__, map, nmaps);
		exit(1);
	}

	if (blkno != map[0].bm_bn) {
		fprintf(stderr,
			_("%s: %s map blkno 0x%llx doesn't match key 0x%llx\n"),
			progname, __FUNCTION__, (long long)map[0].bm_bn,
			(long long)blkno);
		exit(1);
	}

	bp =__libxfs_getbufr(blen);
	if (bp)
		libxfs_initbuf_map(bp, btp, map, nmaps);
#ifdef IO_DEBUG
	printf("%lx: %s: allocated %u bytes buffer, key=0x%llx(0x%llx), %p\n",
		pthread_self(), __FUNCTION__, blen,
		(long long)LIBXFS_BBTOOFF64(blkno), (long long)blkno, bp);
#endif

	return bp;
}
515
#ifdef XFS_BUF_TRACING
/* list and count of currently-locked buffers, for trace diagnostics */
struct list_head	lock_buf_list = {&lock_buf_list, &lock_buf_list};
int			lock_buf_count = 0;
#endif

/* set by init code when callers want real per-buffer locking */
extern int		use_xfs_buf_lock;
522
/*
 * Look up (or create, via libxfs_balloc) a buffer in the cache for "key".
 * If buffer locking is enabled, the returned buffer is locked; recursive
 * locking by the same thread is tolerated with a warning (b_recur counts
 * the extra acquisitions).  With LIBXFS_GETBUF_TRYLOCK a contended buffer
 * is released and NULL returned.  The node's cache priority is boosted on
 * every hit so prefetched buffers age out preferentially.
 */
static struct xfs_buf *
__cache_lookup(struct xfs_bufkey *key, unsigned int flags)
{
	struct xfs_buf	*bp;

	cache_node_get(libxfs_bcache, key, (struct cache_node **)&bp);
	if (!bp)
		return NULL;

	if (use_xfs_buf_lock) {
		int ret;

		ret = pthread_mutex_trylock(&bp->b_lock);
		if (ret) {
			ASSERT(ret == EAGAIN);
			if (flags & LIBXFS_GETBUF_TRYLOCK)
				goto out_put;

			if (pthread_equal(bp->b_holder, pthread_self())) {
				/* same thread already holds it - recurse */
				fprintf(stderr,
	_("Warning: recursive buffer locking at block %" PRIu64 " detected\n"),
					key->blkno);
				bp->b_recur++;
				return bp;
			} else {
				/* another thread holds it - block until free */
				pthread_mutex_lock(&bp->b_lock);
			}
		}

		bp->b_holder = pthread_self();
	}

	/* raise the node's priority so it outlives pure prefetches */
	cache_node_set_priority(libxfs_bcache, (struct cache_node *)bp,
		cache_node_get_priority((struct cache_node *)bp) -
						CACHE_PREFETCH_PRIORITY);
#ifdef XFS_BUF_TRACING
	pthread_mutex_lock(&libxfs_bcache->c_mutex);
	lock_buf_count++;
	list_add(&bp->b_lock_list, &lock_buf_list);
	pthread_mutex_unlock(&libxfs_bcache->c_mutex);
#endif
#ifdef IO_DEBUG
	printf("%lx %s: hit buffer %p for bno = 0x%llx/0x%llx\n",
		pthread_self(), __FUNCTION__,
		bp, bp->b_bn, (long long)LIBXFS_BBTOOFF64(key->blkno));
#endif

	return bp;
out_put:
	cache_node_put(libxfs_bcache, (struct cache_node *)bp);
	return NULL;
}
575
576 struct xfs_buf *
577 libxfs_getbuf_flags(struct xfs_buftarg *btp, xfs_daddr_t blkno, int len,
578 unsigned int flags)
579 {
580 struct xfs_bufkey key = {0};
581
582 key.buftarg = btp;
583 key.blkno = blkno;
584 key.bblen = len;
585
586 return __cache_lookup(&key, flags);
587 }
588
/* Look up (or create) a contiguous cached buffer with default flags. */
struct xfs_buf *
libxfs_getbuf(struct xfs_buftarg *btp, xfs_daddr_t blkno, int len)
{
	return libxfs_getbuf_flags(btp, blkno, len, 0);
}
594
595 struct xfs_buf *
596 libxfs_getbuf_map(struct xfs_buftarg *btp, struct xfs_buf_map *map,
597 int nmaps, int flags)
598 {
599 struct xfs_bufkey key = {0};
600 int i;
601
602 if (nmaps == 1)
603 return libxfs_getbuf_flags(btp, map[0].bm_bn, map[0].bm_len,
604 flags);
605
606 key.buftarg = btp;
607 key.blkno = map[0].bm_bn;
608 for (i = 0; i < nmaps; i++) {
609 key.bblen += map[i].bm_len;
610 }
611 key.map = map;
612 key.nmaps = nmaps;
613
614 return __cache_lookup(&key, flags);
615 }
616
/*
 * Release a cached buffer: drop the buffer lock (or just decrement the
 * recursion count if this thread locked it more than once) and put the
 * cache node reference.
 */
void
libxfs_putbuf(xfs_buf_t *bp)
{
#ifdef XFS_BUF_TRACING
	pthread_mutex_lock(&libxfs_bcache->c_mutex);
	lock_buf_count--;
	ASSERT(lock_buf_count >= 0);
	list_del_init(&bp->b_lock_list);
	pthread_mutex_unlock(&libxfs_bcache->c_mutex);
#endif
	if (use_xfs_buf_lock) {
		if (bp->b_recur) {
			/* recursive acquisition - only drop the count */
			bp->b_recur--;
		} else {
			bp->b_holder = 0;
			pthread_mutex_unlock(&bp->b_lock);
		}
	}
	cache_node_put(libxfs_bcache, (struct cache_node *)bp);
}
637
638 void
639 libxfs_purgebuf(xfs_buf_t *bp)
640 {
641 struct xfs_bufkey key = {0};
642
643 key.buftarg = bp->b_target;
644 key.blkno = bp->b_bn;
645 key.bblen = bp->b_length;
646
647 cache_node_purge(libxfs_bcache, &key, (struct cache_node *)bp);
648 }
649
650 static struct cache_node *
651 libxfs_balloc(cache_key_t key)
652 {
653 struct xfs_bufkey *bufkey = (struct xfs_bufkey *)key;
654
655 if (bufkey->map)
656 return (struct cache_node *)
657 libxfs_getbufr_map(bufkey->buftarg,
658 bufkey->blkno, bufkey->bblen,
659 bufkey->map, bufkey->nmaps);
660 return (struct cache_node *)libxfs_getbufr(bufkey->buftarg,
661 bufkey->blkno, bufkey->bblen);
662 }
663
664
665 static int
666 __read_buf(int fd, void *buf, int len, off64_t offset, int flags)
667 {
668 int sts;
669
670 sts = pread64(fd, buf, len, offset);
671 if (sts < 0) {
672 int error = errno;
673 fprintf(stderr, _("%s: read failed: %s\n"),
674 progname, strerror(error));
675 if (flags & LIBXFS_EXIT_ON_FAILURE)
676 exit(1);
677 return error;
678 } else if (sts != len) {
679 fprintf(stderr, _("%s: error - read only %d of %d bytes\n"),
680 progname, sts, len);
681 if (flags & LIBXFS_EXIT_ON_FAILURE)
682 exit(1);
683 return EIO;
684 }
685 return 0;
686 }
687
/*
 * Physically read "len" basic blocks at "blkno" into an existing buffer.
 * The buffer is only marked up to date when the read succeeded AND the
 * buffer's identity still matches the request (guards against concurrent
 * recycling).  Returns 0 or a positive errno.
 */
int
libxfs_readbufr(struct xfs_buftarg *btp, xfs_daddr_t blkno, xfs_buf_t *bp,
		int len, int flags)
{
	int	fd = libxfs_device_to_fd(btp->dev);
	int	bytes = BBTOB(len);
	int	error;

	ASSERT(BBTOB(len) <= bp->b_bcount);

	error = __read_buf(fd, bp->b_addr, bytes, LIBXFS_BBTOOFF64(blkno), flags);
	if (!error &&
	    bp->b_target->dev == btp->dev &&
	    bp->b_bn == blkno &&
	    bp->b_bcount == bytes)
		bp->b_flags |= LIBXFS_B_UPTODATE;
#ifdef IO_DEBUG
	printf("%lx: %s: read %u bytes, error %d, blkno=0x%llx(0x%llx), %p\n",
		pthread_self(), __FUNCTION__, bytes, error,
		(long long)LIBXFS_BBTOOFF64(blkno), (long long)blkno, bp);
#endif
	return error;
}
711
712 void
713 libxfs_readbuf_verify(struct xfs_buf *bp, const struct xfs_buf_ops *ops)
714 {
715 if (!ops)
716 return;
717 bp->b_ops = ops;
718 bp->b_ops->verify_read(bp);
719 bp->b_flags &= ~LIBXFS_B_UNCHECKED;
720 }
721
722
/*
 * Get a cached buffer for "blkno"/"len" and ensure its contents are read
 * and verified.  Cached up-to-date or dirty buffers are returned without
 * re-reading (verifying them first if still unchecked); otherwise the
 * data is read from disk and run through "ops".  On error the buffer is
 * still returned with bp->b_error set.
 */
xfs_buf_t *
libxfs_readbuf(struct xfs_buftarg *btp, xfs_daddr_t blkno, int len, int flags,
		const struct xfs_buf_ops *ops)
{
	xfs_buf_t	*bp;
	int		error;

	bp = libxfs_getbuf(btp, blkno, len);
	if (!bp)
		return NULL;

	/*
	 * if the buffer was prefetched, it is likely that it was not validated.
	 * Hence if we are supplied an ops function and the buffer is marked as
	 * unchecked, we need to validate it now.
	 *
	 * We do this verification even if the buffer is dirty - the
	 * verification is almost certainly going to fail the CRC check in this
	 * case as a dirty buffer has not had the CRC recalculated. However, we
	 * should not be dirtying unchecked buffers and therefore failing it
	 * here because it's dirty and unchecked indicates we've screwed up
	 * somewhere else.
	 */
	bp->b_error = 0;
	if ((bp->b_flags & (LIBXFS_B_UPTODATE|LIBXFS_B_DIRTY))) {
		if (bp->b_flags & LIBXFS_B_UNCHECKED)
			libxfs_readbuf_verify(bp, ops);
		return bp;
	}

	/*
	 * Set the ops on a cache miss (i.e. first physical read) as the
	 * verifier may change the ops to match the type of buffer it contains.
	 * A cache hit might reset the verifier to the original type if we set
	 * it again, but it won't get called again and set to match the buffer
	 * contents. *cough* xfs_da_node_buf_ops *cough*.
	 */
	error = libxfs_readbufr(btp, blkno, bp, len, flags);
	if (error)
		bp->b_error = error;
	else
		libxfs_readbuf_verify(bp, ops);
	return bp;
}
767
768 int
769 libxfs_readbufr_map(struct xfs_buftarg *btp, struct xfs_buf *bp, int flags)
770 {
771 int fd;
772 int error = 0;
773 char *buf;
774 int i;
775
776 fd = libxfs_device_to_fd(btp->dev);
777 buf = bp->b_addr;
778 for (i = 0; i < bp->b_nmaps; i++) {
779 off64_t offset = LIBXFS_BBTOOFF64(bp->b_map[i].bm_bn);
780 int len = BBTOB(bp->b_map[i].bm_len);
781
782 error = __read_buf(fd, buf, len, offset, flags);
783 if (error) {
784 bp->b_error = error;
785 break;
786 }
787 buf += len;
788 }
789
790 if (!error)
791 bp->b_flags |= LIBXFS_B_UPTODATE;
792 #ifdef IO_DEBUG
793 printf("%lx: %s: read %u bytes, error %d, blkno=0x%llx(0x%llx), %p\n",
794 pthread_self(), __FUNCTION__, , error,
795 (long long)LIBXFS_BBTOOFF64(blkno), (long long)blkno, bp);
796 #endif
797 return error;
798 }
799
800 struct xfs_buf *
801 libxfs_readbuf_map(struct xfs_buftarg *btp, struct xfs_buf_map *map, int nmaps,
802 int flags, const struct xfs_buf_ops *ops)
803 {
804 struct xfs_buf *bp;
805 int error = 0;
806
807 if (nmaps == 1)
808 return libxfs_readbuf(btp, map[0].bm_bn, map[0].bm_len,
809 flags, ops);
810
811 bp = libxfs_getbuf_map(btp, map, nmaps, 0);
812 if (!bp)
813 return NULL;
814
815 bp->b_error = 0;
816 if ((bp->b_flags & (LIBXFS_B_UPTODATE|LIBXFS_B_DIRTY))) {
817 if (bp->b_flags & LIBXFS_B_UNCHECKED)
818 libxfs_readbuf_verify(bp, ops);
819 return bp;
820 }
821 error = libxfs_readbufr_map(btp, bp, flags);
822 if (!error)
823 libxfs_readbuf_verify(bp, ops);
824
825 #ifdef IO_DEBUG
826 printf("%lx: %s: read %lu bytes, error %d, blkno=%llu(%llu), %p\n",
827 pthread_self(), __FUNCTION__, buf - (char *)bp->b_addr, error,
828 (long long)LIBXFS_BBTOOFF64(bp->b_bn), (long long)bp->b_bn, bp);
829 #endif
830 return bp;
831 }
832
833 static int
834 __write_buf(int fd, void *buf, int len, off64_t offset, int flags)
835 {
836 int sts;
837
838 sts = pwrite64(fd, buf, len, offset);
839 if (sts < 0) {
840 int error = errno;
841 fprintf(stderr, _("%s: pwrite64 failed: %s\n"),
842 progname, strerror(error));
843 if (flags & LIBXFS_B_EXIT)
844 exit(1);
845 return error;
846 } else if (sts != len) {
847 fprintf(stderr, _("%s: error - pwrite64 only %d of %d bytes\n"),
848 progname, sts, len);
849 if (flags & LIBXFS_B_EXIT)
850 exit(1);
851 return EIO;
852 }
853 return 0;
854 }
855
856 int
857 libxfs_writebufr(xfs_buf_t *bp)
858 {
859 int fd = libxfs_device_to_fd(bp->b_target->dev);
860 int error = 0;
861
862 /*
863 * we never write buffers that are marked stale. This indicates they
864 * contain data that has been invalidated, and even if the buffer is
865 * dirty it must *never* be written. Verifiers are wonderful for finding
866 * bugs like this. Make sure the error is obvious as to the cause.
867 */
868 if (bp->b_flags & LIBXFS_B_STALE) {
869 bp->b_error = ESTALE;
870 return bp->b_error;
871 }
872
873 /*
874 * clear any pre-existing error status on the buffer. This can occur if
875 * the buffer is corrupt on disk and the repair process doesn't clear
876 * the error before fixing and writing it back.
877 */
878 bp->b_error = 0;
879 if (bp->b_ops) {
880 bp->b_ops->verify_write(bp);
881 if (bp->b_error) {
882 fprintf(stderr,
883 _("%s: write verifer failed on bno 0x%llx/0x%x\n"),
884 __func__, (long long)bp->b_bn, bp->b_bcount);
885 return bp->b_error;
886 }
887 }
888
889 if (!(bp->b_flags & LIBXFS_B_DISCONTIG)) {
890 error = __write_buf(fd, bp->b_addr, bp->b_bcount,
891 LIBXFS_BBTOOFF64(bp->b_bn), bp->b_flags);
892 } else {
893 int i;
894 char *buf = bp->b_addr;
895
896 for (i = 0; i < bp->b_nmaps; i++) {
897 off64_t offset = LIBXFS_BBTOOFF64(bp->b_map[i].bm_bn);
898 int len = BBTOB(bp->b_map[i].bm_len);
899
900 error = __write_buf(fd, buf, len, offset, bp->b_flags);
901 if (error) {
902 bp->b_error = error;
903 break;
904 }
905 buf += len;
906 }
907 }
908
909 #ifdef IO_DEBUG
910 printf("%lx: %s: wrote %u bytes, blkno=%llu(%llu), %p\n",
911 pthread_self(), __FUNCTION__, bp->b_bcount,
912 (long long)LIBXFS_BBTOOFF64(bp->b_bn),
913 (long long)bp->b_bn, bp);
914 #endif
915 if (!error) {
916 bp->b_flags |= LIBXFS_B_UPTODATE;
917 bp->b_flags &= ~(LIBXFS_B_DIRTY | LIBXFS_B_EXIT |
918 LIBXFS_B_UNCHECKED);
919 }
920 return error;
921 }
922
/*
 * Mark a buffer dirty (with extra write flags) without releasing it.
 * The actual write happens later, when the buffer is flushed or
 * reclaimed from the cache.  Always returns 0.
 */
int
libxfs_writebuf_int(xfs_buf_t *bp, int flags)
{
	/*
	 * Clear any error hanging over from reading the buffer. This prevents
	 * subsequent reads after this write from seeing stale errors.
	 */
	bp->b_error = 0;
	bp->b_flags |= (LIBXFS_B_DIRTY | flags);
	return 0;
}
934
/*
 * Mark a buffer dirty (with extra write flags) and release it back to
 * the cache; the data reaches disk when the buffer is flushed or
 * reclaimed.  Always returns 0.
 */
int
libxfs_writebuf(xfs_buf_t *bp, int flags)
{
#ifdef IO_DEBUG
	printf("%lx: %s: dirty blkno=%llu(%llu)\n",
			pthread_self(), __FUNCTION__,
			(long long)LIBXFS_BBTOOFF64(bp->b_bn),
			(long long)bp->b_bn);
#endif
	/*
	 * Clear any error hanging over from reading the buffer. This prevents
	 * subsequent reads after this write from seeing stale errors.
	 */
	bp->b_error = 0;
	bp->b_flags |= (LIBXFS_B_DIRTY | flags);
	libxfs_putbuf(bp);
	return 0;
}
953
954 void
955 libxfs_iomove(xfs_buf_t *bp, uint boff, int len, void *data, int flags)
956 {
957 #ifdef IO_DEBUG
958 if (boff + len > bp->b_bcount) {
959 printf("Badness, iomove out of range!\n"
960 "bp=(bno 0x%llx, bytes %u) range=(boff %u, bytes %u)\n",
961 (long long)bp->b_bn, bp->b_bcount, boff, len);
962 abort();
963 }
964 #endif
965 switch (flags) {
966 case LIBXFS_BZERO:
967 memset(bp->b_addr + boff, 0, len);
968 break;
969 case LIBXFS_BREAD:
970 memcpy(data, bp->b_addr + boff, len);
971 break;
972 case LIBXFS_BWRITE:
973 memcpy(bp->b_addr + boff, data, len);
974 break;
975 }
976 }
977
978 static void
979 libxfs_brelse(struct cache_node *node)
980 {
981 xfs_buf_t *bp = (xfs_buf_t *)node;
982
983 if (bp != NULL) {
984 if (bp->b_flags & LIBXFS_B_DIRTY)
985 libxfs_writebufr(bp);
986 pthread_mutex_lock(&xfs_buf_freelist.cm_mutex);
987 list_add(&bp->b_node.cn_mru, &xfs_buf_freelist.cm_list);
988 pthread_mutex_unlock(&xfs_buf_freelist.cm_mutex);
989 }
990 }
991
992 static unsigned int
993 libxfs_bulkrelse(
994 struct cache *cache,
995 struct list_head *list)
996 {
997 xfs_buf_t *bp;
998 int count = 0;
999
1000 if (list_empty(list))
1001 return 0 ;
1002
1003 list_for_each_entry(bp, list, b_node.cn_mru) {
1004 if (bp->b_flags & LIBXFS_B_DIRTY)
1005 libxfs_writebufr(bp);
1006 count++;
1007 }
1008
1009 pthread_mutex_lock(&xfs_buf_freelist.cm_mutex);
1010 __list_splice(list, &xfs_buf_freelist.cm_list);
1011 pthread_mutex_unlock(&xfs_buf_freelist.cm_mutex);
1012
1013 return count;
1014 }
1015
1016 static void
1017 libxfs_bflush(struct cache_node *node)
1018 {
1019 xfs_buf_t *bp = (xfs_buf_t *)node;
1020
1021 if ((bp != NULL) && (bp->b_flags & LIBXFS_B_DIRTY))
1022 libxfs_writebufr(bp);
1023 }
1024
/*
 * Release a raw (uncached) buffer obtained from libxfs_getbufr():
 * flushes it if dirty and returns it to the freelist.
 */
void
libxfs_putbufr(xfs_buf_t *bp)
{
	libxfs_brelse((struct cache_node *)bp);
}
1030
1031
/* Drop every buffer from the global buffer cache. */
void
libxfs_bcache_purge(void)
{
	cache_purge(libxfs_bcache);
}
1037
/* Write out all dirty buffers in the global buffer cache. */
void
libxfs_bcache_flush(void)
{
	cache_flush(libxfs_bcache);
}
1043
/* Report whether the global buffer cache has exceeded its size target. */
int
libxfs_bcache_overflowed(void)
{
	return cache_overflowed(libxfs_bcache);
}
1049
/* Callback table wiring the buffer routines into the generic cache. */
struct cache_operations libxfs_bcache_operations = {
	/* .hash */	libxfs_bhash,
	/* .alloc */	libxfs_balloc,
	/* .flush */	libxfs_bflush,
	/* .relse */	libxfs_brelse,
	/* .compare */	libxfs_bcompare,
	/* .bulkrelse */libxfs_bulkrelse
};
1058
1059
1060 /*
1061 * Inode cache stubs.
1062 */
1063
/* allocation zones for in-core inodes and their log items, defined elsewhere */
extern kmem_zone_t	*xfs_ili_zone;
extern kmem_zone_t	*xfs_inode_zone;
1066
1067 int
1068 libxfs_iget(xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint lock_flags,
1069 xfs_inode_t **ipp, xfs_daddr_t bno)
1070 {
1071 xfs_inode_t *ip;
1072 int error = 0;
1073
1074 ip = kmem_zone_zalloc(xfs_inode_zone, 0);
1075 if (!ip)
1076 return ENOMEM;
1077
1078 ip->i_ino = ino;
1079 ip->i_mount = mp;
1080 error = xfs_iread(mp, tp, ip, bno);
1081 if (error) {
1082 kmem_zone_free(xfs_inode_zone, ip);
1083 *ipp = NULL;
1084 return error;
1085 }
1086
1087 *ipp = ip;
1088 return 0;
1089 }
1090
1091 static void
1092 libxfs_idestroy(xfs_inode_t *ip)
1093 {
1094 switch (ip->i_d.di_mode & S_IFMT) {
1095 case S_IFREG:
1096 case S_IFDIR:
1097 case S_IFLNK:
1098 libxfs_idestroy_fork(ip, XFS_DATA_FORK);
1099 break;
1100 }
1101 if (ip->i_afp)
1102 libxfs_idestroy_fork(ip, XFS_ATTR_FORK);
1103 }
1104
1105 void
1106 libxfs_iput(xfs_inode_t *ip)
1107 {
1108 if (ip->i_itemp)
1109 kmem_zone_free(xfs_ili_zone, ip->i_itemp);
1110 ip->i_itemp = NULL;
1111 libxfs_idestroy(ip);
1112 kmem_zone_free(xfs_inode_zone, ip);
1113 }