// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */

/*
 * This header is effectively a "namespace multiplexor" for the
 * user level XFS code. It provides all of the necessary stuff
 * such that we can build some parts of the XFS kernel code in
 * user space in a controlled fashion, and translates the names
 * used in the kernel into the names which libxfs is going to
 * make available to user tools.
 *
 * It should only ever be #include'd by XFS "kernel" code being
 * compiled in user space.
 *
 * Our goals here are to...
 * o "share" large amounts of complex code between user and
 *   kernel space;
 * o shield the user tools from changes in the bleeding
 *   edge kernel code, merging source changes when
 *   convenient and not immediately (no symlinks);
 * o i.e. be able to merge changes to the kernel source back
 *   into the affected user tools in a controlled fashion;
 * o provide a _minimalist_ life-support system for kernel
 *   code in user land, not the "everything + the kitchen
 *   sink" model which libsim had mutated into;
 * o allow the kernel code to be completely free of code
 *   specifically there to support the user level build.
 */

/*
 * define a guard and something we can check to determine what include context
 * we are running from.
 */
#ifndef __LIBXFS_INTERNAL_XFS_H__
#define __LIBXFS_INTERNAL_XFS_H__

#include "libxfs_api_defs.h"
#include "platform_defs.h"
#include "xfs.h"

#include "list.h"
#include "hlist.h"
#include "cache.h"
#include "bitops.h"
#include "kmem.h"
#include "libfrog/radix-tree.h"
#include "atomic.h"

#include "xfs_types.h"
#include "xfs_arch.h"

#include "xfs_fs.h"
#include "libfrog/crc32c.h"

/* Zones used in libxfs allocations that aren't in shared header files */
extern kmem_zone_t *xfs_buf_item_zone;
extern kmem_zone_t *xfs_ili_zone;
extern kmem_zone_t *xfs_buf_zone;
extern kmem_zone_t *xfs_inode_zone;
extern kmem_zone_t *xfs_trans_zone;

#define timespec64 timespec

/* fake up iomap, (not) used in xfs_bmap.[ch] */
#define IOMAP_F_SHARED 0x04
#define xfs_bmbt_to_iomap(a, b, c, d) ((void) 0)

/* CRC stuff, buffer API dependent on it */
#define crc32c(c,p,l) crc32c_le((c),(unsigned char const *)(p),(l))

/* fake up kernel's iomap, (not) used in xfs_bmap.[ch] */
struct iomap;

#include "xfs_cksum.h"

/*
 * This mirrors the kernel include for xfs_buf.h - it's implicitly included in
 * every file via a similar include in the kernel xfs_linux.h.
 */
#include "libxfs_io.h"

/* for all the support code that uses progname in error messages */
extern char *progname;

#undef ASSERT
#define ASSERT(ex) assert(ex)

/*
 * We have no need for the "linux" dev_t in userspace, so these
 * are no-ops, and an xfs_dev_t is stored in VFS_I(ip)->i_rdev
 */
#define xfs_to_linux_dev_t(dev) dev
#define linux_to_xfs_dev_t(dev) dev

#ifndef EWRONGFS
#define EWRONGFS EINVAL
#endif

#define xfs_error_level 0

#define STATIC static

/*
 * Starting in Linux 4.15, the %p (raw pointer value) printk modifier
 * prints a hashed version of the pointer to avoid leaking kernel
 * pointers into dmesg. If we're trying to debug the kernel we want the
 * raw values, so override this behavior as best we can.
 *
 * In userspace we don't have this problem.
 */
#define PTR_FMT "%p"
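
/*
 * Usage sketch (illustrative only): PTR_FMT is pasted into format strings
 * wherever the shared code would print a raw pointer, e.g.
 *
 *        xfs_warn(mp, "bad buffer " PTR_FMT " at daddr %lld",
 *                        bp, (long long)blkno);
 *
 * where mp, bp and blkno stand in for whatever the caller has at hand.
 */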

#define XFS_IGET_CREATE 0x1
#define XFS_IGET_UNTRUSTED 0x2

extern void cmn_err(int, char *, ...);
enum ce { CE_DEBUG, CE_CONT, CE_NOTE, CE_WARN, CE_ALERT, CE_PANIC };

#define xfs_notice(mp,fmt,args...) cmn_err(CE_NOTE,fmt, ## args)
#define xfs_warn(mp,fmt,args...) cmn_err(CE_WARN,fmt, ## args)
#define xfs_err(mp,fmt,args...) cmn_err(CE_ALERT,fmt, ## args)
#define xfs_alert(mp,fmt,args...) cmn_err(CE_ALERT,fmt, ## args)

#define xfs_buf_ioerror_alert(bp,f) ((void) 0);

#define xfs_hex_dump(d,n) ((void) 0)
#define xfs_stack_trace() ((void) 0)


#define xfs_force_shutdown(d,n) ((void) 0)
#define xfs_mod_delalloc(a,b) ((void) 0)

/* stop unused var warnings by assigning mp to itself */

#define xfs_corruption_error(e,l,mp,b,sz,fi,ln,fa) do { \
        (mp) = (mp); \
        cmn_err(CE_ALERT, "%s: XFS_CORRUPTION_ERROR", (e)); \
} while (0)

#define XFS_CORRUPTION_ERROR(e, lvl, mp, buf, bufsize) do { \
        (mp) = (mp); \
        cmn_err(CE_ALERT, "%s: XFS_CORRUPTION_ERROR", (e)); \
} while (0)

#define XFS_ERROR_REPORT(e,l,mp) do { \
        (mp) = (mp); \
        cmn_err(CE_ALERT, "%s: XFS_ERROR_REPORT", (e)); \
} while (0)

#define XFS_WARN_CORRUPT(mp, expr) \
        ( ((mp)->m_flags & LIBXFS_MOUNT_WANT_CORRUPTED) ? \
           (printf("%s: XFS_WARN_CORRUPT at %s:%d", #expr, \
                   __func__, __LINE__), true) : true)

#define XFS_IS_CORRUPT(mp, expr) \
        (unlikely(expr) ? XFS_WARN_CORRUPT((mp), (expr)) : false)

#define XFS_ERRLEVEL_LOW 1
#define XFS_FORCED_SHUTDOWN(mp) 0
#define XFS_ILOCK_EXCL 0
#define XFS_STATS_INC(mp, count) do { (mp) = (mp); } while (0)
#define XFS_STATS_DEC(mp, count, x) do { (mp) = (mp); } while (0)
#define XFS_STATS_ADD(mp, count, x) do { (mp) = (mp); } while (0)
#define XFS_TEST_ERROR(expr,a,b) ( expr )

#ifdef __GNUC__
#define __return_address __builtin_return_address(0)

/*
 * Return the address of a label. Use barrier() so that the optimizer
 * won't reorder code to refactor the error jumpouts into a single
 * return, which throws off the reported address.
 */
#define __this_address ({ __label__ __here; __here: barrier(); &&__here; })
/* Optimization barrier */

/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
#endif

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
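
/*
 * Usage sketch (illustrative only): shared verifier code typically records
 * the failing location with __this_address and passes it on, roughly:
 *
 *        xfs_failaddr_t fa = __this_address;
 *        ...
 *        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
 *
 * __return_address is used the same way when the caller's location is the
 * interesting one.
 */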

/* miscellaneous kernel routines not in user space */
#define spin_lock_init(a) ((void) 0)
#define spin_lock(a) ((void) 0)
#define spin_unlock(a) ((void) 0)
#define likely(x) (x)
#define unlikely(x) (x)
#define rcu_read_lock() ((void) 0)
#define rcu_read_unlock() ((void) 0)
/* Need to be able to handle this bare or in control flow */
static inline bool WARN_ON(bool expr) {
        return (expr);
}

#define WARN_ON_ONCE(e) WARN_ON(e)
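
/*
 * Usage sketch (illustrative only; "cond" is a stand-in) showing the two
 * call styles the WARN_ON() comment above refers to:
 *
 *        WARN_ON(cond);
 *        if (WARN_ON(cond))
 *                return -EFSCORRUPTED;
 */
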
#define percpu_counter_read(x) (*x)
#define percpu_counter_read_positive(x) ((*x) > 0 ? (*x) : 0)
#define percpu_counter_sum(x) (*x)

#define READ_ONCE(x) (x)
#define WRITE_ONCE(x, val) ((x) = (val))

/*
 * prandom_u32 is used to set di_gen during inode allocation; it must be zero
 * in libxfs or all sorts of badness can occur!
 */
#define prandom_u32() 0

#define PAGE_SIZE getpagesize()

#define inode_peek_iversion(inode) (inode)->i_version
#define inode_set_iversion_queried(inode, version) do { \
        (inode)->i_version = (version); \
} while (0)

static inline int __do_div(unsigned long long *n, unsigned base)
{
        int __res;
        __res = (int)(((unsigned long) *n) % (unsigned) base);
        *n = ((unsigned long) *n) / (unsigned) base;
        return __res;
}

#define do_div(n,base) (__do_div((unsigned long long *)&(n), (base)))
#define do_mod(a, b) ((a) % (b))
#define rol32(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
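
/*
 * Usage sketch (illustrative only): as in the kernel, do_div() divides its
 * first argument in place and evaluates to the remainder:
 *
 *        uint64_t bytes = 1000;
 *        uint32_t rem = do_div(bytes, 512);
 *        // bytes is now 1, rem is 488
 *
 * roundup_64() below relies on exactly this behaviour.
 */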

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
        *remainder = dividend % divisor;
        return dividend / divisor;
}
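
/*
 * Usage sketch (illustrative only): unlike do_div(), div_u64_rem() leaves
 * its dividend untouched and returns the quotient:
 *
 *        uint32_t rem;
 *        uint64_t quot = div_u64_rem(1000, 512, &rem);
 *        // quot is 1, rem is 488
 */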

#define min_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
#define max_t(type,x,y) \
        ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })

/**
 * swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
#define swap(a, b) \
        do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define round_down(x, y) ((x) & ~__round_mask(x, y))
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
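
/*
 * Usage sketch (illustrative only): round_up()/round_down() assume a
 * power-of-two alignment; DIV_ROUND_UP() works for any divisor:
 *
 *        round_up(37, 8)         == 40
 *        round_down(37, 8)       == 32
 *        DIV_ROUND_UP(37, 5)     == 8
 */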

/*
 * Handling for kernel bitmap types.
 */
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBBY * sizeof(long))
#define DECLARE_BITMAP(name,bits) \
        unsigned long name[BITS_TO_LONGS(bits)]
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))

/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit. The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 */
static inline unsigned long
_find_next_bit(const unsigned long *addr, unsigned long nbits,
                unsigned long start, unsigned long invert)
{
        unsigned long tmp;

        if (!nbits || start >= nbits)
                return nbits;

        tmp = addr[start / BITS_PER_LONG] ^ invert;

        /* Handle 1st word. */
        tmp &= BITMAP_FIRST_WORD_MASK(start);
        start = round_down(start, BITS_PER_LONG);

        while (!tmp) {
                start += BITS_PER_LONG;
                if (start >= nbits)
                        return nbits;

                tmp = addr[start / BITS_PER_LONG] ^ invert;
        }

        return min(start + ffs(tmp), nbits);
}

/*
 * Find the next set bit in a memory region.
 */
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size,
                unsigned long offset)
{
        return _find_next_bit(addr, size, offset, 0UL);
}
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
                unsigned long offset)
{
        return _find_next_bit(addr, size, offset, ~0UL);
}
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
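
/*
 * Usage sketch (illustrative only; "map" is a stand-in):
 *
 *        DECLARE_BITMAP(map, 128) = { 0 };
 *
 *        // no bit is set, so the search runs off the end and returns 128
 *        unsigned long bit = find_next_bit(map, 128, 0);
 */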

static inline __attribute__((const))
int is_power_of_2(unsigned long n)
{
        return (n != 0 && ((n & (n - 1)) == 0));
}

/*
 * roundup_pow_of_two: round up argument to the next power of two
 * (formerly xfs_iroundup)
 */
static inline uint
roundup_pow_of_two(uint v)
{
        int i;
        uint m;

        if ((v & (v - 1)) == 0)
                return v;
        ASSERT((v & 0x80000000) == 0);
        if ((v & (v + 1)) == 0)
                return v + 1;
        for (i = 0, m = 1; i < 31; i++, m <<= 1) {
                if (v & m)
                        continue;
                v |= m;
                if ((v & (v + 1)) == 0)
                        return v + 1;
        }
        ASSERT(0);
        return 0;
}
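
/*
 * For example: roundup_pow_of_two(1) == 1, roundup_pow_of_two(5) == 8,
 * roundup_pow_of_two(4096) == 4096.
 */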

static inline uint64_t
roundup_64(uint64_t x, uint32_t y)
{
        x += y - 1;
        do_div(x, y);
        return x * y;
}

/* buffer management */
#define XBF_TRYLOCK 0
#define XBF_UNMAPPED 0
#define XBF_DONE 0
#define xfs_buf_stale(bp) ((bp)->b_flags |= LIBXFS_B_STALE)
#define XFS_BUF_UNDELAYWRITE(bp) ((bp)->b_flags &= ~LIBXFS_B_DIRTY)

static inline struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
                xfs_daddr_t blkno, size_t numblks, xfs_buf_flags_t flags)
{
        return NULL;
}

#define xfs_buf_oneshot(bp) ((void) 0)

#define xfs_buf_zero(bp, off, len) \
        memset((bp)->b_addr + off, 0, len);

/* mount stuff */
#define XFS_MOUNT_32BITINODES LIBXFS_MOUNT_32BITINODES
#define XFS_MOUNT_ATTR2 LIBXFS_MOUNT_ATTR2
#define XFS_MOUNT_SMALL_INUMS 0 /* ignored in userspace */
#define XFS_MOUNT_WSYNC 0 /* ignored in userspace */
#define XFS_MOUNT_NOALIGN 0 /* ignored in userspace */
#define XFS_MOUNT_IKEEP 0 /* ignored in userspace */
#define XFS_MOUNT_SWALLOC 0 /* ignored in userspace */
#define XFS_MOUNT_RDONLY 0 /* ignored in userspace */
#define XFS_MOUNT_BAD_SUMMARY 0 /* ignored in userspace */

#define xfs_trans_set_sync(tp) ((void) 0)
#define xfs_trans_agblocks_delta(tp, d)
#define xfs_trans_agflist_delta(tp, d)
#define xfs_trans_agbtree_delta(tp, d)
#define xfs_trans_buf_set_type(tp, bp, t) ({ \
        int __t = (t); \
        __t = __t; /* no set-but-unused warning */ \
        tp = tp; /* no set-but-unused warning */ \
})

#define xfs_trans_buf_copy_type(dbp, sbp)

/* no readahead, need to avoid set-but-unused var warnings. */
#define xfs_buf_readahead(a,d,c,ops) ({ \
        xfs_daddr_t __d = d; \
        __d = __d; /* no set-but-unused warning */ \
})
#define xfs_buf_readahead_map(a,b,c,ops) ((void) 0) /* no readahead */

#define xfs_sort qsort

#define xfs_ilock(ip,mode) ((void) 0)
#define xfs_ilock_data_map_shared(ip) (0)
#define xfs_ilock_attr_map_shared(ip) (0)
#define xfs_iunlock(ip,mode) ({ \
        typeof(mode) __mode = mode; \
        __mode = __mode; /* no set-but-unused warning */ \
})

/* space allocation */
#define XFS_EXTENT_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
#define XFS_EXTENT_BUSY_SKIP_DISCARD 0x02 /* do not discard */

#define xfs_extent_busy_reuse(mp,ag,bno,len,user) ((void) 0)
/* avoid unused variable warning */
#define xfs_extent_busy_insert(tp,ag,bno,len,flags)({ \
        xfs_agnumber_t __foo = ag; \
        __foo = __foo; /* no set-but-unused warning */ \
})
#define xfs_extent_busy_trim(args,bno,len,busy_gen) ({ \
        unsigned __foo = *(busy_gen); \
        *(busy_gen) = __foo; \
        false; \
})
#define xfs_extent_busy_flush(mp,pag,busy_gen) ((void)(0))

#define xfs_rotorstep 1
#define xfs_bmap_rtalloc(a) (-ENOSYS)
#define xfs_get_extsz_hint(ip) (0)
#define xfs_get_cowextsz_hint(ip) (0)
#define xfs_inode_is_filestream(ip) (0)
#define xfs_filestream_lookup_ag(ip) (0)
#define xfs_filestream_new_ag(ip,ag) (0)

/* quota bits */
#define xfs_trans_mod_dquot_byino(t,i,f,d) ((void) 0)
#define xfs_trans_reserve_quota_nblks(t,i,b,n,f) (0)
#define xfs_trans_unreserve_quota_nblks(t,i,b,n,f) ((void) 0)
#define xfs_qm_dqattach(i) (0)

#define uuid_copy(s,d) platform_uuid_copy((s),(d))
#define uuid_equal(s,d) (platform_uuid_compare((s),(d)) == 0)

#define xfs_icreate_log(tp, agno, agbno, cnt, isize, len, gen) ((void) 0)
#define xfs_sb_validate_fsb_count(sbp, nblks) (0)

/*
 * Prototypes for kernel static functions that aren't in their
 * associated header files.
 */
struct xfs_da_args;
struct xfs_bmap_free;
struct xfs_bmap_free_item;
struct xfs_mount;
struct xfs_sb;
struct xfs_trans;
struct xfs_inode;
struct xfs_log_item;
struct xfs_buf;
struct xfs_buf_map;
struct xfs_buf_log_item;
struct xfs_buftarg;

/* xfs_attr.c */
int xfs_attr_rmtval_get(struct xfs_da_args *);

/* xfs_bmap.c */
void xfs_bmap_del_free(struct xfs_bmap_free *, struct xfs_bmap_free_item *);

/* xfs_mount.c */
int xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
void xfs_mount_common(struct xfs_mount *, struct xfs_sb *);

/*
 * logitem.c and trans.c prototypes
 */
void xfs_trans_init(struct xfs_mount *);
int xfs_trans_roll(struct xfs_trans **);

/* xfs_trans_item.c */
void xfs_trans_add_item(struct xfs_trans *, struct xfs_log_item *);
void xfs_trans_del_item(struct xfs_log_item *);

/* xfs_inode_item.c */
void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *);

/* xfs_buf_item.c */
void xfs_buf_item_init(struct xfs_buf *, struct xfs_mount *);
void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint);

/* xfs_trans_buf.c */
struct xfs_buf *xfs_trans_buf_item_match(struct xfs_trans *,
                struct xfs_buftarg *, struct xfs_buf_map *, int);

/* local source files */
#define xfs_mod_fdblocks(mp, delta, rsvd) \
        libxfs_mod_incore_sb(mp, XFS_TRANS_SB_FDBLOCKS, delta, rsvd)
#define xfs_mod_frextents(mp, delta) \
        libxfs_mod_incore_sb(mp, XFS_TRANS_SB_FREXTENTS, delta, 0)
int libxfs_mod_incore_sb(struct xfs_mount *, int, int64_t, int);
/* percpu counters in mp are #defined to the superblock sb_ counters */
#define xfs_reinit_percpu_counters(mp)

void xfs_trans_mod_sb(struct xfs_trans *, uint, long);

void xfs_verifier_error(struct xfs_buf *bp, int error,
                xfs_failaddr_t failaddr);
void xfs_inode_verifier_error(struct xfs_inode *ip, int error,
                const char *name, void *buf, size_t bufsz,
                xfs_failaddr_t failaddr);

#define xfs_buf_verifier_error(bp,e,n,bu,bus,fa) \
        xfs_verifier_error(bp, e, fa)
void
xfs_buf_corruption_error(struct xfs_buf *bp);

/* XXX: this is clearly a bug - a shared header needs to export this */
/* xfs_rtalloc.c */
int libxfs_rtfree_extent(struct xfs_trans *, xfs_rtblock_t, xfs_extlen_t);
bool libxfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);

struct xfs_rtalloc_rec {
        xfs_rtblock_t ar_startext;
        xfs_rtblock_t ar_extcount;
};

typedef int (*xfs_rtalloc_query_range_fn)(
        struct xfs_trans *tp,
        struct xfs_rtalloc_rec *rec,
        void *priv);

int libxfs_zero_extent(struct xfs_inode *ip, xfs_fsblock_t start_fsb,
                xfs_off_t count_fsb);


bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
void xfs_log_item_init(struct xfs_mount *, struct xfs_log_item *, int);
#define xfs_log_in_recovery(mp) (false)

/* xfs_icache.c */
#define xfs_inode_set_cowblocks_tag(ip) do { } while (0)
#define xfs_inode_set_eofblocks_tag(ip) do { } while (0)

/* xfs_stats.h */
#define XFS_STATS_CALC_INDEX(member) 0
#define XFS_STATS_INC_OFF(mp, off)
#define XFS_STATS_ADD_OFF(mp, off, val)

typedef unsigned char u8;
unsigned int hweight8(unsigned int w);
unsigned int hweight32(unsigned int w);
unsigned int hweight64(__u64 w);

#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)

static inline void set_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p |= mask;
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        *p &= ~mask;
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

        return *p & mask;
}

/* Sets and returns original value of the bit */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        if (test_bit(nr, addr))
                return 1;
        set_bit(nr, addr);
        return 0;
}
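
/*
 * Usage sketch (illustrative only; "map" is a stand-in) combining the
 * bitmap declaration above with these bit operations:
 *
 *        DECLARE_BITMAP(map, 64) = { 0 };
 *
 *        set_bit(3, map);
 *        if (test_bit(3, map))
 *                clear_bit(3, map);
 *        test_and_set_bit(3, map);       // bit was clear: sets it, returns 0
 */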

/* Keep static checkers quiet about nonstatic functions by exporting */
int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_extlen_t len, int val,
                xfs_rtblock_t *new, int *stat);
int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_rtblock_t limit,
                xfs_rtblock_t *rtblock);
int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_rtblock_t limit,
                xfs_rtblock_t *rtblock);
int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_extlen_t len, int val);
int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
                int log, xfs_rtblock_t bbno, int delta,
                xfs_buf_t **rbpp, xfs_fsblock_t *rsb,
                xfs_suminfo_t *sum);
int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
                xfs_rtblock_t bbno, int delta, xfs_buf_t **rbpp,
                xfs_fsblock_t *rsb);
int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_extlen_t len,
                struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
int xfs_rtalloc_query_range(struct xfs_trans *tp,
                struct xfs_rtalloc_rec *low_rec,
                struct xfs_rtalloc_rec *high_rec,
                xfs_rtalloc_query_range_fn fn,
                void *priv);
int xfs_rtalloc_query_all(struct xfs_trans *tp,
                xfs_rtalloc_query_range_fn fn,
                void *priv);
bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
                xfs_rtblock_t start, xfs_extlen_t len,
                bool *is_free);
/* xfs_bmap_util.h */
struct xfs_bmalloca;
int xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
                struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
                int rt, int eof, int delay, int convert,
                xfs_fileoff_t *offp, xfs_extlen_t *lenp);
void xfs_bmap_adjacent(struct xfs_bmalloca *ap);
int xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
                int whichfork, struct xfs_bmbt_irec *rec,
                int *is_empty);

#endif /* __LIBXFS_INTERNAL_XFS_H__ */