1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
8 * This header is effectively a "namespace multiplexor" for the
9 * user level XFS code. It provides all of the necessary stuff
10 * such that we can build some parts of the XFS kernel code in
11 * user space in a controlled fashion, and translates the names
12 * used in the kernel into the names which libxfs is going to
13 * make available to user tools.
15 * It should only ever be #include'd by XFS "kernel" code being
16 * compiled in user space.
18 * Our goals here are to...
19 * o "share" large amounts of complex code between user and
21 * o shield the user tools from changes in the bleeding
22 * edge kernel code, merging source changes when
23 * convenient and not immediately (no symlinks);
24 * o i.e. be able to merge changes to the kernel source back
25 * into the affected user tools in a controlled fashion;
26 * o provide a _minimalist_ life-support system for kernel
27 * code in user land, not the "everything + the kitchen
28 * sink" model which libsim had mutated into;
29 * o allow the kernel code to be completely free of code
30 * specifically there to support the user level build.
34 * define a guard and something we can check to determine what include context
35 * we are running from.
37 #ifndef __LIBXFS_INTERNAL_XFS_H__
38 #define __LIBXFS_INTERNAL_XFS_H__
40 #include "libxfs_api_defs.h"
41 #include "platform_defs.h"
49 #include "libfrog/radix-tree.h"
53 #include "xfs_types.h"
57 #include "libfrog/crc32c.h"
59 #include <sys/xattr.h>
/* Zones used in libxfs allocations that aren't in shared header files */
/*
 * NOTE(review): kmem_zone_t is userspace libxfs's stand-in for a kernel
 * slab cache.  These zones are only declared here; they are presumably
 * defined/initialized in libxfs setup code outside this header — confirm
 * against libxfs init sources before relying on zone lifetimes.
 */
extern kmem_zone_t	*xfs_buf_item_zone;
extern kmem_zone_t	*xfs_ili_zone;
extern kmem_zone_t	*xfs_buf_zone;
extern kmem_zone_t	*xfs_inode_zone;
extern kmem_zone_t	*xfs_trans_zone;
68 /* fake up iomap, (not) used in xfs_bmap.[ch] */
69 #define IOMAP_F_SHARED 0x04
70 #define xfs_bmbt_to_iomap(a, b, c, d) ((void) 0)
72 /* CRC stuff, buffer API dependent on it */
73 #define crc32c(c,p,l) crc32c_le((c),(unsigned char const *)(p),(l))
75 /* fake up kernel's iomap, (not) used in xfs_bmap.[ch] */
78 #include "xfs_cksum.h"
81 * This mirrors the kernel include for xfs_buf.h - it's implicitly included in
 * every file via a similar include in the kernel xfs_linux.h.
84 #include "libxfs_io.h"
86 /* for all the support code that uses progname in error messages */
87 extern char *progname
;
90 #define ASSERT(ex) assert(ex)
93 * We have no need for the "linux" dev_t in userspace, so these
94 * are no-ops, and an xfs_dev_t is stored in VFS_I(ip)->i_rdev
96 #define xfs_to_linux_dev_t(dev) dev
97 #define linux_to_xfs_dev_t(dev) dev
100 #define EWRONGFS EINVAL
103 #define xfs_error_level 0
105 #define STATIC static
108 * Starting in Linux 4.15, the %p (raw pointer value) printk modifier
109 * prints a hashed version of the pointer to avoid leaking kernel
110 * pointers into dmesg. If we're trying to debug the kernel we want the
111 * raw values, so override this behavior as best we can.
113 * In userspace we don't have this problem.
117 #define XFS_IGET_CREATE 0x1
118 #define XFS_IGET_UNTRUSTED 0x2
/*
 * Kernel-style console message severities, passed as the first argument
 * to cmn_err().  Values run from least (CE_DEBUG) to most (CE_PANIC)
 * severe; order must not change, the xfs_*() message macros rely on it.
 */
extern void cmn_err(int, char *, ...);
enum ce {
	CE_DEBUG,
	CE_CONT,
	CE_NOTE,
	CE_WARN,
	CE_ALERT,
	CE_PANIC
};
123 #define xfs_info(mp,fmt,args...) cmn_err(CE_CONT, _(fmt), ## args)
124 #define xfs_notice(mp,fmt,args...) cmn_err(CE_NOTE, _(fmt), ## args)
125 #define xfs_warn(mp,fmt,args...) cmn_err(CE_WARN, _(fmt), ## args)
126 #define xfs_err(mp,fmt,args...) cmn_err(CE_ALERT, _(fmt), ## args)
127 #define xfs_alert(mp,fmt,args...) cmn_err(CE_ALERT, _(fmt), ## args)
129 #define xfs_buf_ioerror_alert(bp,f) ((void) 0);
131 #define xfs_hex_dump(d,n) ((void) 0)
132 #define xfs_stack_trace() ((void) 0)
135 #define xfs_force_shutdown(d,n) ((void) 0)
136 #define xfs_mod_delalloc(a,b) ((void) 0)
138 /* stop unused var warnings by assigning mp to itself */
140 #define xfs_corruption_error(e,l,mp,b,sz,fi,ln,fa) do { \
142 cmn_err(CE_ALERT, "%s: XFS_CORRUPTION_ERROR", (e)); \
145 #define XFS_CORRUPTION_ERROR(e, lvl, mp, buf, bufsize) do { \
147 cmn_err(CE_ALERT, "%s: XFS_CORRUPTION_ERROR", (e)); \
150 #define XFS_ERROR_REPORT(e,l,mp) do { \
152 cmn_err(CE_ALERT, "%s: XFS_ERROR_REPORT", (e)); \
/*
 * NOTE(review): XFS_WARN_CORRUPT evaluates to true on *both* arms of its
 * conditional — printing the report is its only side effect — so
 * XFS_IS_CORRUPT(mp, expr) reduces to the truth value of (expr).
 * (expr) itself is evaluated exactly once, inside unlikely().
 */
#define XFS_WARN_CORRUPT(mp, expr) \
	( xfs_is_reporting_corruption(mp) ? \
	  (printf("%s: XFS_WARN_CORRUPT at %s:%d", #expr, \
		  __func__, __LINE__), true) : true)
#define XFS_IS_CORRUPT(mp, expr) \
	(unlikely(expr) ? XFS_WARN_CORRUPT((mp), (expr)) : false)
163 #define XFS_ERRLEVEL_LOW 1
164 #define XFS_ILOCK_EXCL 0
165 #define XFS_STATS_INC(mp, count) do { (mp) = (mp); } while (0)
166 #define XFS_STATS_DEC(mp, count, x) do { (mp) = (mp); } while (0)
167 #define XFS_STATS_ADD(mp, count, x) do { (mp) = (mp); } while (0)
168 #define XFS_TEST_ERROR(expr,a,b) ( expr )
170 #define __section(section) __attribute__((__section__(section)))
172 #define xfs_printk_once(func, dev, fmt, ...) \
174 static bool __section(".data.once") __print_once; \
175 bool __ret_print_once = !__print_once; \
177 if (!__print_once) { \
178 __print_once = true; \
179 func(dev, fmt, ##__VA_ARGS__); \
181 unlikely(__ret_print_once); \
184 #define xfs_info_once(dev, fmt, ...) \
185 xfs_printk_once(xfs_info, dev, fmt, ##__VA_ARGS__)
188 #define __return_address __builtin_return_address(0)
191 * Return the address of a label. Use barrier() so that the optimizer
192 * won't reorder code to refactor the error jumpouts into a single
193 * return, which throws off the reported address.
195 #define __this_address ({ __label__ __here; __here: barrier(); &&__here; })
196 /* Optimization barrier */
198 /* The "volatile" is due to gcc bugs */
199 #define barrier() __asm__ __volatile__("": : :"memory")
202 /* Optimization barrier */
204 # define barrier() __memory_barrier()
207 /* miscellaneous kernel routines not in user space */
208 #define likely(x) (x)
209 #define unlikely(x) (x)
/*
 * Need to be able to handle this bare or in control flow: an inline
 * (rather than a macro) so WARN_ON(x); and if (WARN_ON(x)) both work.
 * Userspace variant just passes the condition through; no log output.
 */
static inline bool WARN_ON(bool expr)
{
	return (expr);
}
216 #define WARN_ON_ONCE(e) WARN_ON(e)
217 #define percpu_counter_read(x) (*x)
218 #define percpu_counter_read_positive(x) ((*x) > 0 ? (*x) : 0)
219 #define percpu_counter_sum(x) (*x)
221 #define READ_ONCE(x) (x)
222 #define WRITE_ONCE(x, val) ((x) = (val))
 * prandom_u32 is used for di_gen inode allocation; it must be zero for libxfs
226 * or all sorts of badness can occur!
228 #define prandom_u32() 0
230 #define PAGE_SIZE getpagesize()
232 #define inode_peek_iversion(inode) (inode)->i_version
233 #define inode_set_iversion_queried(inode, version) do { \
234 (inode)->i_version = (version); \
/*
 * Kernel-style division helper backing the do_div() macro below:
 * divides *n in place by base and returns the remainder.
 * Note the intermediate casts to unsigned long: on an ILP32 platform
 * this truncates a 64-bit dividend — kept for kernel compatibility.
 */
static inline int __do_div(unsigned long long *n, unsigned base)
{
	int __res;

	__res = (int)(((unsigned long) *n) % (unsigned) base);
	*n = ((unsigned long) *n) / (unsigned) base;
	return __res;
}
245 #define do_div(n,base) (__do_div((unsigned long long *)&(n), (base)))
246 #define do_mod(a, b) ((a) % (b))
247 #define rol32(x,y) (((x) << (y)) | ((x) >> (32 - (y))))
/*
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 * @remainder: pointer to unsigned 32bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide; in userspace we just let the compiler do it.
 */
static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/*
 * div_u64 - unsigned 64bit divide with 32bit divisor
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 32bit divisor
 *
 * Return: dividend / divisor; the remainder is discarded.
 */
static inline uint64_t div_u64(uint64_t dividend, uint32_t divisor)
{
	uint32_t remainder;

	return div_u64_rem(dividend, divisor, &remainder);
}
/*
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 * @dividend: unsigned 64bit dividend
 * @divisor: unsigned 64bit divisor
 * @remainder: pointer to unsigned 64bit remainder
 *
 * Return: sets ``*remainder``, then returns dividend / divisor
 */
static inline uint64_t
div64_u64_rem(uint64_t dividend, uint64_t divisor, uint64_t *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}
297 #define min_t(type,x,y) \
298 ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
299 #define max_t(type,x,y) \
300 ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
303 * swap - swap values of @a and @b
308 do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
310 #define __round_mask(x, y) ((__typeof__(x))((y)-1))
311 #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
312 #define round_down(x, y) ((x) & ~__round_mask(x, y))
313 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
316 * Handling for kernel bitmap types.
318 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, NBBY * sizeof(long))
319 #define DECLARE_BITMAP(name,bits) \
320 unsigned long name[BITS_TO_LONGS(bits)]
321 #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
324 * This is a common helper function for find_next_bit and
325 * find_next_zero_bit. The difference is the "invert" argument, which
326 * is XORed with each fetched word before searching it for one bits.
328 static inline unsigned long
329 _find_next_bit(const unsigned long *addr
, unsigned long nbits
,
330 unsigned long start
, unsigned long invert
)
334 if (!nbits
|| start
>= nbits
)
337 tmp
= addr
[start
/ BITS_PER_LONG
] ^ invert
;
339 /* Handle 1st word. */
340 tmp
&= BITMAP_FIRST_WORD_MASK(start
);
341 start
= round_down(start
, BITS_PER_LONG
);
344 start
+= BITS_PER_LONG
;
348 tmp
= addr
[start
/ BITS_PER_LONG
] ^ invert
;
351 return min(start
+ ffs(tmp
), nbits
);
/*
 * Find the next set bit in a memory region, starting the search at
 * @offset.  Returns @size if no set bit is found.
 */
static inline unsigned long
find_next_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	/* invert == 0: search the words as-is for one bits. */
	return _find_next_bit(addr, size, offset, 0UL);
}
/*
 * Find the next clear bit in a memory region, starting the search at
 * @offset.  Returns @size if no clear bit is found.
 */
static inline unsigned long
find_next_zero_bit(const unsigned long *addr, unsigned long size,
		unsigned long offset)
{
	/* invert == ~0UL: flip every word so clear bits search as set. */
	return _find_next_bit(addr, size, offset, ~0UL);
}
369 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
/*
 * True iff @n is a power of two.  Zero is not considered a power of
 * two.  Marked const: result depends only on the argument value.
 */
static inline __attribute__((const))
int is_power_of_2(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}
378 * xfs_iroundup: round up argument to next power of two
381 roundup_pow_of_two(uint v
)
386 if ((v
& (v
- 1)) == 0)
388 ASSERT((v
& 0x80000000) == 0);
389 if ((v
& (v
+ 1)) == 0)
391 for (i
= 0, m
= 1; i
< 31; i
++, m
<<= 1) {
395 if ((v
& (v
+ 1)) == 0)
/* Round @x up to the next multiple of the 32-bit divisor @y. */
static inline uint64_t
roundup_64(uint64_t x, uint32_t y)
{
	x += y - 1;
	do_div(x, y);
	return x * y;
}
/* How many @y-sized pieces are needed to cover @x (division rounded up). */
static inline uint64_t
howmany_64(uint64_t x, uint32_t y)
{
	x += y - 1;
	do_div(x, y);
	return x;
}
418 /* buffer management */
419 #define XBF_TRYLOCK 0
420 #define XBF_UNMAPPED 0
422 #define xfs_buf_stale(bp) ((bp)->b_flags |= LIBXFS_B_STALE)
423 #define XFS_BUF_UNDELAYWRITE(bp) ((bp)->b_flags &= ~LIBXFS_B_DIRTY)
425 /* buffer type flags for write callbacks */
426 #define _XBF_INODES 0 /* inode buffer */
427 #define _XBF_DQUOTS 0 /* dquot buffer */
428 #define _XBF_LOGRECOVERY 0 /* log recovery buffer */
430 static inline struct xfs_buf
*xfs_buf_incore(struct xfs_buftarg
*target
,
431 xfs_daddr_t blkno
, size_t numblks
, xfs_buf_flags_t flags
)
436 #define xfs_buf_oneshot(bp) ((void) 0)
438 #define xfs_buf_zero(bp, off, len) \
439 memset((bp)->b_addr + off, 0, len);
441 void __xfs_buf_mark_corrupt(struct xfs_buf
*bp
, xfs_failaddr_t fa
);
442 #define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)
445 #define XFS_MOUNT_32BITINODES LIBXFS_MOUNT_32BITINODES
446 #define XFS_MOUNT_ATTR2 LIBXFS_MOUNT_ATTR2
447 #define XFS_MOUNT_SMALL_INUMS 0 /* ignored in userspace */
448 #define XFS_MOUNT_WSYNC 0 /* ignored in userspace */
449 #define XFS_MOUNT_NOALIGN 0 /* ignored in userspace */
450 #define XFS_MOUNT_IKEEP 0 /* ignored in userspace */
451 #define XFS_MOUNT_SWALLOC 0 /* ignored in userspace */
452 #define XFS_MOUNT_RDONLY 0 /* ignored in userspace */
453 #define XFS_MOUNT_BAD_SUMMARY 0 /* ignored in userspace */
455 #define xfs_trans_set_sync(tp) ((void) 0)
456 #define xfs_trans_buf_set_type(tp, bp, t) ({ \
458 __t = __t; /* no set-but-unused warning */ \
459 tp = tp; /* no set-but-unused warning */ \
462 #define xfs_trans_buf_copy_type(dbp, sbp)
464 /* no readahead, need to avoid set-but-unused var warnings. */
465 #define xfs_buf_readahead(a,d,c,ops) ({ \
466 xfs_daddr_t __d = d; \
467 __d = __d; /* no set-but-unused warning */ \
469 #define xfs_buf_readahead_map(a,b,c,ops) ((void) 0) /* no readahead */
471 #define xfs_sort qsort
473 #define xfs_ilock(ip,mode) ((void) 0)
474 #define xfs_ilock_data_map_shared(ip) (0)
475 #define xfs_ilock_attr_map_shared(ip) (0)
476 #define xfs_iunlock(ip,mode) ({ \
477 typeof(mode) __mode = mode; \
478 __mode = __mode; /* no set-but-unused warning */ \
481 /* space allocation */
482 #define XFS_EXTENT_BUSY_DISCARDED 0x01 /* undergoing a discard op. */
483 #define XFS_EXTENT_BUSY_SKIP_DISCARD 0x02 /* do not discard */
485 #define xfs_extent_busy_reuse(mp,ag,bno,len,user) ((void) 0)
486 /* avoid unused variable warning */
487 #define xfs_extent_busy_insert(tp,pag,bno,len,flags)({ \
488 struct xfs_perag *__foo = pag; \
489 __foo = __foo; /* no set-but-unused warning */ \
491 #define xfs_extent_busy_trim(args,bno,len,busy_gen) ({ \
492 unsigned __foo = *(busy_gen); \
493 *(busy_gen) = __foo; \
496 #define xfs_extent_busy_flush(mp,pag,busy_gen) ((void)(0))
498 #define xfs_rotorstep 1
499 #define xfs_bmap_rtalloc(a) (-ENOSYS)
500 #define xfs_get_extsz_hint(ip) (0)
501 #define xfs_get_cowextsz_hint(ip) (0)
502 #define xfs_inode_is_filestream(ip) (0)
503 #define xfs_filestream_lookup_ag(ip) (0)
504 #define xfs_filestream_new_ag(ip,ag) (0)
507 #define xfs_trans_mod_dquot_byino(t,i,f,d) ((void) 0)
508 #define xfs_trans_reserve_quota_nblks(t,i,b,n,f) (0)
/*
 * Hack to silence gcc: an inline function that returns zero, for macro
 * bodies that must expand to an int-valued expression with no other
 * effect.
 */
static inline int
retzero(void)
{
	return 0;
}
#define xfs_trans_unreserve_quota_nblks(t,i,b,n,f)	retzero()
#define xfs_quota_unreserve_blkres(i,b)			retzero()
515 #define xfs_quota_reserve_blkres(i,b) (0)
516 #define xfs_qm_dqattach(i) (0)
518 #define uuid_copy(s,d) platform_uuid_copy((s),(d))
519 #define uuid_equal(s,d) (platform_uuid_compare((s),(d)) == 0)
521 #define xfs_icreate_log(tp, agno, agbno, cnt, isize, len, gen) ((void) 0)
522 #define xfs_sb_validate_fsb_count(sbp, nblks) (0)
 * Prototypes for kernel static functions that aren't in their
526 * associated header files.
529 struct xfs_bmap_free
;
530 struct xfs_bmap_free_item
;
538 struct xfs_buf_log_item
;
542 int xfs_attr_rmtval_get(struct xfs_da_args
*);
545 void xfs_bmap_del_free(struct xfs_bmap_free
*, struct xfs_bmap_free_item
*);
548 void xfs_mount_common(struct xfs_mount
*, struct xfs_sb
*);
551 * logitem.c and trans.c prototypes
553 void xfs_trans_init(struct xfs_mount
*);
554 int xfs_trans_roll(struct xfs_trans
**);
556 /* xfs_trans_item.c */
557 void xfs_trans_add_item(struct xfs_trans
*, struct xfs_log_item
*);
558 void xfs_trans_del_item(struct xfs_log_item
*);
560 /* xfs_inode_item.c */
561 void xfs_inode_item_init(struct xfs_inode
*, struct xfs_mount
*);
564 void xfs_buf_item_init(struct xfs_buf
*, struct xfs_mount
*);
565 void xfs_buf_item_log(struct xfs_buf_log_item
*, uint
, uint
);
567 /* xfs_trans_buf.c */
568 struct xfs_buf
*xfs_trans_buf_item_match(struct xfs_trans
*,
569 struct xfs_buftarg
*, struct xfs_buf_map
*, int);
571 /* local source files */
572 #define xfs_mod_fdblocks(mp, delta, rsvd) \
573 libxfs_mod_incore_sb(mp, XFS_TRANS_SB_FDBLOCKS, delta, rsvd)
574 #define xfs_mod_frextents(mp, delta) \
575 libxfs_mod_incore_sb(mp, XFS_TRANS_SB_FREXTENTS, delta, 0)
576 int libxfs_mod_incore_sb(struct xfs_mount
*, int, int64_t, int);
577 /* percpu counters in mp are #defined to the superblock sb_ counters */
578 #define xfs_reinit_percpu_counters(mp)
580 void xfs_trans_mod_sb(struct xfs_trans
*, uint
, long);
582 void xfs_verifier_error(struct xfs_buf
*bp
, int error
,
583 xfs_failaddr_t failaddr
);
584 void xfs_inode_verifier_error(struct xfs_inode
*ip
, int error
,
585 const char *name
, void *buf
, size_t bufsz
,
586 xfs_failaddr_t failaddr
);
588 #define xfs_buf_verifier_error(bp,e,n,bu,bus,fa) \
589 xfs_verifier_error(bp, e, fa)
591 xfs_buf_corruption_error(struct xfs_buf
*bp
, xfs_failaddr_t fa
);
593 /* XXX: this is clearly a bug - a shared header needs to export this */
595 int libxfs_rtfree_extent(struct xfs_trans
*, xfs_rtblock_t
, xfs_extlen_t
);
596 bool libxfs_verify_rtbno(struct xfs_mount
*mp
, xfs_rtblock_t rtbno
);
598 struct xfs_rtalloc_rec
{
599 xfs_rtblock_t ar_startext
;
600 xfs_rtblock_t ar_extcount
;
603 typedef int (*xfs_rtalloc_query_range_fn
)(
604 struct xfs_trans
*tp
,
605 const struct xfs_rtalloc_rec
*rec
,
608 int libxfs_zero_extent(struct xfs_inode
*ip
, xfs_fsblock_t start_fsb
,
609 xfs_off_t count_fsb
);
612 bool xfs_log_check_lsn(struct xfs_mount
*, xfs_lsn_t
);
613 void xfs_log_item_init(struct xfs_mount
*, struct xfs_log_item
*, int);
614 #define xfs_log_in_recovery(mp) (false)
617 #define xfs_inode_set_cowblocks_tag(ip) do { } while (0)
618 #define xfs_inode_set_eofblocks_tag(ip) do { } while (0)
621 #define XFS_STATS_CALC_INDEX(member) 0
622 #define XFS_STATS_INC_OFF(mp, off)
623 #define XFS_STATS_ADD_OFF(mp, off, val)
625 typedef unsigned char u8
;
626 unsigned int hweight8(unsigned int w
);
627 unsigned int hweight32(unsigned int w
);
628 unsigned int hweight64(__u64 w
);
630 #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
631 #define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
/* Set bit @nr in the bitmap at @addr (not atomic in userspace). */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p |= mask;
}
/* Clear bit @nr in the bitmap at @addr (not atomic in userspace). */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}
/*
 * Test bit @nr in the bitmap at @addr.  Returns 0 or 1: the result is
 * normalized with != 0 so that a mask bit in the upper half of a 64-bit
 * word is not truncated away by the conversion to int.
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	return (*p & mask) != 0;
}
/* Sets and returns original value of the bit (not atomic in userspace). */
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	if (test_bit(nr, addr))
		return 1;
	set_bit(nr, addr);
	return 0;
}
/*
 * Per-AG setup/teardown hooks mirrored from the kernel: userspace keeps
 * none of these structures, so every hook is a no-op that succeeds.
 */
static inline int
xfs_buf_hash_init(struct xfs_perag *pag)
{
	return 0;
}

static inline void
xfs_buf_hash_destroy(struct xfs_perag *pag)
{
}

static inline int
xfs_iunlink_init(struct xfs_perag *pag)
{
	return 0;
}

static inline void
xfs_iunlink_destroy(struct xfs_perag *pag)
{
}
672 xfs_agnumber_t
xfs_set_inode_alloc(struct xfs_mount
*mp
,
673 xfs_agnumber_t agcount
);
675 /* Keep static checkers quiet about nonstatic functions by exporting */
676 int xfs_rtbuf_get(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
677 xfs_rtblock_t block
, int issum
, struct xfs_buf
**bpp
);
678 int xfs_rtcheck_range(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
679 xfs_rtblock_t start
, xfs_extlen_t len
, int val
,
680 xfs_rtblock_t
*new, int *stat
);
681 int xfs_rtfind_back(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
682 xfs_rtblock_t start
, xfs_rtblock_t limit
,
683 xfs_rtblock_t
*rtblock
);
684 int xfs_rtfind_forw(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
685 xfs_rtblock_t start
, xfs_rtblock_t limit
,
686 xfs_rtblock_t
*rtblock
);
687 int xfs_rtmodify_range(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
688 xfs_rtblock_t start
, xfs_extlen_t len
, int val
);
689 int xfs_rtmodify_summary_int(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
690 int log
, xfs_rtblock_t bbno
, int delta
,
691 struct xfs_buf
**rbpp
, xfs_fsblock_t
*rsb
,
693 int xfs_rtmodify_summary(struct xfs_mount
*mp
, struct xfs_trans
*tp
, int log
,
694 xfs_rtblock_t bbno
, int delta
, struct xfs_buf
**rbpp
,
696 int xfs_rtfree_range(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
697 xfs_rtblock_t start
, xfs_extlen_t len
,
698 struct xfs_buf
**rbpp
, xfs_fsblock_t
*rsb
);
699 int xfs_rtalloc_query_range(struct xfs_trans
*tp
,
700 const struct xfs_rtalloc_rec
*low_rec
,
701 const struct xfs_rtalloc_rec
*high_rec
,
702 xfs_rtalloc_query_range_fn fn
,
704 int xfs_rtalloc_query_all(struct xfs_trans
*tp
,
705 xfs_rtalloc_query_range_fn fn
,
707 bool xfs_verify_rtbno(struct xfs_mount
*mp
, xfs_rtblock_t rtbno
);
708 int xfs_rtalloc_extent_is_free(struct xfs_mount
*mp
, struct xfs_trans
*tp
,
709 xfs_rtblock_t start
, xfs_extlen_t len
,
711 /* xfs_bmap_util.h */
713 int xfs_bmap_extsize_align(struct xfs_mount
*mp
, struct xfs_bmbt_irec
*gotp
,
714 struct xfs_bmbt_irec
*prevp
, xfs_extlen_t extsz
,
715 int rt
, int eof
, int delay
, int convert
,
716 xfs_fileoff_t
*offp
, xfs_extlen_t
*lenp
);
717 void xfs_bmap_adjacent(struct xfs_bmalloca
*ap
);
718 int xfs_bmap_last_extent(struct xfs_trans
*tp
, struct xfs_inode
*ip
,
719 int whichfork
, struct xfs_bmbt_irec
*rec
,
723 #define xfs_iflags_set(ip, flags) do { } while (0)
725 #endif /* __LIBXFS_INTERNAL_XFS_H__ */