// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31)/* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

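/*
 * Illustrative sketch (not part of this header's API contract): the XBF_*
 * access flags are OR'ed together and passed to the lookup routines
 * declared later in this file. A non-blocking, unmapped read might look
 * like this, where "target", "blkno", "numblks" and "ops" are placeholders
 * supplied by the caller:
 *
 *	error = xfs_buf_read(target, blkno, numblks,
 *			     XBF_TRYLOCK | XBF_UNMAPPED, &bp, ops);
 */
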
#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }

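/*
 * XFS_BUF_FLAGS exists to feed the tracing code: a tracepoint can
 * pretty-print b_flags with it, along the lines of the sketch below
 * (see xfs_trace.h for the real usage):
 *
 *	__print_flags(bp->b_flags, "|", XFS_BUF_FLAGS)
 */
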
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains two notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of I/O that will be performed by metadata operations.
 * 2) The device logical sector size.
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct I/O
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;

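/*
 * Note (an inference about typical usage, not stated in this file): each
 * sectormask is its corresponding sectorsize minus one, so alignment
 * checks reduce to a single mask test, e.g.:
 *
 *	if ((pos | count) & target->bt_logical_sectormask)
 *		return -EINVAL;
 *
 * which rejects I/O that is not logical-sector aligned.
 */
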
#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

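/*
 * For discontiguous ("compound") buffers, callers build a map array
 * instead of using DEFINE_SINGLE_BUF_MAP. Sketch only, with made-up
 * extents (blkno1/len1 etc. are hypothetical):
 *
 *	struct xfs_buf_map map[] = {
 *		{ .bm_bn = blkno1, .bm_len = len1 },
 *		{ .bm_bn = blkno2, .bm_len = len2 },
 *	};
 *	error = xfs_buf_get_map(target, map, ARRAY_SIZE(map), 0, &bp);
 */
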
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

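/*
 * Sketch of how a verifier table is typically defined. All the "foo"
 * names below are hypothetical; see the real tables in the xfs_*.c
 * metadata files for concrete examples:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name = "xfs_foo",
 *		.magic = { cpu_to_be32(XFS_FOO_MAGIC),
 *			   cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read = xfs_foo_read_verify,
 *		.verify_write = xfs_foo_write_verify,
 *		.verify_struct = xfs_foo_verify_struct,
 *	};
 */
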
struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed. The semaphore straddles the cacheline
	 * boundary, but the counter and lock sit on the first cacheline,
	 * which is the only part that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the configured maximum without a
	 * success the write is considered to have failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies value of the first
	 * failure. This means that we can change the retry timeout for
	 * buffers already under I/O and thus avoid getting stuck in a retry
	 * loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors,
	 * not different errors. e.g. a block device might change ENOSPC to
	 * EIO when a failure timeout occurs, so we want to re-initialise the
	 * error retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

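/*
 * Typical single-buffer read pattern (illustrative only; error handling
 * shortened, and "mp", the block range and the ops table are assumptions
 * supplied by the caller):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0, &bp,
 *			     &xfs_example_buf_ops);
 *	if (error)
 *		return error;
 *	... use bp->b_addr ...
 *	xfs_buf_relse(bp);
 */
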
int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

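/*
 * Non-blocking locking pattern (a sketch; what to do on failure is the
 * caller's policy, not something this header mandates):
 *
 *	if (xfs_buf_trylock(bp)) {
 *		... modify the buffer ...
 *		xfs_buf_relse(bp);
 *	} else {
 *		... back off, or block with xfs_buf_lock(bp) ...
 *	}
 *
 * xfs_buf_relse() drops both the lock and the caller's reference.
 */
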
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

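/*
 * Delwri queues batch writes: callers collect buffers on a local list and
 * submit them in one go. A common shape (sketch only):
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	... queue more buffers ...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */
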
static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

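/*
 * Illustrative use of the helper above: after reading metadata that will
 * not be revisited soon, marking the buffer one-shot lets cache reclaim
 * drop it early:
 *
 *	xfs_buf_oneshot(bp);
 *	xfs_buf_relse(bp);
 */
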
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

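/*
 * Sketch of how the checksum helpers pair up inside verifiers. The "foo"
 * names and CRC offset are hypothetical; real verifiers also check magic
 * numbers and structure:
 *
 *	static void xfs_foo_read_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *			xfs_buf_ioerror(bp, -EFSBADCRC);
 *	}
 *
 *	static void xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		... structure checks ...
 *		xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 *	}
 */
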
/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */