// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 * Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31)/* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

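/*
 * Usage sketch (illustrative only, not part of this interface): b_state is
 * protected by the buffer's internal b_lock spinlock rather than b_sema, so
 * state bits are manipulated under that lock, along the lines of:
 *
 *	spin_lock(&bp->b_lock);
 *	bp->b_state |= XFS_BSTATE_DISPOSE;
 *	spin_unlock(&bp->b_lock);
 */
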
/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;
} xfs_buftarg_t;

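/*
 * Usage sketch (illustrative only): each mask is its sector size minus one,
 * so a direct I/O request is checked against the device's logical sector
 * alignment along the lines of:
 *
 *	if ((pos | count) & btp->bt_logical_sectormask)
 *		return -EINVAL;
 */
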
#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

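/*
 * Sketch (illustrative only): discontiguous ("compound") buffers pass an
 * array of maps instead of the single-map helper above:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = daddr0, .bm_len = len0 },
 *		{ .bm_bn = daddr1, .bm_len = len1 },
 *	};
 *	error = xfs_buf_get_map(btp, map, 2, 0, &bp);
 */
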
struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

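/*
 * Sketch of a verifier table (hypothetical "foo" names, for illustration
 * only; real tables live next to the on-disk structures they verify):
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 */
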
struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only bit that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffy of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};

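/*
 * Sketch (illustrative only) of how the retry state above can drive the
 * permanent-failure decision; the real policy is implemented in the buffer
 * error handling code, with configurable limits:
 *
 *	if (bp->b_error != bp->b_last_error) {
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *		bp->b_last_error = bp->b_error;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies, bp->b_first_retry_time + timeout)) {
 *		... treat the write as permanently failed ...
 *	}
 */
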
/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}

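/*
 * Typical read pattern (sketch only; the target, ops and error handling are
 * caller specific):
 *
 *	struct xfs_buf	*bp;
 *	int		error;
 *
 *	error = xfs_buf_read(btp, blkno, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	... examine bp->b_addr ...
 *	xfs_buf_relse(bp);
 */
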
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

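/*
 * Sketch (illustrative only): non-blocking callers pair xfs_buf_trylock()
 * with an early bail-out instead of sleeping in xfs_buf_lock():
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	... work on the locked buffer ...
 *	xfs_buf_unlock(bp);
 */
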
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

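/*
 * Delayed write usage sketch (illustrative only): buffers are queued onto a
 * caller-owned list and later submitted as a single batch:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	... queue further buffers ...
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */
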
static inline xfs_daddr_t xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}

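/*
 * Sketch (illustrative only): a caller that knows it will not revisit a
 * buffer marks it one-shot before release so it is reclaimed quickly rather
 * than displacing hotter buffers from the LRU:
 *
 *	xfs_buf_oneshot(bp);
 *	xfs_buf_relse(bp);
 */
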
static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

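/*
 * Checksum usage sketch (illustrative only; XFS_FOO_CRC_OFF stands for a
 * hypothetical offsetof() of the CRC field in the on-disk structure): read
 * verifiers check the CRC, write verifiers recompute it:
 *
 *	if (!xfs_buf_verify_cksum(bp, XFS_FOO_CRC_OFF))
 *		xfs_buf_ioerror(bp, -EFSBADCRC);
 *	...
 *	xfs_buf_update_cksum(bp, XFS_FOO_CRC_OFF);
 */
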
/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

#endif	/* __XFS_BUF_H__ */