/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/buffer_head.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

/*
 * Base types
 */

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

typedef enum {
	XBRW_READ = 1,			/* transfer into target memory */
	XBRW_WRITE = 2,			/* transfer from target memory */
	XBRW_ZERO = 3,			/* zero target memory */
} xfs_buf_rw_t;

#define XBF_READ	 (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1 << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1 << 24)/* async writes have failed on this buffer */

/* I/O hints for the BIO layer */
#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
#define XBF_FUA		 (1 << 11)/* force cache write through mode */
#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */

/* flags used only as arguments to access routines */
#define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1 << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND	 (1 << 23)/* compound buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ XBF_SYNCIO,		"SYNCIO" }, \
	{ XBF_FUA,		"FUA" }, \
	{ XBF_FLUSH,		"FLUSH" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	{ _XBF_COMPOUND,	"COMPOUND" }

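/*
 * Illustrative example (not part of the original header): callers combine
 * the public flags above when requesting a buffer. A cache lookup that
 * must not sleep on the buffer lock and does not need a mapped address
 * might look like:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_get(target, blkno, numblks,
 *			 XBF_TRYLOCK | XBF_UNMAPPED);
 *	if (!bp)
 *		return -EAGAIN;	(lock contention or allocation failure)
 *
 * The XFS_BUF_FLAGS table exists so that tracepoints can render the same
 * bits symbolically (e.g. via __print_flags()).
 */
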
/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
typedef struct xfs_buftarg {
	dev_t			bt_dev;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
} xfs_buftarg_t;

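/*
 * Illustrative sketch (not part of the original header): the two sector
 * sizes are applied as alignment masks. Metadata I/O must be aligned to
 * bt_meta_sectormask, while direct I/O only needs to honour the device's
 * logical sector size. A hypothetical alignment check would be:
 *
 *	static inline bool
 *	xfs_buftarg_dio_aligned(struct xfs_buftarg *btp,
 *				loff_t off, size_t len)
 *	{
 *		return !((off | len) & btp->bt_logical_sectormask);
 *	}
 *
 * xfs_buftarg_dio_aligned() is a made-up name, shown only to demonstrate
 * how the masks are intended to be used.
 */
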
struct xfs_buf;
typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
};

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };

struct xfs_buf_ops {
	char *name;
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};

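/*
 * Example (hypothetical names, for illustration): each metadata type
 * supplies an xfs_buf_ops instance so reads are verified after I/O
 * completion and writes are verified (and CRCs recomputed) before
 * submission:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.verify_read	= xfs_foo_read_verify,
 *		.verify_write	= xfs_foo_write_verify,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 *
 * "xfs_foo" and its verifiers are placeholders; real instances such as
 * xfs_sb_buf_ops live in libxfs.
 */
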
typedef struct xfs_buf {
	/*
	 * The first cacheline holds all the fields needed for an uncontended
	 * cache hit to be fully processed. The semaphore straddles the
	 * cacheline boundary, but the counter and lock sit on the first
	 * cacheline, which is the only part that is touched if we hit the
	 * semaphore fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */
	xfs_daddr_t		b_bn;		/* block number of buffer */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * Concurrent access to b_lru and b_lru_flags is protected by
	 * bt_lru_lock and not by b_sema.
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	xfs_buftarg_t		*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
	struct completion	b_iowait;	/* queue for I/O waiters */
	void			*b_log_item;	/* buffer log item */
	struct xfs_log_item	*b_li_list;	/* log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	int			b_io_length;	/* IO size in BBs */
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset in first page */
	int			b_error;	/* error code on I/O */

	/*
	 * Async write failure retry count. Initialised to zero on the first
	 * failure; when it exceeds the configured maximum without an
	 * intervening success, the write is considered to have failed
	 * permanently and the iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffies of the first failure.
	 * This means that we can change the retry timeout for buffers already
	 * under I/O and thus avoid getting stuck in a retry loop with a long
	 * timeout.
	 *
	 * b_last_error is used to ensure that we are getting repeats of the
	 * same error, not different errors. e.g. a block device might change
	 * ENOSPC to EIO when a failure timeout occurs, so we want to
	 * re-initialise the error retry behaviour appropriately when that
	 * happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;

#ifdef XFS_BUF_LOCK_TRACKING
	int			b_last_holder;
#endif
} xfs_buf_t;

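/*
 * Illustrative sketch (not the actual implementation): roughly how the
 * async write failure fields above are consulted when an error completion
 * comes back; the real logic lives in the buffer item iodone error
 * handling. max_retries and timeout stand in for the configurable limits
 * held in the mount's error configuration:
 *
 *	if (bp->b_last_error != bp->b_error) {
 *		(new error type - reset the retry state and resubmit)
 *		bp->b_last_error = bp->b_error;
 *		bp->b_retries = 0;
 *		bp->b_first_retry_time = jiffies;
 *	} else if (++bp->b_retries > max_retries ||
 *		   time_after(jiffies,
 *			      bp->b_first_retry_time + timeout)) {
 *		(same error repeated for too long - fail permanently)
 *	}
 */
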
/* Finding and Reading Buffers */
struct xfs_buf *_xfs_buf_find(struct xfs_buftarg *target,
			      struct xfs_buf_map *map, int nmaps,
			      xfs_buf_flags_t flags, struct xfs_buf *new_bp);

static inline struct xfs_buf *
xfs_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_find(target, &map, 1, flags, NULL);
}

struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
			       struct xfs_buf_map *map, int nmaps,
			       xfs_buf_flags_t flags);

static inline struct xfs_buf *
xfs_buf_alloc(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return _xfs_buf_alloc(target, &map, 1, flags);
}

struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
				struct xfs_buf_map *map, int nmaps,
				xfs_buf_flags_t flags);
struct xfs_buf *xfs_buf_read_map(struct xfs_buftarg *target,
				 struct xfs_buf_map *map, int nmaps,
				 xfs_buf_flags_t flags,
				 const struct xfs_buf_ops *ops);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
			   struct xfs_buf_map *map, int nmaps,
			   const struct xfs_buf_ops *ops);

static inline struct xfs_buf *
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_get_map(target, &map, 1, flags);
}

static inline struct xfs_buf *
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_read_map(target, &map, 1, flags, ops);
}

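/*
 * Usage example (illustrative): synchronously read one extent of metadata
 * with a verifier attached. mp, blkno and numblks are the caller's; error
 * handling follows the common pattern of checking b_error on the returned
 * buffer:
 *
 *	struct xfs_buf	*bp;
 *
 *	bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0,
 *			  &xfs_sb_buf_ops);
 *	if (!bp)
 *		return -ENOMEM;
 *	if (bp->b_error) {
 *		error = bp->b_error;
 *		xfs_buf_relse(bp);
 *		return error;
 *	}
 *	(use bp->b_addr, then xfs_buf_relse(bp))
 */
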
static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);

struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
				     int flags);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
			  size_t numblks, int flags, struct xfs_buf **bpp,
			  const struct xfs_buf_ops *ops);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_free(xfs_buf_t *);
extern void xfs_buf_rele(xfs_buf_t *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(xfs_buf_t *);
extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

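/*
 * Illustrative pattern: paths that cannot afford to sleep take the buffer
 * lock with xfs_buf_trylock() and back off on contention; xfs_buf_trylock()
 * returns non-zero only when the lock was acquired:
 *
 *	if (!xfs_buf_trylock(bp))
 *		(back off, the buffer is locked by someone else)
 *
 *	ASSERT(xfs_buf_islocked(bp));
 *	(... modify the buffer ...)
 *	xfs_buf_unlock(bp);
 */
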
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
extern void xfs_buf_ioend(struct xfs_buf *bp);
extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
			      xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
extern void xfs_buf_submit(struct xfs_buf *bp);
extern int xfs_buf_submit_wait(struct xfs_buf *bp);
extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
			   xfs_buf_rw_t);
#define xfs_buf_zero(bp, off, len) \
	xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)

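/*
 * Example (illustrative): xfs_buf_iomove() copies between a caller's
 * memory and the buffer's backing pages (which may be unmapped), using
 * the xfs_buf_rw_t modes defined above; xfs_buf_zero() is simply the
 * XBRW_ZERO case. Offsets and lengths are in bytes:
 *
 *	xfs_buf_iomove(bp, boff, sizeof(tmp), &tmp, XBRW_READ);
 *	xfs_buf_iomove(bp, boff, sizeof(tmp), &tmp, XBRW_WRITE);
 *	xfs_buf_zero(bp, boff, BBTOB(bp->b_length) - boff);
 *
 * boff and tmp are hypothetical caller-side variables.
 */
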
/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);

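/*
 * Typical delayed-write pattern (illustrative sketch): queue modified
 * buffers on a caller-private list, then write them out as one batch:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	(for each modified buffer bp)
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 *
 * xfs_buf_delwri_submit() issues the I/O, waits for completion and
 * returns the first error seen; the _nowait variant starts the I/O
 * asynchronously without waiting for it to complete.
 */
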
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
extern void xfs_buf_terminate(void);

/*
 * These macros use the IO block map rather than b_bn. b_bn is now really
 * just the buffer cache index for cached buffers. As IO does not use b_bn
 * anymore, uncached buffers do not use b_bn at all and hence must modify
 * the IO map directly. Uncached buffers are not allowed to be
 * discontiguous, so this is safe to do.
 *
 * In the future, uncached buffers will pass the block number directly to
 * the IO request function and hence these macros will go away at that
 * point.
 */
#define XFS_BUF_ADDR(bp)		((bp)->b_maps[0].bm_bn)
#define XFS_BUF_SET_ADDR(bp, bno)	((bp)->b_maps[0].bm_bn = (xfs_daddr_t)(bno))

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline void xfs_buf_relse(xfs_buf_t *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}

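/*
 * Illustrative write-verifier sketch (hypothetical names): CRC-enabled
 * metadata recomputes its checksum as the final step before the buffer
 * goes to disk, and read verifiers check it first on the way in:
 *
 *	static void
 *	xfs_foo_write_verify(struct xfs_buf *bp)
 *	{
 *		if (!xfs_foo_verify(bp)) {
 *			xfs_buf_ioerror(bp, -EFSCORRUPTED);
 *			return;
 *		}
 *		xfs_buf_update_cksum(bp, FOO_CRC_OFF);
 *	}
 *
 * with the read side calling xfs_buf_verify_cksum(bp, FOO_CRC_OFF) before
 * validating the structure. FOO_CRC_OFF and the helpers are placeholders.
 */
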
/*
 * Handling of buftargs.
 */
extern xfs_buftarg_t *xfs_alloc_buftarg(struct xfs_mount *,
			struct block_device *, struct dax_device *);
extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

#endif	/* __XFS_BUF_H__ */