/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#ifndef BTRFS_LOCKING_H
#define BTRFS_LOCKING_H

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/percpu_counter.h>
#include "extent_io.h"

#define BTRFS_WRITE_LOCK 1
#define BTRFS_READ_LOCK 2

/*
 * We are limited in the number of subclasses by MAX_LOCKDEP_SUBCLASSES,
 * which at the time of writing is 8, and we use all of them.  Keep this in
 * mind if you decide you want to add another subclass.
 */
enum btrfs_lock_nesting {
	BTRFS_NESTING_NORMAL,

	/*
	 * When we COW a block we are holding the lock on the original block,
	 * and since our lockdep maps are rootid+level, this confuses lockdep
	 * when we lock the newly allocated COW'd block.  Handle this by having
	 * a subclass for COW'ed blocks so that lockdep doesn't complain.  See
	 * the usage sketch after this enum.
	 */
	BTRFS_NESTING_COW,

	/*
	 * Oftentimes we need to lock adjacent nodes on the same level while
	 * still holding the lock on the original node we searched to, such as
	 * for searching forward or for split/balance.
	 *
	 * Because of this we need to indicate to lockdep that this is
	 * acceptable by having a different subclass for each of these
	 * operations.
	 */
	BTRFS_NESTING_LEFT,
	BTRFS_NESTING_RIGHT,

	/*
	 * When splitting we will be holding a lock on the left/right node when
	 * we need to cow that node, thus we need a new set of subclasses for
	 * these two operations.
	 */
	BTRFS_NESTING_LEFT_COW,
	BTRFS_NESTING_RIGHT_COW,

	/*
	 * When splitting we may push nodes to the left or right, but still use
	 * the subsequent nodes in our path, keeping our locks on those adjacent
	 * blocks.  Thus when we go to allocate a new split block we've already
	 * used up all of our available subclasses, so this subclass exists to
	 * handle this case where we need to allocate a new split block.
	 */
	BTRFS_NESTING_SPLIT,

	/*
	 * When promoting a new block to a root we need to have a special
	 * subclass so we don't confuse lockdep, as it will appear that we are
	 * locking a higher level node before a lower level one.  Copying also
	 * has this problem as it appears we're locking the same block again
	 * when we make a snapshot of an existing root.
	 */
	BTRFS_NESTING_NEW_ROOT,

	/*
	 * We are limited to MAX_LOCKDEP_SUBCLASSES number of subclasses, so
	 * add this in here and add a static_assert to keep us from going over
	 * the limit.  As of this writing we're limited to 8, and we're
	 * definitely using 8, hence this check to keep us from messing up in
	 * the future.
	 */
	BTRFS_NESTING_MAX,
};

static_assert(BTRFS_NESTING_MAX <= MAX_LOCKDEP_SUBCLASSES,
	      "too many lock subclasses defined");

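/*
 * Usage sketch (illustrative only, not from this file; "buf" and "cow" are
 * hypothetical extent buffers): when COWing, the caller already holds the
 * lock on the original block and takes the new copy's lock in a separate
 * subclass so lockdep sees two distinct lock classes:
 *
 *	btrfs_tree_lock(buf);
 *	... allocate "cow", the new copy of "buf" ...
 *	__btrfs_tree_lock(cow, BTRFS_NESTING_COW);
 */
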
struct btrfs_path;

void __btrfs_tree_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_lock(struct extent_buffer *eb);
void btrfs_tree_unlock(struct extent_buffer *eb);

void __btrfs_tree_read_lock(struct extent_buffer *eb, enum btrfs_lock_nesting nest);
void btrfs_tree_read_lock(struct extent_buffer *eb);
void btrfs_tree_read_unlock(struct extent_buffer *eb);
int btrfs_try_tree_read_lock(struct extent_buffer *eb);
int btrfs_try_tree_write_lock(struct extent_buffer *eb);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root);

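/*
 * Illustrative sketch (assumption: as with rwsem trylocks, the try variants
 * return nonzero only when the lock was acquired):
 *
 *	if (btrfs_try_tree_read_lock(eb)) {
 *		... read from the buffer ...
 *		btrfs_tree_read_unlock(eb);
 *	}
 */
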
#ifdef CONFIG_BTRFS_DEBUG
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb)
{
	lockdep_assert_held_write(&eb->lock);
}
#else
static inline void btrfs_assert_tree_write_locked(struct extent_buffer *eb) { }
#endif

void btrfs_unlock_up_safe(struct btrfs_path *path, int level);

/*
 * Unlock an extent buffer using the lock type that was recorded when it was
 * locked (BTRFS_WRITE_LOCK or BTRFS_READ_LOCK); any other value is a bug.
 */
static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
{
	if (rw == BTRFS_WRITE_LOCK)
		btrfs_tree_unlock(eb);
	else if (rw == BTRFS_READ_LOCK)
		btrfs_tree_read_unlock(eb);
	else
		BUG();
}

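/*
 * Illustrative sketch: callers typically record the lock type next to each
 * locked node and hand it back when releasing, e.g. (hypothetical
 * variables):
 *
 *	path->locks[level] = BTRFS_READ_LOCK;
 *	...
 *	btrfs_tree_unlock_rw(path->nodes[level], path->locks[level]);
 */
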
/*
 * A "drew" lock admits any number of readers or any number of writers at a
 * time, but never readers and writers concurrently.
 */
struct btrfs_drew_lock {
	atomic_t readers;
	struct percpu_counter writers;
	wait_queue_head_t pending_writers;
	wait_queue_head_t pending_readers;
};

int btrfs_drew_lock_init(struct btrfs_drew_lock *lock);
void btrfs_drew_lock_destroy(struct btrfs_drew_lock *lock);
void btrfs_drew_write_lock(struct btrfs_drew_lock *lock);
bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_write_unlock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_lock(struct btrfs_drew_lock *lock);
void btrfs_drew_read_unlock(struct btrfs_drew_lock *lock);

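/*
 * Illustrative sketch (hypothetical caller; assumes init returns 0 on
 * success and a negative errno on failure, as the int return suggests):
 *
 *	struct btrfs_drew_lock dl;
 *
 *	if (btrfs_drew_lock_init(&dl))
 *		return -ENOMEM;
 *	btrfs_drew_read_lock(&dl);
 *	... read-side work, may run concurrently with other readers ...
 *	btrfs_drew_read_unlock(&dl);
 *	btrfs_drew_lock_destroy(&dl);
 */
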
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, int level);
void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root, struct extent_buffer *eb);
#else
static inline void btrfs_set_buffer_lockdep_class(u64 objectid,
						  struct extent_buffer *eb, int level)
{
}
static inline void btrfs_maybe_reset_lockdep_class(struct btrfs_root *root,
						   struct extent_buffer *eb)
{
}
#endif

#endif /* BTRFS_LOCKING_H */