/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include <linux/parser.h>
#include "incore.h"
#include "util.h"

/* Options for hostdata parser */

enum {
	Opt_jid,
	Opt_id,
	Opt_first,
	Opt_nodir,
	Opt_err,
};

/*
 * lm_lockname types
 */

#define LM_TYPE_RESERVED	0x00
#define LM_TYPE_NONDISK		0x01
#define LM_TYPE_INODE		0x02
#define LM_TYPE_RGRP		0x03
#define LM_TYPE_META		0x04
#define LM_TYPE_IOPEN		0x05
#define LM_TYPE_FLOCK		0x06
#define LM_TYPE_PLOCK		0x07
#define LM_TYPE_QUOTA		0x08
#define LM_TYPE_JOURNAL		0x09

/*
 * lm_lock() states
 *
 * SHARED is compatible with SHARED, not with DEFERRED or EX.
 * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
 */

#define LM_ST_UNLOCKED		0
#define LM_ST_EXCLUSIVE		1
#define LM_ST_DEFERRED		2
#define LM_ST_SHARED		3

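/*
 * Illustration (editorial, derived from the rules stated above):
 * UNLOCKED is compatible with everything, EXCLUSIVE with nothing.
 *
 *			UN	SH	DF	EX
 *	UNLOCKED	yes	yes	yes	yes
 *	SHARED		yes	yes	no	no
 *	DEFERRED	yes	no	yes	no
 *	EXCLUSIVE	yes	no	no	no
 */
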
/*
 * lm_lock() flags
 *
 * LM_FLAG_TRY
 * Don't wait to acquire the lock if it can't be granted immediately.
 *
 * LM_FLAG_TRY_1CB
 * Send one blocking callback if TRY is set and the lock is not granted.
 *
 * LM_FLAG_NOEXP
 * GFS sets this flag on lock requests it makes while doing journal recovery.
 * These special requests should not be blocked during recovery the way
 * ordinary locks would be.
 *
 * LM_FLAG_ANY
 * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
 * also be granted in SHARED. The preferred state is whichever is compatible
 * with other granted locks, or the specified state if no other locks exist.
 *
 * LM_FLAG_PRIORITY
 * Override fairness considerations. Suppose a lock is held in a shared state
 * and there is a pending request for the deferred state. A shared lock
 * request with the priority flag would be allowed to bypass the deferred
 * request and directly join the other shared lock. A shared lock request
 * without the priority flag might be forced to wait until the deferred
 * request had acquired and released the lock.
 *
 * LM_FLAG_NODE_SCOPE
 * This holder agrees to share the lock within this node. In other words,
 * the glock is held in EX mode according to DLM, but local holders on the
 * same node can share it.
 */

#define LM_FLAG_TRY		0x0001
#define LM_FLAG_TRY_1CB		0x0002
#define LM_FLAG_NOEXP		0x0004
#define LM_FLAG_ANY		0x0008
#define LM_FLAG_PRIORITY	0x0010
#define LM_FLAG_NODE_SCOPE	0x0020
#define GL_ASYNC		0x0040
#define GL_EXACT		0x0080
#define GL_SKIP			0x0100
#define GL_NOCACHE		0x0400

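/*
 * Usage sketch (illustrative, not part of the original header): a
 * non-blocking SHARED request with LM_FLAG_TRY. If the lock cannot be
 * granted immediately, gfs2_glock_nq() returns GLR_TRYFAILED instead of
 * sleeping; "gl" is assumed to be a valid glock obtained elsewhere.
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (error) {
 *		gfs2_holder_uninit(&gh);
 *		if (error == GLR_TRYFAILED)
 *			... back off and retry later ...
 *	}
 */
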
/*
 * lm_async_cb return flags
 *
 * LM_OUT_ST_MASK
 * Masks the lower two bits of lock state in the returned value.
 *
 * LM_OUT_CANCELED
 * The lock request was canceled.
 */

#define LM_OUT_ST_MASK		0x00000003
#define LM_OUT_CANCELED		0x00000008
#define LM_OUT_ERROR		0x00000004

/*
 * lm_recovery_done() messages
 */

#define LM_RD_GAVEUP		308
#define LM_RD_SUCCESS		309

#define GLR_TRYFAILED		13

#define GL_GLOCK_MAX_HOLD	(long)(HZ / 5)
#define GL_GLOCK_DFT_HOLD	(long)(HZ / 5)
#define GL_GLOCK_MIN_HOLD	(long)(10)
#define GL_GLOCK_HOLD_INCR	(long)(HZ / 20)
#define GL_GLOCK_HOLD_DECR	(long)(HZ / 40)

struct lm_lockops {
	const char *lm_proto_name;
	int (*lm_mount) (struct gfs2_sbd *sdp, const char *table);
	void (*lm_first_done) (struct gfs2_sbd *sdp);
	void (*lm_recovery_result) (struct gfs2_sbd *sdp, unsigned int jid,
				    unsigned int result);
	void (*lm_unmount) (struct gfs2_sbd *sdp);
	void (*lm_withdraw) (struct gfs2_sbd *sdp);
	void (*lm_put_lock) (struct gfs2_glock *gl);
	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
			unsigned int flags);
	void (*lm_cancel) (struct gfs2_glock *gl);
	const match_table_t *lm_tokens;
};

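/*
 * Sketch (illustrative, hypothetical names): a lock module fills in only
 * the operations it supports and exposes the table to GFS2. The in-tree
 * DLM module provides gfs2_dlm_ops (declared near the end of this header);
 * the "lock_nolock" protocol is implemented with a similarly minimal table.
 *
 *	static const struct lm_lockops example_ops = {
 *		.lm_proto_name	= "lock_example",
 *		.lm_put_lock	= gfs2_glock_free,
 *		.lm_tokens	= &example_tokens,
 *	};
 */
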
struct gfs2_glock_aspace {
	struct gfs2_glock glock;
	struct address_space mapping;
};

extern struct workqueue_struct *gfs2_delete_workqueue;

static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_lockref.lock);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags))
			continue;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_lockref.lock);

	return gh;
}
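
/*
 * Usage sketch (illustrative): assert that the current task already holds
 * the inode glock before touching state it protects; "ip" is assumed to
 * be a struct gfs2_inode.
 *
 *	struct gfs2_holder *gh = gfs2_glock_is_locked_by_me(ip->i_gl);
 *
 *	GLOCK_BUG_ON(ip->i_gl, gh == NULL);
 */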

static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
{
	if (gl->gl_ops->go_flags & GLOF_ASPACE) {
		struct gfs2_glock_aspace *gla =
			container_of(gl, struct gfs2_glock_aspace, glock);
		return &gla->mapping;
	}
	return NULL;
}

extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
			  const struct gfs2_glock_operations *glops,
			  int create, struct gfs2_glock **glp);
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);

extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
			       u16 flags, struct gfs2_holder *gh,
			       unsigned long ip);
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
				    u16 flags, struct gfs2_holder *gh) {
	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}

extern void gfs2_holder_reinit(unsigned int state, u16 flags,
			       struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
			     const struct gfs2_glock_operations *glops,
			     unsigned int state, u16 flags,
			     struct gfs2_holder *gh);
extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
			    bool fsid);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {		\
			gfs2_dump_glock(NULL, gl, true);	\
			BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) {	\
			gfs2_dump_glock(NULL, gl, true);		\
			gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } }	\
	while (0)

extern __printf(2, 3)
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);

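/*
 * Usage sketch (illustrative): an asynchronous request with GL_ASYNC.
 * gfs2_glock_nq() queues the holder without waiting for the grant;
 * gfs2_glock_poll() or gfs2_glock_wait() collects the result later.
 * "gl" and "gh" are assumed to be set up by the caller.
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error)
 *		error = gfs2_glock_wait(&gh);
 *	if (error)
 *		gfs2_holder_uninit(&gh);
 */
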
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, u16 flags,
				     struct gfs2_holder *gh)
{
	int error;

	__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
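
/*
 * Typical pattern (illustrative): bracket a critical section with
 * gfs2_glock_nq_init() and its counterpart gfs2_glock_dq_uninit(), which
 * dequeues the holder and releases its reference on the glock.
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access data protected by the glock ...
 *	gfs2_glock_dq_uninit(&gh);
 */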

extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl);

extern int __init gfs2_glock_init(void);
extern void gfs2_glock_exit(void);

extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
extern void gfs2_register_debugfs(void);
extern void gfs2_unregister_debugfs(void);

extern const struct lm_lockops gfs2_dlm_ops;

static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
{
	gh->gh_gl = NULL;
}

static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
{
	return gh->gh_gl;
}

static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
{
	return !list_empty(&gh->gh_list);
}
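
/*
 * Usage sketch (illustrative): when a holder is acquired only on some
 * paths, marking it uninitialized up front makes the cleanup
 * unconditional.
 *
 *	gfs2_holder_mark_uninitialized(&gh);
 *	...
 *	if (gfs2_holder_initialized(&gh))
 *		gfs2_glock_dq_uninit(&gh);
 */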

/**
 * glock_set_object - set the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
		gfs2_dump_glock(NULL, gl, true);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * glock_clear_object - clear the gl_object field of a glock
 * @gl: the glock
 * @object: the object
 *
 * I'd love to similarly add this:
 *	else if (gfs2_assert_warn(gl->gl_sbd, gl->gl_object == object))
 *		gfs2_dump_glock(NULL, gl, true);
 * Unfortunately, that's not possible: as soon as gfs2_delete_inode frees
 * the block in the rgrp, another process can reassign it for an I_NEW inode
 * in gfs2_create_inode, which calls new_inode rather than gfs2_iget. That
 * means gfs2_delete_inode may subsequently try to call this function for a
 * glock that's already pointing to a brand new inode. If we cleared the new
 * inode's gl_object, we'd introduce metadata corruption. And it's not just
 * gfs2_delete_inode: it calls clear_inode, which calls gfs2_clear_inode,
 * which also tries to clear gl_object.
 */
static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	if (gl->gl_object == object)
		gl->gl_object = NULL;
	spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_allow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	set_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}

static inline void gfs2_holder_disallow_demote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_lockref.lock);
	clear_bit(HIF_MAY_DEMOTE, &gh->gh_iflags);
	spin_unlock(&gl->gl_lockref.lock);
}
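
/*
 * Usage sketch (illustrative, modeled on the file read/write paths): a
 * section that may fault in user pages allows the holder to be demoted
 * while the fault is serviced, then checks whether the holder survived.
 *
 *	gfs2_holder_allow_demote(&gh);
 *	ret = ... copy to/from user space, may fault ...
 *	gfs2_holder_disallow_demote(&gh);
 *	if (!gfs2_holder_queued(&gh))
 *		... the lock was lost; re-acquire before continuing ...
 */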

extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);

#endif /* __GLOCK_DOT_H__ */