// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

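/* Workqueue for freeze/thaw work; gfs2_control_wq is owned by lock_dlm. */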
struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

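/*
 * gfs2_ail_error - report an AIL buffer found in an unexpected state and
 * schedule a delayed withdraw of the filesystem.
 */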
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

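/**
 * gfs2_ail_empty_gl - remove all buffers for a given glock from the AIL
 * @gl: the glock
 *
 * Issues revokes for all AIL buffers of @gl and flushes the log so that
 * the revokes reach the journal before the glock can be released.
 */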
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

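/**
 * gfs2_rgrp_go_dump - print out information about a resource group
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 */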
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

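/*
 * gfs2_glock2inode - safely look up the inode behind an inode glock
 *
 * Takes gl_lockref.lock and sets GIF_GLOP_PENDING on the inode so that
 * inode teardown can wait for the pending glock operation to finish
 * (see gfs2_clear_glop_pending()).
 */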
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

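/*
 * gfs2_glock2rgrp - look up the resource group behind an rgrp glock,
 * under gl_lockref.lock.
 */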
struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

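/*
 * gfs2_clear_glop_pending - the glock operation is done; clear
 * GIF_GLOP_PENDING and wake up anyone waiting to tear down the inode.
 */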
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

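/**
 * gfs2_dinode_in - copy an on-disk dinode into an in-core inode
 * @ip: The GFS2 inode
 * @buf: The on-disk dinode buffer
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */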
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	bool is_new = ip->i_inode.i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(&ip->i_inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = mode;
	if (is_new) {
		ip->i_inode.i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
						   be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

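/**
 * inode_go_held - a holder for an inode glock has been granted
 * @gh: The granted glock holder
 *
 * Waits for in-flight direct I/O where required, and resumes an
 * interrupted truncate that is still marked in the inode's disk flags.
 *
 * Returns: errno
 */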
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

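/*
 * iopen_go_demote_ok - only allow the iopen glock to be demoted while no
 * delete work is queued against it.
 */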
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};