]>
Commit | Line | Data |
---|---|---|
b3b94faa DT |
1 | /* |
2 | * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. | |
da6dd40d | 3 | * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved. |
b3b94faa DT |
4 | * |
5 | * This copyrighted material is made available to anyone wishing to use, | |
6 | * modify, copy, or redistribute it subject to the terms and conditions | |
e9fc2aa0 | 7 | * of the GNU General Public License version 2. |
b3b94faa DT |
8 | */ |
9 | ||
10 | #include <linux/sched.h> | |
11 | #include <linux/slab.h> | |
12 | #include <linux/spinlock.h> | |
13 | #include <linux/completion.h> | |
14 | #include <linux/buffer_head.h> | |
5c676f6d | 15 | #include <linux/gfs2_ondisk.h> |
71b86f56 | 16 | #include <linux/crc32.h> |
a25311c8 | 17 | #include <linux/delay.h> |
ec69b188 SW |
18 | #include <linux/kthread.h> |
19 | #include <linux/freezer.h> | |
254db57f | 20 | #include <linux/bio.h> |
885bceca | 21 | #include <linux/blkdev.h> |
4667a0ec | 22 | #include <linux/writeback.h> |
4a36d08d | 23 | #include <linux/list_sort.h> |
b3b94faa DT |
24 | |
25 | #include "gfs2.h" | |
5c676f6d | 26 | #include "incore.h" |
b3b94faa DT |
27 | #include "bmap.h" |
28 | #include "glock.h" | |
29 | #include "log.h" | |
30 | #include "lops.h" | |
31 | #include "meta_io.h" | |
5c676f6d | 32 | #include "util.h" |
71b86f56 | 33 | #include "dir.h" |
63997775 | 34 | #include "trace_gfs2.h" |
b3b94faa | 35 | |
b3b94faa DT |
/**
 * gfs2_struct2blk - compute number of log descriptor blocks needed
 * @sdp: the filesystem
 * @nstruct: the number of structures
 * @ssize: the size of the structures
 *
 * Compute the number of log descriptor blocks needed to hold a certain number
 * of structures of a certain size.
 *
 * Returns: the number of blocks needed (minimum is always 1)
 */
47 | ||
48 | unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct, | |
49 | unsigned int ssize) | |
50 | { | |
51 | unsigned int blks; | |
52 | unsigned int first, second; | |
53 | ||
54 | blks = 1; | |
faa31ce8 | 55 | first = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / ssize; |
b3b94faa DT |
56 | |
57 | if (nstruct > first) { | |
568f4c96 SW |
58 | second = (sdp->sd_sb.sb_bsize - |
59 | sizeof(struct gfs2_meta_header)) / ssize; | |
5c676f6d | 60 | blks += DIV_ROUND_UP(nstruct - first, second); |
b3b94faa DT |
61 | } |
62 | ||
63 | return blks; | |
64 | } | |
65 | ||
1e1a3d03 SW |
/**
 * gfs2_remove_from_ail - Remove an entry from the ail lists, updating counters
 * @bd: The gfs2_bufdata to remove
 *
 * The ail lock _must_ be held when calling this function
 *
 */
74 | ||
void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
{
	/* Detach from the owning transaction and from both ail lists. */
	bd->bd_tr = NULL;
	list_del_init(&bd->bd_ail_st_list);
	list_del_init(&bd->bd_ail_gl_list);
	/* One fewer ail buffer accounted against this glock. */
	atomic_dec(&bd->bd_gl->gl_ail_count);
	/* Release the buffer-head reference held while on the ail. */
	brelse(bd->bd_bh);
}
83 | ||
ddacfaf7 SW |
/**
 * gfs2_ail1_start_one - Start I/O on a part of the AIL
 * @sdp: the filesystem
 * @wbc: The writeback control structure
 * @tr: The transaction whose ail1 list to start writeback on
 *
 */
91 | ||
4f1de018 SW |
static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
			       struct writeback_control *wbc,
			       struct gfs2_trans *tr)
__releases(&sdp->sd_ail_lock)
__acquires(&sdp->sd_ail_lock)
{
	struct gfs2_glock *gl = NULL;
	struct address_space *mapping;
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	/* Walk oldest-first; safe variant since entries move as we go. */
	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list, bd_ail_st_list) {
		bh = bd->bd_bh;

		gfs2_assert(sdp, bd->bd_tr == tr);

		/* I/O has completed: report any error and move to ail2. */
		if (!buffer_busy(bh)) {
			if (!buffer_uptodate(bh))
				gfs2_io_error_bh(sdp, bh);
			list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
			continue;
		}

		if (!buffer_dirty(bh))
			continue;
		/* Skip buffers under the glock we just issued writeback for. */
		if (gl == bd->bd_gl)
			continue;
		gl = bd->bd_gl;
		/* Move to the front so a restarted scan won't revisit it. */
		list_move(&bd->bd_ail_st_list, &tr->tr_ail1_list);
		mapping = bh->b_page->mapping;
		if (!mapping)
			continue;
		/* generic_writepages() may sleep: drop the spinlock around it. */
		spin_unlock(&sdp->sd_ail_lock);
		generic_writepages(mapping, wbc);
		spin_lock(&sdp->sd_ail_lock);
		if (wbc->nr_to_write <= 0)
			break;
		/* Lock was dropped: tell the caller to restart its scan. */
		return 1;
	}

	return 0;
}
ddacfaf7 | 134 | |
ddacfaf7 | 135 | |
4667a0ec SW |
136 | /** |
137 | * gfs2_ail1_flush - start writeback of some ail1 entries | |
138 | * @sdp: The super block | |
139 | * @wbc: The writeback control structure | |
140 | * | |
141 | * Writes back some ail1 entries, according to the limits in the | |
142 | * writeback control structure | |
143 | */ | |
ddacfaf7 | 144 | |
4667a0ec SW |
void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
{
	struct list_head *head = &sdp->sd_ail1_list;
	struct gfs2_trans *tr;
	struct blk_plug plug;

	trace_gfs2_ail_flush(sdp, wbc, 1);
	/* Plug the block queue so the writes issued below can be merged. */
	blk_start_plug(&plug);
	spin_lock(&sdp->sd_ail_lock);
restart:
	/* Oldest transactions first. */
	list_for_each_entry_reverse(tr, head, tr_list) {
		if (wbc->nr_to_write <= 0)
			break;
		/* Non-zero return means the ail lock was dropped: rescan. */
		if (gfs2_ail1_start_one(sdp, wbc, tr))
			goto restart;
	}
	spin_unlock(&sdp->sd_ail_lock);
	blk_finish_plug(&plug);
	trace_gfs2_ail_flush(sdp, wbc, 0);
}
165 | ||
166 | /** | |
167 | * gfs2_ail1_start - start writeback of all ail1 entries | |
168 | * @sdp: The superblock | |
169 | */ | |
170 | ||
171 | static void gfs2_ail1_start(struct gfs2_sbd *sdp) | |
172 | { | |
173 | struct writeback_control wbc = { | |
174 | .sync_mode = WB_SYNC_NONE, | |
175 | .nr_to_write = LONG_MAX, | |
176 | .range_start = 0, | |
177 | .range_end = LLONG_MAX, | |
178 | }; | |
179 | ||
180 | return gfs2_ail1_flush(sdp, &wbc); | |
ddacfaf7 SW |
181 | } |
182 | ||
/**
 * gfs2_ail1_empty_one - Move an AIL trans's synced buffers to its ail2 list
 * @sdp: the filesystem
 * @tr: the AIL transaction
 *
 */
189 | ||
static void gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd, *s;
	struct buffer_head *bh;

	/* Move every buffer whose I/O has completed over to ail2. */
	list_for_each_entry_safe_reverse(bd, s, &tr->tr_ail1_list,
					 bd_ail_st_list) {
		bh = bd->bd_bh;
		gfs2_assert(sdp, bd->bd_tr == tr);
		/* Still locked or under write-out: leave it on ail1. */
		if (buffer_busy(bh))
			continue;
		if (!buffer_uptodate(bh))
			gfs2_io_error_bh(sdp, bh);
		list_move(&bd->bd_ail_st_list, &tr->tr_ail2_list);
	}

}
207 | ||
4667a0ec SW |
208 | /** |
209 | * gfs2_ail1_empty - Try to empty the ail1 lists | |
210 | * @sdp: The superblock | |
211 | * | |
212 | * Tries to empty the ail1 lists, starting with the oldest first | |
213 | */ | |
b3b94faa | 214 | |
static int gfs2_ail1_empty(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr, *s;
	int oldest_tr = 1;
	int ret;

	spin_lock(&sdp->sd_ail_lock);
	/* Walk oldest-first so transactions retire in journal order. */
	list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
		gfs2_ail1_empty_one(sdp, tr);
		/* Only move a fully-synced trans if all older ones moved too. */
		if (list_empty(&tr->tr_ail1_list) && oldest_tr)
			list_move(&tr->tr_list, &sdp->sd_ail2_list);
		else
			oldest_tr = 0;
	}
	ret = list_empty(&sdp->sd_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	/* Non-zero when the ail1 list has been fully drained. */
	return ret;
}
234 | ||
26b06a69 SW |
235 | static void gfs2_ail1_wait(struct gfs2_sbd *sdp) |
236 | { | |
16ca9412 | 237 | struct gfs2_trans *tr; |
26b06a69 SW |
238 | struct gfs2_bufdata *bd; |
239 | struct buffer_head *bh; | |
240 | ||
241 | spin_lock(&sdp->sd_ail_lock); | |
16ca9412 BM |
242 | list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) { |
243 | list_for_each_entry(bd, &tr->tr_ail1_list, bd_ail_st_list) { | |
26b06a69 SW |
244 | bh = bd->bd_bh; |
245 | if (!buffer_locked(bh)) | |
246 | continue; | |
247 | get_bh(bh); | |
248 | spin_unlock(&sdp->sd_ail_lock); | |
249 | wait_on_buffer(bh); | |
250 | brelse(bh); | |
251 | return; | |
252 | } | |
253 | } | |
254 | spin_unlock(&sdp->sd_ail_lock); | |
255 | } | |
ddacfaf7 SW |
256 | |
/**
 * gfs2_ail2_empty_one - Remove all entries from an AIL trans's ail2 list
 * @sdp: the filesystem
 * @tr: the AIL transaction
 *
 */
263 | ||
16ca9412 | 264 | static void gfs2_ail2_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
ddacfaf7 | 265 | { |
16ca9412 | 266 | struct list_head *head = &tr->tr_ail2_list; |
ddacfaf7 SW |
267 | struct gfs2_bufdata *bd; |
268 | ||
269 | while (!list_empty(head)) { | |
270 | bd = list_entry(head->prev, struct gfs2_bufdata, | |
271 | bd_ail_st_list); | |
16ca9412 | 272 | gfs2_assert(sdp, bd->bd_tr == tr); |
f91a0d3e | 273 | gfs2_remove_from_ail(bd); |
ddacfaf7 SW |
274 | } |
275 | } | |
276 | ||
b3b94faa DT |
/* Free every ail2 transaction whose first block lies in the journal span
 * being released (old_tail..new_tail, modulo journal wrap). */
static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	struct gfs2_trans *tr, *safe;
	unsigned int old_tail = sdp->sd_log_tail;
	int wrap = (new_tail < old_tail);
	int a, b, rm;

	spin_lock(&sdp->sd_ail_lock);

	list_for_each_entry_safe(tr, safe, &sdp->sd_ail2_list, tr_list) {
		/* The journal is circular: on wrap the freed span is the
		   union, otherwise the intersection, of the two tests. */
		a = (old_tail <= tr->tr_first);
		b = (tr->tr_first < new_tail);
		rm = (wrap) ? (a || b) : (a && b);
		if (!rm)
			continue;

		gfs2_ail2_empty_one(sdp, tr);
		list_del(&tr->tr_list);
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
		gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
		kfree(tr);
	}

	spin_unlock(&sdp->sd_ail_lock);
}
302 | ||
24972557 BM |
303 | /** |
304 | * gfs2_log_release - Release a given number of log blocks | |
305 | * @sdp: The GFS2 superblock | |
306 | * @blks: The number of blocks | |
307 | * | |
308 | */ | |
309 | ||
void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
{

	atomic_add(blks, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, blks);
	/* The free count can never exceed the size of the journal. */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
				  sdp->sd_jdesc->jd_blocks);
	/* Drop the flush-lock reference held for this reservation. */
	up_read(&sdp->sd_log_flush_lock);
}
319 | ||
b3b94faa DT |
320 | /** |
321 | * gfs2_log_reserve - Make a log reservation | |
322 | * @sdp: The GFS2 superblock | |
323 | * @blks: The number of blocks to reserve | |
324 | * | |
89918647 | 325 | * Note that we never give out the last few blocks of the journal. Thats |
2332c443 | 326 | * due to the fact that there is a small number of header blocks |
b004157a SW |
327 | * associated with each log flush. The exact number can't be known until |
328 | * flush time, so we ensure that we have just enough free blocks at all | |
329 | * times to avoid running out during a log flush. | |
330 | * | |
5e687eac BM |
331 | * We no longer flush the log here, instead we wake up logd to do that |
332 | * for us. To avoid the thundering herd and to ensure that we deal fairly | |
333 | * with queued waiters, we use an exclusive wait. This means that when we | |
334 | * get woken with enough journal space to get our reservation, we need to | |
335 | * wake the next waiter on the list. | |
336 | * | |
b3b94faa DT |
337 | * Returns: errno |
338 | */ | |
339 | ||
int gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks)
{
	int ret = 0;
	/* Blocks kept back for log headers: 7 per 4K of journal block size. */
	unsigned reserved_blks = 7 * (4096 / sdp->sd_vfs->s_blocksize);
	unsigned wanted = blks + reserved_blks;
	DEFINE_WAIT(wait);
	int did_wait = 0;
	unsigned int free_blocks;

	if (gfs2_assert_warn(sdp, blks) ||
	    gfs2_assert_warn(sdp, blks <= sdp->sd_jdesc->jd_blocks))
		return -EINVAL;
	atomic_add(blks, &sdp->sd_log_blks_needed);
retry:
	free_blocks = atomic_read(&sdp->sd_log_blks_free);
	if (unlikely(free_blocks <= wanted)) {
		/* Not enough space: kick logd and wait (exclusively, to
		 * avoid a thundering herd) until enough blocks are free. */
		do {
			prepare_to_wait_exclusive(&sdp->sd_log_waitq, &wait,
						  TASK_UNINTERRUPTIBLE);
			wake_up(&sdp->sd_logd_waitq);
			did_wait = 1;
			if (atomic_read(&sdp->sd_log_blks_free) <= wanted)
				io_schedule();
			free_blocks = atomic_read(&sdp->sd_log_blks_free);
		} while(free_blocks <= wanted);
		finish_wait(&sdp->sd_log_waitq, &wait);
	}
	atomic_inc(&sdp->sd_reserving_log);
	/* Claim the blocks; if another reserver raced us, start over. */
	if (atomic_cmpxchg(&sdp->sd_log_blks_free, free_blocks,
			   free_blocks - blks) != free_blocks) {
		if (atomic_dec_and_test(&sdp->sd_reserving_log))
			wake_up(&sdp->sd_reserving_log_wait);
		goto retry;
	}
	atomic_sub(blks, &sdp->sd_log_blks_needed);
	trace_gfs2_log_blocks(sdp, -blks);

	/*
	 * If we waited, then so might others, wake them up _after_ we get
	 * our share of the log.
	 */
	if (unlikely(did_wait))
		wake_up(&sdp->sd_log_waitq);

	down_read(&sdp->sd_log_flush_lock);
	/* Journal died while we waited: give the blocks back and fail. */
	if (unlikely(!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))) {
		gfs2_log_release(sdp, blks);
		ret = -EROFS;
	}
	if (atomic_dec_and_test(&sdp->sd_reserving_log))
		wake_up(&sdp->sd_reserving_log_wait);
	return ret;
}
393 | ||
b3b94faa DT |
394 | /** |
395 | * log_distance - Compute distance between two journal blocks | |
396 | * @sdp: The GFS2 superblock | |
397 | * @newer: The most recent journal block of the pair | |
398 | * @older: The older journal block of the pair | |
399 | * | |
400 | * Compute the distance (in the journal direction) between two | |
401 | * blocks in the journal | |
402 | * | |
403 | * Returns: the distance in blocks | |
404 | */ | |
405 | ||
faa31ce8 | 406 | static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer, |
b3b94faa DT |
407 | unsigned int older) |
408 | { | |
409 | int dist; | |
410 | ||
411 | dist = newer - older; | |
412 | if (dist < 0) | |
413 | dist += sdp->sd_jdesc->jd_blocks; | |
414 | ||
415 | return dist; | |
416 | } | |
417 | ||
2332c443 RP |
418 | /** |
419 | * calc_reserved - Calculate the number of blocks to reserve when | |
420 | * refunding a transaction's unused buffers. | |
421 | * @sdp: The GFS2 superblock | |
422 | * | |
423 | * This is complex. We need to reserve room for all our currently used | |
424 | * metadata buffers (e.g. normal file I/O rewriting file time stamps) and | |
425 | * all our journaled data buffers for journaled files (e.g. files in the | |
426 | * meta_fs like rindex, or files for which chattr +j was done.) | |
427 | * If we don't reserve enough space, gfs2_log_refund and gfs2_log_flush | |
428 | * will count it as free space (sd_log_blks_free) and corruption will follow. | |
429 | * | |
430 | * We can have metadata bufs and jdata bufs in the same journal. So each | |
431 | * type gets its own log header, for which we need to reserve a block. | |
432 | * In fact, each type has the potential for needing more than one header | |
433 | * in cases where we have more buffers than will fit on a journal page. | |
434 | * Metadata journal entries take up half the space of journaled buffer entries. | |
435 | * Thus, metadata entries have buf_limit (502) and journaled buffers have | |
436 | * databuf_limit (251) before they cause a wrap around. | |
437 | * | |
438 | * Also, we need to reserve blocks for revoke journal entries and one for an | |
439 | * overall header for the lot. | |
440 | * | |
441 | * Returns: the number of blocks reserved | |
442 | */ | |
static unsigned int calc_reserved(struct gfs2_sbd *sdp)
{
	unsigned int reserved = 0;
	unsigned int mbuf;
	unsigned int dbuf;
	struct gfs2_trans *tr = sdp->sd_log_tr;

	if (tr) {
		/* Buffers still outstanding in the current transaction. */
		mbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
		dbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
		reserved = mbuf + dbuf;
		/* Account for header blocks */
		reserved += DIV_ROUND_UP(mbuf, buf_limit(sdp));
		reserved += DIV_ROUND_UP(dbuf, databuf_limit(sdp));
	}

	/* Space for the outstanding revokes: one u64 per revoked block. */
	if (sdp->sd_log_commited_revoke > 0)
		reserved += gfs2_struct2blk(sdp, sdp->sd_log_commited_revoke,
					    sizeof(u64));
	/* One for the overall header */
	if (reserved)
		reserved++;
	return reserved;
}
467 | ||
b3b94faa DT |
/* Return the current journal tail: the first block of the oldest ail1
 * transaction, or the log head if nothing is in flight. */
static unsigned int current_tail(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	unsigned int tail;

	spin_lock(&sdp->sd_ail_lock);

	if (list_empty(&sdp->sd_ail1_list)) {
		tail = sdp->sd_log_head;
	} else {
		/* Oldest transaction is at the tail of the ail1 list. */
		tr = list_entry(sdp->sd_ail1_list.prev, struct gfs2_trans,
				tr_list);
		tail = tr->tr_first;
	}

	spin_unlock(&sdp->sd_ail_lock);

	return tail;
}
487 | ||
/* Advance the journal tail to @new_tail, freeing the blocks in between. */
static void log_pull_tail(struct gfs2_sbd *sdp, unsigned int new_tail)
{
	unsigned int dist = log_distance(sdp, new_tail, sdp->sd_log_tail);

	/* Drop ail2 transactions that fall inside the span being freed. */
	ail2_empty(sdp, new_tail);

	atomic_add(dist, &sdp->sd_log_blks_free);
	trace_gfs2_log_blocks(sdp, dist);
	/* Free count must stay within the size of the journal. */
	gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
			     sdp->sd_jdesc->jd_blocks);

	sdp->sd_log_tail = new_tail;
}
501 | ||
b3b94faa | 502 | |
/* Sleep until all in-flight log I/O has completed. */
static void log_flush_wait(struct gfs2_sbd *sdp)
{
	DEFINE_WAIT(wait);

	if (atomic_read(&sdp->sd_log_in_flight)) {
		do {
			prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			/* Re-check after queuing to avoid a missed wakeup. */
			if (atomic_read(&sdp->sd_log_in_flight))
				io_schedule();
		} while(atomic_read(&sdp->sd_log_in_flight));
		finish_wait(&sdp->sd_log_flush_wait, &wait);
	}
}
517 | ||
45138990 | 518 | static int ip_cmp(void *priv, struct list_head *a, struct list_head *b) |
4a36d08d | 519 | { |
45138990 | 520 | struct gfs2_inode *ipa, *ipb; |
4a36d08d | 521 | |
45138990 SW |
522 | ipa = list_entry(a, struct gfs2_inode, i_ordered); |
523 | ipb = list_entry(b, struct gfs2_inode, i_ordered); | |
4a36d08d | 524 | |
45138990 | 525 | if (ipa->i_no_addr < ipb->i_no_addr) |
4a36d08d | 526 | return -1; |
45138990 | 527 | if (ipa->i_no_addr > ipb->i_no_addr) |
4a36d08d BP |
528 | return 1; |
529 | return 0; | |
530 | } | |
531 | ||
d7b616e2 SW |
/* Start writeback on all ordered-mode inodes with dirty pages. */
static void gfs2_ordered_write(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	LIST_HEAD(written);

	spin_lock(&sdp->sd_ordered_lock);
	/* Sort by disk address so writeback is issued in disk order. */
	list_sort(NULL, &sdp->sd_log_le_ordered, &ip_cmp);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_move(&ip->i_ordered, &written);
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* filemap_fdatawrite() may sleep: drop the lock around it. */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawrite(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	/* Put the processed inodes back for gfs2_ordered_wait(). */
	list_splice(&written, &sdp->sd_log_le_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
551 | ||
/* Wait for the ordered-mode writeback started by gfs2_ordered_write(),
 * removing each inode from the ordered list as it completes. */
static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	spin_lock(&sdp->sd_ordered_lock);
	while (!list_empty(&sdp->sd_log_le_ordered)) {
		ip = list_entry(sdp->sd_log_le_ordered.next, struct gfs2_inode, i_ordered);
		list_del(&ip->i_ordered);
		/* Flag and list membership must agree. */
		WARN_ON(!test_and_clear_bit(GIF_ORDERED, &ip->i_flags));
		if (ip->i_inode.i_mapping->nrpages == 0)
			continue;
		/* filemap_fdatawait() sleeps: drop the lock around it. */
		spin_unlock(&sdp->sd_ordered_lock);
		filemap_fdatawait(ip->i_inode.i_mapping);
		spin_lock(&sdp->sd_ordered_lock);
	}
	spin_unlock(&sdp->sd_ordered_lock);
}
569 | ||
/* Remove an inode from the ordered-write list, if it is queued there. */
void gfs2_ordered_del_inode(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);

	spin_lock(&sdp->sd_ordered_lock);
	if (test_and_clear_bit(GIF_ORDERED, &ip->i_flags))
		list_del(&ip->i_ordered);
	spin_unlock(&sdp->sd_ordered_lock);
}
579 | ||
5d054964 BM |
/* Turn a bufdata into a revoke entry and queue it on the revoke list. */
void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct buffer_head *bh = bd->bd_bh;
	struct gfs2_glock *gl = bd->bd_gl;

	bh->b_private = NULL;
	/* Remember the block number; the bh itself is released below. */
	bd->bd_blkno = bh->b_blocknr;
	gfs2_remove_from_ail(bd); /* drops ref on bh */
	bd->bd_bh = NULL;
	bd->bd_ops = &gfs2_revoke_lops;
	sdp->sd_log_num_revoke++;
	/* First revoke against this glock: take a reference on it. */
	if (atomic_inc_return(&gl->gl_revokes) == 1)
		gfs2_glock_hold(gl);
	set_bit(GLF_LFLUSH, &gl->gl_flags);
	list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
}
596 | ||
cb9e778a BP |
void gfs2_glock_remove_revoke(struct gfs2_glock *gl)
{
	/* Last revoke gone: drop the ref taken in gfs2_add_revoke(). */
	if (atomic_dec_return(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		gfs2_glock_queue_put(gl);
	}
}
604 | ||
5d054964 BM |
/* Opportunistically turn completed (ail2) buffers into revoke entries,
 * bounded by the number of revokes that fit in the available log blocks. */
void gfs2_write_revokes(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr;
	struct gfs2_bufdata *bd, *tmp;
	int have_revokes = 0;
	/* Revokes (u64 block numbers) that fit in one descriptor block. */
	int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);

	gfs2_ail1_empty(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* First pass: is there any ail2 buffer eligible for a revoke?
	   (bd_list empty means it is not already queued as a log element.) */
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry(bd, &tr->tr_ail2_list, bd_ail_st_list) {
			if (list_empty(&bd->bd_list)) {
				have_revokes = 1;
				goto done;
			}
		}
	}
done:
	spin_unlock(&sdp->sd_ail_lock);
	if (have_revokes == 0)
		return;
	/* Grow the budget in continuation-block steps until it covers the
	   already-pending revokes, then keep only the remaining headroom. */
	while (sdp->sd_log_num_revoke > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
	max_revokes -= sdp->sd_log_num_revoke;
	if (!sdp->sd_log_num_revoke) {
		atomic_dec(&sdp->sd_log_blks_free);
		/* If no blocks have been reserved, we need to also
		 * reserve a block for the header */
		if (!sdp->sd_log_blks_reserved)
			atomic_dec(&sdp->sd_log_blks_free);
	}
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* Second pass: queue revokes until the block budget runs out. */
	list_for_each_entry(tr, &sdp->sd_ail1_list, tr_list) {
		list_for_each_entry_safe(bd, tmp, &tr->tr_ail2_list, bd_ail_st_list) {
			if (max_revokes == 0)
				goto out_of_blocks;
			if (!list_empty(&bd->bd_list))
				continue;
			gfs2_add_revoke(sdp, bd);
			max_revokes--;
		}
	}
out_of_blocks:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	/* Nothing got queued after all: give back the block(s) we took. */
	if (!sdp->sd_log_num_revoke) {
		atomic_inc(&sdp->sd_log_blks_free);
		if (!sdp->sd_log_blks_reserved)
			atomic_inc(&sdp->sd_log_blks_free);
	}
}
658 | ||
34cc1781 SW |
659 | /** |
660 | * log_write_header - Get and initialize a journal header buffer | |
661 | * @sdp: The GFS2 superblock | |
662 | * | |
663 | * Returns: the initialized log buffer descriptor | |
664 | */ | |
665 | ||
static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
{
	struct gfs2_log_header *lh;
	unsigned int tail;
	u32 hash;
	int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC;
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
	lh = page_address(page);
	clear_page(lh);

	/* We should never be asked to write a header while frozen. */
	gfs2_assert_withdraw(sdp, (state != SFS_FROZEN));

	tail = current_tail(sdp);

	/* Fill in the on-disk (big-endian) log header fields. */
	lh->lh_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	lh->lh_header.mh_type = cpu_to_be32(GFS2_METATYPE_LH);
	lh->lh_header.__pad0 = cpu_to_be64(0);
	lh->lh_header.mh_format = cpu_to_be32(GFS2_FORMAT_LH);
	lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	lh->lh_sequence = cpu_to_be64(sdp->sd_log_sequence++);
	lh->lh_flags = cpu_to_be32(flags);
	lh->lh_tail = cpu_to_be32(tail);
	lh->lh_blkno = cpu_to_be32(sdp->sd_log_flush_head);
	hash = gfs2_disk_hash(page_address(page), sizeof(struct gfs2_log_header));
	lh->lh_hash = cpu_to_be32(hash);

	/* Without barriers, wait for everything explicitly instead of
	   relying on REQ_PREFLUSH/REQ_FUA ordering. */
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
		gfs2_ordered_wait(sdp);
		log_flush_wait(sdp);
		op_flags = REQ_SYNC | REQ_META | REQ_PRIO;
	}

	sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
	gfs2_log_write_page(sdp, page);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
	log_flush_wait(sdp);

	/* The tail recorded in this header may let us free journal space. */
	if (sdp->sd_log_tail != tail)
		log_pull_tail(sdp, tail);
}
707 | ||
b3b94faa | 708 | /** |
b09e593d | 709 | * gfs2_log_flush - flush incore transaction(s) |
b3b94faa DT |
710 | * @sdp: the filesystem |
711 | * @gl: The glock structure to flush. If NULL, flush the whole incore log | |
712 | * | |
713 | */ | |
714 | ||
24972557 BM |
void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
		    enum gfs2_flush_type type)
{
	struct gfs2_trans *tr;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	down_write(&sdp->sd_log_flush_lock);

	/* Log might have been flushed while we waited for the flush lock */
	if (gl && !test_bit(GLF_LFLUSH, &gl->gl_flags)) {
		up_write(&sdp->sd_log_flush_lock);
		return;
	}
	trace_gfs2_log_flush(sdp, 1);

	if (type == SHUTDOWN_FLUSH)
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	sdp->sd_log_flush_head = sdp->sd_log_head;
	/* Detach the current incore transaction; it is committed below. */
	tr = sdp->sd_log_tr;
	if (tr) {
		sdp->sd_log_tr = NULL;
		tr->tr_first = sdp->sd_log_flush_head;
		if (unlikely (state == SFS_FROZEN))
			gfs2_assert_withdraw(sdp, !tr->tr_num_buf_new && !tr->tr_num_databuf_new);
	}

	if (unlikely(state == SFS_FROZEN))
		gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
	gfs2_assert_withdraw(sdp,
			sdp->sd_log_num_revoke == sdp->sd_log_commited_revoke);

	/* Write ordered data first, then the journaled log elements. */
	gfs2_ordered_write(sdp);
	lops_before_commit(sdp, tr);
	gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);

	if (sdp->sd_log_head != sdp->sd_log_flush_head) {
		log_flush_wait(sdp);
		log_write_header(sdp, 0);
	} else if (sdp->sd_log_tail != current_tail(sdp) && !sdp->sd_log_idle){
		atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
		trace_gfs2_log_blocks(sdp, -1);
		log_write_header(sdp, 0);
	}
	lops_after_commit(sdp, tr);

	gfs2_log_lock(sdp);
	sdp->sd_log_head = sdp->sd_log_flush_head;
	sdp->sd_log_blks_reserved = 0;
	sdp->sd_log_commited_revoke = 0;

	spin_lock(&sdp->sd_ail_lock);
	/* If buffers are still in flight, the ail1 list takes over tr. */
	if (tr && !list_empty(&tr->tr_ail1_list)) {
		list_add(&tr->tr_list, &sdp->sd_ail1_list);
		tr = NULL;
	}
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);

	if (type != NORMAL_FLUSH) {
		/* Shutdown/freeze: push everything out of the ail first. */
		if (!sdp->sd_log_idle) {
			for (;;) {
				gfs2_ail1_start(sdp);
				gfs2_ail1_wait(sdp);
				if (gfs2_ail1_empty(sdp))
					break;
			}
			atomic_dec(&sdp->sd_log_blks_free); /* Adjust for unreserved buffer */
			trace_gfs2_log_blocks(sdp, -1);
			log_write_header(sdp, 0);
			sdp->sd_log_head = sdp->sd_log_flush_head;
		}
		if (type == SHUTDOWN_FLUSH || type == FREEZE_FLUSH)
			gfs2_log_shutdown(sdp);
		if (type == FREEZE_FLUSH)
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}

	trace_gfs2_log_flush(sdp, 0);
	up_write(&sdp->sd_log_flush_lock);

	/* NULL (no-op) if tr was handed off to the ail1 list above. */
	kfree(tr);
}
798 | ||
d69a3c65 SW |
799 | /** |
800 | * gfs2_merge_trans - Merge a new transaction into a cached transaction | |
801 | * @old: Original transaction to be expanded | |
802 | * @new: New transaction to be merged | |
803 | */ | |
804 | ||
static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
{
	struct gfs2_trans *old = sdp->sd_log_tr;

	WARN_ON_ONCE(!test_bit(TR_ATTACHED, &old->tr_flags));

	/* Fold the new transaction's counters into the cached one. */
	old->tr_num_buf_new += new->tr_num_buf_new;
	old->tr_num_databuf_new += new->tr_num_databuf_new;
	old->tr_num_buf_rm += new->tr_num_buf_rm;
	old->tr_num_databuf_rm += new->tr_num_databuf_rm;
	old->tr_num_revoke += new->tr_num_revoke;
	old->tr_num_revoke_rm += new->tr_num_revoke_rm;

	list_splice_tail_init(&new->tr_databuf, &old->tr_databuf);
	list_splice_tail_init(&new->tr_buf, &old->tr_buf);

	/* The ail lists are walked under sd_ail_lock: splice under it. */
	spin_lock(&sdp->sd_ail_lock);
	list_splice_tail_init(&new->tr_ail1_list, &old->tr_ail1_list);
	list_splice_tail_init(&new->tr_ail2_list, &old->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
}
826 | ||
b3b94faa DT |
827 | static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) |
828 | { | |
2332c443 | 829 | unsigned int reserved; |
ac39aadd | 830 | unsigned int unused; |
022ef4fe | 831 | unsigned int maxres; |
b3b94faa DT |
832 | |
833 | gfs2_log_lock(sdp); | |
834 | ||
022ef4fe | 835 | if (sdp->sd_log_tr) { |
04017f66 | 836 | gfs2_merge_trans(sdp, tr); |
022ef4fe | 837 | } else if (tr->tr_num_buf_new || tr->tr_num_databuf_new) { |
9862ca05 | 838 | gfs2_assert_withdraw(sdp, test_bit(TR_ALLOCED, &tr->tr_flags)); |
022ef4fe | 839 | sdp->sd_log_tr = tr; |
9862ca05 | 840 | set_bit(TR_ATTACHED, &tr->tr_flags); |
022ef4fe SW |
841 | } |
842 | ||
b3b94faa | 843 | sdp->sd_log_commited_revoke += tr->tr_num_revoke - tr->tr_num_revoke_rm; |
2332c443 | 844 | reserved = calc_reserved(sdp); |
022ef4fe SW |
845 | maxres = sdp->sd_log_blks_reserved + tr->tr_reserved; |
846 | gfs2_assert_withdraw(sdp, maxres >= reserved); | |
847 | unused = maxres - reserved; | |
ac39aadd | 848 | atomic_add(unused, &sdp->sd_log_blks_free); |
63997775 | 849 | trace_gfs2_log_blocks(sdp, unused); |
fd041f0b | 850 | gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <= |
2332c443 | 851 | sdp->sd_jdesc->jd_blocks); |
b3b94faa DT |
852 | sdp->sd_log_blks_reserved = reserved; |
853 | ||
854 | gfs2_log_unlock(sdp); | |
855 | } | |
856 | ||
857 | /** | |
858 | * gfs2_log_commit - Commit a transaction to the log | |
859 | * @sdp: the filesystem | |
860 | * @tr: the transaction | |
861 | * | |
5e687eac BM |
862 | * We wake up gfs2_logd if the number of pinned blocks exceed thresh1 |
863 | * or the total number of used blocks (pinned blocks plus AIL blocks) | |
864 | * is greater than thresh2. | |
865 | * | |
866 | * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of | |
867 | * journal size. | |
868 | * | |
b3b94faa DT |
869 | * Returns: errno |
870 | */ | |
871 | ||
872 | void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) | |
873 | { | |
874 | log_refund(sdp, tr); | |
b3b94faa | 875 | |
5e687eac BM |
876 | if (atomic_read(&sdp->sd_log_pinned) > atomic_read(&sdp->sd_log_thresh1) || |
877 | ((sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free)) > | |
878 | atomic_read(&sdp->sd_log_thresh2))) | |
879 | wake_up(&sdp->sd_logd_waitq); | |
b3b94faa DT |
880 | } |
881 | ||
882 | /** | |
883 | * gfs2_log_shutdown - write a shutdown header into a journal | |
884 | * @sdp: the filesystem | |
885 | * | |
886 | */ | |
887 | ||
888 | void gfs2_log_shutdown(struct gfs2_sbd *sdp) | |
889 | { | |
b3b94faa | 890 | gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved); |
b3b94faa | 891 | gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke); |
b3b94faa DT |
892 | gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list)); |
893 | ||
894 | sdp->sd_log_flush_head = sdp->sd_log_head; | |
b3b94faa | 895 | |
fdb76a42 | 896 | log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT); |
b3b94faa | 897 | |
a74604be SW |
898 | gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail); |
899 | gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list)); | |
b3b94faa DT |
900 | |
901 | sdp->sd_log_head = sdp->sd_log_flush_head; | |
b3b94faa | 902 | sdp->sd_log_tail = sdp->sd_log_head; |
a25311c8 SW |
903 | } |
904 | ||
5e687eac BM |
905 | static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp) |
906 | { | |
f07b3520 BP |
907 | return (atomic_read(&sdp->sd_log_pinned) + |
908 | atomic_read(&sdp->sd_log_blks_needed) >= | |
909 | atomic_read(&sdp->sd_log_thresh1)); | |
5e687eac BM |
910 | } |
911 | ||
912 | static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp) | |
913 | { | |
914 | unsigned int used_blocks = sdp->sd_jdesc->jd_blocks - atomic_read(&sdp->sd_log_blks_free); | |
b066a4ee AD |
915 | |
916 | if (test_and_clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags)) | |
917 | return 1; | |
918 | ||
f07b3520 BP |
919 | return used_blocks + atomic_read(&sdp->sd_log_blks_needed) >= |
920 | atomic_read(&sdp->sd_log_thresh2); | |
5e687eac | 921 | } |
ec69b188 SW |
922 | |
923 | /** | |
924 | * gfs2_logd - Update log tail as Active Items get flushed to in-place blocks | |
925 | * @sdp: Pointer to GFS2 superblock | |
926 | * | |
927 | * Also, periodically check to make sure that we're using the most recent | |
928 | * journal index. | |
929 | */ | |
930 | ||
931 | int gfs2_logd(void *data) | |
932 | { | |
933 | struct gfs2_sbd *sdp = data; | |
5e687eac BM |
934 | unsigned long t = 1; |
935 | DEFINE_WAIT(wait); | |
b63f5e84 | 936 | bool did_flush; |
ec69b188 SW |
937 | |
938 | while (!kthread_should_stop()) { | |
ec69b188 | 939 | |
942b0cdd BP |
940 | /* Check for errors writing to the journal */ |
941 | if (sdp->sd_log_error) { | |
942 | gfs2_lm_withdraw(sdp, | |
943 | "GFS2: fsid=%s: error %d: " | |
944 | "withdrawing the file system to " | |
945 | "prevent further damage.\n", | |
946 | sdp->sd_fsname, sdp->sd_log_error); | |
947 | } | |
948 | ||
b63f5e84 | 949 | did_flush = false; |
5e687eac | 950 | if (gfs2_jrnl_flush_reqd(sdp) || t == 0) { |
4667a0ec | 951 | gfs2_ail1_empty(sdp); |
24972557 | 952 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); |
b63f5e84 | 953 | did_flush = true; |
5e687eac | 954 | } |
ec69b188 | 955 | |
5e687eac BM |
956 | if (gfs2_ail_flush_reqd(sdp)) { |
957 | gfs2_ail1_start(sdp); | |
26b06a69 | 958 | gfs2_ail1_wait(sdp); |
4667a0ec | 959 | gfs2_ail1_empty(sdp); |
24972557 | 960 | gfs2_log_flush(sdp, NULL, NORMAL_FLUSH); |
b63f5e84 | 961 | did_flush = true; |
ec69b188 SW |
962 | } |
963 | ||
b63f5e84 | 964 | if (!gfs2_ail_flush_reqd(sdp) || did_flush) |
26b06a69 SW |
965 | wake_up(&sdp->sd_log_waitq); |
966 | ||
ec69b188 | 967 | t = gfs2_tune_get(sdp, gt_logd_secs) * HZ; |
a0acae0e TH |
968 | |
969 | try_to_freeze(); | |
5e687eac BM |
970 | |
971 | do { | |
972 | prepare_to_wait(&sdp->sd_logd_waitq, &wait, | |
5f487490 | 973 | TASK_INTERRUPTIBLE); |
5e687eac BM |
974 | if (!gfs2_ail_flush_reqd(sdp) && |
975 | !gfs2_jrnl_flush_reqd(sdp) && | |
976 | !kthread_should_stop()) | |
977 | t = schedule_timeout(t); | |
978 | } while(t && !gfs2_ail_flush_reqd(sdp) && | |
979 | !gfs2_jrnl_flush_reqd(sdp) && | |
980 | !kthread_should_stop()); | |
981 | finish_wait(&sdp->sd_logd_waitq, &wait); | |
ec69b188 SW |
982 | } |
983 | ||
984 | return 0; | |
985 | } | |
986 |