// SPDX-License-Identifier: GPL-2.0-only
/*
 * bitmap.c two-level bitmap (C) Peter T. Breuer (ptb@ot.uc3m.es) 2003
 *
 * bitmap_create  - sets up the bitmap structure
 * bitmap_destroy - destroys the bitmap structure
 *
 * additions, Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.:
 * - added disk storage for bitmap
 * - changes to allow various bitmap chunk sizes
 */

/*
 * Still to do:
 *
 * flush after percent set rather than just time based. (maybe both).
 */
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/buffer_head.h>
#include <linux/seq_file.h>
#include <trace/events/block.h>
#include "md.h"
#include "md-bitmap.h"
static inline char *bmname(struct bitmap *bitmap)
{
    return bitmap->mddev ? mdname(bitmap->mddev) : "mdX";
}
/*
 * check a page and, if necessary, allocate it (or hijack it if the alloc fails)
 *
 * 1) check to see if this page is allocated, if it's not then try to alloc
 * 2) if the alloc fails, set the page's hijacked flag so we'll use the
 *    page pointer directly as a counter
 *
 * if we find our page, we increment the page's refcount so that it stays
 * allocated while we're using it
 */
static int md_bitmap_checkpage(struct bitmap_counts *bitmap,
                               unsigned long page, int create, int no_hijack)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
    unsigned char *mappage;

    if (page >= bitmap->pages) {
        /* This can happen if bitmap_start_sync goes beyond
         * End-of-device while looking for a whole page.
         * It is harmless.
         */
        return -EINVAL;
    }

    if (bitmap->bp[page].hijacked) /* it's hijacked, don't try to alloc */
        return 0;

    if (bitmap->bp[page].map) /* page is already allocated, just return */
        return 0;

    if (!create)
        return -ENOENT;

    /* this page has not been allocated yet */

    spin_unlock_irq(&bitmap->lock);
    /* It is possible that this is being called inside a
     * prepare_to_wait/finish_wait loop from raid5c:make_request().
     * In general it is not permitted to sleep in that context as it
     * can cause the loop to spin freely.
     * That doesn't apply here as we can only reach this point
     * once with any loop.
     * When this function completes, either bp[page].map or
     * bp[page].hijacked.  In either case, this function will
     * abort before getting to this point again.  So there is
     * no risk of a free-spin, and so it is safe to assert
     * that sleeping here is allowed.
     */
    sched_annotate_sleep();
    mappage = kzalloc(PAGE_SIZE, GFP_NOIO);
    spin_lock_irq(&bitmap->lock);

    if (mappage == NULL) {
        pr_debug("md/bitmap: map page allocation failed, hijacking\n");
        /* We don't support hijack for cluster raid */
        if (no_hijack)
            return -ENOMEM;
        /* failed - set the hijacked flag so that we can use the
         * pointer as a counter */
        if (!bitmap->bp[page].map)
            bitmap->bp[page].hijacked = 1;
    } else if (bitmap->bp[page].map ||
               bitmap->bp[page].hijacked) {
        /* somebody beat us to getting the page */
        kfree(mappage);
    } else {
        /* no page was in place and we have one, so install it */
        bitmap->bp[page].map = mappage;
        bitmap->missing_pages--;
    }
    return 0;
}
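/*
 * Example, for clarity: when a page is "hijacked", the struct pointer slot
 * bp[page].map is itself reused as storage for two 16-bit counters instead
 * of pointing at a PAGE_SIZE counter array, roughly
 *
 *     bitmap_counter_t *cnt = &((bitmap_counter_t *)&bitmap->bp[page].map)[hi];
 *
 * where 'hi' selects the first or second counter half.  This is only a
 * degraded fallback for allocation failure; md_bitmap_get_counter() below
 * is the lookup that handles both the normal and the hijacked case.
 */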
/* if page is completely empty, put it back on the free list, or dealloc it */
/* if page was hijacked, unmark the flag so it might get alloced next time */
/* Note: lock should be held when calling this */
static void md_bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)
{
    char *ptr;

    if (bitmap->bp[page].count) /* page is still busy */
        return;

    /* page is no longer in use, it can be released */

    if (bitmap->bp[page].hijacked) { /* page was hijacked, undo this now */
        bitmap->bp[page].hijacked = 0;
        bitmap->bp[page].map = NULL;
    } else {
        /* normal case, free the page */
        ptr = bitmap->bp[page].map;
        bitmap->bp[page].map = NULL;
        bitmap->missing_pages++;
        kfree(ptr);
    }
}
/*
 * bitmap file handling - read and write the bitmap file and its superblock
 */

/*
 * basic page I/O operations
 */

/* IO operations when bitmap is stored near all superblocks */
static int read_sb_page(struct mddev *mddev, loff_t offset,
                        struct page *page,
                        unsigned long index, int size)
{
    /* choose a good rdev and read the page from there */

    struct md_rdev *rdev;
    sector_t target;

    rdev_for_each(rdev, mddev) {
        if (! test_bit(In_sync, &rdev->flags)
            || test_bit(Faulty, &rdev->flags)
            || test_bit(Bitmap_sync, &rdev->flags))
            continue;

        target = offset + index * (PAGE_SIZE/512);

        if (sync_page_io(rdev, target,
                         roundup(size, bdev_logical_block_size(rdev->bdev)),
                         page, REQ_OP_READ, 0, true)) {
            page->index = index;
            return 0;
        }
    }
    return -EIO;
}
static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
    /* Iterate the disks of an mddev, using rcu to protect access to the
     * linked list, and raising the refcount of devices we return to ensure
     * they don't disappear while in use.
     * As devices are only added or removed when raid_disk is < 0 and
     * nr_pending is 0 and In_sync is clear, the entries we return will
     * still be in the same position on the list when we re-enter
     * list_for_each_entry_continue_rcu.
     *
     * Note that if entered with 'rdev == NULL' to start at the
     * beginning, we temporarily assign 'rdev' to an address which
     * isn't really an rdev, but which can be used by
     * list_for_each_entry_continue_rcu() to find the first entry.
     */
    rcu_read_lock();
    if (rdev == NULL)
        /* start at the beginning */
        rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
    else {
        /* release the previous rdev and start from there. */
        rdev_dec_pending(rdev, mddev);
    }
    list_for_each_entry_continue_rcu(rdev, &mddev->disks, same_set) {
        if (rdev->raid_disk >= 0 &&
            !test_bit(Faulty, &rdev->flags)) {
            /* this is a usable devices */
            atomic_inc(&rdev->nr_pending);
            rcu_read_unlock();
            return rdev;
        }
    }
    rcu_read_unlock();
    return NULL;
}
static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
{
    struct md_rdev *rdev;
    struct block_device *bdev;
    struct mddev *mddev = bitmap->mddev;
    struct bitmap_storage *store = &bitmap->storage;

restart:
    rdev = NULL;
    while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
        int size = PAGE_SIZE;
        loff_t offset = mddev->bitmap_info.offset;

        bdev = (rdev->meta_bdev) ? rdev->meta_bdev : rdev->bdev;

        if (page->index == store->file_pages-1) {
            int last_page_size = store->bytes & (PAGE_SIZE-1);
            if (last_page_size == 0)
                last_page_size = PAGE_SIZE;
            size = roundup(last_page_size,
                           bdev_logical_block_size(bdev));
        }
        /* Just make sure we aren't corrupting data or
         * metadata
         */
        if (mddev->external) {
            /* Bitmap could be anywhere. */
            if (rdev->sb_start + offset + (page->index
                                           * (PAGE_SIZE/512))
                > rdev->data_offset
                &&
                rdev->sb_start + offset
                < (rdev->data_offset + mddev->dev_sectors
                   + (PAGE_SIZE/512)))
                goto bad_alignment;
        } else if (offset < 0) {
            /* DATA  BITMAP METADATA  */
            if (offset
                + (long)(page->index * (PAGE_SIZE/512))
                + size/512 > 0)
                /* bitmap runs in to metadata */
                goto bad_alignment;
            if (rdev->data_offset + mddev->dev_sectors
                > rdev->sb_start + offset)
                /* data runs in to bitmap */
                goto bad_alignment;
        } else if (rdev->sb_start < rdev->data_offset) {
            /* METADATA BITMAP DATA */
            if (rdev->sb_start
                + offset
                + page->index*(PAGE_SIZE/512) + size/512
                > rdev->data_offset)
                /* bitmap runs in to data */
                goto bad_alignment;
        } else {
            /* DATA METADATA BITMAP - no problems */
        }
        md_super_write(mddev, rdev,
                       rdev->sb_start + offset
                       + page->index * (PAGE_SIZE/512),
                       size,
                       page);
    }

    if (wait && md_super_wait(mddev) < 0)
        goto restart;
    return 0;

 bad_alignment:
    return -EINVAL;
}
static void md_bitmap_file_kick(struct bitmap *bitmap);
/*
 * write out a page to a file
 */
static void write_page(struct bitmap *bitmap, struct page *page, int wait)
{
    struct buffer_head *bh;

    if (bitmap->storage.file == NULL) {
        switch (write_sb_page(bitmap, page, wait)) {
        case -EINVAL:
            set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
        }
    } else {

        bh = page_buffers(page);

        while (bh && bh->b_blocknr) {
            atomic_inc(&bitmap->pending_writes);
            set_buffer_locked(bh);
            set_buffer_mapped(bh);
            submit_bh(REQ_OP_WRITE, REQ_SYNC, bh);
            bh = bh->b_this_page;
        }

        if (wait)
            wait_event(bitmap->write_wait,
                       atomic_read(&bitmap->pending_writes)==0);
    }
    if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
        md_bitmap_file_kick(bitmap);
}
static void end_bitmap_write(struct buffer_head *bh, int uptodate)
{
    struct bitmap *bitmap = bh->b_private;

    if (!uptodate)
        set_bit(BITMAP_WRITE_ERROR, &bitmap->flags);
    if (atomic_dec_and_test(&bitmap->pending_writes))
        wake_up(&bitmap->write_wait);
}
/* copied from buffer.c */
static void
__clear_page_buffers(struct page *page)
{
    ClearPagePrivate(page);
    set_page_private(page, 0);
    put_page(page);
}
static void free_buffers(struct page *page)
{
    struct buffer_head *bh;

    if (!PagePrivate(page))
        return;

    bh = page_buffers(page);
    while (bh) {
        struct buffer_head *next = bh->b_this_page;
        free_buffer_head(bh);
        bh = next;
    }
    __clear_page_buffers(page);
    put_page(page);
}
/* read a page from a file.
 * We both read the page, and attach buffers to the page to record the
 * address of each block (using bmap).  These addresses will be used
 * to write the block later, completely bypassing the filesystem.
 * This usage is similar to how swap files are handled, and allows us
 * to write to a file with no concerns of memory allocation failing.
 */
static int read_page(struct file *file, unsigned long index,
                     struct bitmap *bitmap,
                     unsigned long count,
                     struct page *page)
{
    int ret = 0;
    struct inode *inode = file_inode(file);
    struct buffer_head *bh;
    sector_t block;

    pr_debug("read bitmap file (%dB @ %llu)\n", (int)PAGE_SIZE,
             (unsigned long long)index << PAGE_SHIFT);

    bh = alloc_page_buffers(page, 1<<inode->i_blkbits, false);
    if (!bh) {
        ret = -ENOMEM;
        goto out;
    }
    attach_page_buffers(page, bh);
    block = index << (PAGE_SHIFT - inode->i_blkbits);
    while (bh) {
        if (count == 0)
            bh->b_blocknr = 0;
        else {
            bh->b_blocknr = bmap(inode, block);
            if (bh->b_blocknr == 0) {
                /* Cannot use this file! */
                ret = -EINVAL;
                goto out;
            }
            bh->b_bdev = inode->i_sb->s_bdev;
            if (count < (1<<inode->i_blkbits))
                count = 0;
            else
                count -= (1<<inode->i_blkbits);

            bh->b_end_io = end_bitmap_write;
            bh->b_private = bitmap;
            atomic_inc(&bitmap->pending_writes);
            set_buffer_locked(bh);
            set_buffer_mapped(bh);
            submit_bh(REQ_OP_READ, 0, bh);
        }
        block++;
        bh = bh->b_this_page;
    }
    page->index = index;

    wait_event(bitmap->write_wait,
               atomic_read(&bitmap->pending_writes)==0);
    if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
        ret = -EIO;
out:
    if (ret)
        pr_err("md: bitmap read error: (%dB @ %llu): %d\n",
               (int)PAGE_SIZE,
               (unsigned long long)index << PAGE_SHIFT,
               ret);
    return ret;
}
/*
 * bitmap file superblock operations
 */

/*
 * md_bitmap_wait_writes() should be called before writing any bitmap
 * blocks, to ensure previous writes, particularly from
 * md_bitmap_daemon_work(), have completed.
 */
static void md_bitmap_wait_writes(struct bitmap *bitmap)
{
    if (bitmap->storage.file)
        wait_event(bitmap->write_wait,
                   atomic_read(&bitmap->pending_writes)==0);
    else
        /* Note that we ignore the return value.  The writes
         * might have failed, but that would just mean that
         * some bits which should be cleared haven't been,
         * which is safe.  The relevant bitmap blocks will
         * probably get written again, but there is no great
         * loss if they aren't.
         */
        md_super_wait(bitmap->mddev);
}
/* update the event counter and sync the superblock to disk */
void md_bitmap_update_sb(struct bitmap *bitmap)
{
    bitmap_super_t *sb;

    if (!bitmap || !bitmap->mddev) /* no bitmap for this array */
        return;
    if (bitmap->mddev->bitmap_info.external)
        return;
    if (!bitmap->storage.sb_page) /* no superblock */
        return;
    sb = kmap_atomic(bitmap->storage.sb_page);
    sb->events = cpu_to_le64(bitmap->mddev->events);
    if (bitmap->mddev->events < bitmap->events_cleared)
        /* rocking back to read-only */
        bitmap->events_cleared = bitmap->mddev->events;
    sb->events_cleared = cpu_to_le64(bitmap->events_cleared);
    /*
     * clear BITMAP_WRITE_ERROR bit to protect against the case that
     * a bitmap write error occurred but the later writes succeeded.
     */
    sb->state = cpu_to_le32(bitmap->flags & ~BIT(BITMAP_WRITE_ERROR));
    /* Just in case these have been changed via sysfs: */
    sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
    sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
    /* This might have been changed by a reshape */
    sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
    sb->chunksize = cpu_to_le32(bitmap->mddev->bitmap_info.chunksize);
    sb->nodes = cpu_to_le32(bitmap->mddev->bitmap_info.nodes);
    sb->sectors_reserved = cpu_to_le32(bitmap->mddev->
                                       bitmap_info.space);
    kunmap_atomic(sb);
    write_page(bitmap, bitmap->storage.sb_page, 1);
}
EXPORT_SYMBOL(md_bitmap_update_sb);
/* print out the bitmap file superblock */
void md_bitmap_print_sb(struct bitmap *bitmap)
{
    bitmap_super_t *sb;

    if (!bitmap || !bitmap->storage.sb_page)
        return;
    sb = kmap_atomic(bitmap->storage.sb_page);
    pr_debug("%s: bitmap file superblock:\n", bmname(bitmap));
    pr_debug("         magic: %08x\n", le32_to_cpu(sb->magic));
    pr_debug("       version: %d\n", le32_to_cpu(sb->version));
    pr_debug("          uuid: %08x.%08x.%08x.%08x\n",
             le32_to_cpu(*(__le32 *)(sb->uuid+0)),
             le32_to_cpu(*(__le32 *)(sb->uuid+4)),
             le32_to_cpu(*(__le32 *)(sb->uuid+8)),
             le32_to_cpu(*(__le32 *)(sb->uuid+12)));
    pr_debug("        events: %llu\n",
             (unsigned long long) le64_to_cpu(sb->events));
    pr_debug("events cleared: %llu\n",
             (unsigned long long) le64_to_cpu(sb->events_cleared));
    pr_debug("         state: %08x\n", le32_to_cpu(sb->state));
    pr_debug("     chunksize: %d B\n", le32_to_cpu(sb->chunksize));
    pr_debug("  daemon sleep: %ds\n", le32_to_cpu(sb->daemon_sleep));
    pr_debug("     sync size: %llu KB\n",
             (unsigned long long)le64_to_cpu(sb->sync_size)/2);
    pr_debug("max write behind: %d\n", le32_to_cpu(sb->write_behind));
    kunmap_atomic(sb);
}
/*
 * This function is somewhat the reverse of bitmap_read_sb.  bitmap_read_sb
 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
 * This function verifies 'bitmap_info' and populates the on-disk bitmap
 * structure, which is to be written to disk.
 *
 * Returns: 0 on success, -Exxx on error
 */
static int md_bitmap_new_disk_sb(struct bitmap *bitmap)
{
    bitmap_super_t *sb;
    unsigned long chunksize, daemon_sleep, write_behind;

    bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (bitmap->storage.sb_page == NULL)
        return -ENOMEM;
    bitmap->storage.sb_page->index = 0;

    sb = kmap_atomic(bitmap->storage.sb_page);

    sb->magic = cpu_to_le32(BITMAP_MAGIC);
    sb->version = cpu_to_le32(BITMAP_MAJOR_HI);

    chunksize = bitmap->mddev->bitmap_info.chunksize;
    BUG_ON(!chunksize);
    if (!is_power_of_2(chunksize)) {
        kunmap_atomic(sb);
        pr_warn("bitmap chunksize not a power of 2\n");
        return -EINVAL;
    }
    sb->chunksize = cpu_to_le32(chunksize);

    daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
    if (!daemon_sleep || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
        pr_debug("Choosing daemon_sleep default (5 sec)\n");
        daemon_sleep = 5 * HZ;
    }
    sb->daemon_sleep = cpu_to_le32(daemon_sleep);
    bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;

    /*
     * FIXME: write_behind for RAID1.  If not specified, what
     * is a good choice?  We choose COUNTER_MAX / 2 arbitrarily.
     */
    write_behind = bitmap->mddev->bitmap_info.max_write_behind;
    if (write_behind > COUNTER_MAX)
        write_behind = COUNTER_MAX / 2;
    sb->write_behind = cpu_to_le32(write_behind);
    bitmap->mddev->bitmap_info.max_write_behind = write_behind;

    /* keep the array size field of the bitmap superblock up to date */
    sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

    memcpy(sb->uuid, bitmap->mddev->uuid, 16);

    set_bit(BITMAP_STALE, &bitmap->flags);
    sb->state = cpu_to_le32(bitmap->flags);
    bitmap->events_cleared = bitmap->mddev->events;
    sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
    bitmap->mddev->bitmap_info.nodes = 0;

    kunmap_atomic(sb);

    return 0;
}
/* read the superblock from the bitmap file and initialize some bitmap fields */
static int md_bitmap_read_sb(struct bitmap *bitmap)
{
    char *reason = NULL;
    bitmap_super_t *sb;
    unsigned long chunksize, daemon_sleep, write_behind;
    unsigned long long events;
    int nodes = 0;
    unsigned long sectors_reserved = 0;
    int err = -EINVAL;
    struct page *sb_page;
    loff_t offset = bitmap->mddev->bitmap_info.offset;

    if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
        chunksize = 128 * 1024 * 1024;
        daemon_sleep = 5 * HZ;
        write_behind = 0;
        set_bit(BITMAP_STALE, &bitmap->flags);
        err = 0;
        goto out_no_sb;
    }
    /* page 0 is the superblock, read it... */
    sb_page = alloc_page(GFP_KERNEL);
    if (!sb_page)
        return -ENOMEM;
    bitmap->storage.sb_page = sb_page;

re_read:
    /* If cluster_slot is set, the cluster is setup */
    if (bitmap->cluster_slot >= 0) {
        sector_t bm_blocks = bitmap->mddev->resync_max_sectors;

        sector_div(bm_blocks,
                   bitmap->mddev->bitmap_info.chunksize >> 9);
        /* bits to bytes */
        bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
        /* to 4k blocks */
        bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
        offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
        pr_debug("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
                 bitmap->cluster_slot, offset);
    }

    if (bitmap->storage.file) {
        loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
        int bytes = isize > PAGE_SIZE ? PAGE_SIZE : isize;

        err = read_page(bitmap->storage.file, 0,
                        bitmap, bytes, sb_page);
    } else {
        err = read_sb_page(bitmap->mddev,
                           offset,
                           sb_page,
                           0, sizeof(bitmap_super_t));
    }
    if (err)
        return err;

    err = -EINVAL;
    sb = kmap_atomic(sb_page);

    chunksize = le32_to_cpu(sb->chunksize);
    daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
    write_behind = le32_to_cpu(sb->write_behind);
    sectors_reserved = le32_to_cpu(sb->sectors_reserved);
    /* Setup nodes/clustername only if bitmap version is
     * cluster-compatible
     */
    if (sb->version == cpu_to_le32(BITMAP_MAJOR_CLUSTERED)) {
        nodes = le32_to_cpu(sb->nodes);
        strlcpy(bitmap->mddev->bitmap_info.cluster_name,
                sb->cluster_name, 64);
    }

    /* verify that the bitmap-specific fields are valid */
    if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
        reason = "bad magic";
    else if (le32_to_cpu(sb->version) < BITMAP_MAJOR_LO ||
             le32_to_cpu(sb->version) > BITMAP_MAJOR_CLUSTERED)
        reason = "unrecognized superblock version";
    else if (chunksize < 512)
        reason = "bitmap chunksize too small";
    else if (!is_power_of_2(chunksize))
        reason = "bitmap chunksize not a power of 2";
    else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
        reason = "daemon sleep period out of range";
    else if (write_behind > COUNTER_MAX)
        reason = "write-behind limit out of range (0 - 16383)";
    if (reason) {
        pr_warn("%s: invalid bitmap file superblock: %s\n",
                bmname(bitmap), reason);
        goto out;
    }

    /* keep the array size field of the bitmap superblock up to date */
    sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);

    if (bitmap->mddev->persistent) {
        /*
         * We have a persistent array superblock, so compare the
         * bitmap's UUID and event counter to the mddev's
         */
        if (memcmp(sb->uuid, bitmap->mddev->uuid, 16)) {
            pr_warn("%s: bitmap superblock UUID mismatch\n",
                    bmname(bitmap));
            goto out;
        }
        events = le64_to_cpu(sb->events);
        if (!nodes && (events < bitmap->mddev->events)) {
            pr_warn("%s: bitmap file is out of date (%llu < %llu) -- forcing full recovery\n",
                    bmname(bitmap), events,
                    (unsigned long long) bitmap->mddev->events);
            set_bit(BITMAP_STALE, &bitmap->flags);
        }
    }

    /* assign fields using values from superblock */
    bitmap->flags |= le32_to_cpu(sb->state);
    if (le32_to_cpu(sb->version) == BITMAP_MAJOR_HOSTENDIAN)
        set_bit(BITMAP_HOSTENDIAN, &bitmap->flags);
    bitmap->events_cleared = le64_to_cpu(sb->events_cleared);
    strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64);
    err = 0;

out:
    kunmap_atomic(sb);
    /* Assigning chunksize is required for "re_read" */
    bitmap->mddev->bitmap_info.chunksize = chunksize;
    if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
        err = md_setup_cluster(bitmap->mddev, nodes);
        if (err) {
            pr_warn("%s: Could not setup cluster service (%d)\n",
                    bmname(bitmap), err);
            goto out_no_sb;
        }
        bitmap->cluster_slot = md_cluster_ops->slot_number(bitmap->mddev);
        goto re_read;
    }

out_no_sb:
    if (test_bit(BITMAP_STALE, &bitmap->flags))
        bitmap->events_cleared = bitmap->mddev->events;
    bitmap->mddev->bitmap_info.chunksize = chunksize;
    bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
    bitmap->mddev->bitmap_info.max_write_behind = write_behind;
    bitmap->mddev->bitmap_info.nodes = nodes;
    if (bitmap->mddev->bitmap_info.space == 0 ||
        bitmap->mddev->bitmap_info.space > sectors_reserved)
        bitmap->mddev->bitmap_info.space = sectors_reserved;
    if (err) {
        md_bitmap_print_sb(bitmap);
        if (bitmap->cluster_slot < 0)
            md_cluster_stop(bitmap->mddev);
    }
    return err;
}
/*
 * general bitmap file operations
 */

/*
 * Use one bit per "chunk" (block set). We do the disk I/O on the bitmap
 * file a page at a time. There's a superblock at the start of the file.
 */

/* calculate the index of the page that contains this bit */
static inline unsigned long file_page_index(struct bitmap_storage *store,
                                            unsigned long chunk)
{
    if (store->sb_page)
        chunk += sizeof(bitmap_super_t) << 3;
    return chunk >> PAGE_BIT_SHIFT;
}

/* calculate the (bit) offset of this bit within a page */
static inline unsigned long file_page_offset(struct bitmap_storage *store,
                                             unsigned long chunk)
{
    if (store->sb_page)
        chunk += sizeof(bitmap_super_t) << 3;
    return chunk & (PAGE_BITS - 1);
}

/*
 * return a pointer to the page in the filemap that contains the given bit
 *
 */
static inline struct page *filemap_get_page(struct bitmap_storage *store,
                                            unsigned long chunk)
{
    if (file_page_index(store, chunk) >= store->file_pages)
        return NULL;
    return store->filemap[file_page_index(store, chunk)];
}
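/*
 * Worked example for the two helpers above (illustrative numbers): with
 * 4KiB pages, PAGE_BITS is 32768 and the 256-byte superblock, when stored
 * in-file, occupies the first 2048 bits of page 0.  Chunk 40000 therefore
 * maps to page (40000 + 2048) / 32768 = 1, at bit offset
 * (40000 + 2048) % 32768 = 9280 within that page.
 */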
static int md_bitmap_storage_alloc(struct bitmap_storage *store,
                                   unsigned long chunks, int with_super,
                                   int slot_number)
{
    int pnum, offset = 0;
    unsigned long num_pages;
    unsigned long bytes;

    bytes = DIV_ROUND_UP(chunks, 8);
    if (with_super)
        bytes += sizeof(bitmap_super_t);

    num_pages = DIV_ROUND_UP(bytes, PAGE_SIZE);
    offset = slot_number * num_pages;

    store->filemap = kmalloc_array(num_pages, sizeof(struct page *),
                                   GFP_KERNEL);
    if (!store->filemap)
        return -ENOMEM;

    if (with_super && !store->sb_page) {
        store->sb_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (store->sb_page == NULL)
            return -ENOMEM;
    }

    pnum = 0;
    if (store->sb_page) {
        store->filemap[0] = store->sb_page;
        pnum = 1;
        store->sb_page->index = offset;
    }

    for ( ; pnum < num_pages; pnum++) {
        store->filemap[pnum] = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!store->filemap[pnum]) {
            store->file_pages = pnum;
            return -ENOMEM;
        }
        store->filemap[pnum]->index = pnum + offset;
    }
    store->file_pages = pnum;

    /* We need 4 bits per page, rounded up to a multiple
     * of sizeof(unsigned long) */
    store->filemap_attr = kzalloc(
        roundup(DIV_ROUND_UP(num_pages*4, 8), sizeof(unsigned long)),
        GFP_KERNEL);
    if (!store->filemap_attr)
        return -ENOMEM;

    store->bytes = bytes;

    return 0;
}
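/*
 * Sizing example (illustrative numbers): an internal bitmap covering
 * 100000 chunks needs DIV_ROUND_UP(100000, 8) = 12500 bytes of bit data,
 * plus the 256-byte superblock, i.e. 12756 bytes, which rounds up to
 * DIV_ROUND_UP(12756, 4096) = 4 filemap pages on a 4KiB-page system.
 */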
static void md_bitmap_file_unmap(struct bitmap_storage *store)
{
    struct page **map, *sb_page;
    int pages;
    struct file *file;

    file = store->file;
    map = store->filemap;
    pages = store->file_pages;
    sb_page = store->sb_page;

    while (pages--)
        if (map[pages] != sb_page) /* 0 is sb_page, release it below */
            free_buffers(map[pages]);
    kfree(map);
    kfree(store->filemap_attr);

    if (sb_page)
        free_buffers(sb_page);

    if (file) {
        struct inode *inode = file_inode(file);
        invalidate_mapping_pages(inode->i_mapping, 0, -1);
        fput(file);
    }
}
/*
 * bitmap_file_kick - if an error occurs while manipulating the bitmap file
 * then it is no longer reliable, so we stop using it and we mark the file
 * as failed in the superblock
 */
static void md_bitmap_file_kick(struct bitmap *bitmap)
{
    char *path, *ptr = NULL;

    if (!test_and_set_bit(BITMAP_STALE, &bitmap->flags)) {
        md_bitmap_update_sb(bitmap);

        if (bitmap->storage.file) {
            path = kmalloc(PAGE_SIZE, GFP_KERNEL);
            if (path)
                ptr = file_path(bitmap->storage.file,
                                path, PAGE_SIZE);

            pr_warn("%s: kicking failed bitmap file %s from array!\n",
                    bmname(bitmap), IS_ERR(ptr) ? "" : ptr);

            kfree(path);
        } else
            pr_warn("%s: disabling internal bitmap due to errors\n",
                    bmname(bitmap));
    }
}
enum bitmap_page_attr {
    BITMAP_PAGE_DIRTY = 0,     /* there are set bits that need to be synced */
    BITMAP_PAGE_PENDING = 1,   /* there are bits that are being cleaned.
                                * i.e. counter is 1 or 2. */
    BITMAP_PAGE_NEEDWRITE = 2, /* there are cleared bits that need to be synced */
};
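/*
 * For orientation, the typical life cycle of these attributes: a page
 * marked DIRTY is flushed synchronously by md_bitmap_unplug() before any
 * writes are allowed down to the member devices; a page marked PENDING is
 * promoted to NEEDWRITE by the next md_bitmap_daemon_work() pass and then
 * written out lazily, since losing a "clear" is harmless and only costs a
 * little extra resync.
 */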
static inline void set_page_attr(struct bitmap *bitmap, int pnum,
                                 enum bitmap_page_attr attr)
{
    set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline void clear_page_attr(struct bitmap *bitmap, int pnum,
                                   enum bitmap_page_attr attr)
{
    clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_page_attr(struct bitmap *bitmap, int pnum,
                                 enum bitmap_page_attr attr)
{
    return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
}

static inline int test_and_clear_page_attr(struct bitmap *bitmap, int pnum,
                                           enum bitmap_page_attr attr)
{
    return test_and_clear_bit((pnum<<2) + attr,
                              bitmap->storage.filemap_attr);
}
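/*
 * Layout example: filemap_attr packs four attribute bits per filemap page,
 * so page 5's DIRTY bit lives at bit (5<<2) + 0 = 20 and its NEEDWRITE bit
 * at (5<<2) + 2 = 22 of the attribute bitmap, which is why
 * md_bitmap_storage_alloc() reserves num_pages*4 bits above.
 */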
/*
 * bitmap_file_set_bit -- called before performing a write to the md device
 * to set (and eventually sync) a particular bit in the bitmap file
 *
 * we set the bit immediately, then we record the page number so that
 * when an unplug occurs, we can flush the dirty pages out to disk
 */
static void md_bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
{
    unsigned long bit;
    struct page *page;
    void *kaddr;
    unsigned long chunk = block >> bitmap->counts.chunkshift;
    struct bitmap_storage *store = &bitmap->storage;
    unsigned long node_offset = 0;

    if (mddev_is_clustered(bitmap->mddev))
        node_offset = bitmap->cluster_slot * store->file_pages;

    page = filemap_get_page(&bitmap->storage, chunk);
    if (!page)
        return;
    bit = file_page_offset(&bitmap->storage, chunk);

    /* set the bit */
    kaddr = kmap_atomic(page);
    if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
        set_bit(bit, kaddr);
    else
        set_bit_le(bit, kaddr);
    kunmap_atomic(kaddr);
    pr_debug("set file bit %lu page %lu\n", bit, page->index);
    /* record page number so it gets flushed to disk when unplug occurs */
    set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_DIRTY);
}
static void md_bitmap_file_clear_bit(struct bitmap *bitmap, sector_t block)
{
    unsigned long bit;
    struct page *page;
    void *paddr;
    unsigned long chunk = block >> bitmap->counts.chunkshift;
    struct bitmap_storage *store = &bitmap->storage;
    unsigned long node_offset = 0;

    if (mddev_is_clustered(bitmap->mddev))
        node_offset = bitmap->cluster_slot * store->file_pages;

    page = filemap_get_page(&bitmap->storage, chunk);
    if (!page)
        return;
    bit = file_page_offset(&bitmap->storage, chunk);
    paddr = kmap_atomic(page);
    if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
        clear_bit(bit, paddr);
    else
        clear_bit_le(bit, paddr);
    kunmap_atomic(paddr);
    if (!test_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_NEEDWRITE)) {
        set_page_attr(bitmap, page->index - node_offset, BITMAP_PAGE_PENDING);
        bitmap->allclean = 0;
    }
}
static int md_bitmap_file_test_bit(struct bitmap *bitmap, sector_t block)
{
    unsigned long bit;
    struct page *page;
    void *paddr;
    unsigned long chunk = block >> bitmap->counts.chunkshift;
    int set = 0;

    page = filemap_get_page(&bitmap->storage, chunk);
    if (!page)
        return -EINVAL;
    bit = file_page_offset(&bitmap->storage, chunk);
    paddr = kmap_atomic(page);
    if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
        set = test_bit(bit, paddr);
    else
        set = test_bit_le(bit, paddr);
    kunmap_atomic(paddr);
    return set;
}
/* this gets called when the md device is ready to unplug its underlying
 * (slave) device queues -- before we let any writes go down, we need to
 * sync the dirty pages of the bitmap file to disk */
void md_bitmap_unplug(struct bitmap *bitmap)
{
    unsigned long i;
    int dirty, need_write;
    int writing = 0;

    if (!bitmap || !bitmap->storage.filemap ||
        test_bit(BITMAP_STALE, &bitmap->flags))
        return;

    /* look at each page to see if there are any set bits that need to be
     * flushed out to disk */
    for (i = 0; i < bitmap->storage.file_pages; i++) {
        if (!bitmap->storage.filemap)
            return;
        dirty = test_and_clear_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
        need_write = test_and_clear_page_attr(bitmap, i,
                                              BITMAP_PAGE_NEEDWRITE);
        if (dirty || need_write) {
            if (!writing) {
                md_bitmap_wait_writes(bitmap);
                if (bitmap->mddev->queue)
                    blk_add_trace_msg(bitmap->mddev->queue,
                                      "md bitmap_unplug");
            }
            clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
            write_page(bitmap, bitmap->storage.filemap[i], 0);
            writing = 1;
        }
    }
    if (writing)
        md_bitmap_wait_writes(bitmap);

    if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
        md_bitmap_file_kick(bitmap);
}
EXPORT_SYMBOL(md_bitmap_unplug);
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed);
/*
 * bitmap_init_from_disk -- called at bitmap_create time to initialize
 * the in-memory bitmap from the on-disk bitmap -- also, sets up the
 * memory mapping of the bitmap file
 *
 * if there's no bitmap file, or if the bitmap file had been
 * previously kicked from the array, we mark all the bits as
 * 1's in order to cause a full resync.
 *
 * We ignore all bits for sectors that end earlier than 'start'.
 * This is used when reading an out-of-date bitmap...
 */
static int md_bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
{
    unsigned long i, chunks, index, oldindex, bit, node_offset = 0;
    struct page *page = NULL;
    unsigned long bit_cnt = 0;
    struct file *file;
    unsigned long offset;
    int outofdate;
    int ret = -ENOSPC;
    void *paddr;
    struct bitmap_storage *store = &bitmap->storage;

    chunks = bitmap->counts.chunks;
    file = store->file;

    if (!file && !bitmap->mddev->bitmap_info.offset) {
        /* No permanent bitmap - fill with '1s'. */
        store->filemap = NULL;
        store->file_pages = 0;
        for (i = 0; i < chunks; i++) {
            /* if the disk bit is set, set the memory bit */
            int needed = ((sector_t)(i+1) << (bitmap->counts.chunkshift)
                          >= start);
            md_bitmap_set_memory_bits(bitmap,
                                      (sector_t)i << bitmap->counts.chunkshift,
                                      needed);
        }
        return 0;
    }

    outofdate = test_bit(BITMAP_STALE, &bitmap->flags);
    if (outofdate)
        pr_warn("%s: bitmap file is out of date, doing full recovery\n", bmname(bitmap));

    if (file && i_size_read(file->f_mapping->host) < store->bytes) {
        pr_warn("%s: bitmap file too short %lu < %lu\n",
                bmname(bitmap),
                (unsigned long) i_size_read(file->f_mapping->host),
                store->bytes);
        goto err;
    }

    oldindex = ~0;
    offset = 0;
    if (!bitmap->mddev->bitmap_info.external)
        offset = sizeof(bitmap_super_t);

    if (mddev_is_clustered(bitmap->mddev))
        node_offset = bitmap->cluster_slot * (DIV_ROUND_UP(store->bytes, PAGE_SIZE));

    for (i = 0; i < chunks; i++) {
        int b;
        index = file_page_index(&bitmap->storage, i);
        bit = file_page_offset(&bitmap->storage, i);
        if (index != oldindex) { /* this is a new page, read it in */
            int count;
            /* unmap the old page, we're done with it */
            if (index == store->file_pages-1)
                count = store->bytes - index * PAGE_SIZE;
            else
                count = PAGE_SIZE;
            page = store->filemap[index];
            if (file)
                ret = read_page(file, index, bitmap,
                                count, page);
            else
                ret = read_sb_page(
                    bitmap->mddev,
                    bitmap->mddev->bitmap_info.offset,
                    page,
                    index + node_offset, count);

            if (ret)
                goto err;

            oldindex = index;

            if (outofdate) {
                /*
                 * if bitmap is out of date, dirty the
                 * whole page and write it out
                 */
                paddr = kmap_atomic(page);
                memset(paddr + offset, 0xff,
                       PAGE_SIZE - offset);
                kunmap_atomic(paddr);
                write_page(bitmap, page, 1);

                ret = -EIO;
                if (test_bit(BITMAP_WRITE_ERROR,
                             &bitmap->flags))
                    goto err;
            }
        }
        paddr = kmap_atomic(page);
        if (test_bit(BITMAP_HOSTENDIAN, &bitmap->flags))
            b = test_bit(bit, paddr);
        else
            b = test_bit_le(bit, paddr);
        kunmap_atomic(paddr);
        if (b) {
            /* if the disk bit is set, set the memory bit */
            int needed = ((sector_t)(i+1) << bitmap->counts.chunkshift
                          >= start);
            md_bitmap_set_memory_bits(bitmap,
                                      (sector_t)i << bitmap->counts.chunkshift,
                                      needed);
            bit_cnt++;
        }
        offset = 0;
    }

    pr_debug("%s: bitmap initialized from disk: read %lu pages, set %lu of %lu bits\n",
             bmname(bitmap), store->file_pages,
             bit_cnt, chunks);

    return 0;

 err:
    pr_warn("%s: bitmap initialisation failed: %d\n",
            bmname(bitmap), ret);
    return ret;
}
void md_bitmap_write_all(struct bitmap *bitmap)
{
    /* We don't actually write all bitmap blocks here,
     * just flag them as needing to be written
     */
    int i;

    if (!bitmap || !bitmap->storage.filemap)
        return;
    if (bitmap->storage.file)
        /* Only one copy, so nothing needed */
        return;

    for (i = 0; i < bitmap->storage.file_pages; i++)
        set_page_attr(bitmap, i,
                      BITMAP_PAGE_NEEDWRITE);
    bitmap->allclean = 0;
}
static void md_bitmap_count_page(struct bitmap_counts *bitmap,
                                 sector_t offset, int inc)
{
    sector_t chunk = offset >> bitmap->chunkshift;
    unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
    bitmap->bp[page].count += inc;
    md_bitmap_checkfree(bitmap, page);
}
static void md_bitmap_set_pending(struct bitmap_counts *bitmap, sector_t offset)
{
    sector_t chunk = offset >> bitmap->chunkshift;
    unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
    struct bitmap_page *bp = &bitmap->bp[page];

    if (!bp->pending)
        bp->pending = 1;
}
static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
                                               sector_t offset, sector_t *blocks,
                                               int create);
/*
 * bitmap daemon -- periodically wakes up to clean bits and flush pages
 *                  out to disk
 */
void md_bitmap_daemon_work(struct mddev *mddev)
{
    struct bitmap *bitmap;
    unsigned long j;
    unsigned long nextpage;
    sector_t blocks;
    struct bitmap_counts *counts;

    /* Use a mutex to guard daemon_work against
     * bitmap_destroy.
     */
    mutex_lock(&mddev->bitmap_info.mutex);
    bitmap = mddev->bitmap;
    if (bitmap == NULL) {
        mutex_unlock(&mddev->bitmap_info.mutex);
        return;
    }
    if (time_before(jiffies, bitmap->daemon_lastrun
                    + mddev->bitmap_info.daemon_sleep))
        goto done;

    bitmap->daemon_lastrun = jiffies;
    if (bitmap->allclean) {
        mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
        goto done;
    }
    bitmap->allclean = 1;

    if (bitmap->mddev->queue)
        blk_add_trace_msg(bitmap->mddev->queue,
                          "md bitmap_daemon_work");

    /* Any file-page which is PENDING now needs to be written.
     * So set NEEDWRITE now, then after we make any last-minute changes
     * we will write it.
     */
    for (j = 0; j < bitmap->storage.file_pages; j++)
        if (test_and_clear_page_attr(bitmap, j,
                                     BITMAP_PAGE_PENDING))
            set_page_attr(bitmap, j,
                          BITMAP_PAGE_NEEDWRITE);

    if (bitmap->need_sync &&
        mddev->bitmap_info.external == 0) {
        /* Arrange for superblock update as well as
         * other changes */
        bitmap_super_t *sb;
        bitmap->need_sync = 0;
        if (bitmap->storage.filemap) {
            sb = kmap_atomic(bitmap->storage.sb_page);
            sb->events_cleared =
                cpu_to_le64(bitmap->events_cleared);
            kunmap_atomic(sb);
            set_page_attr(bitmap, 0,
                          BITMAP_PAGE_NEEDWRITE);
        }
    }
    /* Now look at the bitmap counters and if any are '2' or '1',
     * decrement and handle accordingly.
     */
    counts = &bitmap->counts;
    spin_lock_irq(&counts->lock);
    nextpage = 0;
    for (j = 0; j < counts->chunks; j++) {
        bitmap_counter_t *bmc;
        sector_t block = (sector_t)j << counts->chunkshift;

        if (j == nextpage) {
            nextpage += PAGE_COUNTER_RATIO;
            if (!counts->bp[j >> PAGE_COUNTER_SHIFT].pending) {
                j |= PAGE_COUNTER_MASK;
                continue;
            }
            counts->bp[j >> PAGE_COUNTER_SHIFT].pending = 0;
        }

        bmc = md_bitmap_get_counter(counts, block, &blocks, 0);
        if (!bmc) {
            j |= PAGE_COUNTER_MASK;
            continue;
        }
        if (*bmc == 1 && !bitmap->need_sync) {
            /* We can clear the bit */
            *bmc = 0;
            md_bitmap_count_page(counts, block, -1);
            md_bitmap_file_clear_bit(bitmap, block);
        } else if (*bmc && *bmc <= 2) {
            *bmc = 1;
            md_bitmap_set_pending(counts, block);
            bitmap->allclean = 0;
        }
    }
    spin_unlock_irq(&counts->lock);

    md_bitmap_wait_writes(bitmap);
    /* Now start writeout on any page in NEEDWRITE that isn't DIRTY.
     * DIRTY pages need to be written by bitmap_unplug so it can wait
     * for them.
     * If we find any DIRTY page we stop there and let bitmap_unplug
     * handle all the rest.  This is important in the case where
     * the first blocking holds the superblock and it has been updated.
     * We mustn't write any other blocks before the superblock.
     */
    for (j = 0;
         j < bitmap->storage.file_pages
             && !test_bit(BITMAP_STALE, &bitmap->flags);
         j++) {
        if (test_page_attr(bitmap, j,
                           BITMAP_PAGE_DIRTY))
            /* bitmap_unplug will handle the rest */
            break;
        if (test_and_clear_page_attr(bitmap, j,
                                     BITMAP_PAGE_NEEDWRITE)) {
            write_page(bitmap, bitmap->storage.filemap[j], 0);
        }
    }

 done:
    if (bitmap->allclean == 0)
        mddev->thread->timeout =
            mddev->bitmap_info.daemon_sleep;
    mutex_unlock(&mddev->bitmap_info.mutex);
}
static bitmap_counter_t *md_bitmap_get_counter(struct bitmap_counts *bitmap,
                                               sector_t offset, sector_t *blocks,
                                               int create)
__releases(bitmap->lock)
__acquires(bitmap->lock)
{
    /* If 'create', we might release the lock and reclaim it.
     * The lock must have been taken with interrupts enabled.
     * If !create, we don't release the lock.
     */
    sector_t chunk = offset >> bitmap->chunkshift;
    unsigned long page = chunk >> PAGE_COUNTER_SHIFT;
    unsigned long pageoff = (chunk & PAGE_COUNTER_MASK) << COUNTER_BYTE_SHIFT;
    sector_t csize;
    int err;

    err = md_bitmap_checkpage(bitmap, page, create, 0);

    if (bitmap->bp[page].hijacked ||
        bitmap->bp[page].map == NULL)
        csize = ((sector_t)1) << (bitmap->chunkshift +
                                  PAGE_COUNTER_SHIFT - 1);
    else
        csize = ((sector_t)1) << bitmap->chunkshift;
    *blocks = csize - (offset & (csize - 1));

    if (err < 0)
        return NULL;

    /* now locked ... */

    if (bitmap->bp[page].hijacked) { /* hijacked pointer */
        /* should we use the first or second counter field
         * of the hijacked pointer? */
        int hi = (pageoff > PAGE_COUNTER_MASK);
        return &((bitmap_counter_t *)
                 &bitmap->bp[page].map)[hi];
    } else /* page is allocated */
        return (bitmap_counter_t *)
            &(bitmap->bp[page].map[pageoff]);
}
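/*
 * Background note on bitmap_counter_t (the authoritative definitions live
 * in md-bitmap.h): each chunk gets a 16-bit word in which the top bits are
 * the NEEDED and RESYNC flags and the low bits (up to COUNTER_MAX = 16383)
 * count in-flight writes.  For example, a value of NEEDED_MASK | 2 is a
 * chunk that is considered active and still needs to be resynced.
 */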
int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sectors, int behind)
{
    if (!bitmap)
        return 0;
    if (behind) {
        int bw;
        atomic_inc(&bitmap->behind_writes);
        bw = atomic_read(&bitmap->behind_writes);
        if (bw > bitmap->behind_writes_used)
            bitmap->behind_writes_used = bw;

        pr_debug("inc write-behind count %d/%lu\n",
                 bw, bitmap->mddev->bitmap_info.max_write_behind);
    }

    while (sectors) {
        sector_t blocks;
        bitmap_counter_t *bmc;

        spin_lock_irq(&bitmap->counts.lock);
        bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 1);
        if (!bmc) {
            spin_unlock_irq(&bitmap->counts.lock);
            return 0;
        }

        if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
            DEFINE_WAIT(__wait);
            /* note that it is safe to do the prepare_to_wait
             * after the test as long as we do it before dropping
             * the spinlock.
             */
            prepare_to_wait(&bitmap->overflow_wait, &__wait,
                            TASK_UNINTERRUPTIBLE);
            spin_unlock_irq(&bitmap->counts.lock);
            schedule();
            finish_wait(&bitmap->overflow_wait, &__wait);
            continue;
        }

        switch (*bmc) {
        case 0:
            md_bitmap_file_set_bit(bitmap, offset);
            md_bitmap_count_page(&bitmap->counts, offset, 1);
            /* fall through */
        case 1:
            *bmc = 2;
        }

        (*bmc)++;

        spin_unlock_irq(&bitmap->counts.lock);

        offset += blocks;
        if (sectors > blocks)
            sectors -= blocks;
        else
            sectors = 0;
    }
    return 0;
}
EXPORT_SYMBOL(md_bitmap_startwrite);
void md_bitmap_endwrite(struct bitmap *bitmap, sector_t offset,
                        unsigned long sectors, int success, int behind)
{
    if (!bitmap)
        return;
    if (behind) {
        if (atomic_dec_and_test(&bitmap->behind_writes))
            wake_up(&bitmap->behind_wait);
        pr_debug("dec write-behind count %d/%lu\n",
                 atomic_read(&bitmap->behind_writes),
                 bitmap->mddev->bitmap_info.max_write_behind);
    }

    while (sectors) {
        sector_t blocks;
        unsigned long flags;
        bitmap_counter_t *bmc;

        spin_lock_irqsave(&bitmap->counts.lock, flags);
        bmc = md_bitmap_get_counter(&bitmap->counts, offset, &blocks, 0);
        if (!bmc) {
            spin_unlock_irqrestore(&bitmap->counts.lock, flags);
            return;
        }

        if (success && !bitmap->mddev->degraded &&
            bitmap->events_cleared < bitmap->mddev->events) {
            bitmap->events_cleared = bitmap->mddev->events;
            bitmap->need_sync = 1;
            sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
        }

        if (!success && !NEEDED(*bmc))
            *bmc |= NEEDED_MASK;

        if (COUNTER(*bmc) == COUNTER_MAX)
            wake_up(&bitmap->overflow_wait);

        (*bmc)--;
        if (*bmc <= 2) {
            md_bitmap_set_pending(&bitmap->counts, offset);
            bitmap->allclean = 0;
        }
        spin_unlock_irqrestore(&bitmap->counts.lock, flags);
        offset += blocks;
        if (sectors > blocks)
            sectors -= blocks;
        else
            sectors = 0;
    }
}
EXPORT_SYMBOL(md_bitmap_endwrite);
static int __bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
                               int degraded)
{
    bitmap_counter_t *bmc;
    int rv;

    if (bitmap == NULL) {/* FIXME or bitmap set as 'failed' */
        *blocks = 1024;
        return 1; /* always resync if no bitmap */
    }
    spin_lock_irq(&bitmap->counts.lock);
    bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
    rv = 0;
    if (bmc) {
        /* locked */
        if (RESYNC(*bmc))
            rv = 1;
        else if (NEEDED(*bmc)) {
            rv = 1;
            if (!degraded) { /* don't set/clear bits if degraded */
                *bmc |= RESYNC_MASK;
                *bmc &= ~NEEDED_MASK;
            }
        }
    }
    spin_unlock_irq(&bitmap->counts.lock);
    return rv;
}
int md_bitmap_start_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks,
                         int degraded)
{
    /* bitmap_start_sync must always report on multiples of whole
     * pages, otherwise resync (which is very PAGE_SIZE based) will
     * get confused.
     * So call __bitmap_start_sync repeatedly (if needed) until
     * At least PAGE_SIZE>>9 blocks are covered.
     * Return the 'or' of the result.
     */
    int rv = 0;
    sector_t blocks1;

    *blocks = 0;
    while (*blocks < (PAGE_SIZE>>9)) {
        rv |= __bitmap_start_sync(bitmap, offset,
                                  &blocks1, degraded);
        offset += blocks1;
        *blocks += blocks1;
    }

    return rv;
}
EXPORT_SYMBOL(md_bitmap_start_sync);
void md_bitmap_end_sync(struct bitmap *bitmap, sector_t offset, sector_t *blocks, int aborted)
{
    bitmap_counter_t *bmc;
    unsigned long flags;

    if (bitmap == NULL) {
        *blocks = 1024;
        return;
    }
    spin_lock_irqsave(&bitmap->counts.lock, flags);
    bmc = md_bitmap_get_counter(&bitmap->counts, offset, blocks, 0);
    if (bmc == NULL)
        goto unlock;
    /* locked */
    if (RESYNC(*bmc)) {
        *bmc &= ~RESYNC_MASK;

        if (!NEEDED(*bmc) && aborted)
            *bmc |= NEEDED_MASK;
        else {
            if (*bmc <= 2) {
                md_bitmap_set_pending(&bitmap->counts, offset);
                bitmap->allclean = 0;
            }
        }
    }
 unlock:
    spin_unlock_irqrestore(&bitmap->counts.lock, flags);
}
EXPORT_SYMBOL(md_bitmap_end_sync);
void md_bitmap_close_sync(struct bitmap *bitmap)
{
    /* Sync has finished, and any bitmap chunks that weren't synced
     * properly have been aborted.  It remains to us to clear the
     * RESYNC bit wherever it is still on
     */
    sector_t sector = 0;
    sector_t blocks;

    if (!bitmap)
        return;
    while (sector < bitmap->mddev->resync_max_sectors) {
        md_bitmap_end_sync(bitmap, sector, &blocks, 0);
        sector += blocks;
    }
}
EXPORT_SYMBOL(md_bitmap_close_sync);
void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
{
    sector_t s = 0;
    sector_t blocks;

    if (!bitmap)
        return;
    if (sector == 0) {
        bitmap->last_end_sync = jiffies;
        return;
    }
    if (!force && time_before(jiffies, (bitmap->last_end_sync
                                        + bitmap->mddev->bitmap_info.daemon_sleep)))
        return;
    wait_event(bitmap->mddev->recovery_wait,
               atomic_read(&bitmap->mddev->recovery_active) == 0);

    bitmap->mddev->curr_resync_completed = sector;
    set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
    sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
    s = 0;
    while (s < sector && s < bitmap->mddev->resync_max_sectors) {
        md_bitmap_end_sync(bitmap, s, &blocks, 0);
        s += blocks;
    }
    bitmap->last_end_sync = jiffies;
    sysfs_notify(&bitmap->mddev->kobj, NULL, "sync_completed");
}
EXPORT_SYMBOL(md_bitmap_cond_end_sync);
void md_bitmap_sync_with_cluster(struct mddev *mddev,
                                 sector_t old_lo, sector_t old_hi,
                                 sector_t new_lo, sector_t new_hi)
{
    struct bitmap *bitmap = mddev->bitmap;
    sector_t sector, blocks = 0;

    for (sector = old_lo; sector < new_lo; ) {
        md_bitmap_end_sync(bitmap, sector, &blocks, 0);
        sector += blocks;
    }
    WARN((blocks > new_lo) && old_lo, "alignment is not correct for lo\n");

    for (sector = old_hi; sector < new_hi; ) {
        md_bitmap_start_sync(bitmap, sector, &blocks, 0);
        sector += blocks;
    }
    WARN((blocks > new_hi) && old_hi, "alignment is not correct for hi\n");
}
EXPORT_SYMBOL(md_bitmap_sync_with_cluster);
static void md_bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int needed)
{
    /* For each chunk covered by any of these sectors, set the
     * counter to 2 and possibly set resync_needed.  They should all
     * be 0 at this point
     */

    sector_t secs;
    bitmap_counter_t *bmc;

    spin_lock_irq(&bitmap->counts.lock);
    bmc = md_bitmap_get_counter(&bitmap->counts, offset, &secs, 1);
    if (!bmc) {
        spin_unlock_irq(&bitmap->counts.lock);
        return;
    }
    if (!*bmc) {
        *bmc = 2;
        md_bitmap_count_page(&bitmap->counts, offset, 1);
        md_bitmap_set_pending(&bitmap->counts, offset);
        bitmap->allclean = 0;
    }
    if (needed)
        *bmc |= NEEDED_MASK;
    spin_unlock_irq(&bitmap->counts.lock);
}
/* dirty the memory and file bits for bitmap chunks "s" to "e" */
void md_bitmap_dirty_bits(struct bitmap *bitmap, unsigned long s, unsigned long e)
{
    unsigned long chunk;

    for (chunk = s; chunk <= e; chunk++) {
        sector_t sec = (sector_t)chunk << bitmap->counts.chunkshift;
        md_bitmap_set_memory_bits(bitmap, sec, 1);
        md_bitmap_file_set_bit(bitmap, sec);
        if (sec < bitmap->mddev->recovery_cp)
            /* We are asserting that the array is dirty,
             * so move the recovery_cp address back so
             * that it is obvious that it is dirty
             */
            bitmap->mddev->recovery_cp = sec;
    }
}
/*
 * flush out any pending updates
 */
void md_bitmap_flush(struct mddev *mddev)
{
    struct bitmap *bitmap = mddev->bitmap;
    long sleep;

    if (!bitmap) /* there was no bitmap */
        return;

    /* run the daemon_work three time to ensure everything is flushed
     * that can be
     */
    sleep = mddev->bitmap_info.daemon_sleep * 2;
    bitmap->daemon_lastrun -= sleep;
    md_bitmap_daemon_work(mddev);
    bitmap->daemon_lastrun -= sleep;
    md_bitmap_daemon_work(mddev);
    bitmap->daemon_lastrun -= sleep;
    md_bitmap_daemon_work(mddev);
    md_bitmap_update_sb(bitmap);
}
/*
 * free memory that was allocated
 */
void md_bitmap_free(struct bitmap *bitmap)
{
    unsigned long k, pages;
    struct bitmap_page *bp;

    if (!bitmap) /* there was no bitmap */
        return;

    if (bitmap->sysfs_can_clear)
        sysfs_put(bitmap->sysfs_can_clear);

    if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info &&
        bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev))
        md_cluster_stop(bitmap->mddev);

    /* Shouldn't be needed - but just in case.... */
    wait_event(bitmap->write_wait,
               atomic_read(&bitmap->pending_writes) == 0);

    /* release the bitmap file  */
    md_bitmap_file_unmap(&bitmap->storage);

    bp = bitmap->counts.bp;
    pages = bitmap->counts.pages;

    /* free all allocated memory */

    if (bp) /* deallocate the page memory */
        for (k = 0; k < pages; k++)
            if (bp[k].map && !bp[k].hijacked)
                kfree(bp[k].map);
    kfree(bp);
    kfree(bitmap);
}
EXPORT_SYMBOL(md_bitmap_free);
void md_bitmap_wait_behind_writes(struct mddev *mddev)
{
    struct bitmap *bitmap = mddev->bitmap;

    /* wait for behind writes to complete */
    if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
        pr_debug("md:%s: behind writes in progress - waiting to stop.\n",
                 mdname(mddev));
        /* need to kick something here to make sure I/O goes? */
        wait_event(bitmap->behind_wait,
                   atomic_read(&bitmap->behind_writes) == 0);
    }
}
void md_bitmap_destroy(struct mddev *mddev)
{
    struct bitmap *bitmap = mddev->bitmap;

    if (!bitmap) /* there was no bitmap */
        return;

    md_bitmap_wait_behind_writes(mddev);

    mutex_lock(&mddev->bitmap_info.mutex);
    spin_lock(&mddev->lock);
    mddev->bitmap = NULL; /* disconnect from the md device */
    spin_unlock(&mddev->lock);
    mutex_unlock(&mddev->bitmap_info.mutex);
    if (mddev->thread)
        mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;

    md_bitmap_free(bitmap);
}
/*
 * initialize the bitmap structure
 * if this returns an error, bitmap_destroy must be called to do clean up
 * once mddev->bitmap is set
 */
struct bitmap *md_bitmap_create(struct mddev *mddev, int slot)
{
    struct bitmap *bitmap;
    sector_t blocks = mddev->resync_max_sectors;
    struct file *file = mddev->bitmap_info.file;
    int err;
    struct kernfs_node *bm = NULL;

    BUILD_BUG_ON(sizeof(bitmap_super_t) != 256);

    BUG_ON(file && mddev->bitmap_info.offset);

    if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
        pr_notice("md/raid:%s: array with journal cannot have bitmap\n",
                  mdname(mddev));
        return ERR_PTR(-EBUSY);
    }

    bitmap = kzalloc(sizeof(*bitmap), GFP_KERNEL);
    if (!bitmap)
        return ERR_PTR(-ENOMEM);

    spin_lock_init(&bitmap->counts.lock);
    atomic_set(&bitmap->pending_writes, 0);
    init_waitqueue_head(&bitmap->write_wait);
    init_waitqueue_head(&bitmap->overflow_wait);
    init_waitqueue_head(&bitmap->behind_wait);

    bitmap->mddev = mddev;
    bitmap->cluster_slot = slot;

    if (mddev->kobj.sd)
        bm = sysfs_get_dirent(mddev->kobj.sd, "bitmap");
    if (bm) {
        bitmap->sysfs_can_clear = sysfs_get_dirent(bm, "can_clear");
        sysfs_put(bm);
    } else
        bitmap->sysfs_can_clear = NULL;

    bitmap->storage.file = file;
    if (file) {
        get_file(file);
        /* As future accesses to this file will use bmap,
         * and bypass the page cache, we must sync the file
         * first.
         */
        vfs_fsync(file, 1);
    }
    /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
    if (!mddev->bitmap_info.external) {
        /*
         * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
         * instructing us to create a new on-disk bitmap instance.
         */
        if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
            err = md_bitmap_new_disk_sb(bitmap);
        else
            err = md_bitmap_read_sb(bitmap);
    } else {
        err = 0;
        if (mddev->bitmap_info.chunksize == 0 ||
            mddev->bitmap_info.daemon_sleep == 0)
            /* chunksize and time_base need to be
             * set first. */
            err = -EINVAL;
    }
    if (err)
        goto error;

    bitmap->daemon_lastrun = jiffies;
    err = md_bitmap_resize(bitmap, blocks, mddev->bitmap_info.chunksize, 1);
    if (err)
        goto error;

    pr_debug("created bitmap (%lu pages) for device %s\n",
             bitmap->counts.pages, bmname(bitmap));

    err = test_bit(BITMAP_WRITE_ERROR, &bitmap->flags) ? -EIO : 0;
    if (err)
        goto error;

    return bitmap;
 error:
    md_bitmap_free(bitmap);
    return ERR_PTR(err);
}
int md_bitmap_load(struct mddev *mddev)
{
    int err = 0;
    sector_t start = 0;
    sector_t sector = 0;
    struct bitmap *bitmap = mddev->bitmap;

    if (!bitmap)
        goto out;

    if (mddev_is_clustered(mddev))
        md_cluster_ops->load_bitmaps(mddev, mddev->bitmap_info.nodes);

    /* Clear out old bitmap info first: Either there is none, or we
     * are resuming after someone else has possibly changed things,
     * so we should forget old cached info.
     * All chunks should be clean, but some might need_sync.
     */
    while (sector < mddev->resync_max_sectors) {
        sector_t blocks;
        md_bitmap_start_sync(bitmap, sector, &blocks, 0);
        sector += blocks;
    }
    md_bitmap_close_sync(bitmap);

    if (mddev->degraded == 0
        || bitmap->events_cleared == mddev->events)
        /* no need to keep dirty bits to optimise a
         * re-add of a missing device */
        start = mddev->recovery_cp;

    mutex_lock(&mddev->bitmap_info.mutex);
    err = md_bitmap_init_from_disk(bitmap, start);
    mutex_unlock(&mddev->bitmap_info.mutex);

    if (err)
        goto out;
    clear_bit(BITMAP_STALE, &bitmap->flags);

    /* Kick recovery in case any bits were set */
    set_bit(MD_RECOVERY_NEEDED, &bitmap->mddev->recovery);

    mddev->thread->timeout = mddev->bitmap_info.daemon_sleep;
    md_wakeup_thread(mddev->thread);

    md_bitmap_update_sb(bitmap);

    if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
        err = -EIO;
out:
    return err;
}
EXPORT_SYMBOL_GPL(md_bitmap_load);
struct bitmap *get_bitmap_from_slot(struct mddev *mddev, int slot)
{
    int rv = 0;
    struct bitmap *bitmap;

    bitmap = md_bitmap_create(mddev, slot);
    if (IS_ERR(bitmap)) {
        rv = PTR_ERR(bitmap);
        return ERR_PTR(rv);
    }

    rv = md_bitmap_init_from_disk(bitmap, 0);
    if (rv) {
        md_bitmap_free(bitmap);
        return ERR_PTR(rv);
    }

    return bitmap;
}
EXPORT_SYMBOL(get_bitmap_from_slot);
/* Loads the bitmap associated with slot and copies the resync information
 * to our bitmap
 */
int md_bitmap_copy_from_slot(struct mddev *mddev, int slot,
                             sector_t *low, sector_t *high, bool clear_bits)
{
    int rv = 0, i, j;
    sector_t block, lo = 0, hi = 0;
    struct bitmap_counts *counts;
    struct bitmap *bitmap;

    bitmap = get_bitmap_from_slot(mddev, slot);
    if (IS_ERR(bitmap)) {
        pr_err("%s can't get bitmap from slot %d\n", __func__, slot);
        return -1;
    }

    counts = &bitmap->counts;
    for (j = 0; j < counts->chunks; j++) {
        block = (sector_t)j << counts->chunkshift;
        if (md_bitmap_file_test_bit(bitmap, block)) {
            if (!lo)
                lo = block;
            hi = block;
            md_bitmap_file_clear_bit(bitmap, block);
            md_bitmap_set_memory_bits(mddev->bitmap, block, 1);
            md_bitmap_file_set_bit(mddev->bitmap, block);
        }
    }

    if (clear_bits) {
        md_bitmap_update_sb(bitmap);
        /* BITMAP_PAGE_PENDING is set, but bitmap_unplug needs
         * BITMAP_PAGE_DIRTY or _NEEDWRITE to write ... */
        for (i = 0; i < bitmap->storage.file_pages; i++)
            if (test_page_attr(bitmap, i, BITMAP_PAGE_PENDING))
                set_page_attr(bitmap, i, BITMAP_PAGE_NEEDWRITE);
        md_bitmap_unplug(bitmap);
    }
    md_bitmap_unplug(mddev->bitmap);
    *low = lo;
    *high = hi;
    md_bitmap_free(bitmap);

    return rv;
}
EXPORT_SYMBOL_GPL(md_bitmap_copy_from_slot);
void md_bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
{
    unsigned long chunk_kb;
    struct bitmap_counts *counts;

    if (!bitmap)
        return;

    counts = &bitmap->counts;

    chunk_kb = bitmap->mddev->bitmap_info.chunksize >> 10;
    seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
               "%lu%s chunk",
               counts->pages - counts->missing_pages,
               counts->pages,
               (counts->pages - counts->missing_pages)
               << (PAGE_SHIFT - 10),
               chunk_kb ? chunk_kb : bitmap->mddev->bitmap_info.chunksize,
               chunk_kb ? "KB" : "B");
    if (bitmap->storage.file) {
        seq_printf(seq, ", file: ");
        seq_file_path(seq, bitmap->storage.file, " \t\n");
    }

    seq_printf(seq, "\n");
}
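/*
 * Example of the resulting /proc/mdstat line, with illustrative values:
 *
 *     bitmap: 3/8 pages [12KB], 65536KB chunk, file: /var/md0.bitmap
 */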
int md_bitmap_resize(struct bitmap *bitmap, sector_t blocks,
                     int chunksize, int init)
{
    /* If chunk_size is 0, choose an appropriate chunk size.
     * Then possibly allocate new storage space.
     * Then quiesce, copy bits, replace bitmap, and re-start
     *
     * This function is called both to set up the initial bitmap
     * and to resize the bitmap while the array is active.
     * If this happens as a result of the array being resized,
     * chunksize will be zero, and we need to choose a suitable
     * chunksize, otherwise we use what we are given.
     */
    struct bitmap_storage store;
    struct bitmap_counts old_counts;
    unsigned long chunks;
    sector_t block;
    sector_t old_blocks, new_blocks;
    int chunkshift;
    int ret = 0;
    long pages;
    struct bitmap_page *new_bp;

    if (bitmap->storage.file && !init) {
        pr_info("md: cannot resize file-based bitmap\n");
        return -EINVAL;
    }

    if (chunksize == 0) {
        /* If there is enough space, leave the chunk size unchanged,
         * else increase by factor of two until there is enough space.
         */
        long bytes;
        long space = bitmap->mddev->bitmap_info.space;

        if (space == 0) {
            /* We don't know how much space there is, so limit
             * to current size - in sectors.
             */
            bytes = DIV_ROUND_UP(bitmap->counts.chunks, 8);
            if (!bitmap->mddev->bitmap_info.external)
                bytes += sizeof(bitmap_super_t);
            space = DIV_ROUND_UP(bytes, 512);
            bitmap->mddev->bitmap_info.space = space;
        }
        chunkshift = bitmap->counts.chunkshift;
        chunkshift--;
        do {
            /* 'chunkshift' is shift from block size to chunk size */
            chunkshift++;
            chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
            bytes = DIV_ROUND_UP(chunks, 8);
            if (!bitmap->mddev->bitmap_info.external)
                bytes += sizeof(bitmap_super_t);
        } while (bytes > (space << 9));
    } else
        chunkshift = ffz(~chunksize) - BITMAP_BLOCK_SHIFT;

    chunks = DIV_ROUND_UP_SECTOR_T(blocks, 1 << chunkshift);
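    /*
     * Worked example of the arithmetic above (illustrative numbers):
     * chunkshift converts 512-byte blocks to chunks.  For a requested
     * chunksize of 64MiB (2^26 bytes), ffz(~chunksize) is 26 and
     * BITMAP_BLOCK_SHIFT is 9, so chunkshift = 17, and a 1TiB array
     * (2^31 sectors) needs 2^31 >> 17 = 16384 chunks.
     */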
2106 memset(&store
, 0, sizeof(store
));
2107 if (bitmap
->mddev
->bitmap_info
.offset
|| bitmap
->mddev
->bitmap_info
.file
)
2108 ret
= md_bitmap_storage_alloc(&store
, chunks
,
2109 !bitmap
->mddev
->bitmap_info
.external
,
2110 mddev_is_clustered(bitmap
->mddev
)
2111 ? bitmap
->cluster_slot
: 0);
2113 md_bitmap_file_unmap(&store
);
2117 pages
= DIV_ROUND_UP(chunks
, PAGE_COUNTER_RATIO
);
2119 new_bp
= kcalloc(pages
, sizeof(*new_bp
), GFP_KERNEL
);
2122 md_bitmap_file_unmap(&store
);
2127 bitmap
->mddev
->pers
->quiesce(bitmap
->mddev
, 1);
2129 store
.file
= bitmap
->storage
.file
;
2130 bitmap
->storage
.file
= NULL
;
2132 if (store
.sb_page
&& bitmap
->storage
.sb_page
)
2133 memcpy(page_address(store
.sb_page
),
2134 page_address(bitmap
->storage
.sb_page
),
2135 sizeof(bitmap_super_t
));
2136 md_bitmap_file_unmap(&bitmap
->storage
);
2137 bitmap
->storage
= store
;
2139 old_counts
= bitmap
->counts
;
2140 bitmap
->counts
.bp
= new_bp
;
2141 bitmap
->counts
.pages
= pages
;
2142 bitmap
->counts
.missing_pages
= pages
;
2143 bitmap
->counts
.chunkshift
= chunkshift
;
2144 bitmap
->counts
.chunks
= chunks
;
2145 bitmap
->mddev
->bitmap_info
.chunksize
= 1 << (chunkshift
+
2146 BITMAP_BLOCK_SHIFT
);
2148 blocks
= min(old_counts
.chunks
<< old_counts
.chunkshift
,
2149 chunks
<< chunkshift
);
2151 spin_lock_irq(&bitmap
->counts
.lock
);
	/* For cluster raid, need to pre-allocate bitmap */
	if (mddev_is_clustered(bitmap->mddev)) {
		unsigned long page;

		for (page = 0; page < pages; page++) {
			ret = md_bitmap_checkpage(&bitmap->counts, page, 1, 1);
			if (ret) {
				unsigned long k;

				/* deallocate the page memory */
				for (k = 0; k < page; k++)
					kfree(new_bp[k].map);
				kfree(new_bp);

				/* restore some fields from old_counts */
				bitmap->counts.bp = old_counts.bp;
				bitmap->counts.pages = old_counts.pages;
				bitmap->counts.missing_pages = old_counts.pages;
				bitmap->counts.chunkshift = old_counts.chunkshift;
				bitmap->counts.chunks = old_counts.chunks;
				bitmap->mddev->bitmap_info.chunksize =
					1 << (old_counts.chunkshift + BITMAP_BLOCK_SHIFT);
				blocks = old_counts.chunks << old_counts.chunkshift;
				pr_warn("Could not pre-allocate in-memory bitmap for cluster raid\n");
				break;
			} else
				bitmap->counts.bp[page].count += 1;
		}
	}
	for (block = 0; block < blocks; ) {
		bitmap_counter_t *bmc_old, *bmc_new;
		int set;

		bmc_old = md_bitmap_get_counter(&old_counts, block, &old_blocks, 0);
		set = bmc_old && NEEDED(*bmc_old);

		if (set) {
			bmc_new = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (*bmc_new == 0) {
				/* need to set on-disk bits too. */
				sector_t end = block + new_blocks;
				sector_t start = block >> chunkshift;

				start <<= chunkshift;
				while (start < end) {
					md_bitmap_file_set_bit(bitmap, block);
					start += 1 << chunkshift;
				}
				*bmc_new = 2;
				md_bitmap_count_page(&bitmap->counts, block, 1);
				md_bitmap_set_pending(&bitmap->counts, block);
			}
			*bmc_new |= NEEDED_MASK;
			if (new_blocks < old_blocks)
				old_blocks = new_blocks;
		}
		block += old_blocks;
	}
	if (bitmap->counts.bp != old_counts.bp) {
		unsigned long k;

		for (k = 0; k < old_counts.pages; k++)
			if (!old_counts.bp[k].hijacked)
				kfree(old_counts.bp[k].map);
		kfree(old_counts.bp);
	}
	if (!init) {
		int i;

		while (block < (chunks << chunkshift)) {
			bitmap_counter_t *bmc;

			bmc = md_bitmap_get_counter(&bitmap->counts, block, &new_blocks, 1);
			if (bmc) {
				/* new space. It needs to be resynced, so
				 * we set NEEDED_MASK.
				 */
				if (*bmc == 0) {
					*bmc = NEEDED_MASK | 2;
					md_bitmap_count_page(&bitmap->counts, block, 1);
					md_bitmap_set_pending(&bitmap->counts, block);
				}
			}
			block += new_blocks;
		}
		for (i = 0; i < bitmap->storage.file_pages; i++)
			set_page_attr(bitmap, i, BITMAP_PAGE_DIRTY);
	}
	spin_unlock_irq(&bitmap->counts.lock);

	if (!init) {
		md_bitmap_unplug(bitmap);
		bitmap->mddev->pers->quiesce(bitmap->mddev, 0);
	}
	ret = 0;
err:
	return ret;
}
EXPORT_SYMBOL_GPL(md_bitmap_resize);
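/*
 * Illustrative caller sketch (not part of this file; error handling and
 * locking simplified): a personality's ->resize() method typically grows
 * the bitmap before publishing the new array size, roughly as raid1 does:
 *
 *	sector_t newsize = raid1_size(mddev, sectors, 0);
 *
 *	if (mddev->bitmap) {
 *		int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
 *		if (ret)
 *			return ret;
 *	}
 *	md_set_array_sectors(mddev, newsize);
 */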
static ssize_t
location_show(struct mddev *mddev, char *page)
{
	ssize_t len;

	if (mddev->bitmap_info.file)
		len = sprintf(page, "file");
	else if (mddev->bitmap_info.offset)
		len = sprintf(page, "%+lld", (long long)mddev->bitmap_info.offset);
	else
		len = sprintf(page, "none");
	len += sprintf(page+len, "\n");
	return len;
}
static ssize_t
location_store(struct mddev *mddev, const char *buf, size_t len)
{
	int rv;

	rv = mddev_lock(mddev);
	if (rv)
		return rv;
	if (mddev->pers) {
		if (!mddev->pers->quiesce) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->recovery || mddev->sync_thread) {
			rv = -EBUSY;
			goto out;
		}
	}

	if (mddev->bitmap || mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset) {
		/* bitmap already configured. Only option is to clear it */
		if (strncmp(buf, "none", 4) != 0) {
			rv = -EBUSY;
			goto out;
		}
		if (mddev->pers) {
			mddev_suspend(mddev);
			md_bitmap_destroy(mddev);
			mddev_resume(mddev);
		}
		mddev->bitmap_info.offset = 0;
		if (mddev->bitmap_info.file) {
			struct file *f = mddev->bitmap_info.file;
			mddev->bitmap_info.file = NULL;
			fput(f);
		}
	} else {
		/* No bitmap, OK to set a location */
		long long offset;
		if (strncmp(buf, "none", 4) == 0)
			/* nothing to be done */;
		else if (strncmp(buf, "file:", 5) == 0) {
			/* Not supported yet */
			rv = -EINVAL;
			goto out;
		} else {
			if (buf[0] == '+')
				rv = kstrtoll(buf+1, 10, &offset);
			else
				rv = kstrtoll(buf, 10, &offset);
			if (rv)
				goto out;
			if (offset == 0) {
				rv = -EINVAL;
				goto out;
			}
			if (mddev->bitmap_info.external == 0 &&
			    mddev->major_version == 0 &&
			    offset != mddev->bitmap_info.default_offset) {
				rv = -EINVAL;
				goto out;
			}
			mddev->bitmap_info.offset = offset;
			if (mddev->pers) {
				struct bitmap *bitmap;
				bitmap = md_bitmap_create(mddev, -1);
				mddev_suspend(mddev);
				if (IS_ERR(bitmap))
					rv = PTR_ERR(bitmap);
				else {
					mddev->bitmap = bitmap;
					rv = md_bitmap_load(mddev);
					if (rv)
						mddev->bitmap_info.offset = 0;
				}
				if (rv) {
					md_bitmap_destroy(mddev);
					mddev_resume(mddev);
					goto out;
				}
				mddev_resume(mddev);
			}
		}
	}
	if (!mddev->external) {
		/* Ensure new bitmap info is stored in
		 * metadata promptly.
		 */
		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	rv = 0;
out:
	mddev_unlock(mddev);
	if (rv)
		return rv;
	return len;
}

static struct md_sysfs_entry bitmap_location =
__ATTR(location, S_IRUGO|S_IWUSR, location_show, location_store);
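/*
 * Example usage from user space (paths assumed for illustration; the
 * attributes in this file appear under the array's md/bitmap/ sysfs
 * directory):
 *
 *	echo "+8"  > /sys/block/md0/md/bitmap/location	# internal bitmap,
 *							# offset from the sb
 *	echo none  > /sys/block/md0/md/bitmap/location	# remove the bitmap
 */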
/* 'bitmap/space' is the space available at 'location' for the
 * bitmap.  This allows the kernel to know when it is safe to
 * resize the bitmap to match a resized array.
 */
static ssize_t
space_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.space);
}

static ssize_t
space_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long sectors;
	int rv;

	rv = kstrtoul(buf, 10, &sectors);
	if (rv)
		return rv;

	if (sectors == 0)
		return -EINVAL;

	if (mddev->bitmap &&
	    sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
		return -EFBIG; /* Bitmap is too big for this small space */

	/* could make sure it isn't too big, but that isn't really
	 * needed - user-space should be careful.
	 */
	mddev->bitmap_info.space = sectors;
	return len;
}

static struct md_sysfs_entry bitmap_space =
__ATTR(space, S_IRUGO|S_IWUSR, space_show, space_store);
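/*
 * Example of the check above (illustrative numbers): a bitmap whose
 * storage.bytes is 6400 occupies (6400 + 511) >> 9 = 13 sectors, so
 * writing any value below 13 to 'space' fails with -EFBIG, while 13 or
 * more is accepted and recorded in bitmap_info.space.
 */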
static ssize_t
timeout_show(struct mddev *mddev, char *page)
{
	ssize_t len;
	unsigned long secs = mddev->bitmap_info.daemon_sleep / HZ;
	unsigned long jifs = mddev->bitmap_info.daemon_sleep % HZ;

	len = sprintf(page, "%lu", secs);
	if (jifs)
		len += sprintf(page+len, ".%03u", jiffies_to_msecs(jifs));
	len += sprintf(page+len, "\n");
	return len;
}

static ssize_t
timeout_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* timeout can be set at any time */
	unsigned long timeout;
	int rv = strict_strtoul_scaled(buf, &timeout, 4);
	if (rv)
		return rv;

	/* just to make sure we don't overflow... */
	if (timeout >= LONG_MAX / HZ)
		return -EINVAL;

	timeout = timeout * HZ / 10000;

	if (timeout >= MAX_SCHEDULE_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT-1;
	if (timeout < 1)
		timeout = 1;
	mddev->bitmap_info.daemon_sleep = timeout;
	if (mddev->thread) {
		/* if thread->timeout is MAX_SCHEDULE_TIMEOUT, then
		 * the bitmap is all clean and we don't need to
		 * adjust the timeout right now
		 */
		if (mddev->thread->timeout < MAX_SCHEDULE_TIMEOUT) {
			mddev->thread->timeout = timeout;
			md_wakeup_thread(mddev->thread);
		}
	}
	return len;
}

static struct md_sysfs_entry bitmap_timeout =
__ATTR(timeout, S_IRUGO|S_IWUSR, timeout_show, timeout_store);
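/*
 * Example of the value format accepted by timeout_store() (illustrative):
 * the buffer is parsed with a scale of four decimal places, i.e. in units
 * of 1/10000 of a second, so
 *
 *	"5"   -> timeout = 50000 * HZ / 10000 = 5 * HZ
 *	"0.5" -> timeout =  5000 * HZ / 10000 = HZ / 2
 *
 * before being stored in bitmap_info.daemon_sleep (in jiffies).
 */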
static ssize_t
backlog_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.max_write_behind);
}

static ssize_t
backlog_store(struct mddev *mddev, const char *buf, size_t len)
{
	unsigned long backlog;
	int rv = kstrtoul(buf, 10, &backlog);
	if (rv)
		return rv;
	if (backlog > COUNTER_MAX)
		return -EINVAL;
	mddev->bitmap_info.max_write_behind = backlog;
	return len;
}

static struct md_sysfs_entry bitmap_backlog =
__ATTR(backlog, S_IRUGO|S_IWUSR, backlog_show, backlog_store);
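/*
 * Example (illustrative, path assumed): 'backlog' bounds the number of
 * outstanding write-behind requests to write-mostly members and matches
 * mdadm's --write-behind= setting, e.g.:
 *
 *	echo 256 > /sys/block/md0/md/bitmap/backlog
 *
 * Values above COUNTER_MAX are rejected with -EINVAL.
 */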
static ssize_t
chunksize_show(struct mddev *mddev, char *page)
{
	return sprintf(page, "%lu\n", mddev->bitmap_info.chunksize);
}

static ssize_t
chunksize_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* Can only be changed when no bitmap is active */
	int rv;
	unsigned long csize;

	if (mddev->bitmap)
		return -EBUSY;
	rv = kstrtoul(buf, 10, &csize);
	if (rv)
		return rv;
	if (csize < 512 ||
	    !is_power_of_2(csize))
		return -EINVAL;
	mddev->bitmap_info.chunksize = csize;
	return len;
}

static struct md_sysfs_entry bitmap_chunksize =
__ATTR(chunksize, S_IRUGO|S_IWUSR, chunksize_show, chunksize_store);
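/*
 * Example (illustrative, path assumed): 'chunksize' is given in bytes and
 * can only be changed while no bitmap exists, so a typical sequence is
 *
 *	echo none    > /sys/block/md0/md/bitmap/location
 *	echo 1048576 > /sys/block/md0/md/bitmap/chunksize	# 1MiB
 *	echo "+8"    > /sys/block/md0/md/bitmap/location
 *
 * Values below 512 or not a power of two are rejected with -EINVAL.
 */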
static ssize_t metadata_show(struct mddev *mddev, char *page)
{
	if (mddev_is_clustered(mddev))
		return sprintf(page, "clustered\n");
	return sprintf(page, "%s\n", (mddev->bitmap_info.external
				      ? "external" : "internal"));
}

static ssize_t metadata_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap ||
	    mddev->bitmap_info.file ||
	    mddev->bitmap_info.offset)
		return -EBUSY;
	if (strncmp(buf, "external", 8) == 0)
		mddev->bitmap_info.external = 1;
	else if ((strncmp(buf, "internal", 8) == 0) ||
		 (strncmp(buf, "clustered", 9) == 0))
		mddev->bitmap_info.external = 0;
	else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_metadata =
__ATTR(metadata, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
static ssize_t can_clear_show(struct mddev *mddev, char *page)
{
	int len;

	spin_lock(&mddev->lock);
	if (mddev->bitmap)
		len = sprintf(page, "%s\n", (mddev->bitmap->need_sync ?
					     "false" : "true"));
	else
		len = sprintf(page, "\n");
	spin_unlock(&mddev->lock);
	return len;
}

static ssize_t can_clear_store(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap == NULL)
		return -ENOENT;
	if (strncmp(buf, "false", 5) == 0)
		mddev->bitmap->need_sync = 1;
	else if (strncmp(buf, "true", 4) == 0) {
		if (mddev->degraded)
			return -EBUSY;
		mddev->bitmap->need_sync = 0;
	} else
		return -EINVAL;
	return len;
}

static struct md_sysfs_entry bitmap_can_clear =
__ATTR(can_clear, S_IRUGO|S_IWUSR, can_clear_show, can_clear_store);
static ssize_t
behind_writes_used_show(struct mddev *mddev, char *page)
{
	ssize_t ret;

	spin_lock(&mddev->lock);
	if (mddev->bitmap == NULL)
		ret = sprintf(page, "0\n");
	else
		ret = sprintf(page, "%lu\n",
			      mddev->bitmap->behind_writes_used);
	spin_unlock(&mddev->lock);
	return ret;
}

static ssize_t
behind_writes_used_reset(struct mddev *mddev, const char *buf, size_t len)
{
	if (mddev->bitmap)
		mddev->bitmap->behind_writes_used = 0;
	return len;
}

static struct md_sysfs_entry max_backlog_used =
__ATTR(max_backlog_used, S_IRUGO | S_IWUSR,
       behind_writes_used_show, behind_writes_used_reset);
static struct attribute *md_bitmap_attrs[] = {
	&bitmap_location.attr,
	&bitmap_space.attr,
	&bitmap_timeout.attr,
	&bitmap_backlog.attr,
	&bitmap_chunksize.attr,
	&bitmap_metadata.attr,
	&bitmap_can_clear.attr,
	&max_backlog_used.attr,
	NULL
};
struct attribute_group md_bitmap_group = {
	.name = "bitmap",
	.attrs = md_bitmap_attrs,
};
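/*
 * Sketch of how this group is wired up (assumed from md.c, simplified):
 * the md core registers it against the mddev kobject when the array
 * device is allocated, which creates the md/bitmap/ directory holding
 * the attributes above:
 *
 *	if (mddev->kobj.sd &&
 *	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
 *		pr_debug("md: cannot register extra attributes for %s\n",
 *			 mdname(mddev));
 */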