1 // SPDX-License-Identifier: GPL-2.0+
3 * NILFS segment usage file.
5 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
7 * Written by Koji Sato.
8 * Revised by Ryusuke Konishi.
11 #include <linux/kernel.h>
13 #include <linux/string.h>
14 #include <linux/buffer_head.h>
15 #include <linux/errno.h>
19 #include <trace/events/nilfs2.h>
22 * struct nilfs_sufile_info - on-memory private data of sufile
23 * @mi: on-memory private data of metadata file
24 * @ncleansegs: number of clean segments
25 * @allocmin: lower limit of allocatable segment range
26 * @allocmax: upper limit of allocatable segment range
28 struct nilfs_sufile_info
{
29 struct nilfs_mdt_info mi
;
30 unsigned long ncleansegs
;/* number of clean segments */
31 __u64 allocmin
; /* lower limit of allocatable segment range */
32 __u64 allocmax
; /* upper limit of allocatable segment range */
/* Cast the mdt private data of the sufile inode to its sufile-specific form. */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
40 static inline unsigned long
41 nilfs_sufile_segment_usages_per_block(const struct inode
*sufile
)
43 return NILFS_MDT(sufile
)->mi_entries_per_block
;
47 nilfs_sufile_get_blkoff(const struct inode
*sufile
, __u64 segnum
)
49 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
51 do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
52 return (unsigned long)t
;
56 nilfs_sufile_get_offset(const struct inode
*sufile
, __u64 segnum
)
58 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
60 return do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
64 nilfs_sufile_segment_usages_in_block(const struct inode
*sufile
, __u64 curr
,
67 return min_t(unsigned long,
68 nilfs_sufile_segment_usages_per_block(sufile
) -
69 nilfs_sufile_get_offset(sufile
, curr
),
73 static struct nilfs_segment_usage
*
74 nilfs_sufile_block_get_segment_usage(const struct inode
*sufile
, __u64 segnum
,
75 struct buffer_head
*bh
, void *kaddr
)
77 return kaddr
+ bh_offset(bh
) +
78 nilfs_sufile_get_offset(sufile
, segnum
) *
79 NILFS_MDT(sufile
)->mi_entry_size
;
82 static inline int nilfs_sufile_get_header_block(struct inode
*sufile
,
83 struct buffer_head
**bhp
)
85 return nilfs_mdt_get_block(sufile
, 0, 0, NULL
, bhp
);
89 nilfs_sufile_get_segment_usage_block(struct inode
*sufile
, __u64 segnum
,
90 int create
, struct buffer_head
**bhp
)
92 return nilfs_mdt_get_block(sufile
,
93 nilfs_sufile_get_blkoff(sufile
, segnum
),
97 static int nilfs_sufile_delete_segment_usage_block(struct inode
*sufile
,
100 return nilfs_mdt_delete_block(sufile
,
101 nilfs_sufile_get_blkoff(sufile
, segnum
));
104 static void nilfs_sufile_mod_counter(struct buffer_head
*header_bh
,
105 u64 ncleanadd
, u64 ndirtyadd
)
107 struct nilfs_sufile_header
*header
;
110 kaddr
= kmap_atomic(header_bh
->b_page
);
111 header
= kaddr
+ bh_offset(header_bh
);
112 le64_add_cpu(&header
->sh_ncleansegs
, ncleanadd
);
113 le64_add_cpu(&header
->sh_ndirtysegs
, ndirtyadd
);
114 kunmap_atomic(kaddr
);
116 mark_buffer_dirty(header_bh
);
120 * nilfs_sufile_get_ncleansegs - return the number of clean segments
121 * @sufile: inode of segment usage file
123 unsigned long nilfs_sufile_get_ncleansegs(struct inode
*sufile
)
125 return NILFS_SUI(sufile
)->ncleansegs
;
129 * nilfs_sufile_updatev - modify multiple segment usages at a time
130 * @sufile: inode of segment usage file
131 * @segnumv: array of segment numbers
132 * @nsegs: size of @segnumv array
133 * @create: creation flag
134 * @ndone: place to store number of modified segments on @segnumv
135 * @dofunc: primitive operation for the update
137 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
138 * against the given array of segments. The @dofunc is called with
139 * buffers of a header block and the sufile block in which the target
140 * segment usage entry is contained. If @ndone is given, the number
141 * of successfully modified segments from the head is stored in the
142 * place @ndone points to.
144 * Return Value: On success, zero is returned. On error, one of the
145 * following negative error codes is returned.
149 * %-ENOMEM - Insufficient amount of memory available.
151 * %-ENOENT - Given segment usage is in hole block (may be returned if
154 * %-EINVAL - Invalid segment usage number
156 int nilfs_sufile_updatev(struct inode
*sufile
, __u64
*segnumv
, size_t nsegs
,
157 int create
, size_t *ndone
,
158 void (*dofunc
)(struct inode
*, __u64
,
159 struct buffer_head
*,
160 struct buffer_head
*))
162 struct buffer_head
*header_bh
, *bh
;
163 unsigned long blkoff
, prev_blkoff
;
165 size_t nerr
= 0, n
= 0;
168 if (unlikely(nsegs
== 0))
171 down_write(&NILFS_MDT(sufile
)->mi_sem
);
172 for (seg
= segnumv
; seg
< segnumv
+ nsegs
; seg
++) {
173 if (unlikely(*seg
>= nilfs_sufile_get_nsegments(sufile
))) {
174 nilfs_warn(sufile
->i_sb
,
175 "%s: invalid segment number: %llu",
176 __func__
, (unsigned long long)*seg
);
185 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
190 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
191 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
196 dofunc(sufile
, *seg
, header_bh
, bh
);
198 if (++seg
>= segnumv
+ nsegs
)
200 prev_blkoff
= blkoff
;
201 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
202 if (blkoff
== prev_blkoff
)
205 /* get different block */
207 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
208 if (unlikely(ret
< 0))
217 up_write(&NILFS_MDT(sufile
)->mi_sem
);
224 int nilfs_sufile_update(struct inode
*sufile
, __u64 segnum
, int create
,
225 void (*dofunc
)(struct inode
*, __u64
,
226 struct buffer_head
*,
227 struct buffer_head
*))
229 struct buffer_head
*header_bh
, *bh
;
232 if (unlikely(segnum
>= nilfs_sufile_get_nsegments(sufile
))) {
233 nilfs_warn(sufile
->i_sb
, "%s: invalid segment number: %llu",
234 __func__
, (unsigned long long)segnum
);
237 down_write(&NILFS_MDT(sufile
)->mi_sem
);
239 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
243 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, create
, &bh
);
245 dofunc(sufile
, segnum
, header_bh
, bh
);
251 up_write(&NILFS_MDT(sufile
)->mi_sem
);
256 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
257 * @sufile: inode of segment usage file
258 * @start: minimum segment number of allocatable region (inclusive)
259 * @end: maximum segment number of allocatable region (inclusive)
261 * Return Value: On success, 0 is returned. On error, one of the
262 * following negative error codes is returned.
264 * %-ERANGE - invalid segment region
266 int nilfs_sufile_set_alloc_range(struct inode
*sufile
, __u64 start
, __u64 end
)
268 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
272 down_write(&NILFS_MDT(sufile
)->mi_sem
);
273 nsegs
= nilfs_sufile_get_nsegments(sufile
);
275 if (start
<= end
&& end
< nsegs
) {
276 sui
->allocmin
= start
;
280 up_write(&NILFS_MDT(sufile
)->mi_sem
);
285 * nilfs_sufile_alloc - allocate a segment
286 * @sufile: inode of segment usage file
287 * @segnump: pointer to segment number
289 * Description: nilfs_sufile_alloc() allocates a clean segment.
291 * Return Value: On success, 0 is returned and the segment number of the
292 * allocated segment is stored in the place pointed by @segnump. On error, one
293 * of the following negative error codes is returned.
297 * %-ENOMEM - Insufficient amount of memory available.
299 * %-ENOSPC - No clean segment left.
301 int nilfs_sufile_alloc(struct inode
*sufile
, __u64
*segnump
)
303 struct buffer_head
*header_bh
, *su_bh
;
304 struct nilfs_sufile_header
*header
;
305 struct nilfs_segment_usage
*su
;
306 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
307 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
308 __u64 segnum
, maxsegnum
, last_alloc
;
310 unsigned long nsegments
, nsus
, cnt
;
313 down_write(&NILFS_MDT(sufile
)->mi_sem
);
315 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
318 kaddr
= kmap_atomic(header_bh
->b_page
);
319 header
= kaddr
+ bh_offset(header_bh
);
320 last_alloc
= le64_to_cpu(header
->sh_last_alloc
);
321 kunmap_atomic(kaddr
);
323 nsegments
= nilfs_sufile_get_nsegments(sufile
);
324 maxsegnum
= sui
->allocmax
;
325 segnum
= last_alloc
+ 1;
326 if (segnum
< sui
->allocmin
|| segnum
> sui
->allocmax
)
327 segnum
= sui
->allocmin
;
329 for (cnt
= 0; cnt
< nsegments
; cnt
+= nsus
) {
330 if (segnum
> maxsegnum
) {
331 if (cnt
< sui
->allocmax
- sui
->allocmin
+ 1) {
333 * wrap around in the limited region.
334 * if allocation started from
335 * sui->allocmin, this never happens.
337 segnum
= sui
->allocmin
;
338 maxsegnum
= last_alloc
;
339 } else if (segnum
> sui
->allocmin
&&
340 sui
->allocmax
+ 1 < nsegments
) {
341 segnum
= sui
->allocmax
+ 1;
342 maxsegnum
= nsegments
- 1;
343 } else if (sui
->allocmin
> 0) {
345 maxsegnum
= sui
->allocmin
- 1;
347 break; /* never happens */
350 trace_nilfs2_segment_usage_check(sufile
, segnum
, cnt
);
351 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 1,
355 kaddr
= kmap_atomic(su_bh
->b_page
);
356 su
= nilfs_sufile_block_get_segment_usage(
357 sufile
, segnum
, su_bh
, kaddr
);
359 nsus
= nilfs_sufile_segment_usages_in_block(
360 sufile
, segnum
, maxsegnum
);
361 for (j
= 0; j
< nsus
; j
++, su
= (void *)su
+ susz
, segnum
++) {
362 if (!nilfs_segment_usage_clean(su
))
364 /* found a clean segment */
365 nilfs_segment_usage_set_dirty(su
);
366 kunmap_atomic(kaddr
);
368 kaddr
= kmap_atomic(header_bh
->b_page
);
369 header
= kaddr
+ bh_offset(header_bh
);
370 le64_add_cpu(&header
->sh_ncleansegs
, -1);
371 le64_add_cpu(&header
->sh_ndirtysegs
, 1);
372 header
->sh_last_alloc
= cpu_to_le64(segnum
);
373 kunmap_atomic(kaddr
);
376 mark_buffer_dirty(header_bh
);
377 mark_buffer_dirty(su_bh
);
378 nilfs_mdt_mark_dirty(sufile
);
382 trace_nilfs2_segment_usage_allocated(sufile
, segnum
);
387 kunmap_atomic(kaddr
);
391 /* no segments left */
398 up_write(&NILFS_MDT(sufile
)->mi_sem
);
402 void nilfs_sufile_do_cancel_free(struct inode
*sufile
, __u64 segnum
,
403 struct buffer_head
*header_bh
,
404 struct buffer_head
*su_bh
)
406 struct nilfs_segment_usage
*su
;
409 kaddr
= kmap_atomic(su_bh
->b_page
);
410 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
411 if (unlikely(!nilfs_segment_usage_clean(su
))) {
412 nilfs_warn(sufile
->i_sb
, "%s: segment %llu must be clean",
413 __func__
, (unsigned long long)segnum
);
414 kunmap_atomic(kaddr
);
417 nilfs_segment_usage_set_dirty(su
);
418 kunmap_atomic(kaddr
);
420 nilfs_sufile_mod_counter(header_bh
, -1, 1);
421 NILFS_SUI(sufile
)->ncleansegs
--;
423 mark_buffer_dirty(su_bh
);
424 nilfs_mdt_mark_dirty(sufile
);
427 void nilfs_sufile_do_scrap(struct inode
*sufile
, __u64 segnum
,
428 struct buffer_head
*header_bh
,
429 struct buffer_head
*su_bh
)
431 struct nilfs_segment_usage
*su
;
435 kaddr
= kmap_atomic(su_bh
->b_page
);
436 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
437 if (su
->su_flags
== cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY
)) &&
438 su
->su_nblocks
== cpu_to_le32(0)) {
439 kunmap_atomic(kaddr
);
442 clean
= nilfs_segment_usage_clean(su
);
443 dirty
= nilfs_segment_usage_dirty(su
);
445 /* make the segment garbage */
446 su
->su_lastmod
= cpu_to_le64(0);
447 su
->su_nblocks
= cpu_to_le32(0);
448 su
->su_flags
= cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY
));
449 kunmap_atomic(kaddr
);
451 nilfs_sufile_mod_counter(header_bh
, clean
? (u64
)-1 : 0, dirty
? 0 : 1);
452 NILFS_SUI(sufile
)->ncleansegs
-= clean
;
454 mark_buffer_dirty(su_bh
);
455 nilfs_mdt_mark_dirty(sufile
);
458 void nilfs_sufile_do_free(struct inode
*sufile
, __u64 segnum
,
459 struct buffer_head
*header_bh
,
460 struct buffer_head
*su_bh
)
462 struct nilfs_segment_usage
*su
;
466 kaddr
= kmap_atomic(su_bh
->b_page
);
467 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
468 if (nilfs_segment_usage_clean(su
)) {
469 nilfs_warn(sufile
->i_sb
, "%s: segment %llu is already clean",
470 __func__
, (unsigned long long)segnum
);
471 kunmap_atomic(kaddr
);
474 WARN_ON(nilfs_segment_usage_error(su
));
475 WARN_ON(!nilfs_segment_usage_dirty(su
));
477 sudirty
= nilfs_segment_usage_dirty(su
);
478 nilfs_segment_usage_set_clean(su
);
479 kunmap_atomic(kaddr
);
480 mark_buffer_dirty(su_bh
);
482 nilfs_sufile_mod_counter(header_bh
, 1, sudirty
? (u64
)-1 : 0);
483 NILFS_SUI(sufile
)->ncleansegs
++;
485 nilfs_mdt_mark_dirty(sufile
);
487 trace_nilfs2_segment_usage_freed(sufile
, segnum
);
491 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
492 * @sufile: inode of segment usage file
493 * @segnum: segment number
495 int nilfs_sufile_mark_dirty(struct inode
*sufile
, __u64 segnum
)
497 struct buffer_head
*bh
;
499 struct nilfs_segment_usage
*su
;
502 down_write(&NILFS_MDT(sufile
)->mi_sem
);
503 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
507 kaddr
= kmap_atomic(bh
->b_page
);
508 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, bh
, kaddr
);
509 if (unlikely(nilfs_segment_usage_error(su
))) {
510 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
512 kunmap_atomic(kaddr
);
514 if (nilfs_segment_is_active(nilfs
, segnum
)) {
515 nilfs_error(sufile
->i_sb
,
516 "active segment %llu is erroneous",
517 (unsigned long long)segnum
);
520 * Segments marked erroneous are never allocated by
521 * nilfs_sufile_alloc(); only active segments, ie,
522 * the segments indexed by ns_segnum or ns_nextnum,
523 * can be erroneous here.
529 nilfs_segment_usage_set_dirty(su
);
530 kunmap_atomic(kaddr
);
531 mark_buffer_dirty(bh
);
532 nilfs_mdt_mark_dirty(sufile
);
536 up_write(&NILFS_MDT(sufile
)->mi_sem
);
541 * nilfs_sufile_set_segment_usage - set usage of a segment
542 * @sufile: inode of segment usage file
543 * @segnum: segment number
544 * @nblocks: number of live blocks in the segment
545 * @modtime: modification time (option)
547 int nilfs_sufile_set_segment_usage(struct inode
*sufile
, __u64 segnum
,
548 unsigned long nblocks
, time64_t modtime
)
550 struct buffer_head
*bh
;
551 struct nilfs_segment_usage
*su
;
555 down_write(&NILFS_MDT(sufile
)->mi_sem
);
556 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
560 kaddr
= kmap_atomic(bh
->b_page
);
561 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, bh
, kaddr
);
564 * Check segusage error and set su_lastmod only when updating
565 * this entry with a valid timestamp, not for cancellation.
567 WARN_ON_ONCE(nilfs_segment_usage_error(su
));
568 su
->su_lastmod
= cpu_to_le64(modtime
);
570 su
->su_nblocks
= cpu_to_le32(nblocks
);
571 kunmap_atomic(kaddr
);
573 mark_buffer_dirty(bh
);
574 nilfs_mdt_mark_dirty(sufile
);
578 up_write(&NILFS_MDT(sufile
)->mi_sem
);
583 * nilfs_sufile_get_stat - get segment usage statistics
584 * @sufile: inode of segment usage file
585 * @sustat: pointer to a structure of segment usage statistics
587 * Description: nilfs_sufile_get_stat() returns information about segment
590 * Return Value: On success, 0 is returned, and segment usage information is
591 * stored in the place pointed by @sustat. On error, one of the following
592 * negative error codes is returned.
596 * %-ENOMEM - Insufficient amount of memory available.
598 int nilfs_sufile_get_stat(struct inode
*sufile
, struct nilfs_sustat
*sustat
)
600 struct buffer_head
*header_bh
;
601 struct nilfs_sufile_header
*header
;
602 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
606 down_read(&NILFS_MDT(sufile
)->mi_sem
);
608 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
612 kaddr
= kmap_atomic(header_bh
->b_page
);
613 header
= kaddr
+ bh_offset(header_bh
);
614 sustat
->ss_nsegs
= nilfs_sufile_get_nsegments(sufile
);
615 sustat
->ss_ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
616 sustat
->ss_ndirtysegs
= le64_to_cpu(header
->sh_ndirtysegs
);
617 sustat
->ss_ctime
= nilfs
->ns_ctime
;
618 sustat
->ss_nongc_ctime
= nilfs
->ns_nongc_ctime
;
619 spin_lock(&nilfs
->ns_last_segment_lock
);
620 sustat
->ss_prot_seq
= nilfs
->ns_prot_seq
;
621 spin_unlock(&nilfs
->ns_last_segment_lock
);
622 kunmap_atomic(kaddr
);
626 up_read(&NILFS_MDT(sufile
)->mi_sem
);
630 void nilfs_sufile_do_set_error(struct inode
*sufile
, __u64 segnum
,
631 struct buffer_head
*header_bh
,
632 struct buffer_head
*su_bh
)
634 struct nilfs_segment_usage
*su
;
638 kaddr
= kmap_atomic(su_bh
->b_page
);
639 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
640 if (nilfs_segment_usage_error(su
)) {
641 kunmap_atomic(kaddr
);
644 suclean
= nilfs_segment_usage_clean(su
);
645 nilfs_segment_usage_set_error(su
);
646 kunmap_atomic(kaddr
);
649 nilfs_sufile_mod_counter(header_bh
, -1, 0);
650 NILFS_SUI(sufile
)->ncleansegs
--;
652 mark_buffer_dirty(su_bh
);
653 nilfs_mdt_mark_dirty(sufile
);
657 * nilfs_sufile_truncate_range - truncate range of segment array
658 * @sufile: inode of segment usage file
659 * @start: start segment number (inclusive)
660 * @end: end segment number (inclusive)
662 * Return Value: On success, 0 is returned. On error, one of the
663 * following negative error codes is returned.
667 * %-ENOMEM - Insufficient amount of memory available.
669 * %-EINVAL - Invalid number of segments specified
671 * %-EBUSY - Dirty or active segments are present in the range
673 static int nilfs_sufile_truncate_range(struct inode
*sufile
,
674 __u64 start
, __u64 end
)
676 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
677 struct buffer_head
*header_bh
;
678 struct buffer_head
*su_bh
;
679 struct nilfs_segment_usage
*su
, *su2
;
680 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
681 unsigned long segusages_per_block
;
682 unsigned long nsegs
, ncleaned
;
689 nsegs
= nilfs_sufile_get_nsegments(sufile
);
692 if (start
> end
|| start
>= nsegs
)
695 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
699 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
702 for (segnum
= start
; segnum
<= end
; segnum
+= n
) {
703 n
= min_t(unsigned long,
704 segusages_per_block
-
705 nilfs_sufile_get_offset(sufile
, segnum
),
707 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
715 kaddr
= kmap_atomic(su_bh
->b_page
);
716 su
= nilfs_sufile_block_get_segment_usage(
717 sufile
, segnum
, su_bh
, kaddr
);
719 for (j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
720 if ((le32_to_cpu(su
->su_flags
) &
721 ~BIT(NILFS_SEGMENT_USAGE_ERROR
)) ||
722 nilfs_segment_is_active(nilfs
, segnum
+ j
)) {
724 kunmap_atomic(kaddr
);
730 for (su
= su2
, j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
731 if (nilfs_segment_usage_error(su
)) {
732 nilfs_segment_usage_set_clean(su
);
736 kunmap_atomic(kaddr
);
738 mark_buffer_dirty(su_bh
);
743 if (n
== segusages_per_block
) {
745 nilfs_sufile_delete_segment_usage_block(sufile
, segnum
);
752 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
753 nilfs_sufile_mod_counter(header_bh
, ncleaned
, 0);
754 nilfs_mdt_mark_dirty(sufile
);
762 * nilfs_sufile_resize - resize segment array
763 * @sufile: inode of segment usage file
764 * @newnsegs: new number of segments
766 * Return Value: On success, 0 is returned. On error, one of the
767 * following negative error codes is returned.
771 * %-ENOMEM - Insufficient amount of memory available.
773 * %-ENOSPC - Enough free space is not left for shrinking
775 * %-EBUSY - Dirty or active segments exist in the region to be truncated
777 int nilfs_sufile_resize(struct inode
*sufile
, __u64 newnsegs
)
779 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
780 struct buffer_head
*header_bh
;
781 struct nilfs_sufile_header
*header
;
782 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
784 unsigned long nsegs
, nrsvsegs
;
787 down_write(&NILFS_MDT(sufile
)->mi_sem
);
789 nsegs
= nilfs_sufile_get_nsegments(sufile
);
790 if (nsegs
== newnsegs
)
794 nrsvsegs
= nilfs_nrsvsegs(nilfs
, newnsegs
);
795 if (newnsegs
< nsegs
&& nsegs
- newnsegs
+ nrsvsegs
> sui
->ncleansegs
)
798 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
802 if (newnsegs
> nsegs
) {
803 sui
->ncleansegs
+= newnsegs
- nsegs
;
804 } else /* newnsegs < nsegs */ {
805 ret
= nilfs_sufile_truncate_range(sufile
, newnsegs
, nsegs
- 1);
809 sui
->ncleansegs
-= nsegs
- newnsegs
;
812 * If the sufile is successfully truncated, immediately adjust
813 * the segment allocation space while locking the semaphore
814 * "mi_sem" so that nilfs_sufile_alloc() never allocates
815 * segments in the truncated space.
817 sui
->allocmax
= newnsegs
- 1;
821 kaddr
= kmap_atomic(header_bh
->b_page
);
822 header
= kaddr
+ bh_offset(header_bh
);
823 header
->sh_ncleansegs
= cpu_to_le64(sui
->ncleansegs
);
824 kunmap_atomic(kaddr
);
826 mark_buffer_dirty(header_bh
);
827 nilfs_mdt_mark_dirty(sufile
);
828 nilfs_set_nsegments(nilfs
, newnsegs
);
833 up_write(&NILFS_MDT(sufile
)->mi_sem
);
838 * nilfs_sufile_get_suinfo -
839 * @sufile: inode of segment usage file
840 * @segnum: segment number to start looking
841 * @buf: array of suinfo
842 * @sisz: byte size of suinfo
843 * @nsi: size of suinfo array
847 * Return Value: On success, 0 is returned and .... On error, one of the
848 * following negative error codes is returned.
852 * %-ENOMEM - Insufficient amount of memory available.
854 ssize_t
nilfs_sufile_get_suinfo(struct inode
*sufile
, __u64 segnum
, void *buf
,
855 unsigned int sisz
, size_t nsi
)
857 struct buffer_head
*su_bh
;
858 struct nilfs_segment_usage
*su
;
859 struct nilfs_suinfo
*si
= buf
;
860 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
861 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
863 unsigned long nsegs
, segusages_per_block
;
867 down_read(&NILFS_MDT(sufile
)->mi_sem
);
869 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
870 nsegs
= min_t(unsigned long,
871 nilfs_sufile_get_nsegments(sufile
) - segnum
,
873 for (i
= 0; i
< nsegs
; i
+= n
, segnum
+= n
) {
874 n
= min_t(unsigned long,
875 segusages_per_block
-
876 nilfs_sufile_get_offset(sufile
, segnum
),
878 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
884 memset(si
, 0, sisz
* n
);
885 si
= (void *)si
+ sisz
* n
;
889 kaddr
= kmap_atomic(su_bh
->b_page
);
890 su
= nilfs_sufile_block_get_segment_usage(
891 sufile
, segnum
, su_bh
, kaddr
);
893 j
++, su
= (void *)su
+ susz
, si
= (void *)si
+ sisz
) {
894 si
->sui_lastmod
= le64_to_cpu(su
->su_lastmod
);
895 si
->sui_nblocks
= le32_to_cpu(su
->su_nblocks
);
896 si
->sui_flags
= le32_to_cpu(su
->su_flags
) &
897 ~BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
898 if (nilfs_segment_is_active(nilfs
, segnum
+ j
))
900 BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
902 kunmap_atomic(kaddr
);
908 up_read(&NILFS_MDT(sufile
)->mi_sem
);
913 * nilfs_sufile_set_suinfo - sets segment usage info
914 * @sufile: inode of segment usage file
915 * @buf: array of suinfo_update
916 * @supsz: byte size of suinfo_update
917 * @nsup: size of suinfo_update array
919 * Description: Takes an array of nilfs_suinfo_update structs and updates
920 * segment usage accordingly. Only the fields indicated by the sup_flags
923 * Return Value: On success, 0 is returned. On error, one of the
924 * following negative error codes is returned.
928 * %-ENOMEM - Insufficient amount of memory available.
930 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
932 ssize_t
nilfs_sufile_set_suinfo(struct inode
*sufile
, void *buf
,
933 unsigned int supsz
, size_t nsup
)
935 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
936 struct buffer_head
*header_bh
, *bh
;
937 struct nilfs_suinfo_update
*sup
, *supend
= buf
+ supsz
* nsup
;
938 struct nilfs_segment_usage
*su
;
940 unsigned long blkoff
, prev_blkoff
;
941 int cleansi
, cleansu
, dirtysi
, dirtysu
;
942 long ncleaned
= 0, ndirtied
= 0;
945 if (unlikely(nsup
== 0))
948 for (sup
= buf
; sup
< supend
; sup
= (void *)sup
+ supsz
) {
949 if (sup
->sup_segnum
>= nilfs
->ns_nsegments
951 (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS
))
952 || (nilfs_suinfo_update_nblocks(sup
) &&
953 sup
->sup_sui
.sui_nblocks
>
954 nilfs
->ns_blocks_per_segment
))
958 down_write(&NILFS_MDT(sufile
)->mi_sem
);
960 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
965 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
966 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
971 kaddr
= kmap_atomic(bh
->b_page
);
972 su
= nilfs_sufile_block_get_segment_usage(
973 sufile
, sup
->sup_segnum
, bh
, kaddr
);
975 if (nilfs_suinfo_update_lastmod(sup
))
976 su
->su_lastmod
= cpu_to_le64(sup
->sup_sui
.sui_lastmod
);
978 if (nilfs_suinfo_update_nblocks(sup
))
979 su
->su_nblocks
= cpu_to_le32(sup
->sup_sui
.sui_nblocks
);
981 if (nilfs_suinfo_update_flags(sup
)) {
983 * Active flag is a virtual flag projected by running
984 * nilfs kernel code - drop it not to write it to
987 sup
->sup_sui
.sui_flags
&=
988 ~BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
990 cleansi
= nilfs_suinfo_clean(&sup
->sup_sui
);
991 cleansu
= nilfs_segment_usage_clean(su
);
992 dirtysi
= nilfs_suinfo_dirty(&sup
->sup_sui
);
993 dirtysu
= nilfs_segment_usage_dirty(su
);
995 if (cleansi
&& !cleansu
)
997 else if (!cleansi
&& cleansu
)
1000 if (dirtysi
&& !dirtysu
)
1002 else if (!dirtysi
&& dirtysu
)
1005 su
->su_flags
= cpu_to_le32(sup
->sup_sui
.sui_flags
);
1008 kunmap_atomic(kaddr
);
1010 sup
= (void *)sup
+ supsz
;
1014 prev_blkoff
= blkoff
;
1015 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
1016 if (blkoff
== prev_blkoff
)
1019 /* get different block */
1020 mark_buffer_dirty(bh
);
1022 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
1023 if (unlikely(ret
< 0))
1026 mark_buffer_dirty(bh
);
1030 if (ncleaned
|| ndirtied
) {
1031 nilfs_sufile_mod_counter(header_bh
, (u64
)ncleaned
,
1033 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
1035 nilfs_mdt_mark_dirty(sufile
);
1039 up_write(&NILFS_MDT(sufile
)->mi_sem
);
1044 * nilfs_sufile_trim_fs() - trim ioctl handle function
1045 * @sufile: inode of segment usage file
1046 * @range: fstrim_range structure
1048 * start: First Byte to trim
1049 * len: number of Bytes to trim from start
1050 * minlen: minimum extent length in Bytes
1052 * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes
1053 * from start to start+len. start is rounded up to the next block boundary
1054 * and start+len is rounded down. For each clean segment blkdev_issue_discard
1055 * function is invoked.
1057 * Return Value: On success, 0 is returned or negative error code, otherwise.
1059 int nilfs_sufile_trim_fs(struct inode
*sufile
, struct fstrim_range
*range
)
1061 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
1062 struct buffer_head
*su_bh
;
1063 struct nilfs_segment_usage
*su
;
1065 size_t n
, i
, susz
= NILFS_MDT(sufile
)->mi_entry_size
;
1066 sector_t seg_start
, seg_end
, start_block
, end_block
;
1067 sector_t start
= 0, nblocks
= 0;
1068 u64 segnum
, segnum_end
, minlen
, len
, max_blocks
, ndiscarded
= 0;
1070 unsigned int sects_per_block
;
1072 sects_per_block
= (1 << nilfs
->ns_blocksize_bits
) /
1073 bdev_logical_block_size(nilfs
->ns_bdev
);
1074 len
= range
->len
>> nilfs
->ns_blocksize_bits
;
1075 minlen
= range
->minlen
>> nilfs
->ns_blocksize_bits
;
1076 max_blocks
= ((u64
)nilfs
->ns_nsegments
* nilfs
->ns_blocks_per_segment
);
1078 if (!len
|| range
->start
>= max_blocks
<< nilfs
->ns_blocksize_bits
)
1081 start_block
= (range
->start
+ nilfs
->ns_blocksize
- 1) >>
1082 nilfs
->ns_blocksize_bits
;
1085 * range->len can be very large (actually, it is set to
1086 * ULLONG_MAX by default) - truncate upper end of the range
1087 * carefully so as not to overflow.
1089 if (max_blocks
- start_block
< len
)
1090 end_block
= max_blocks
- 1;
1092 end_block
= start_block
+ len
- 1;
1094 segnum
= nilfs_get_segnum_of_block(nilfs
, start_block
);
1095 segnum_end
= nilfs_get_segnum_of_block(nilfs
, end_block
);
1097 down_read(&NILFS_MDT(sufile
)->mi_sem
);
1099 while (segnum
<= segnum_end
) {
1100 n
= nilfs_sufile_segment_usages_in_block(sufile
, segnum
,
1103 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
1113 kaddr
= kmap_atomic(su_bh
->b_page
);
1114 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
,
1116 for (i
= 0; i
< n
; ++i
, ++segnum
, su
= (void *)su
+ susz
) {
1117 if (!nilfs_segment_usage_clean(su
))
1120 nilfs_get_segment_range(nilfs
, segnum
, &seg_start
,
1124 /* start new extent */
1126 nblocks
= seg_end
- seg_start
+ 1;
1130 if (start
+ nblocks
== seg_start
) {
1131 /* add to previous extent */
1132 nblocks
+= seg_end
- seg_start
+ 1;
1136 /* discard previous extent */
1137 if (start
< start_block
) {
1138 nblocks
-= start_block
- start
;
1139 start
= start_block
;
1142 if (nblocks
>= minlen
) {
1143 kunmap_atomic(kaddr
);
1145 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1146 start
* sects_per_block
,
1147 nblocks
* sects_per_block
,
1154 ndiscarded
+= nblocks
;
1155 kaddr
= kmap_atomic(su_bh
->b_page
);
1156 su
= nilfs_sufile_block_get_segment_usage(
1157 sufile
, segnum
, su_bh
, kaddr
);
1160 /* start new extent */
1162 nblocks
= seg_end
- seg_start
+ 1;
1164 kunmap_atomic(kaddr
);
1170 /* discard last extent */
1171 if (start
< start_block
) {
1172 nblocks
-= start_block
- start
;
1173 start
= start_block
;
1175 if (start
+ nblocks
> end_block
+ 1)
1176 nblocks
= end_block
- start
+ 1;
1178 if (nblocks
>= minlen
) {
1179 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1180 start
* sects_per_block
,
1181 nblocks
* sects_per_block
,
1184 ndiscarded
+= nblocks
;
1189 up_read(&NILFS_MDT(sufile
)->mi_sem
);
1191 range
->len
= ndiscarded
<< nilfs
->ns_blocksize_bits
;
1196 * nilfs_sufile_read - read or get sufile inode
1197 * @sb: super block instance
1198 * @susize: size of a segment usage entry
1199 * @raw_inode: on-disk sufile inode
1200 * @inodep: buffer to store the inode
1202 int nilfs_sufile_read(struct super_block
*sb
, size_t susize
,
1203 struct nilfs_inode
*raw_inode
, struct inode
**inodep
)
1205 struct inode
*sufile
;
1206 struct nilfs_sufile_info
*sui
;
1207 struct buffer_head
*header_bh
;
1208 struct nilfs_sufile_header
*header
;
1212 if (susize
> sb
->s_blocksize
) {
1213 nilfs_err(sb
, "too large segment usage size: %zu bytes",
1216 } else if (susize
< NILFS_MIN_SEGMENT_USAGE_SIZE
) {
1217 nilfs_err(sb
, "too small segment usage size: %zu bytes",
1222 sufile
= nilfs_iget_locked(sb
, NULL
, NILFS_SUFILE_INO
);
1223 if (unlikely(!sufile
))
1225 if (!(sufile
->i_state
& I_NEW
))
1228 err
= nilfs_mdt_init(sufile
, NILFS_MDT_GFP
, sizeof(*sui
));
1232 nilfs_mdt_set_entry_size(sufile
, susize
,
1233 sizeof(struct nilfs_sufile_header
));
1235 err
= nilfs_read_inode_common(sufile
, raw_inode
);
1239 err
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
1243 sui
= NILFS_SUI(sufile
);
1244 kaddr
= kmap_atomic(header_bh
->b_page
);
1245 header
= kaddr
+ bh_offset(header_bh
);
1246 sui
->ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
1247 kunmap_atomic(kaddr
);
1250 sui
->allocmax
= nilfs_sufile_get_nsegments(sufile
) - 1;
1253 unlock_new_inode(sufile
);
1258 iget_failed(sufile
);