#define F2FS_FEATURE_COMPRESSION 0x00002000
#define F2FS_FEATURE_RO 0x00004000
#define F2FS_FEATURE_DEVICE_ALIAS 0x00008000
+#define F2FS_FEATURE_PACKED_SSA 0x00010000
#define __F2FS_HAS_FEATURE(raw_super, mask) \
((raw_super->feature & cpu_to_le32(mask)) != 0)
F2FS_FEATURE_FUNCS(compression, COMPRESSION);
F2FS_FEATURE_FUNCS(readonly, RO);
F2FS_FEATURE_FUNCS(device_alias, DEVICE_ALIAS);
+F2FS_FEATURE_FUNCS(packed_ssa, PACKED_SSA);
#ifdef CONFIG_BLK_DEV_ZONED
static inline bool f2fs_zone_is_seq(struct f2fs_sb_info *sbi, int devi,
unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
SUM_TYPE_DATA : SUM_TYPE_NODE;
unsigned char data_type = (type == SUM_TYPE_DATA) ? DATA : NODE;
- int submitted = 0;
+ int submitted = 0, sum_blk_cnt;
if (__is_large_section(sbi)) {
sec_end_segno = rounddown(end_segno, SEGS_PER_SEC(sbi));
sanity_check_seg_type(sbi, get_seg_entry(sbi, segno)->type);
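+ /*
+  * Align the start segment to a packed SSA block boundary and work out
+  * how many summary blocks cover [segno, end_segno) for readahead.
+  */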
+ segno = rounddown(segno, SUMS_PER_BLOCK);
+ sum_blk_cnt = DIV_ROUND_UP(end_segno - segno, SUMS_PER_BLOCK);
/* readahead multiple ssa blocks that have contiguous addresses */
if (__is_large_section(sbi))
f2fs_ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
- end_segno - segno, META_SSA, true);
+ sum_blk_cnt, META_SSA, true);
/* reference all summary page */
while (segno < end_segno) {
- struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno++);
+ struct folio *sum_folio = f2fs_get_sum_folio(sbi, segno);
+
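+ /* one summary folio covers SUMS_PER_BLOCK segments */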
+ segno += SUMS_PER_BLOCK;
if (IS_ERR(sum_folio)) {
int err = PTR_ERR(sum_folio);
- end_segno = segno - 1;
- for (segno = start_segno; segno < end_segno; segno++) {
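+ /* release the summary folios referenced so far, one per packed block */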
+ end_segno = segno - SUMS_PER_BLOCK;
+ segno = rounddown(start_segno, SUMS_PER_BLOCK);
+ while (segno < end_segno) {
sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
folio_put_refs(sum_folio, 2);
+ segno += SUMS_PER_BLOCK;
}
return err;
}
blk_start_plug(&plug);
- for (segno = start_segno; segno < end_segno; segno++) {
- struct f2fs_summary_block *sum;
+ segno = start_segno;
+ while (segno < end_segno) {
+ unsigned int cur_segno;
/* find segment summary of victim */
struct folio *sum_folio = filemap_get_folio(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
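+ /* first segment past the range packed into this summary folio */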
+ unsigned int block_end_segno = rounddown(segno, SUMS_PER_BLOCK)
+ + SUMS_PER_BLOCK;
+
+ if (block_end_segno > end_segno)
+ block_end_segno = end_segno;
if (is_cursec(sbi, GET_SEC_FROM_SEG(sbi, segno))) {
f2fs_err(sbi, "%s: segment %u is used by log",
__func__, segno);
f2fs_bug_on(sbi, 1);
- goto skip;
+ goto next_block;
}
- if (get_valid_blocks(sbi, segno, false) == 0)
- goto freed;
- if (gc_type == BG_GC && __is_large_section(sbi) &&
- migrated >= sbi->migration_granularity)
- goto skip;
if (!folio_test_uptodate(sum_folio) ||
unlikely(f2fs_cp_error(sbi)))
- goto skip;
+ goto next_block;
- sum = folio_address(sum_folio);
- if (type != GET_SUM_TYPE((&sum->footer))) {
- f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SIT and SSA",
- segno, type, GET_SUM_TYPE((&sum->footer)));
- f2fs_stop_checkpoint(sbi, false,
- STOP_CP_REASON_CORRUPTED_SUMMARY);
- goto skip;
- }
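+ /* GC every victim segment whose summary is packed into this folio */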
+ for (cur_segno = segno; cur_segno < block_end_segno;
+ cur_segno++) {
+ struct f2fs_summary_block *sum;
- /*
- * this is to avoid deadlock:
- * - lock_page(sum_page) - f2fs_replace_block
- * - check_valid_map() - down_write(sentry_lock)
- * - down_read(sentry_lock) - change_curseg()
- * - lock_page(sum_page)
- */
- if (type == SUM_TYPE_NODE)
- submitted += gc_node_segment(sbi, sum->entries, segno,
- gc_type);
- else
- submitted += gc_data_segment(sbi, sum->entries, gc_list,
- segno, gc_type,
- force_migrate);
+ if (get_valid_blocks(sbi, cur_segno, false) == 0)
+ goto freed;
+ if (gc_type == BG_GC && __is_large_section(sbi) &&
+ migrated >= sbi->migration_granularity)
+ continue;
- stat_inc_gc_seg_count(sbi, data_type, gc_type);
- sbi->gc_reclaimed_segs[sbi->gc_mode]++;
- migrated++;
+ sum = SUM_BLK_PAGE_ADDR(sum_folio, cur_segno);
+ if (type != GET_SUM_TYPE((&sum->footer))) {
+ f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SIT and SSA",
+ cur_segno, type, GET_SUM_TYPE((&sum->footer)));
+ f2fs_stop_checkpoint(sbi, false,
+ STOP_CP_REASON_CORRUPTED_SUMMARY);
+ continue;
+ }
-freed:
- if (gc_type == FG_GC &&
- get_valid_blocks(sbi, segno, false) == 0)
- seg_freed++;
+ /*
+ * this is to avoid deadlock:
+ * - lock_page(sum_page) - f2fs_replace_block
+ * - check_valid_map() - down_write(sentry_lock)
+ * - down_read(sentry_lock) - change_curseg()
+ * - lock_page(sum_page)
+ */
+ if (type == SUM_TYPE_NODE)
+ submitted += gc_node_segment(sbi, sum->entries,
+ cur_segno, gc_type);
+ else
+ submitted += gc_data_segment(sbi, sum->entries,
+ gc_list, cur_segno,
+ gc_type, force_migrate);
- if (__is_large_section(sbi))
- sbi->next_victim_seg[gc_type] =
- (segno + 1 < sec_end_segno) ?
- segno + 1 : NULL_SEGNO;
-skip:
+ stat_inc_gc_seg_count(sbi, data_type, gc_type);
+ sbi->gc_reclaimed_segs[sbi->gc_mode]++;
+ migrated++;
+
+freed:
+ if (gc_type == FG_GC &&
+ get_valid_blocks(sbi, cur_segno, false) == 0)
+ seg_freed++;
+
+ if (__is_large_section(sbi))
+ sbi->next_victim_seg[gc_type] =
+ (cur_segno + 1 < sec_end_segno) ?
+ cur_segno + 1 : NULL_SEGNO;
+ }
+next_block:
folio_put_refs(sum_folio, 2);
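+ /* advance past the segments covered by this summary folio */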
+ segno = block_end_segno;
}
if (submitted)
sum_folio = f2fs_get_sum_folio(sbi, segno);
if (IS_ERR(sum_folio))
return PTR_ERR(sum_folio);
- sum_node = folio_address(sum_folio);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, segno);
sum = sum_node->entries[blkoff];
f2fs_folio_put(sum_folio, true);
got_it:
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
void *src, block_t blk_addr)
{
- struct folio *folio = f2fs_grab_meta_folio(sbi, blk_addr);
+ struct folio *folio;
+
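+ /*
+  * With packed summaries, read the existing meta block (retrying on
+  * failure) instead of grabbing a fresh folio.
+  */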
+ if (SUMS_PER_BLOCK == 1)
+ folio = f2fs_grab_meta_folio(sbi, blk_addr);
+ else
+ folio = f2fs_get_meta_folio_retry(sbi, blk_addr);
+
+ if (IS_ERR(folio))
+ return;
memcpy(folio_address(folio), src, PAGE_SIZE);
folio_mark_dirty(folio);
}
static void write_sum_page(struct f2fs_sb_info *sbi,
- struct f2fs_summary_block *sum_blk, block_t blk_addr)
+ struct f2fs_summary_block *sum_blk, unsigned int segno)
{
- f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
+ struct folio *folio;
+
+ if (SUMS_PER_BLOCK == 1) {
+ f2fs_update_meta_page(sbi, (void *)sum_blk,
+ GET_SUM_BLOCK(sbi, segno));
+ return;
+ }
+
+ folio = f2fs_get_sum_folio(sbi, segno);
+ if (IS_ERR(folio))
+ return;
+
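+ /* update only this segment's 4KB summary area inside the shared folio */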
+ memcpy(SUM_BLK_PAGE_ADDR(folio, segno), sum_blk, sizeof(*sum_blk));
+ folio_mark_dirty(folio);
+ f2fs_folio_put(folio, true);
}
static void write_current_sum_page(struct f2fs_sb_info *sbi,
int ret;
if (curseg->inited)
- write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, segno));
+ write_sum_page(sbi, curseg->sum_blk, segno);
segno = __get_next_segno(sbi, type);
ret = get_new_segment(sbi, &segno, new_sec, pinning);
struct folio *sum_folio;
if (curseg->inited)
- write_sum_page(sbi, curseg->sum_blk, GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
__set_test_and_inuse(sbi, new_segno);
memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
return PTR_ERR(sum_folio);
}
- sum_node = folio_address(sum_folio);
+ sum_node = SUM_BLK_PAGE_ADDR(sum_folio, new_segno);
memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
f2fs_folio_put(sum_folio, true);
return 0;
goto out;
if (get_valid_blocks(sbi, curseg->segno, false)) {
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
} else {
mutex_lock(&DIRTY_I(sbi)->seglist_lock);
__set_test_and_free(sbi, curseg->segno, true);
if (segment_full) {
if (type == CURSEG_COLD_DATA_PINNED &&
!((curseg->segno + 1) % sbi->segs_per_sec)) {
- write_sum_page(sbi, curseg->sum_blk,
- GET_SUM_BLOCK(sbi, curseg->segno));
+ write_sum_page(sbi, curseg->sum_blk, curseg->segno);
reset_curseg_fields(curseg);
goto skip_new_segment;
}
#define GET_ZONE_FROM_SEG(sbi, segno) \
GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
-#define GET_SUM_BLOCK(sbi, segno) \
- ((sbi)->sm_info->ssa_blkaddr + (segno))
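+/*
+ * Number of 4KB summary areas packed into one f2fs block
+ * (e.g. 4 when F2FS_BLKSIZE is 16KB).
+ */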
+#define SUMS_PER_BLOCK (F2FS_BLKSIZE / F2FS_SUM_BLKSIZE)
+#define GET_SUM_BLOCK(sbi, segno) \
(SM_I(sbi)->ssa_blkaddr + ((segno) / SUMS_PER_BLOCK))
+#define GET_SUM_BLKOFF(segno) ((segno) % SUMS_PER_BLOCK)
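+/* address, inside the folio, of the 4KB summary area for the given segment */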
+#define SUM_BLK_PAGE_ADDR(folio, segno) \
+ (folio_address(folio) + GET_SUM_BLKOFF(segno) * F2FS_SUM_BLKSIZE)
#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = (type))
if (sanity_check_area_boundary(sbi, folio, index))
return -EFSCORRUPTED;
+ /*
+ * Reject the legacy summary layout when the block size is larger
+ * than 4KB: modern f2fs-tools packs multiple 4KB summary areas into
+ * one block, whereas legacy versions used a whole block per summary,
+ * which made the SSA much larger.
+ */
+ if (SUMS_PER_BLOCK > 1 &&
+ !__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_PACKED_SSA)) {
+ f2fs_err(sbi, "Device was formatted with legacy f2fs-tools; please reformat with a version that supports the packed_ssa feature for block sizes larger than 4KB");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
if (f2fs_sb_has_compression(sbi))
len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "compression");
+ if (f2fs_sb_has_packed_ssa(sbi))
+ len += sysfs_emit_at(buf, len, "%s%s",
+ len ? ", " : "", "packed_ssa");
len += sysfs_emit_at(buf, len, "%s%s",
len ? ", " : "", "pin_file");
len += sysfs_emit_at(buf, len, "\n");
#ifdef CONFIG_UNICODE
F2FS_FEATURE_RO_ATTR(linear_lookup);
#endif
+F2FS_FEATURE_RO_ATTR(packed_ssa);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
#ifdef CONFIG_UNICODE
BASE_ATTR_LIST(linear_lookup),
#endif
+ BASE_ATTR_LIST(packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_feat);
F2FS_SB_FEATURE_RO_ATTR(compression, COMPRESSION);
F2FS_SB_FEATURE_RO_ATTR(readonly, RO);
F2FS_SB_FEATURE_RO_ATTR(device_alias, DEVICE_ALIAS);
+F2FS_SB_FEATURE_RO_ATTR(packed_ssa, PACKED_SSA);
static struct attribute *f2fs_sb_feat_attrs[] = {
ATTR_LIST(sb_encryption),
ATTR_LIST(sb_compression),
ATTR_LIST(sb_readonly),
ATTR_LIST(sb_device_alias),
+ ATTR_LIST(sb_packed_ssa),
NULL,
};
ATTRIBUTE_GROUPS(f2fs_sb_feat);
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
+#define F2FS_SUM_BLKSIZE 4096 /* summary block is fixed at 4096 bytes */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
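+/* summary geometry stays tied to the 4KB summary block, not F2FS_BLKSIZE */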
+#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))