f2fs: iostat: track read folio counts by order in sysfs stats and tracepoints.
Signed-off-by: Daniel Lee <chullee@google.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
if (!folio)
goto out;
+ f2fs_update_read_folio_count(F2FS_I_SB(inode), folio);
+
folio_in_bio = false;
index = folio->index;
offset = 0;
prefetchw(&folio->flags);
}
+ f2fs_update_read_folio_count(F2FS_I_SB(inode), folio);
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
index = folio->index;
#include <linux/uio.h>
#include <linux/types.h>
+#include <linux/mmzone.h>
#include <linux/page-flags.h>
#include <linux/slab.h>
#include <linux/crc32.h>
unsigned long long iostat_count[NR_IO_TYPE];
unsigned long long iostat_bytes[NR_IO_TYPE];
unsigned long long prev_iostat_bytes[NR_IO_TYPE];
+ unsigned long long iostat_read_folio_count[NR_PAGE_ORDERS];
+ unsigned long long prev_iostat_read_folio_count[NR_PAGE_ORDERS];
bool iostat_enable;
unsigned long iostat_next_period;
unsigned int iostat_period_ms;
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ int i;
if (!sbi->iostat_enable)
return 0;
IOSTAT_INFO_SHOW("fs node", FS_NODE_READ_IO);
IOSTAT_INFO_SHOW("fs meta", FS_META_READ_IO);
+ /* print read folio order stats */
+ seq_printf(seq, "%-23s", "fs read folio order:");
+ for (i = 0; i < NR_PAGE_ORDERS; i++)
+ seq_printf(seq, " %llu", sbi->iostat_read_folio_count[i]);
+ seq_putc(seq, '\n');
+
/* print other IOs */
seq_puts(seq, "[OTHER]\n");
IOSTAT_INFO_SHOW("fs discard", FS_DISCARD_IO);
static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
{
unsigned long long iostat_diff[NR_IO_TYPE];
+ unsigned long long read_folio_count_diff[NR_PAGE_ORDERS];
int i;
unsigned long flags;
sbi->prev_iostat_bytes[i];
sbi->prev_iostat_bytes[i] = sbi->iostat_bytes[i];
}
+
+ for (i = 0; i < NR_PAGE_ORDERS; i++) {
+ read_folio_count_diff[i] = sbi->iostat_read_folio_count[i] -
+ sbi->prev_iostat_read_folio_count[i];
+ sbi->prev_iostat_read_folio_count[i] = sbi->iostat_read_folio_count[i];
+ }
spin_unlock_irqrestore(&sbi->iostat_lock, flags);
- trace_f2fs_iostat(sbi, iostat_diff);
+ trace_f2fs_iostat(sbi, iostat_diff, read_folio_count_diff);
__record_iostat_latency(sbi);
}
sbi->iostat_bytes[i] = 0;
sbi->prev_iostat_bytes[i] = 0;
}
+ for (i = 0; i < NR_PAGE_ORDERS; i++) {
+ sbi->iostat_read_folio_count[i] = 0;
+ sbi->prev_iostat_read_folio_count[i] = 0;
+ }
spin_unlock_irq(&sbi->iostat_lock);
spin_lock_irq(&sbi->iostat_lat_lock);
sbi->iostat_count[type]++;
}
+/*
+ * Account one completed folio read in the per-order iostat counters.
+ * Uses folio_order() as the index into sbi->iostat_read_folio_count[];
+ * does nothing while iostat collection is disabled via sysfs.
+ */
+void f2fs_update_read_folio_count(struct f2fs_sb_info *sbi, struct folio *folio)
+{
+ unsigned int order = folio_order(folio);
+ unsigned long flags;
+
+ if (!sbi->iostat_enable)
+ return;
+
+ /* defensively clamp out-of-range orders into the last bucket */
+ if (order >= NR_PAGE_ORDERS)
+ order = NR_PAGE_ORDERS - 1;
+
+ spin_lock_irqsave(&sbi->iostat_lock, flags);
+ sbi->iostat_read_folio_count[order]++;
+ spin_unlock_irqrestore(&sbi->iostat_lock, flags);
+
+ /* trigger the periodic snapshot (tracepoint emission) if due */
+ f2fs_record_iostat(sbi);
+}
+
void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes)
{
extern void f2fs_reset_iostat(struct f2fs_sb_info *sbi);
extern void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes);
+extern void f2fs_update_read_folio_count(struct f2fs_sb_info *sbi,
+ struct folio *folio);
struct bio_iostat_ctx {
struct f2fs_sb_info *sbi;
#else
static inline void f2fs_update_iostat(struct f2fs_sb_info *sbi, struct inode *inode,
enum iostat_type type, unsigned long long io_bytes) {}
+static inline void f2fs_update_read_folio_count(struct f2fs_sb_info *sbi,
+ struct folio *folio) {}
static inline void iostat_update_and_unbind_ctx(struct bio *bio) {}
static inline void iostat_alloc_and_bind_ctx(struct f2fs_sb_info *sbi,
struct bio *bio, struct bio_post_read_ctx *ctx) {}
#ifdef CONFIG_F2FS_IOSTAT
TRACE_EVENT(f2fs_iostat,
- TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat),
+ TP_PROTO(struct f2fs_sb_info *sbi, unsigned long long *iostat,
+ unsigned long long *read_folio_count),
- TP_ARGS(sbi, iostat),
+ TP_ARGS(sbi, iostat, read_folio_count),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long long, fs_mrio)
__field(unsigned long long, fs_discard)
__field(unsigned long long, fs_reset_zone)
+ __array(unsigned long long, read_folio_count, 11)
),
TP_fast_assign(
__entry->fs_mrio = iostat[FS_META_READ_IO];
__entry->fs_discard = iostat[FS_DISCARD_IO];
__entry->fs_reset_zone = iostat[FS_ZONE_RESET_IO];
+ memset(__entry->read_folio_count, 0, sizeof(__entry->read_folio_count));
+ memcpy(__entry->read_folio_count, read_folio_count,
+ sizeof(unsigned long long) * min_t(int, NR_PAGE_ORDERS, 11));
),
TP_printk("dev = (%d,%d), "
"app [read=%llu (direct=%llu, buffered=%llu), mapped=%llu], "
"compr(buffered=%llu, mapped=%llu)], "
"fs [data=%llu, (gc_data=%llu, cdata=%llu), "
- "node=%llu, meta=%llu]",
+ "node=%llu, meta=%llu], "
+ "read_folio_count [0=%llu, 1=%llu, 2=%llu, 3=%llu, 4=%llu, "
+ "5=%llu, 6=%llu, 7=%llu, 8=%llu, 9=%llu, 10=%llu]",
show_dev(__entry->dev), __entry->app_wio, __entry->app_dio,
__entry->app_bio, __entry->app_mio, __entry->app_bcdio,
__entry->app_mcdio, __entry->fs_dio, __entry->fs_cdio,
__entry->app_rio, __entry->app_drio, __entry->app_brio,
__entry->app_mrio, __entry->app_bcrio, __entry->app_mcrio,
__entry->fs_drio, __entry->fs_gdrio,
- __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio)
+ __entry->fs_cdrio, __entry->fs_nrio, __entry->fs_mrio,
+ __entry->read_folio_count[0], __entry->read_folio_count[1],
+ __entry->read_folio_count[2], __entry->read_folio_count[3],
+ __entry->read_folio_count[4], __entry->read_folio_count[5],
+ __entry->read_folio_count[6], __entry->read_folio_count[7],
+ __entry->read_folio_count[8], __entry->read_folio_count[9],
+ __entry->read_folio_count[10])
);
#ifndef __F2FS_IOSTAT_LATENCY_TYPE