extern atomic_t netfs_n_wh_write;
extern atomic_t netfs_n_wh_write_done;
extern atomic_t netfs_n_wh_write_failed;
+extern atomic_t netfs_n_wb_lock_skip;
+extern atomic_t netfs_n_wb_lock_wait;

int netfs_stats_show(struct seq_file *m, void *v);

atomic_t netfs_n_wh_write;
atomic_t netfs_n_wh_write_done;
atomic_t netfs_n_wh_write_failed;
+atomic_t netfs_n_wb_lock_skip;
+atomic_t netfs_n_wb_lock_wait;

int netfs_stats_show(struct seq_file *m, void *v)
{
	...
		   atomic_read(&netfs_n_rh_rreq),
		   atomic_read(&netfs_n_rh_sreq),
		   atomic_read(&netfs_n_wh_wstream_conflict));
+	seq_printf(m, "WbLock : skip=%u wait=%u\n",
+		   atomic_read(&netfs_n_wb_lock_skip),
+		   atomic_read(&netfs_n_wb_lock_wait));
	return fscache_stats_show(m);
}
EXPORT_SYMBOL(netfs_stats_show);
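
The new counters are incremented via netfs_stat() in the writeback hunk that follows. That helper is not part of this excerpt; below is a minimal sketch of the pattern it is assumed to follow, a CONFIG_NETFS_STATS-gated wrapper around atomic_inc(), so the calls below read cleanly. Treat the exact form as an assumption rather than a quotation from the tree.

/* Sketch only, assumed rather than copied from this patch: the usual
 * CONFIG_NETFS_STATS-gated counter helper that the netfs_stat() calls
 * below rely on.
 */
#include <linux/atomic.h>

#ifdef CONFIG_NETFS_STATS
static inline void netfs_stat(atomic_t *stat)
{
	atomic_inc(stat);	/* record one more occurrence of this event */
}
#else
#define netfs_stat(x) do {} while (0)	/* counters compiled out */
#endif

With a helper of that shape, adding a statistic only needs the atomic_t definition, the matching extern, and one line in netfs_stats_show(), which is exactly what the hunks above add for the two writeback-lock counters.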

	struct folio *folio;
	int error = 0;

-	if (wbc->sync_mode == WB_SYNC_ALL)
+	if (!mutex_trylock(&ictx->wb_lock)) {
+		if (wbc->sync_mode == WB_SYNC_NONE) {
+			netfs_stat(&netfs_n_wb_lock_skip);
+			return 0;
+		}
+		netfs_stat(&netfs_n_wb_lock_wait);
		mutex_lock(&ictx->wb_lock);
-	else if (!mutex_trylock(&ictx->wb_lock))
-		return 0;
+	}

	/* Need the first folio to be able to set up the op. */
	folio = writeback_iter(mapping, wbc, NULL, &error);
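
Read without the diff markers, the patched writeback entry point now always attempts the trylock first: contended WB_SYNC_NONE writeback backs off and records a skip, while synchronous writeback records a wait and blocks. The fragment below is a reconstruction assembled from the hunk above, not a verbatim quote of the surrounding function:

	struct folio *folio;
	int error = 0;

	if (!mutex_trylock(&ictx->wb_lock)) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			/* Opportunistic flush: don't fight for the lock, just note the skip. */
			netfs_stat(&netfs_n_wb_lock_skip);
			return 0;
		}
		/* Data-integrity writeback must make progress: note the wait and block. */
		netfs_stat(&netfs_n_wb_lock_wait);
		mutex_lock(&ictx->wb_lock);
	}

	/* Need the first folio to be able to set up the op. */
	folio = writeback_iter(mapping, wbc, NULL, &error);

The two totals then surface through netfs_stats_show() as the new "WbLock : skip=%u wait=%u" line, making it easy to see how often background writeback is being bounced off the lock versus how often sync writeback has to wait for it.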