* written back and waited upon before fsync() returns.
*
* The functions mark_buffer_dirty_inode(), fsync_inode_buffers(),
- * inode_has_buffers() and invalidate_inode_buffers() are provided for the
+ * mmb_has_buffers() and invalidate_inode_buffers() are provided for the
* management of a list of dependent buffers in mapping_metadata_bhs struct.
*
* The locking is a little subtle: The list of buffer heads is protected by
}
}
-int inode_has_buffers(struct inode *inode)
+bool mmb_has_buffers(struct mapping_metadata_bhs *mmb)
{
- return !list_empty(&inode->i_data.i_metadata_bhs.list);
+ return !list_empty(&mmb->list);
}
-EXPORT_SYMBOL_GPL(inode_has_buffers);
+EXPORT_SYMBOL_GPL(mmb_has_buffers);
/**
* sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
struct blk_plug plug;
LIST_HEAD(tmp);
- if (list_empty(&mmb->list))
+ if (!mmb_has_buffers(mmb))
return 0;
blk_start_plug(&plug);
*/
void invalidate_inode_buffers(struct inode *inode)
{
- if (inode_has_buffers(inode)) {
- struct mapping_metadata_bhs *mmb = &inode->i_data.i_metadata_bhs;
+ struct mapping_metadata_bhs *mmb = &inode->i_data.i_metadata_bhs;
+ if (mmb_has_buffers(mmb)) {
spin_lock(&mmb->lock);
while (!list_empty(&mmb->list))
__remove_assoc_queue(mmb, BH_ENTRY(mmb->list.next));
}
/* Any metadata buffers to write? */
- if (inode_has_buffers(inode))
+ if (mmb_has_buffers(&inode->i_data.i_metadata_bhs))
return true;
return inode_state_read_once(inode) & I_DIRTY_DATASYNC;
}
void buffer_init(void);
bool try_to_free_buffers(struct folio *folio);
-int inode_has_buffers(struct inode *inode);
+bool mmb_has_buffers(struct mapping_metadata_bhs *mmb);
void invalidate_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void invalidate_bh_lrus(void);