* will not race with any other ebs.
*/
if (folio->mapping)
- lockdep_assert_held(&folio->mapping->private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
if (fs_info->nodesize >= PAGE_SIZE) {
if (!folio_test_private(folio))
* Take private lock to ensure the subpage won't be detached
* in the meantime.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
break;
}
spin_lock_irqsave(&subpage->lock, flags);
if (!test_bit(bit_start + fs_info->subpage_info->dirty_offset,
subpage->bitmaps)) {
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
bit_start++;
continue;
}
*/
eb = find_extent_buffer_nolock(fs_info, start);
spin_unlock_irqrestore(&subpage->lock, flags);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
/*
* The eb has already reached 0 refs thus find_extent_buffer()
if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE)
return submit_eb_subpage(page, wbc);
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
* crashing the machine for something we can survive anyway.
*/
if (WARN_ON(!eb)) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
if (eb == ctx->eb) {
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
return 0;
}
ret = atomic_inc_not_zero(&eb->refs);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
if (!ret)
return 0;
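/*
 * Illustrative sketch only, not part of the patch: the lookup pattern the
 * hunks above rely on.  The folio private pointer may only be trusted while
 * the owning mapping->i_private_lock is held, and a reference has to be
 * taken on the object behind it before the lock is dropped.  "example_eb"
 * and "example_grab_eb" are placeholder names, not btrfs symbols.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct example_eb {			/* stand-in for struct extent_buffer */
	atomic_t refs;
};

static struct example_eb *example_grab_eb(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct example_eb *eb = NULL;

	spin_lock(&mapping->i_private_lock);
	if (folio_test_private(folio)) {
		eb = folio_get_private(folio);
		/* Hand out the eb only if it is still alive. */
		if (!atomic_inc_not_zero(&eb->refs))
			eb = NULL;
	}
	spin_unlock(&mapping->i_private_lock);
	return eb;
}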
{
struct btrfs_subpage *subpage;
- lockdep_assert_held(&folio->mapping->private_lock);
+ lockdep_assert_held(&folio->mapping->i_private_lock);
if (folio_test_private(folio)) {
subpage = folio_get_private(folio);
/*
* For mapped eb, we're going to change the folio private, which should
- * be done under the private_lock.
+ * be done under the i_private_lock.
*/
if (mapped)
- spin_lock(&folio->mapping->private_lock);
+ spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) {
if (mapped)
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return;
}
folio_detach_private(folio);
}
if (mapped)
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
return;
}
if (!folio_range_has_eb(fs_info, folio))
btrfs_detach_subpage(fs_info, folio);
- spin_unlock(&folio->mapping->private_lock);
+ spin_unlock(&folio->mapping->i_private_lock);
}
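/*
 * Illustrative sketch only: detaching folio private for a mapped folio.
 * As in the hunk above, the detach is serialized by i_private_lock so it
 * cannot race with lookups that dereference the private pointer.  The
 * function name is a placeholder.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static void *example_detach_private(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	void *priv = NULL;

	spin_lock(&mapping->i_private_lock);
	if (folio_test_private(folio))
		priv = folio_detach_private(folio);
	spin_unlock(&mapping->i_private_lock);

	/* Caller now owns whatever was attached, if anything. */
	return priv;
}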
/* Release all pages attached to the extent buffer */
/*
* Preallocate folio private for subpage case, so that we won't
- * allocate memory with private_lock nor page lock hold.
+ * allocate memory with i_private_lock nor page lock hold.
*
* The memory will be freed by attach_extent_buffer_page() or freed
* manually if we exit earlier.
* and free the allocated page.
*/
folio = eb->folios[i];
- spin_lock(&mapping->private_lock);
+ spin_lock(&mapping->i_private_lock);
/* Should not fail, as we have preallocated the memory */
ret = attach_extent_buffer_folio(eb, folio, prealloc);
ASSERT(!ret);
* Thus needs no special handling in error path.
*/
btrfs_folio_inc_eb_refs(fs_info, folio);
- spin_unlock(&mapping->private_lock);
+ spin_unlock(&mapping->i_private_lock);
WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
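/*
 * Illustrative sketch only: the allocate-outside, attach-inside pattern
 * described in the "Preallocate folio private" comment above.  Memory is
 * allocated before i_private_lock is taken so that no allocation happens
 * under the spinlock.  Names and the GFP choice are placeholders.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

static int example_attach_private(struct folio *folio, size_t size)
{
	struct address_space *mapping = folio->mapping;
	void *prealloc;

	/* No locks held here, so a sleeping allocation is fine. */
	prealloc = kzalloc(size, GFP_NOFS);
	if (!prealloc)
		return -ENOMEM;

	spin_lock(&mapping->i_private_lock);
	if (!folio_test_private(folio))
		folio_attach_private(folio, prealloc);
	else
		kfree(prealloc);	/* lost the race, someone attached first */
	spin_unlock(&mapping->i_private_lock);

	return 0;
}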
* Finally to check if we have cleared folio private, as if we have
* released all ebs in the page, the folio private should be cleared now.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(page_folio(page)))
ret = 1;
else
ret = 0;
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return ret;
}
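/*
 * Illustrative sketch only: the release-side check performed above.  Once
 * every eb referencing the folio has been dropped, folio private is cleared,
 * and that state is sampled under i_private_lock to decide whether the
 * folio may be released.  The function name is a placeholder.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

static bool example_folio_releasable(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	bool releasable;

	spin_lock(&mapping->i_private_lock);
	releasable = !folio_test_private(folio);
	spin_unlock(&mapping->i_private_lock);

	return releasable;
}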
* We need to make sure nobody is changing folio private, as we rely on
* folio private as the pointer to extent buffer.
*/
- spin_lock(&page->mapping->private_lock);
+ spin_lock(&page->mapping->i_private_lock);
if (!folio_test_private(folio)) {
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 1;
}
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock);
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
return 0;
}
- spin_unlock(&page->mapping->private_lock);
+ spin_unlock(&page->mapping->i_private_lock);
/*
* If tree ref isn't set then we know the ref on this eb is a real ref,