        }
}

+/*
+ * The only thing keeping the address space around is the i_pages lock
+ * (it's cycled in clear_inode() after removing the entries from i_pages).
+ * After we call xas_unlock_irq(), we cannot touch xas->xa.
+ */
+static void wait_entry_unlocked(struct xa_state *xas, void *entry)
+{
+       struct wait_exceptional_entry_queue ewait;
+       wait_queue_head_t *wq;
+
+       init_wait(&ewait.wait);
+       ewait.wait.func = wake_exceptional_entry_func;
+
+       wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+       /*
+        * Unlike get_unlocked_entry() there is no guarantee that this
+        * path ever successfully retrieves an unlocked entry before an
+        * inode dies. Perform a non-exclusive wait in case this path
+        * never successfully performs its own wake up.
+        */
+       prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
+       xas_unlock_irq(xas);
+       schedule();
+       finish_wait(wq, &ewait.wait);
+}
+
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
        /* If we were the only waiter woken, wake the next one */
* @page: The page whose entry we want to lock
*
* Context: Process context.
- * Return: %true if the entry was locked or does not need to be locked.
+ * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
+ * not be locked.
*/
-bool dax_lock_mapping_entry(struct page *page)
+dax_entry_t dax_lock_page(struct page *page)
{
        XA_STATE(xas, NULL, 0);
        void *entry;
-       bool locked;

        /* Ensure page->mapping isn't freed while we look at it */
        rcu_read_lock();
        for (;;) {
                struct address_space *mapping = READ_ONCE(page->mapping);

-               locked = false;
-               if (!dax_mapping(mapping))
+               entry = NULL;
+               if (!mapping || !dax_mapping(mapping))
                        break;

                /*
                 * otherwise we would not have a valid pfn_to_page()
                 * translation.
                 */
-               locked = true;
+               entry = (void *)~0UL;
                if (S_ISCHR(mapping->host->i_mode))
                        break;

                entry = xas_load(&xas);
                if (dax_is_locked(entry)) {
                        rcu_read_unlock();
-                       entry = get_unlocked_entry(&xas);
-                       xas_unlock_irq(&xas);
-                       put_unlocked_entry(&xas, entry);
+                       wait_entry_unlocked(&xas, entry);
                        rcu_read_lock();
                        continue;
                }
                break;
        }
        rcu_read_unlock();
-       return locked;
+       return (dax_entry_t)entry;
}

-void dax_unlock_mapping_entry(struct page *page)
+void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
        struct address_space *mapping = page->mapping;
        XA_STATE(xas, &mapping->i_pages, page->index);
-       void *entry;

        if (S_ISCHR(mapping->host->i_mode))
                return;

-       rcu_read_lock();
-       entry = xas_load(&xas);
-       rcu_read_unlock();
-       entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
-       dax_unlock_entry(&xas, entry);
+       dax_unlock_entry(&xas, (void *)cookie);
}
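
For context, a caller of the reworked API simply stores the returned cookie and hands it back when unlocking. A minimal sketch (the function name and the -EBUSY return are illustrative, not part of this patch; the shape follows how the memory-failure path consumes the API):

        static int handle_dax_page(struct page *page)
        {
                dax_entry_t cookie;

                cookie = dax_lock_page(page);   /* 0 means the entry could not be locked */
                if (!cookie)
                        return -EBUSY;

                /* ... act on the page while its Xarray entry is held locked ... */

                dax_unlock_page(page, cookie);  /* hand the cookie straight back */
                return 0;
        }
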
/*
        i_mmap_lock_read(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
-               unsigned long address, start, end;
+               struct mmu_notifier_range range;
+               unsigned long address;

                cond_resched();

                address = pgoff_address(index, vma);

                /*
-                * Note because we provide start/end to follow_pte_pmd it will
+                * Note because we provide range to follow_pte_pmd it will
                 * call mmu_notifier_invalidate_range_start() on our behalf
                 * before taking any lock.
                 */
-               if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
+               if (follow_pte_pmd(vma->vm_mm, address, &range,
+                                  &ptep, &pmdp, &ptl))
                        continue;

                /*
                        pte_unmap_unlock(ptep, ptl);
                }

-               mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
+               mmu_notifier_invalidate_range_end(&range);
        }
        i_mmap_unlock_read(mapping);
}
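
The reason 'start' and 'end' disappear from this caller is that follow_pte_pmd() now takes the caller's range object and fills it in before issuing the start notification itself. Roughly, and only as a sketch (the exact mmu_notifier_range_init() argument list has changed between kernel versions, so the call below is an assumption rather than a quote of the helper):

        /* Inside follow_pte_pmd(), approximately: fill the range, then notify. */
        if (range) {
                mmu_notifier_range_init(range, mm, address & PMD_MASK,
                                        (address & PMD_MASK) + PMD_SIZE);
                mmu_notifier_invalidate_range_start(range);
        }

The caller then only has to pass &range back to mmu_notifier_invalidate_range_end() once the page tables have been cleaned.
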
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
                struct address_space *mapping, void *entry)
{
-       unsigned long pfn;
+       unsigned long pfn, index, count;
        long ret = 0;
-       size_t size;

        /*
         * A page got tagged dirty in DAX mapping? Something is seriously
        xas_unlock_irq(xas);

        /*
-        * Even if dax_writeback_mapping_range() was given a wbc->range_start
-        * in the middle of a PMD, the 'index' we are given will be aligned to
-        * the start index of the PMD, as will the pfn we pull from 'entry'.
+        * If dax_writeback_mapping_range() was given a wbc->range_start
+        * in the middle of a PMD, the 'index' we use needs to be
+        * aligned to the start of the PMD.
         * This allows us to flush for PMD_SIZE and not have to worry about
         * partial PMD writebacks.
         */
        pfn = dax_to_pfn(entry);
-       size = PAGE_SIZE << dax_entry_order(entry);
+       count = 1UL << dax_entry_order(entry);
+       index = xas->xa_index & ~(count - 1);

-       dax_entry_mkclean(mapping, xas->xa_index, pfn);
-       dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
+       dax_entry_mkclean(mapping, index, pfn);
+       dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
        /*
         * After we have flushed the cache, we can clear the dirty tag. There
         * cannot be new dirty data in the pfn after the flush has completed as
        xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
        dax_wake_entry(xas, entry, false);
-       trace_dax_writeback_one(mapping->host, xas->xa_index,
-                       size >> PAGE_SHIFT);
+       trace_dax_writeback_one(mapping->host, index, count);
        return ret;

 put_unlocked:
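
To make the new index/count arithmetic above concrete, here is a worked example assuming x86-64 page sizes, where a PMD-sized entry has order 9 (the values are illustrative, not taken from this patch):

        /* Writeback lands at xas->xa_index == 0x203 against a PMD entry. */
        count = 1UL << 9;               /* dax_entry_order(entry) == 9 -> 512 pages */
        index = 0x203 & ~(count - 1);   /* 0x203 & ~0x1ff == 0x200, the PMD start   */
        /* dax_flush() then covers count * PAGE_SIZE == 2MiB from the aligned index. */

So even when writeback is asked to start in the middle of a PMD, the whole 2MiB range backing the entry is cleaned and flushed in one go.
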
{
        if (error == 0)
                return VM_FAULT_NOPAGE;
-       if (error == -ENOMEM)
-               return VM_FAULT_OOM;
-       return VM_FAULT_SIGBUS;
+       return vmf_error(error);
}
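
For reference, vmf_error() keeps the old behaviour here: around the time of this change it is essentially the following inline from include/linux/mm.h (reproduced from memory, so treat it as approximate rather than authoritative):

        static inline vm_fault_t vmf_error(int err)
        {
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                return VM_FAULT_SIGBUS;
        }

so -ENOMEM still becomes VM_FAULT_OOM and every other error still becomes VM_FAULT_SIGBUS.
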
/*