kill_dev_dax(dev_dax);
}
+static void fsdev_clear_ops(void *data)
+{
+ struct dev_dax *dev_dax = data;
+
+ dax_set_ops(dev_dax->dax_dev, NULL);
+}
+
/*
* Page map operations for FS-DAX mode
* Similar to fsdax_pagemap_ops in drivers/nvdimm/pmem.c
if (rc)
return rc;
+ /* Set the dax operations for fs-dax access path */
+ rc = dax_set_ops(dax_dev, &dev_dax_ops);
+ if (rc)
+ return rc;
+
+ rc = devm_add_action_or_reset(dev, fsdev_clear_ops, dev_dax);
+ if (rc)
+ return rc;
+
run_dax(dax_dev);
return devm_add_action_or_reset(dev, fsdev_kill, dev_dax);
}
if (!dax_alive(dax_dev))
return -ENXIO;
+ if (!dax_dev->ops)
+ return -EOPNOTSUPP;
+
if (nr_pages < 0)
return -EINVAL;
if (!dax_alive(dax_dev))
return -ENXIO;
+
+ if (!dax_dev->ops)
+ return -EOPNOTSUPP;
+
/*
* There are no callers that want to zero more than one page as of now.
* Once users are there, this check can be removed after the
size_t dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
void *addr, size_t bytes, struct iov_iter *iter)
{
- if (!dax_dev->ops->recovery_write)
+ if (!dax_dev->ops || !dax_dev->ops->recovery_write)
return 0;
return dax_dev->ops->recovery_write(dax_dev, pgoff, addr, bytes, iter);
}
}
EXPORT_SYMBOL_GPL(set_dax_nomc);
+/**
+ * dax_set_ops - set the dax_operations for a dax_device
+ * @dax_dev: the dax_device to configure
+ * @ops: the operations to set (may be NULL to clear)
+ *
+ * This allows drivers to set the dax_operations after the dax_device
+ * has been allocated. This is needed when the device is created before
+ * the driver that needs specific ops is bound (e.g., fsdev_dax binding
+ * to a dev_dax created by hmem).
+ *
+ * When setting non-NULL ops, fails if ops are already set (returns -EBUSY).
+ * When clearing ops (NULL), always succeeds.
+ *
+ * Return: 0 on success, -EBUSY if ops already set
+ */
+int dax_set_ops(struct dax_device *dax_dev, const struct dax_operations *ops)
+{
+ if (ops) {
+ /* Setting ops: fail if already set */
+ if (cmpxchg(&dax_dev->ops, NULL, ops) != NULL)
+ return -EBUSY;
+ } else {
+ /* Clearing ops: always allowed */
+ dax_dev->ops = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dax_set_ops);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
+int dax_set_ops(struct dax_device *dax_dev, const struct dax_operations *ops);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,