if (umem_dmabuf->revoked)
return;
+
+ if (umem_dmabuf->pinned_revoke)
+ umem_dmabuf->pinned_revoke(umem_dmabuf->private);
+
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
if (umem_dmabuf->pinned) {
dma_buf_unpin(umem_dmabuf->attach);
umem_dmabuf->revoked = 1;
}
+/*
+ * Attach ops for revocable pinned umems: the exporter's move_notify hook is
+ * used as the revocation trigger and tears the mapping down via
+ * ib_umem_dmabuf_revoke_locked() (called with the dmabuf's resv lock held).
+ */
+static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_revocable_ops = {
+	.allow_peer2peer = true,
+	.move_notify = ib_umem_dmabuf_revoke_locked,
+};
+
static struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_and_lock(struct ib_device *device,
struct device *dma_device,
}
EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_with_dma_device);
+/**
+ * ib_umem_dmabuf_get_pinned_revocable_and_lock - Map & pin a revocable dmabuf
+ * @device: IB device.
+ * @offset: Start offset.
+ * @size: Length.
+ * @fd: dmabuf fd.
+ * @access: Access flags.
+ *
+ * Obtain a umem from a dmabuf on behalf of a driver/device that supports
+ * revocation.
+ *
+ * On success the dmabuf's dma_resv lock is held on return. Before unlocking,
+ * the driver must register its revoke callback with
+ * ib_umem_dmabuf_set_revoke_locked().
+ *
+ * On revocation the registered callback is invoked; by the time it returns the
+ * driver must guarantee the region can no longer be accessed, and later access
+ * attempts are expected to generate an AE for MRs.
+ *
+ * When the umem backs an MR, the driver must keep the key in use so it cannot
+ * be handed to a new region until this one is fully deregistered (i.e.,
+ * ibv_dereg_mr). A driver that needs to serialize against revocation may take
+ * dma_resv_lock itself.
+ *
+ * Once this call succeeds, the revoke callback may fire at any time and is
+ * also invoked automatically (serialized) from ib_umem_release. It is invoked
+ * at most once.
+ *
+ * Return: A pointer to ib_umem_dmabuf on success, or an ERR_PTR on failure.
+ */
+struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_revocable_and_lock(struct ib_device *device,
+					     unsigned long offset, size_t size,
+					     int fd, int access)
+{
+	return ib_umem_dmabuf_get_pinned_and_lock(
+		device, device->dma_device, offset, size, fd, access,
+		&ib_umem_dmabuf_attach_pinned_revocable_ops);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned_revocable_and_lock);
+
+/**
+ * ib_umem_dmabuf_set_revoke_locked - Register the revoke callback for a umem
+ * @umem_dmabuf: Revocable umem obtained from
+ *	ib_umem_dmabuf_get_pinned_revocable_and_lock().
+ * @revoke: Callback invoked when the dmabuf is revoked.
+ * @priv: Opaque cookie passed back as @revoke's argument.
+ *
+ * Must be called with the dmabuf's dma_resv lock held (asserted below), i.e.
+ * before the driver drops the lock returned held by
+ * ib_umem_dmabuf_get_pinned_revocable_and_lock().
+ */
+void ib_umem_dmabuf_set_revoke_locked(struct ib_umem_dmabuf *umem_dmabuf,
+				      void (*revoke)(void *priv), void *priv)
+{
+	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
+
+	umem_dmabuf->pinned_revoke = revoke;
+	umem_dmabuf->private = priv;
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_set_revoke_locked);
+
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
unsigned long offset,
size_t size, int fd,
struct scatterlist *last_sg;
unsigned long first_sg_offset;
unsigned long last_sg_trim;
+ void (*pinned_revoke)(void *priv);
void *private;
u8 pinned : 1;
u8 revoked : 1;
size_t size, int fd,
int access);
struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_revocable_and_lock(struct ib_device *device,
+ unsigned long offset, size_t size,
+ int fd, int access);
+void ib_umem_dmabuf_set_revoke_locked(struct ib_umem_dmabuf *umem_dmabuf,
+ void (*revoke)(void *priv), void *priv);
+struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
struct device *dma_device,
unsigned long offset, size_t size,
return ERR_PTR(-EOPNOTSUPP);
}
+/*
+ * NOTE(review): stub returning -EOPNOTSUPP for builds without dmabuf umem
+ * support -- the guarding #ifdef is outside this hunk; confirm it matches the
+ * neighboring stubs.
+ */
+static inline struct ib_umem_dmabuf *
+ib_umem_dmabuf_get_pinned_revocable_and_lock(struct ib_device *device,
+					     unsigned long offset, size_t size,
+					     int fd, int access)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+/* No-op stub counterpart of ib_umem_dmabuf_set_revoke_locked(). */
+static inline void
+ib_umem_dmabuf_set_revoke_locked(struct ib_umem_dmabuf *umem_dmabuf,
+				 void (*revoke)(void *priv), void *priv) {}
+
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
struct device *dma_device,