return ERR_PTR(err);
}
+static int irdma_hwdereg_mr(struct ib_mr *ib_mr);
+
+/* Revoke callback for a pinned, revocable dmabuf umem: invalidates the MR's
+ * key in hardware so the device can no longer DMA into the revoked buffer.
+ * Registered via ib_umem_dmabuf_set_revoke_locked() with the iwmr as priv.
+ */
+static void irdma_umem_dmabuf_revoke(void *priv)
+{
+ /* priv is guaranteed to be valid any time this callback is invoked
+ * because we do not set the callback until after successful iwmr
+ * allocation and initialization.
+ */
+ struct irdma_mr *iwmr = priv;
+ int err;
+
+ /* Invalidate the key in hardware. This does not actually release the
+ * key for potential reuse - that only occurs when the region is fully
+ * deregistered.
+ *
+ * The irdma_hwdereg_mr call is a no-op if the region is not currently
+ * registered with hardware.
+ */
+ err = irdma_hwdereg_mr(&iwmr->ibmr);
+ if (err) {
+ struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
+
+ ibdev_err(&iwdev->ibdev, "dmabuf mr revoke failed %d", err);
+ /* The key could not be invalidated, so the device may still DMA
+ * into memory that is being revoked; escalate to a full function
+ * reset (once) to force the hardware to stop.
+ */
+ if (!iwdev->rf->reset) {
+ iwdev->rf->reset = true;
+ iwdev->rf->gen_ops.request_reset(iwdev->rf);
+ }
+ }
+}
+
static struct ib_mr *irdma_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
u64 len, u64 virt,
int fd, int access,
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
return ERR_PTR(-EINVAL);
- umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, start, len, fd, access);
+ umem_dmabuf =
+ ib_umem_dmabuf_get_pinned_revocable_and_lock(pd->device, start,
+ len, fd, access);
if (IS_ERR(umem_dmabuf)) {
ibdev_dbg(&iwdev->ibdev, "Failed to get dmabuf umem[%pe]\n",
umem_dmabuf);
if (err)
goto err_iwmr;
+ ib_umem_dmabuf_set_revoke_locked(umem_dmabuf, irdma_umem_dmabuf_revoke,
+ iwmr);
+ ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
return &iwmr->ibmr;
err_iwmr:
irdma_free_iwmr(iwmr);
err_release:
+ ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
+
+ /* Will result in a call to revoke, but driver callback is not set and
+ * is therefore skipped.
+ */
ib_umem_release(&umem_dmabuf->umem);
return ERR_PTR(err);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ bool dmabuf_revocable = iwmr->region && iwmr->region->is_dmabuf;
+ struct ib_umem_dmabuf *umem_dmabuf;
int ret;
if (len > iwdev->rf->sc_dev.hw_attrs.max_mr_size)
if (flags & ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS))
return ERR_PTR(-EOPNOTSUPP);
+ if (dmabuf_revocable) {
+ umem_dmabuf = to_ib_umem_dmabuf(iwmr->region);
+
+ ib_umem_dmabuf_revoke_lock(umem_dmabuf);
+
+ /* If the dmabuf has been revoked, it means that the region has
+ * been invalidated in HW. We must not allow it to become valid
+ * again unless the user is requesting a change in translation
+ * which will end up dropping the umem dmabuf and allocating an
+ * entirely new umem anyway.
+ */
+ if (umem_dmabuf->revoked && !(flags & IB_MR_REREG_TRANS)) {
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+ }
+
ret = irdma_hwdereg_mr(ib_mr);
if (ret)
- return ERR_PTR(ret);
+ goto err_unlock;
if (flags & IB_MR_REREG_ACCESS)
iwmr->access = new_access;
&iwpbl->pble_alloc);
iwpbl->pbl_allocated = false;
}
+
+ if (dmabuf_revocable) {
+ /* Must unlock before release to prevent deadlock */
+ ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
+ dmabuf_revocable = false;
+ }
+
if (iwmr->region) {
ib_umem_release(iwmr->region);
iwmr->region = NULL;
}
ret = irdma_rereg_mr_trans(iwmr, start, len, virt);
- } else
+ } else {
ret = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
- if (ret)
- return ERR_PTR(ret);
+ }
- return NULL;
+err_unlock:
+ if (dmabuf_revocable)
+ ib_umem_dmabuf_revoke_unlock(umem_dmabuf);
+
+ return ret ? ERR_PTR(ret) : NULL;
}
/**
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
+ bool dmabuf_revocable = iwmr->region && iwmr->region->is_dmabuf;
int ret;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
goto done;
}
- ret = irdma_hwdereg_mr(ib_mr);
- if (ret)
- return ret;
+ if (!dmabuf_revocable) {
+ ret = irdma_hwdereg_mr(ib_mr);
+ if (ret)
+ return ret;
- irdma_free_stag(iwdev, iwmr->stag);
+ irdma_free_stag(iwdev, iwmr->stag);
+ }
done:
+ if (iwmr->region)
+ /* For dmabuf MRs, ib_umem_release will trigger a synchronous
+ * call to the revoke callback which will perform the actual HW
+ * invalidation via irdma_hwdereg_mr. We rely on this for its
+ * implicit serialization w.r.t. concurrent revocations. This
+ * must be done before freeing the PBLEs.
+ */
+ ib_umem_release(iwmr->region);
+
if (iwpbl->pbl_allocated)
irdma_free_pble(iwdev->rf->pble_rsrc, &iwpbl->pble_alloc);
- if (iwmr->region)
- ib_umem_release(iwmr->region);
+ if (dmabuf_revocable)
+ irdma_free_stag(iwdev, iwmr->stag);
kfree(iwmr);