#include <drm/drm_exec.h>
#include "amdgpu.h"
-#define work_to_evf_mgr(w, name) container_of(w, struct amdgpu_eviction_fence_mgr, name)
-#define evf_mgr_to_fpriv(e) container_of(e, struct amdgpu_fpriv, evf_mgr)
-
static const char *
amdgpu_eviction_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu_eviction_fence";
}

static const char *
amdgpu_eviction_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_eviction_fence *ef;

	ef = container_of(f, struct amdgpu_eviction_fence, base);
	return ef->timeline_name;
}
-int
-amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct drm_exec *exec)
+static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
{
- struct amdgpu_eviction_fence *old_ef, *new_ef;
- struct drm_gem_object *obj;
- unsigned long index;
- int ret;
-
- if (evf_mgr->ev_fence &&
- !dma_fence_is_signaled(&evf_mgr->ev_fence->base))
- return 0;
- /*
- * Steps to replace eviction fence:
- * * lock all objects in exec (caller)
- * * create a new eviction fence
- * * update new eviction fence in evf_mgr
- * * attach the new eviction fence to BOs
- * * release the old fence
- * * unlock the objects (caller)
- */
- new_ef = amdgpu_eviction_fence_create(evf_mgr);
- if (!new_ef) {
- DRM_ERROR("Failed to create new eviction fence\n");
- return -ENOMEM;
- }
-
- /* Update the eviction fence now */
- spin_lock(&evf_mgr->ev_fence_lock);
- old_ef = evf_mgr->ev_fence;
- evf_mgr->ev_fence = new_ef;
- spin_unlock(&evf_mgr->ev_fence_lock);
+ struct amdgpu_eviction_fence *ev_fence = to_ev_fence(f);
- /* Attach the new fence */
- drm_exec_for_each_locked_object(exec, index, obj) {
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
- if (!bo)
- continue;
- ret = amdgpu_eviction_fence_attach(evf_mgr, bo);
- if (ret) {
- DRM_ERROR("Failed to attch new eviction fence\n");
- goto free_err;
- }
- }
-
- /* Free old fence */
- if (old_ef)
- dma_fence_put(&old_ef->base);
- return 0;
-
-free_err:
- kfree(new_ef);
- return ret;
+ schedule_work(&ev_fence->evf_mgr->suspend_work);
+ return true;
}
+static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
+ .get_driver_name = amdgpu_eviction_fence_get_driver_name,
+ .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
+ .enable_signaling = amdgpu_eviction_fence_enable_signaling,
+};
+
static void
amdgpu_eviction_fence_suspend_worker(struct work_struct *work)
{
- struct amdgpu_eviction_fence_mgr *evf_mgr = work_to_evf_mgr(work, suspend_work.work);
- struct amdgpu_fpriv *fpriv = evf_mgr_to_fpriv(evf_mgr);
+ struct amdgpu_eviction_fence_mgr *evf_mgr =
+ container_of(work, struct amdgpu_eviction_fence_mgr,
+ suspend_work);
+ struct amdgpu_fpriv *fpriv =
+ container_of(evf_mgr, struct amdgpu_fpriv, evf_mgr);
struct amdgpu_userq_mgr *uq_mgr = &fpriv->userq_mgr;
- struct amdgpu_eviction_fence *ev_fence;
+ struct dma_fence *ev_fence;
mutex_lock(&uq_mgr->userq_mutex);
- spin_lock(&evf_mgr->ev_fence_lock);
- ev_fence = evf_mgr->ev_fence;
- if (ev_fence)
- dma_fence_get(&ev_fence->base);
- else
- goto unlock;
- spin_unlock(&evf_mgr->ev_fence_lock);
-
- amdgpu_userq_evict(uq_mgr, ev_fence);
+ ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
+ amdgpu_userq_evict(uq_mgr, !evf_mgr->shutdown);
- mutex_unlock(&uq_mgr->userq_mutex);
- dma_fence_put(&ev_fence->base);
- return;
-
-unlock:
- spin_unlock(&evf_mgr->ev_fence_lock);
+ /*
+ * Signaling the eviction fence must be done while holding the
+ * userq_mutex. Otherwise we won't resume the queues before issuing the
+ * next fence.
+ */
+ dma_fence_signal(ev_fence);
+ dma_fence_put(ev_fence);
mutex_unlock(&uq_mgr->userq_mutex);
}
-static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
+void amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
{
- struct amdgpu_eviction_fence_mgr *evf_mgr;
- struct amdgpu_eviction_fence *ev_fence;
-
- if (!f)
- return true;
-
- ev_fence = to_ev_fence(f);
- evf_mgr = ev_fence->evf_mgr;
-
- schedule_delayed_work(&evf_mgr->suspend_work, 0);
- return true;
-}
-
-static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
- .get_driver_name = amdgpu_eviction_fence_get_driver_name,
- .get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
- .enable_signaling = amdgpu_eviction_fence_enable_signaling,
-};
+ struct dma_fence *ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
+ struct dma_resv *resv = bo->tbo.base.resv;
-void amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_eviction_fence *ev_fence)
-{
- spin_lock(&evf_mgr->ev_fence_lock);
- dma_fence_signal(&ev_fence->base);
- spin_unlock(&evf_mgr->ev_fence_lock);
+ dma_resv_add_fence(resv, ev_fence, DMA_RESV_USAGE_BOOKKEEP);
+ dma_fence_put(ev_fence);
}
-struct amdgpu_eviction_fence *
-amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
+int amdgpu_evf_mgr_rearm(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec)
{
struct amdgpu_eviction_fence *ev_fence;
+ struct drm_gem_object *obj;
+ unsigned long index;
+ /* Create and initialize a new eviction fence */
ev_fence = kzalloc_obj(*ev_fence);
if (!ev_fence)
- return NULL;
+ return -ENOMEM;
ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current);
dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
&ev_fence->lock, evf_mgr->ev_fence_ctx,
atomic_inc_return(&evf_mgr->ev_fence_seq));
- return ev_fence;
-}
-
-void amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr)
-{
- struct amdgpu_eviction_fence *ev_fence;
-
- /* Wait for any pending work to execute */
- flush_delayed_work(&evf_mgr->suspend_work);
-
- spin_lock(&evf_mgr->ev_fence_lock);
- ev_fence = evf_mgr->ev_fence;
- spin_unlock(&evf_mgr->ev_fence_lock);
-
- if (!ev_fence)
- return;
- dma_fence_wait(&ev_fence->base, false);
+	/* Remember it for newly added BOs */
+	dma_fence_put(rcu_replace_pointer(evf_mgr->ev_fence, &ev_fence->base,
+					  true));
- /* Last unref of ev_fence */
- dma_fence_put(&ev_fence->base);
-}
-
-int amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_bo *bo)
-{
- struct amdgpu_eviction_fence *ev_fence;
- struct dma_resv *resv = bo->tbo.base.resv;
- int ret;
-
- if (!resv)
- return 0;
+ /* And add it to all existing BOs */
+ drm_exec_for_each_locked_object(exec, index, obj) {
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- ret = dma_resv_reserve_fences(resv, 1);
- if (ret) {
- DRM_DEBUG_DRIVER("Failed to resv fence space\n");
- return ret;
+ amdgpu_evf_mgr_attach_fence(evf_mgr, bo);
}
-
- spin_lock(&evf_mgr->ev_fence_lock);
- ev_fence = evf_mgr->ev_fence;
- if (ev_fence)
- dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);
- spin_unlock(&evf_mgr->ev_fence_lock);
-
return 0;
}
-void amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_bo *bo)
+void amdgpu_evf_mgr_detach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo)
{
struct dma_fence *stub = dma_fence_get_stub();
dma_fence_put(stub);
}
-int amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
+void amdgpu_evf_mgr_init(struct amdgpu_eviction_fence_mgr *evf_mgr)
{
- /* This needs to be done one time per open */
atomic_set(&evf_mgr->ev_fence_seq, 0);
evf_mgr->ev_fence_ctx = dma_fence_context_alloc(1);
- spin_lock_init(&evf_mgr->ev_fence_lock);
+	RCU_INIT_POINTER(evf_mgr->ev_fence, dma_fence_get_stub());
- INIT_DELAYED_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
- return 0;
+ INIT_WORK(&evf_mgr->suspend_work, amdgpu_eviction_fence_suspend_worker);
+}
+
+void amdgpu_evf_mgr_shutdown(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ evf_mgr->shutdown = true;
+ flush_work(&evf_mgr->suspend_work);
+}
+
+void amdgpu_evf_mgr_fini(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ dma_fence_wait(rcu_dereference_protected(evf_mgr->ev_fence, true),
+ false);
+ flush_work(&evf_mgr->suspend_work);
+	dma_fence_put(rcu_dereference_protected(evf_mgr->ev_fence, true));
}
#ifndef AMDGPU_EV_FENCE_H_
#define AMDGPU_EV_FENCE_H_
+#include <linux/dma-fence.h>
+
struct amdgpu_eviction_fence {
	struct dma_fence	base;
	spinlock_t		lock;
	char			timeline_name[TASK_COMM_LEN];
	struct amdgpu_eviction_fence_mgr *evf_mgr;
};

struct amdgpu_eviction_fence_mgr {
u64 ev_fence_ctx;
atomic_t ev_fence_seq;
- spinlock_t ev_fence_lock;
- struct amdgpu_eviction_fence *ev_fence;
- struct delayed_work suspend_work;
- uint8_t fd_closing;
-};
-
-/* Eviction fence helper functions */
-struct amdgpu_eviction_fence *
-amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr);
-void
-amdgpu_eviction_fence_destroy(struct amdgpu_eviction_fence_mgr *evf_mgr);
-
-int
-amdgpu_eviction_fence_attach(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_bo *bo);
+ /*
+ * Only updated while holding the VM resv lock.
+ * Only signaled while holding the userq mutex.
+ */
+ struct dma_fence __rcu *ev_fence;
+ struct work_struct suspend_work;
+ bool shutdown;
+};
-void
-amdgpu_eviction_fence_detach(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_bo *bo);
+static inline struct dma_fence *
+amdgpu_evf_mgr_get_fence(struct amdgpu_eviction_fence_mgr *evf_mgr)
+{
+ struct dma_fence *ev_fence;
-int
-amdgpu_eviction_fence_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+ rcu_read_lock();
+ ev_fence = dma_fence_get_rcu_safe(&evf_mgr->ev_fence);
+ rcu_read_unlock();
+ return ev_fence;
+}
-void
-amdgpu_eviction_fence_signal(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct amdgpu_eviction_fence *ev_fence);
+void amdgpu_evf_mgr_attach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+int amdgpu_evf_mgr_rearm(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct drm_exec *exec);
+void amdgpu_evf_mgr_detach_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
+ struct amdgpu_bo *bo);
+void amdgpu_evf_mgr_init(struct amdgpu_eviction_fence_mgr *evf_mgr);
+void amdgpu_evf_mgr_shutdown(struct amdgpu_eviction_fence_mgr *evf_mgr);
+void amdgpu_evf_mgr_fini(struct amdgpu_eviction_fence_mgr *evf_mgr);
-int
-amdgpu_eviction_fence_replace_fence(struct amdgpu_eviction_fence_mgr *evf_mgr,
- struct drm_exec *exec);
#endif
amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *uq_mgr,
struct amdgpu_eviction_fence_mgr *evf_mgr)
{
- struct amdgpu_eviction_fence *ev_fence;
+ struct dma_fence *ev_fence;
retry:
/* Flush any pending resume work to create ev_fence */
flush_delayed_work(&uq_mgr->resume_work);
mutex_lock(&uq_mgr->userq_mutex);
- spin_lock(&evf_mgr->ev_fence_lock);
- ev_fence = evf_mgr->ev_fence;
- spin_unlock(&evf_mgr->ev_fence_lock);
- if (!ev_fence || dma_fence_is_signaled(&ev_fence->base)) {
+ ev_fence = amdgpu_evf_mgr_get_fence(evf_mgr);
+ if (dma_fence_is_signaled(ev_fence)) {
+ dma_fence_put(ev_fence);
mutex_unlock(&uq_mgr->userq_mutex);
/*
* Looks like there was no pending resume work,
schedule_delayed_work(&uq_mgr->resume_work, 0);
goto retry;
}
+ dma_fence_put(ev_fence);
}
int amdgpu_userq_create_object(struct amdgpu_userq_mgr *uq_mgr,
dma_fence_wait(bo_va->last_pt_update, false);
dma_fence_wait(vm->last_update, false);
- ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec);
+ ret = amdgpu_evf_mgr_rearm(&fpriv->evf_mgr, &exec);
if (ret)
drm_file_err(uq_mgr->file, "Failed to replace eviction fence\n");
{
struct amdgpu_userq_mgr *uq_mgr = work_to_uq_mgr(work, resume_work.work);
struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
+ struct dma_fence *ev_fence;
int ret;
- flush_delayed_work(&fpriv->evf_mgr.suspend_work);
-
mutex_lock(&uq_mgr->userq_mutex);
+ ev_fence = amdgpu_evf_mgr_get_fence(&fpriv->evf_mgr);
+ if (!dma_fence_is_signaled(ev_fence))
+ goto unlock;
ret = amdgpu_userq_vm_validate(uq_mgr);
if (ret) {
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
+ dma_fence_put(ev_fence);
}
static int
}
void
-amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr,
- struct amdgpu_eviction_fence *ev_fence)
+amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, bool schedule_resume)
{
- struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr);
- struct amdgpu_eviction_fence_mgr *evf_mgr = &fpriv->evf_mgr;
struct amdgpu_device *adev = uq_mgr->adev;
int ret;
if (ret)
dev_err(adev->dev, "Failed to evict userqueue\n");
- /* Signal current eviction fence */
- amdgpu_eviction_fence_signal(evf_mgr, ev_fence);
-
- if (!evf_mgr->fd_closing)
+ if (schedule_resume)
schedule_delayed_work(&uq_mgr->resume_work, 0);
}