#include "amdgpu_vm.h"
#include "amdgpu_userq.h"
#include "amdgpu_hmm.h"
+#include "amdgpu_reset.h"
#include "amdgpu_userq_fence.h"
u32 amdgpu_userq_get_supported_ip_mask(struct amdgpu_device *adev)
struct amdgpu_device *adev = uq_mgr->adev;
const struct amdgpu_userq_funcs *uq_funcs = adev->userq_funcs[queue->queue_type];
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
/* Drop the userq reference. */
amdgpu_userq_buffer_vas_list_cleanup(adev, queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
amdgpu_userq_fence_driver_free(queue);
- idr_remove(&uq_mgr->userq_idr, queue_id);
+ /* Use interrupt-safe locking since IRQ handlers may access these XArrays */
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, (unsigned long)queue_id);
+ xa_erase_irq(&adev->userq_doorbell_xa, queue->doorbell_index);
+ queue->userq_mgr = NULL;
list_del(&queue->userq_va_list);
kfree(queue);
+
+ up_read(&adev->reset_domain->sem);
}
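
For reference, a minimal standalone sketch of the locking contract behind the _irq variants used above: they take the XArray's internal xa_lock with local interrupts disabled, so an interrupt handler that takes the same lock cannot deadlock against a writer it interrupted on the same CPU. The names below (irq_xa, irq_xa_store, irq_xa_remove) are illustrative and not part of this patch.

/* Illustrative sketch, not part of the patch. */
static DEFINE_XARRAY_FLAGS(irq_xa, XA_FLAGS_LOCK_IRQ);

static void irq_xa_store(unsigned long index, void *obj)
{
	/* Takes xa_lock with IRQs off; may drop it to allocate nodes. */
	xa_store_irq(&irq_xa, index, obj, GFP_KERNEL);
}

static void irq_xa_remove(unsigned long index)
{
	/* Shorthand for xa_lock_irq(); __xa_erase(); xa_unlock_irq(); */
	xa_erase_irq(&irq_xa, index);
}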
static struct amdgpu_usermode_queue *
amdgpu_userq_find(struct amdgpu_userq_mgr *uq_mgr, int qid)
{
- return idr_find(&uq_mgr->userq_idr, qid);
+ return xa_load(&uq_mgr->userq_mgr_xa, qid);
}
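
xa_load() is the lock-free counterpart used for lookups: it walks the tree under RCU and can be called from any context, including hard IRQ, which is what makes a device-wide doorbell XArray usable from the fence interrupt path. Because this patch frees queues immediately after xa_erase_irq() (no RCU grace period), a lookup that then dereferences the queue presumably has to hold the xa_lock across both steps. A sketch of that pattern; handle_doorbell() is a hypothetical helper, not part of this patch.

/* Illustrative sketch, not part of the patch. */
static void handle_doorbell(struct xarray *doorbell_xa, unsigned long doorbell)
{
	struct amdgpu_usermode_queue *queue;
	unsigned long flags;

	/*
	 * Hold the xa_lock across lookup *and* use so a concurrent
	 * xa_erase_irq() + kfree() cannot free the queue under us.
	 */
	xa_lock_irqsave(doorbell_xa, flags);
	queue = xa_load(doorbell_xa, doorbell);
	if (queue) {
		/* e.g. process the queue's fence driver here */
	}
	xa_unlock_irqrestore(doorbell_xa, flags);
}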
void
struct amdgpu_db_info db_info;
char *queue_name;
bool skip_map_queue;
+ u32 qid;
uint64_t index;
- int qid, r = 0;
+ int r = 0;
int priority =
(args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >>
AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT;
*
* This will also make sure we have a valid eviction fence ready to be used.
*/
- mutex_lock(&adev->userq_mutex);
amdgpu_userq_ensure_ev_fence(&fpriv->userq_mgr, &fpriv->evf_mgr);
uq_funcs = adev->userq_funcs[args->in.ip_type];
goto unlock;
}
- qid = idr_alloc(&uq_mgr->userq_idr, queue, 1, AMDGPU_MAX_USERQ_COUNT, GFP_KERNEL);
- if (qid < 0) {
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ kfree(queue);
+ up_read(&adev->reset_domain->sem);
+ goto unlock;
+ }
+
+ r = xa_alloc(&uq_mgr->userq_mgr_xa, &qid, queue, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
+ xa_erase_irq(&adev->userq_doorbell_xa, index);
kfree(queue);
- r = -ENOMEM;
+ up_read(&adev->reset_domain->sem);
goto unlock;
}
+ up_read(&adev->reset_domain->sem);
+ queue->userq_mgr = uq_mgr;
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
r = amdgpu_userq_map_helper(uq_mgr, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- idr_remove(&uq_mgr->userq_idr, qid);
+ xa_erase_irq(&uq_mgr->userq_mgr_xa, qid);
+ xa_erase_irq(&adev->userq_doorbell_xa, index);
amdgpu_userq_fence_driver_free(queue);
uq_funcs->mqd_destroy(uq_mgr, queue);
kfree(queue);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
return r;
}
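
Two details of the conversion above are easy to miss. First, xa_alloc() returns 0 or a negative errno and hands the new id back through its second argument, unlike idr_alloc() which returned the id itself; that is why qid becomes a u32 and r carries the error. Second, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT) is inclusive at both ends, while idr_alloc(..., 1, AMDGPU_MAX_USERQ_COUNT, ...) treated the upper bound as exclusive, so the usable id range grows by one; if that matters, XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT - 1) preserves the old behaviour. A standalone sketch of the calling convention, with illustrative names only:

/* Illustrative sketch, not part of the patch. */
static DEFINE_XARRAY_ALLOC(alloc_xa);	/* XA_FLAGS_ALLOC */

static int alloc_xa_insert(void *obj, u32 *out_id)
{
	/*
	 * Picks a free id in the inclusive range [1, 127], stores obj
	 * there and writes the id to *out_id.  Returns 0, -EBUSY when
	 * the range is exhausted, or -ENOMEM.
	 */
	return xa_alloc(&alloc_xa, out_id, obj, XA_LIMIT(1, 127), GFP_KERNEL);
}

static int alloc_xa_store_at(unsigned long index, void *obj)
{
	/*
	 * xa_store_irq() returns the previous entry or an xa_err()-encoded
	 * pointer; xa_err() converts that into 0 or a negative errno.
	 */
	return xa_err(xa_store_irq(&alloc_xa, index, obj, GFP_KERNEL));
}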
amdgpu_userq_restore_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Resume all the queues for this process */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
if (!amdgpu_userq_buffer_vas_mapped(queue)) {
drm_file_err(uq_mgr->file,
amdgpu_userq_evict_all(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id;
+ unsigned long queue_id;
int ret = 0, r;
/* Try to unmap all the queues in this process ctx */
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
r = amdgpu_userq_preempt_helper(uq_mgr, queue);
if (r)
ret = r;
amdgpu_userq_wait_for_signal(struct amdgpu_userq_mgr *uq_mgr)
{
struct amdgpu_usermode_queue *queue;
- int queue_id, ret;
+ unsigned long queue_id;
+ int ret;
- idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&uq_mgr->userq_mgr_xa, queue_id, queue) {
struct dma_fence *f = queue->last_fence;
if (!f || dma_fence_is_signaled(f))
struct amdgpu_device *adev)
{
mutex_init(&userq_mgr->userq_mutex);
- idr_init_base(&userq_mgr->userq_idr, 1);
+ xa_init_flags(&userq_mgr->userq_mgr_xa, XA_FLAGS_ALLOC);
userq_mgr->adev = adev;
userq_mgr->file = file_priv;
- mutex_lock(&adev->userq_mutex);
- list_add(&userq_mgr->list, &adev->userq_mgr_list);
- mutex_unlock(&adev->userq_mutex);
-
INIT_DELAYED_WORK(&userq_mgr->resume_work, amdgpu_userq_restore_worker);
return 0;
}
void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
{
- struct amdgpu_device *adev = userq_mgr->adev;
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- uint32_t queue_id;
+ unsigned long queue_id;
cancel_delayed_work_sync(&userq_mgr->resume_work);
- mutex_lock(&adev->userq_mutex);
mutex_lock(&userq_mgr->userq_mutex);
- idr_for_each_entry(&userq_mgr->userq_idr, queue, queue_id) {
+ xa_for_each(&userq_mgr->userq_mgr_xa, queue_id, queue) {
amdgpu_userq_wait_for_last_fence(userq_mgr, queue);
amdgpu_userq_unmap_helper(userq_mgr, queue);
amdgpu_userq_cleanup(userq_mgr, queue, queue_id);
}
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
- if (uqm == userq_mgr) {
- list_del(&uqm->list);
- break;
- }
- }
- idr_destroy(&userq_mgr->userq_idr);
+ xa_destroy(&userq_mgr->userq_mgr_xa);
mutex_unlock(&userq_mgr->userq_mutex);
- mutex_unlock(&adev->userq_mutex);
mutex_destroy(&userq_mgr->userq_mutex);
}
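
The teardown above leans on two XArray properties: xa_for_each() restarts its walk from the stored index, so erasing the current entry inside the loop (as amdgpu_userq_cleanup() does via xa_erase_irq()) is safe, and xa_destroy() only frees the array's internal nodes, not the stored pointers, so every queue must already have been freed by the loop. A standalone sketch, with illustrative names:

/* Illustrative sketch, not part of the patch. */
static void example_teardown(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	xa_for_each(xa, index, entry) {	/* erasing while walking is safe */
		xa_erase(xa, index);
		kfree(entry);
	}

	xa_destroy(xa);	/* frees internal nodes only, not the entries */
}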
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int r;
if (!ip_mask)
return 0;
- guard(mutex)(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
guard(mutex)(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_preempt_helper(uqm, queue);
- else
- r = amdgpu_userq_unmap_helper(uqm, queue);
- if (r)
- return r;
- }
+ if (adev->in_s0ix)
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ else
+ r = amdgpu_userq_unmap_helper(uqm, queue);
+ if (r)
+ return r;
}
return 0;
}
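
The suspend path above takes each queue's manager lock with guard(mutex)() from <linux/cleanup.h>; because the guard is scoped to the enclosing block, the mutex is released automatically at the end of every xa_for_each() iteration and on the early "return r" path. A minimal sketch of that scoping with a hypothetical entry type (example_obj is not part of this patch):

/* Illustrative sketch, not part of the patch. */
struct example_obj {
	struct mutex lock;
	bool broken;
};

static int example_walk(struct xarray *xa)
{
	struct example_obj *obj;
	unsigned long index;

	xa_for_each(xa, index, obj) {
		guard(mutex)(&obj->lock);	/* released when the body ends */

		if (obj->broken)
			return -EIO;		/* released on this path too */
	}

	return 0;
}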
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int r;
if (!ip_mask)
return 0;
- guard(mutex)(&adev->userq_mutex);
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
guard(mutex)(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (adev->in_s0ix)
- r = amdgpu_userq_restore_helper(uqm, queue);
- else
- r = amdgpu_userq_map_helper(uqm, queue);
- if (r)
- return r;
- }
+ if (adev->in_s0ix)
+ r = amdgpu_userq_restore_helper(uqm, queue);
+ else
+ r = amdgpu_userq_map_helper(uqm, queue);
+ if (r)
+ return r;
}
return 0;
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
adev->userq_halt_for_enforce_isolation = true;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
cancel_delayed_work_sync(&uqm->resume_work);
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
- if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
- (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
- (queue->xcp_id == idx)) {
- r = amdgpu_userq_preempt_helper(uqm, queue);
- if (r)
- ret = r;
- }
+ if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
+ (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
+ (queue->xcp_id == idx)) {
+ r = amdgpu_userq_preempt_helper(uqm, queue);
+ if (r)
+ ret = r;
}
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}
{
u32 ip_mask = amdgpu_userq_get_supported_ip_mask(adev);
struct amdgpu_usermode_queue *queue;
- struct amdgpu_userq_mgr *uqm, *tmp;
- int queue_id;
+ struct amdgpu_userq_mgr *uqm;
+ unsigned long queue_id;
int ret = 0, r;
/* only need to stop gfx/compute */
if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
return 0;
- mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
adev->userq_halt_for_enforce_isolation = false;
- list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
+ xa_for_each(&adev->userq_doorbell_xa, queue_id, queue) {
+ uqm = queue->userq_mgr;
mutex_lock(&uqm->userq_mutex);
- idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
if (((queue->queue_type == AMDGPU_HW_IP_GFX) ||
(queue->queue_type == AMDGPU_HW_IP_COMPUTE)) &&
(queue->xcp_id == idx)) {
r = amdgpu_userq_restore_helper(uqm, queue);
if (r)
ret = r;
}
- }
mutex_unlock(&uqm->userq_mutex);
}
- mutex_unlock(&adev->userq_mutex);
+
return ret;
}