*/
struct drm_gpu_scheduler sched;
+ /**
+ * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre-
+ * allocated memory is in flight.
+ *
+ * Because we have to pre-allocate pgtable pages for the worst case
+ * (i.e. new mappings do not share any PTEs with existing mappings),
+ * we could transiently consume a large amount of memory. The
+ * prealloc_throttle puts an upper bound on that.
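+ *
+ * Submitters block in vm_bind_prealloc_count() until enough in-flight
+ * pages are released, and are woken from the job free path.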
+ */
+ struct {
+ /** @wait: Notified when preallocated resources are released */
+ wait_queue_head_t wait;
+
+ /**
+ * @in_flight: The number of pre-allocated pgtable pages in flight
+ * for queued VM_BIND jobs.
+ */
+ atomic_t in_flight;
+ } prealloc_throttle;
+
/**
* @mm: Memory management for kernel managed VA allocations
*
vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
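+ /* Return this job's pre-allocated page count to the throttle budget */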
+ atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
drm_sched_job_cleanup(_job);
job_foreach_bo (obj, job)
kfree(op);
}
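+ /* Wake any submitters throttled in vm_bind_prealloc_count() */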
+ wake_up(&vm->prealloc_throttle.wait);
+
kfree(job);
}
ret = drm_sched_init(&vm->sched, &args);
if (ret)
goto err_free_dummy;
+
+ init_waitqueue_head(&vm->prealloc_throttle.wait);
}
drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
* them as a single mapping. Otherwise the prealloc_count() will not realize
* they can share pagetable pages and vastly overcount.
*/
-static void
+static int
vm_bind_prealloc_count(struct msm_vm_bind_job *job)
{
struct msm_vm_bind_op *first = NULL, *last = NULL;
+ struct msm_gem_vm *vm = to_msm_vm(job->vm);
+ int ret;
for (int i = 0; i < job->nr_ops; i++) {
struct msm_vm_bind_op *op = &job->ops[i];
/* Flush the remaining range: */
prealloc_count(job, first, last);
+
+ /*
+ * Now that we know how many pages this job needs to pre-allocate,
+ * throttle the submit if too much pre-allocated memory is already in
+ * flight. The 1024-page threshold (~4 MiB, assuming 4 KiB pgtable
+ * pages) bounds transient memory consumption; note that in_flight can
+ * briefly exceed it by one job's count, since the current job's pages
+ * are only added after the wait.
+ */
+ ret = wait_event_interruptible(
+ vm->prealloc_throttle.wait,
+ atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
+ if (ret)
+ return ret;
+
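+ /* Claim the pages now; they are released in the job free path */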
+ atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
+
+ return 0;
}
/*
if (ret)
goto out_unlock;
- vm_bind_prealloc_count(job);
+ ret = vm_bind_prealloc_count(job);
+ if (ret)
+ goto out_unlock;
struct drm_exec exec;
unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;