1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
7 #include "msm_ringbuffer.h"
/*
 * Depth of the ringbuffer exposed to the DRM scheduler, i.e. how many
 * jobs may be in flight on the HW ring at once.  Writable at runtime
 * (mode 0600) via /sys/module/<module>/parameters/num_hw_submissions.
 */
static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
14 static struct dma_fence
*msm_job_run(struct drm_sched_job
*job
)
16 struct msm_gem_submit
*submit
= to_msm_submit(job
);
17 struct msm_fence_context
*fctx
= submit
->ring
->fctx
;
18 struct msm_gpu
*gpu
= submit
->gpu
;
19 struct msm_drm_private
*priv
= gpu
->dev
->dev_private
;
22 msm_fence_init(submit
->hw_fence
, fctx
);
24 submit
->seqno
= submit
->hw_fence
->seqno
;
26 mutex_lock(&priv
->lru
.lock
);
28 for (i
= 0; i
< submit
->nr_bos
; i
++) {
29 struct drm_gem_object
*obj
= submit
->bos
[i
].obj
;
31 msm_gem_unpin_active(obj
);
32 submit
->bos
[i
].flags
&= ~BO_PINNED
;
35 mutex_unlock(&priv
->lru
.lock
);
37 msm_gpu_submit(gpu
, submit
);
39 return dma_fence_get(submit
->hw_fence
);
/*
 * msm_job_free() - drm_sched .free_job callback.
 * @job: scheduler job embedded in a struct msm_gem_submit
 *
 * Detaches the job from the scheduler and drops the scheduler's
 * reference on the submit.
 */
static void msm_job_free(struct drm_sched_job *job)
{
	struct msm_gem_submit *submit = to_msm_submit(job);

	drm_sched_job_cleanup(job);
	msm_gem_submit_put(submit);
}
50 static const struct drm_sched_backend_ops msm_sched_ops
= {
51 .run_job
= msm_job_run
,
52 .free_job
= msm_job_free
55 struct msm_ringbuffer
*msm_ringbuffer_new(struct msm_gpu
*gpu
, int id
,
56 void *memptrs
, uint64_t memptrs_iova
)
58 struct msm_ringbuffer
*ring
;
63 /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
64 BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ
));
66 ring
= kzalloc(sizeof(*ring
), GFP_KERNEL
);
75 ring
->start
= msm_gem_kernel_new(gpu
->dev
, MSM_GPU_RINGBUFFER_SZ
,
76 check_apriv(gpu
, MSM_BO_WC
| MSM_BO_GPU_READONLY
),
77 gpu
->aspace
, &ring
->bo
, &ring
->iova
);
79 if (IS_ERR(ring
->start
)) {
80 ret
= PTR_ERR(ring
->start
);
85 msm_gem_object_set_name(ring
->bo
, "ring%d", id
);
87 ring
->end
= ring
->start
+ (MSM_GPU_RINGBUFFER_SZ
>> 2);
88 ring
->next
= ring
->start
;
89 ring
->cur
= ring
->start
;
91 ring
->memptrs
= memptrs
;
92 ring
->memptrs_iova
= memptrs_iova
;
94 /* currently managing hangcheck ourselves: */
95 sched_timeout
= MAX_SCHEDULE_TIMEOUT
;
97 ret
= drm_sched_init(&ring
->sched
, &msm_sched_ops
,
98 DRM_SCHED_PRIORITY_COUNT
,
99 num_hw_submissions
, 0, sched_timeout
,
100 NULL
, NULL
, to_msm_bo(ring
->bo
)->name
, gpu
->dev
->dev
);
105 INIT_LIST_HEAD(&ring
->submits
);
106 spin_lock_init(&ring
->submit_lock
);
107 spin_lock_init(&ring
->preempt_lock
);
109 snprintf(name
, sizeof(name
), "gpu-ring-%d", ring
->id
);
111 ring
->fctx
= msm_fence_context_alloc(gpu
->dev
, &ring
->memptrs
->fence
, name
);
116 msm_ringbuffer_destroy(ring
);
120 void msm_ringbuffer_destroy(struct msm_ringbuffer
*ring
)
122 if (IS_ERR_OR_NULL(ring
))
125 drm_sched_fini(&ring
->sched
);
127 msm_fence_context_free(ring
->fctx
);
129 msm_gem_kernel_put(ring
->bo
, ring
->gpu
->aspace
);