// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
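
/*
 * Module parameters (read-only): job_hang_limit is how many times a job may
 * trigger a timeout before the scheduler treats its context as guilty,
 * hw_job_limit caps the number of jobs queued to the hardware at once.
 */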
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);

static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct dma_fence *fence = NULL;
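
        /*
         * The scheduler fence carries an error when this job was flagged as
         * bad (e.g. its context was found guilty during an earlier recovery);
         * in that case skip the hardware submit and return no fence.
         */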
        if (likely(!sched_job->s_fence->finished.error))
                fence = etnaviv_gpu_submit(submit);
        else
                dev_dbg(submit->gpu->dev, "skipping bad job\n");

        return fence;
}

static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
                                                          *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
        struct etnaviv_gpu *gpu = submit->gpu;
        u32 dma_addr;
        int change;
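
        /*
         * Recovery flow: park the scheduler, rule out spurious timeouts
         * (fence already signaled or front-end still advancing), otherwise
         * dump state, reset the GPU and restart with the remaining jobs.
         */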

        /* block scheduler */
        drm_sched_stop(&gpu->sched, sched_job);

        /*
         * If the GPU managed to complete this job's fence, the timeout is
         * spurious. Bail out.
         */
        if (dma_fence_is_signaled(submit->out_fence))
                goto out_no_timeout;

        /*
         * If the GPU is still making forward progress on the front-end (which
         * should never loop) we shift out the timeout to give it a chance to
         * catch up.
         */
        dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
        change = dma_addr - gpu->hangcheck_dma_addr;
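        /*
         * "Stuck" here means the FE stayed within a 16 byte window of its
         * last observed position and no further fences completed; a
         * backwards jump (change < 0) also counts as progress.
         */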
        if (gpu->state == ETNA_GPU_STATE_RUNNING &&
            (gpu->completed_fence != gpu->hangcheck_fence ||
             change < 0 || change > 16)) {
                gpu->hangcheck_dma_addr = dma_addr;
                gpu->hangcheck_fence = gpu->completed_fence;
                goto out_no_timeout;
        }
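
        /*
         * Genuine hang: charge it to the offending job so the scheduler can
         * declare the context guilty once it exceeds the hang limit.
         */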
        if (sched_job)
                drm_sched_increase_karma(sched_job);

        /* get the GPU back into the init state */
        etnaviv_core_dump(submit);
        etnaviv_gpu_recover_hang(submit);

        drm_sched_resubmit_jobs(&gpu->sched);

        drm_sched_start(&gpu->sched, true);
        return DRM_GPU_SCHED_STAT_NOMINAL;

out_no_timeout:
        /* restart scheduler after GPU is usable again */
        drm_sched_start(&gpu->sched, true);
        return DRM_GPU_SCHED_STAT_NOMINAL;
}

static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
        struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

        drm_sched_job_cleanup(sched_job);
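
        /* Drop the reference the scheduler took in etnaviv_sched_push_job(). */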
        etnaviv_submit_put(submit);
}
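
/* Hooks handed to the DRM GPU scheduler core. */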
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
        .run_job = etnaviv_sched_run_job,
        .timedout_job = etnaviv_sched_timedout_job,
        .free_job = etnaviv_sched_free_job,
};

int etnaviv_sched_push_job(struct etnaviv_gem_submit *submit)
{
        struct etnaviv_gpu *gpu = submit->gpu;
        int ret = 0;

        /*
         * Hold the sched lock across the whole operation to avoid jobs being
         * pushed out of order with regard to their sched fence seqnos as
         * allocated in drm_sched_job_arm.
         */
        mutex_lock(&gpu->sched_lock);

        drm_sched_job_arm(&submit->sched_job);
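
        /*
         * Publish the job's finished fence to userspace: take a reference
         * and store it in the user_fences xarray under a cyclically
         * allocated 32-bit id that userspace can look up and wait on.
         */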
        submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
        ret = xa_alloc_cyclic(&gpu->user_fences, &submit->out_fence_id,
                              submit->out_fence, xa_limit_32b,
                              &gpu->next_user_fence, GFP_KERNEL);
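        /* On failure, unwind the armed job before dropping the lock. */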
        if (ret < 0) {
                drm_sched_job_cleanup(&submit->sched_job);
                goto out_unlock;
        }

        /* the scheduler holds on to the job now */
        kref_get(&submit->refcount);

        drm_sched_entity_push_job(&submit->sched_job);

out_unlock:
        mutex_unlock(&gpu->sched_lock);

        return ret;
}

int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
        int ret;
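
        /*
         * One scheduler instance per GPU core: up to hw_job_limit jobs in
         * flight, a 500 ms job timeout, and the scheduler's default
         * workqueues (NULL submit_wq and timeout_wq).
         */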
        ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops, NULL,
                             DRM_SCHED_PRIORITY_COUNT,
                             etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
                             msecs_to_jiffies(500), NULL, NULL,
                             dev_name(gpu->dev), gpu->dev);
        if (ret)
                return ret;

        return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
        drm_sched_fini(&gpu->sched);
}