/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>

#include "mdp4_kms.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;


	/* if there is a pending flip, this will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that have been flushed at the last commit, used to decide
	 * if a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP 0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

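/* Note a deferred-work bit (PENDING_CURSOR/PENDING_FLIP) and make sure the
 * vblank irq is registered so the work is picked up in the vblank handler.
 */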
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

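/* Build the flush mask for the pipes currently attached to this crtc plus
 * its overlay engine, remember it in flushed_mask, and kick
 * REG_MDP4_OVERLAY_FLUSH.
 */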
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
	struct msm_kms *kms = &mdp4_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put_unlocked(val);
}

static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}

/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,

};

/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}

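/* Program the per-stage blend ops for this crtc's overlay: stages whose plane
 * format has alpha get FG pixel-alpha blending, the rest use constant alpha;
 * then refresh the global mixer config via setup_mixer().
 */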
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct mdp_format *format =
					to_mdp_format(msm_framebuffer_format(plane->state->fb));
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

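/* Program DMA source/destination size and the OVLP size from the crtc's
 * adjusted mode; the scanout buffers themselves come from the attached
 * planes ("take data from pipe"), so no fb is programmed here.
 */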
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

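/* Stash the pending vblank event, program blending, kick the flush, and mark
 * PENDING_FLIP so the event is completed from the vblank irq handler.
 */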
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed). The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

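/* Stage a new cursor bo (or clear it when handle==0) and mark the cursor
 * state stale; update_cursor() applies it to hardware at the next vblank.
 */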
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width = width;
	mdp4_crtc->cursor.height = height;
	mdp4_crtc->cursor.stale = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put_unlocked(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};

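/* One-shot vblank handler: unregister itself, then service whatever was
 * queued via request_pending(): complete a pending flip event and/or apply a
 * staged cursor update (committing queued cursor-bo unrefs).
 */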
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

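/* Wait (up to 50ms) for the hardware to clear this crtc's bits in
 * REG_MDP4_OVERLAY_FLUSH, i.e. for the last crtc_flush() to take effect.
 */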
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, ie. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTC to wait for
	 * other event.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
				  NULL);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}