/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>

#include "mdp4_kms.h"
struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		int x, y;

		/* next cursor to scan-out: */
		uint64_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit; used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}
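/* Record which deferred work (flip completion and/or cursor update) should
 * run at the next vblank, and arm the vblank irq that will consume the
 * pending bits in mdp4_crtc_vblank_irq() below.
 */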
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}
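/* Collect the flush bits for every plane pipe attached to this CRTC plus
 * its overlay engine, remember them in flushed_mask (so that a later
 * mdp4_crtc_wait_for_flush_done() can tell when the hardware has consumed
 * them), and kick the flush.
 */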
static void crtc_flush(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	uint32_t flush = 0;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		flush |= pipe2flush(pipe_id);
	}

	flush |= ovlp2flush(mdp4_crtc->ovlp);

	DBG("%s: flush=%08x", mdp4_crtc->name, flush);

	mdp4_crtc->flushed_mask = flush;

	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp4_crtc->event;
	if (event) {
		mdp4_crtc->event = NULL;
		DBG("%s: send event: %p", mdp4_crtc->name, event);
		drm_crtc_send_vblank_event(crtc, event);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
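/* Runs from the flip_work queue on the driver workqueue: drops the iova pin
 * and the GEM reference that were taken when the cursor BO started scanning
 * out.
 */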
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
	struct mdp4_crtc *mdp4_crtc =
		container_of(work, struct mdp4_crtc, unref_cursor_work);
	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
	struct msm_kms *kms = &mdp4_kms->base.base;

	msm_gem_unpin_iova(val, kms->aspace);
	drm_gem_object_put_unlocked(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	drm_crtc_cleanup(crtc);
	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);

	kfree(mdp4_crtc);
}
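/* An entry of 0 keeps the pipe at the base layer; non-zero entries select
 * one of the four OVLP blend stages (blend_setup() below indexes alpha[]
 * with idx - 1).
 */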
/* statically (for now) map planes to mixer stage (z-order): */
static const int idxs[] = {
		[VG1]  = 1,
		[VG2]  = 2,
		[RGB1] = 0,
		[RGB2] = 0,
		[RGB3] = 0,
		[VG3]  = 3,
		[VG4]  = 4,
};
/* setup mixer config, for which we need to consider all crtc's and
 * the planes attached to them
 *
 * TODO may possibly need some extra locking here
 */
static void setup_mixer(struct mdp4_kms *mdp4_kms)
{
	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
	struct drm_crtc *crtc;
	uint32_t mixer_cfg = 0;
	static const enum mdp_mixer_stage_id stages[] = {
			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
	};

	list_for_each_entry(crtc, &config->crtc_list, head) {
		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
		struct drm_plane *plane;

		drm_atomic_crtc_for_each_plane(plane, crtc) {
			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
			int idx = idxs[pipe_id];
			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
					pipe_id, stages[idx]);
		}
	}

	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
}
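/* Program the per-stage blend ops for this CRTC's overlay engine: stages
 * whose attached plane format carries an alpha channel get per-pixel alpha
 * blending, the rest fall back to constant alpha.  Finish by refreshing the
 * global layer mixer config via setup_mixer().
 */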
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];

		if (idx > 0) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(plane->state->fb));
			alpha[idx - 1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}
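/* Program DMA and overlay source/destination geometry from the adjusted
 * mode.  No scanout address is written here (the plane code owns that),
 * hence the _nofb suffix.
 */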
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	mdp4_crtc->enabled = false;
}
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}
static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	DBG("%s: check", mdp4_crtc->name);

	// TODO anything else to check?

	return 0;
}
static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	DBG("%s: begin", mdp4_crtc->name);
}
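/* Commit point for this CRTC: stash the pageflip event under event_lock
 * (complete_flip() sends it at vblank), reprogram blending, flush the
 * planes and arm the vblank irq via request_pending().
 */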
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take a obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
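/* Legacy cursor ioctl entry point: validate the requested size, look up and
 * pin the new BO, then stash it as cursor.next_bo under the cursor lock and
 * mark the state stale.  The cursor registers themselves are only touched
 * from update_cursor() at the next vblank.
 */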
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put_unlocked(cursor_bo);
	return ret;
}
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	request_pending(crtc, PENDING_CURSOR);

	return 0;
}
static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp4_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};
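/* Vblank irq handler, one-shot by design: it unregisters itself on entry
 * and is re-armed by the next request_pending() call.  Pending flip events
 * are completed and stale cursor state is pushed to hardware from here.
 */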
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
	}
}
static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;

	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}
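/* Wait (up to 50ms) for the hardware to consume the flush bits recorded by
 * crtc_flush().  A vblank reference is held so vblank interrupts keep
 * firing and waking the wait queue while OVERLAY_FLUSH is polled.
 */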
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp4_crtc->id);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}
uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}
/* set dma config, ie. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}
/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}
void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command mode CRTCs to wait for
	 * other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}
static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};
/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;

	mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
	if (!mdp4_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->id = id;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs,
			NULL);
	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}