// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/sort.h>

#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_probe_helper.h>

#include "mdp5_kms.h"

#define CURSOR_WIDTH	64
#define CURSOR_HEIGHT	64

struct mdp5_crtc {
        struct drm_crtc base;
        int id;
        bool enabled;

        spinlock_t lm_lock;     /* protect REG_MDP5_LM_* registers */

        /* if there is a pending flip, these will be non-null: */
        struct drm_pending_vblank_event *event;

        /* Bits have been flushed at the last commit,
         * used to decide if a vsync has happened since last commit.
         */
        u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
        atomic_t pending;

        /* for unref'ing cursor bo's after scanout completes: */
        struct drm_flip_work unref_cursor_work;

        struct mdp_irq vblank;
        struct mdp_irq err;
        struct mdp_irq pp_done;

        struct completion pp_completion;

        bool lm_cursor_enabled;

        struct {
                /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo */
                spinlock_t lock;

                /* current cursor being scanned out: */
                struct drm_gem_object *scanout_bo;
                uint64_t iova;
                uint32_t width, height;
                int x, y;
        } cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc);

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
        struct msm_drm_private *priv = crtc->dev->dev_private;
        return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        atomic_or(pending, &mdp5_crtc->pending);
        mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

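/*
 * A rough sketch of the pending-bit handshake (illustration only): callers
 * OR a PENDING_* bit into mdp5_crtc->pending and arm the vblank irq here;
 * mdp5_crtc_vblank_irq() below then claims all armed bits in one shot,
 *
 *      pending = atomic_xchg(&mdp5_crtc->pending, 0);
 *
 * so a bit is serviced exactly once even if request_pending() races with
 * the interrupt.
 */
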
static void request_pp_done_pending(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        reinit_completion(&mdp5_crtc->pp_completion);
}

static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        bool start = !mdp5_cstate->defer_start;

        mdp5_cstate->defer_start = false;

        DBG("%s: flush=%08x", crtc->name, flush_mask);

        return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_hw_mixer *mixer, *r_mixer;
        struct drm_plane *plane;
        uint32_t flush_mask = 0;

        /* this should not happen: */
        if (WARN_ON(!mdp5_cstate->ctl))
                return 0;

        drm_atomic_crtc_for_each_plane(plane, crtc) {
                if (!plane->state->visible)
                        continue;
                flush_mask |= mdp5_plane_get_flush(plane);
        }

        mixer = mdp5_cstate->pipeline.mixer;
        flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);

        r_mixer = mdp5_cstate->pipeline.r_mixer;
        if (r_mixer)
                flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);

        return crtc_flush(crtc, flush_mask);
}

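/*
 * For illustration, with a single visible plane staged on one mixer the
 * mask passed to crtc_flush() above reduces to roughly (assumed values):
 *
 *      flush_mask = mdp5_plane_get_flush(plane) |
 *                   mdp_ctl_flush_mask_lm(mixer->lm);
 */
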
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        struct drm_device *dev = crtc->dev;
        struct drm_pending_vblank_event *event;
        unsigned long flags;

        spin_lock_irqsave(&dev->event_lock, flags);
        event = mdp5_crtc->event;
        if (event) {
                mdp5_crtc->event = NULL;
                DBG("%s: send event: %p", crtc->name, event);
                drm_crtc_send_vblank_event(crtc, event);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);

        if (ctl && !crtc->state->enable) {
                /* set STAGE_UNUSED for all layers */
                mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
                /* XXX: What to do here? */
                /* mdp5_crtc->ctl = NULL; */
        }
}

static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
        struct mdp5_crtc *mdp5_crtc =
                container_of(work, struct mdp5_crtc, unref_cursor_work);
        struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
        struct msm_kms *kms = &mdp5_kms->base.base;

        msm_gem_unpin_iova(val, kms->aspace);
        drm_gem_object_put_unlocked(val);
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

        drm_crtc_cleanup(crtc);
        drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);

        kfree(mdp5_crtc);
}

static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
{
        switch (stage) {
        case STAGE0: return MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA;
        case STAGE1: return MDP5_LM_BLEND_COLOR_OUT_STAGE1_FG_ALPHA;
        case STAGE2: return MDP5_LM_BLEND_COLOR_OUT_STAGE2_FG_ALPHA;
        case STAGE3: return MDP5_LM_BLEND_COLOR_OUT_STAGE3_FG_ALPHA;
        case STAGE4: return MDP5_LM_BLEND_COLOR_OUT_STAGE4_FG_ALPHA;
        case STAGE5: return MDP5_LM_BLEND_COLOR_OUT_STAGE5_FG_ALPHA;
        case STAGE6: return MDP5_LM_BLEND_COLOR_OUT_STAGE6_FG_ALPHA;
        default:
                return 0;
        }
}

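/*
 * Each stage owns one FG_ALPHA bit in LM_BLEND_COLOR_OUT; blend_setup()
 * below ORs these masks into mixer_op_mode stage by stage, so e.g. two
 * alpha-blended stages roughly yield (assumed reading of the register):
 *
 *      mixer_op_mode = mask(STAGE0) | mask(STAGE1);
 */
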
/*
 * left/right pipe offsets for the stage array used in blend_setup()
 */
enum {
        PIPE_LEFT = 0,
        PIPE_RIGHT,
        MAX_PIPE_STAGE
};

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * If no base layer is available, border will be enabled as the base layer.
 * Otherwise all layers will be blended based on their stage calculated
 * in mdp5_crtc_atomic_check.
 */
static void blend_setup(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        const struct mdp5_cfg_hw *hw_cfg;
        struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
        const struct mdp_format *format;
        struct mdp5_hw_mixer *mixer = pipeline->mixer;
        uint32_t lm = mixer->lm;
        struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
        uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
        unsigned long flags;
        enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
        int i, plane_cnt = 0;
        bool bg_alpha_enabled = false;
        u32 mixer_op_mode = 0;
        u32 val;
#define blender(stage)  ((stage) - STAGE0)

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

        /* ctl could be released already when we are shutting down: */
        /* XXX: Can this happen now? */
        if (!ctl)
                goto out;

        /* Collect all plane information */
        drm_atomic_crtc_for_each_plane(plane, crtc) {
                enum mdp5_pipe right_pipe;

                if (!plane->state->visible)
                        continue;

                pstate = to_mdp5_plane_state(plane->state);
                pstates[pstate->stage] = pstate;
                stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
                /*
                 * if we have a right mixer, stage the same pipe as we
                 * have on the left mixer
                 */
                if (r_mixer)
                        r_stage[pstate->stage][PIPE_LEFT] =
                                                mdp5_plane_pipe(plane);
                /*
                 * if we have a right pipe (i.e. the plane comprises two
                 * hwpipes), then stage the right pipe on the right side of
                 * both the layer mixers
                 */
                right_pipe = mdp5_plane_right_pipe(plane);
                if (right_pipe) {
                        stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                        r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
                }

                plane_cnt++;
        }

        if (!pstates[STAGE_BASE]) {
                ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
                DBG("Border Color is enabled");
        } else if (plane_cnt) {
                format = to_mdp_format(msm_framebuffer_format(pstates[STAGE_BASE]->base.fb));

                if (format->alpha_enable)
                        bg_alpha_enabled = true;
        }

        /* The reset for blending */
        for (i = STAGE0; i <= STAGE_MAX; i++) {
                if (!pstates[i])
                        continue;

                format = to_mdp_format(
                        msm_framebuffer_format(pstates[i]->base.fb));
                plane = pstates[i]->base.plane;
                blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                        MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST);
                fg_alpha = pstates[i]->alpha;
                bg_alpha = 0xFF - pstates[i]->alpha;

                if (!format->alpha_enable && bg_alpha_enabled)
                        mixer_op_mode = 0;
                else
                        mixer_op_mode |= mdp5_lm_use_fg_alpha_mask(i);

                DBG("Stage %d fg_alpha %x bg_alpha %x", i, fg_alpha, bg_alpha);

                if (format->alpha_enable && pstates[i]->premultiplied) {
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                        MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                } else if (format->alpha_enable) {
                        blend_op = MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_PIXEL) |
                                MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL);
                        if (fg_alpha != 0xff) {
                                bg_alpha = fg_alpha;
                                blend_op |=
                                        MDP5_LM_BLEND_OP_MODE_FG_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_FG_INV_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_BG_MOD_ALPHA |
                                        MDP5_LM_BLEND_OP_MODE_BG_INV_MOD_ALPHA;
                        } else {
                                blend_op |= MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA;
                        }
                }

                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(lm,
                                blender(i)), blend_op);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
                                blender(i)), fg_alpha);
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
                                blender(i)), bg_alpha);
                if (r_mixer) {
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
                                        blender(i)), blend_op);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
                                        blender(i)), fg_alpha);
                        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
                                        blender(i)), bg_alpha);
                }
        }

        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
                   val | mixer_op_mode);
        if (r_mixer) {
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
                           val | mixer_op_mode);
        }

        mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
                       ctl_blend_flags);
out:
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

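/*
 * Worked example for the constant-alpha path above: an opaque-format plane
 * with pstates[i]->alpha == 0xff keeps blend_op = FG_ALPHA(FG_CONST) |
 * BG_ALPHA(BG_CONST) and programs fg_alpha = 0xff, bg_alpha = 0xFF - 0xff
 * = 0x00, i.e. the stage output is 100% foreground; alpha == 0x80 instead
 * gives fg = 0x80 and bg = 0x7f, an even constant-alpha mix.
 */
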
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
        struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
        uint32_t lm = mixer->lm;
        u32 mixer_width, val;
        unsigned long flags;
        struct drm_display_mode *mode;

        if (WARN_ON(!crtc->state))
                return;

        mode = &crtc->state->adjusted_mode;

        DBG("%s: set mode: " DRM_MODE_FMT, crtc->name, DRM_MODE_ARG(mode));

        mixer_width = mode->hdisplay;
        if (r_mixer)
                mixer_width /= 2;

        spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
                        MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                        MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

        /* Assign mixer to LEFT side in source split mode */
        val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
        val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
        mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);

        if (r_mixer) {
                u32 r_lm = r_mixer->lm;

                mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
                           MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
                           MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));

                /* Assign mixer to RIGHT side in source split mode */
                val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
                val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
                mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
        }

        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

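/*
 * In source-split mode the adjusted mode spans both layer mixers, so each
 * LM is programmed with half of hdisplay; e.g. (hypothetical numbers) a
 * 2560-wide mode with an r_mixer gives mixer_width = 1280 on either side.
 */
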
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct device *dev = &mdp5_kms->pdev->dev;
        unsigned long flags;

        DBG("%s", crtc->name);

        if (WARN_ON(!mdp5_crtc->enabled))
                return;

        /* Disable/save vblank irq handling before power is disabled */
        drm_crtc_vblank_off(crtc);

        if (mdp5_cstate->cmd_mode)
                mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
        pm_runtime_put_sync(dev);

        if (crtc->state->event && !crtc->state->active) {
                WARN_ON(mdp5_crtc->event);
                spin_lock_irqsave(&mdp5_kms->dev->event_lock, flags);
                drm_crtc_send_vblank_event(crtc, crtc->state->event);
                crtc->state->event = NULL;
                spin_unlock_irqrestore(&mdp5_kms->dev->event_lock, flags);
        }

        mdp5_crtc->enabled = false;
}

static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct device *dev = &mdp5_kms->pdev->dev;

        DBG("%s", crtc->name);

        if (WARN_ON(mdp5_crtc->enabled))
                return;

        pm_runtime_get_sync(dev);

        if (mdp5_crtc->lm_cursor_enabled) {
                /*
                 * Restore LM cursor state, as it might have been lost
                 * with suspend:
                 */
                if (mdp5_crtc->cursor.iova) {
                        unsigned long flags;

                        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
                        mdp5_crtc_restore_cursor(crtc);
                        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

                        mdp5_ctl_set_cursor(mdp5_cstate->ctl,
                                            &mdp5_cstate->pipeline, 0, true);
                } else {
                        mdp5_ctl_set_cursor(mdp5_cstate->ctl,
                                            &mdp5_cstate->pipeline, 0, false);
                }
        }

        /* Restore vblank irq handling after power is enabled */
        drm_crtc_vblank_on(crtc);

        mdp5_crtc_mode_set_nofb(crtc);

        mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);

        if (mdp5_cstate->cmd_mode)
                mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);

        mdp5_crtc->enabled = true;
}

int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
                             struct drm_crtc_state *new_crtc_state,
                             bool need_right_mixer)
{
        struct mdp5_crtc_state *mdp5_cstate =
                        to_mdp5_crtc_state(new_crtc_state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_interface *intf;
        bool new_mixer = false;

        new_mixer = !pipeline->mixer;

        if ((need_right_mixer && !pipeline->r_mixer) ||
            (!need_right_mixer && pipeline->r_mixer))
                new_mixer = true;

        if (new_mixer) {
                struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
                struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
                u32 caps;
                int ret;

                caps = MDP_LM_CAP_DISPLAY;
                if (need_right_mixer)
                        caps |= MDP_LM_CAP_PAIR;

                ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
                                        &pipeline->mixer, need_right_mixer ?
                                        &pipeline->r_mixer : NULL);
                if (ret)
                        return ret;

                mdp5_mixer_release(new_crtc_state->state, old_mixer);
                if (old_r_mixer) {
                        mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
                        if (!need_right_mixer)
                                pipeline->r_mixer = NULL;
                }
        }

        /*
         * these should have been already set up in the encoder's atomic
         * check (called by drm_atomic_helper_check_modeset)
         */
        intf = pipeline->intf;

        mdp5_cstate->err_irqmask = intf2err(intf->num);
        mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);

        if ((intf->type == INTF_DSI) &&
            (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
                mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
                mdp5_cstate->cmd_mode = true;
        } else {
                mdp5_cstate->pp_done_irqmask = 0;
                mdp5_cstate->cmd_mode = false;
        }

        return 0;
}

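/*
 * Note on the (re)assignment above: a fresh mdp5_mixer_assign() happens in
 * exactly two cases - no mixer bound yet, or the need for a right mixer
 * flipped since the last check. E.g. switching to a mode wider than the
 * LM max width turns need_right_mixer on and adds MDP_LM_CAP_PAIR to the
 * requested caps.
 */
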
struct plane_state {
        struct drm_plane *plane;
        struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
        struct plane_state *pa = (struct plane_state *)a;
        struct plane_state *pb = (struct plane_state *)b;
        return pa->state->zpos - pb->state->zpos;
}

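/*
 * pstate_cmp() is the comparator handed to sort() from <linux/sort.h> in
 * mdp5_crtc_atomic_check() below: pstates[] ends up ordered by the planes'
 * zpos property, lowest first, so pstates[0] is the bottom-most layer.
 */
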
/* is there a helper for this? */
static bool is_fullscreen(struct drm_crtc_state *cstate,
                          struct drm_plane_state *pstate)
{
        return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
               ((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
               ((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}

static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
                                        struct drm_crtc_state *new_crtc_state,
                                        struct drm_plane_state *bpstate)
{
        struct mdp5_crtc_state *mdp5_cstate =
                        to_mdp5_crtc_state(new_crtc_state);

        /*
         * if we're in source split mode, it's mandatory to have
         * border out on the base stage
         */
        if (mdp5_cstate->pipeline.r_mixer)
                return STAGE0;

        /* if the bottom-most layer is not fullscreen, we need to use
         * it for solid-color:
         */
        if (!is_fullscreen(new_crtc_state, bpstate))
                return STAGE0;

        return STAGE_BASE;
}

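/*
 * For illustration: with a right mixer, or with a bottom layer that does
 * not cover the whole mode, planes start at STAGE0 and the base stage is
 * left to border/solid color; only a fullscreen bottom layer may itself
 * occupy STAGE_BASE.
 */
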
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
                                  struct drm_crtc_state *state)
{
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct drm_plane *plane;
        struct drm_device *dev = crtc->dev;
        struct plane_state pstates[STAGE_MAX + 1];
        const struct mdp5_cfg_hw *hw_cfg;
        const struct drm_plane_state *pstate;
        const struct drm_display_mode *mode = &state->adjusted_mode;
        bool cursor_plane = false;
        bool need_right_mixer = false;
        int cnt = 0, i;
        int ret;
        enum mdp_mixer_stage_id start;

        DBG("%s: check", crtc->name);

        drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
                if (!pstate->visible)
                        continue;

                pstates[cnt].plane = plane;
                pstates[cnt].state = to_mdp5_plane_state(pstate);

                /*
                 * if any plane on this crtc uses 2 hwpipes, then we need
                 * the crtc to have a right hwmixer.
                 */
                if (pstates[cnt].state->r_hwpipe)
                        need_right_mixer = true;
                cnt++;

                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        cursor_plane = true;
        }

        /* bail out early if there aren't any planes */
        if (!cnt)
                return 0;

        hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

        /*
         * we need a right hwmixer if the mode's width is greater than a single
         * LM's max width
         */
        if (mode->hdisplay > hw_cfg->lm.max_width)
                need_right_mixer = true;

        ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
                return ret;
        }

        /* assign a stage based on sorted zpos property */
        sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

        /* trigger a warning if cursor isn't the highest zorder */
        WARN_ON(cursor_plane &&
                (pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));

        start = get_start_stage(crtc, state, &pstates[0].state->base);

        /* verify that there are not too many planes attached to crtc
         * and that we don't have conflicting mixer stages:
         */
        if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
                DRM_DEV_ERROR(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
                              cnt, start);
                return -EINVAL;
        }

        for (i = 0; i < cnt; i++) {
                if (cursor_plane && (i == (cnt - 1)))
                        pstates[i].state->stage = hw_cfg->lm.nb_stages;
                else
                        pstates[i].state->stage = start + i;
                DBG("%s: assign pipe %s on stage=%d", crtc->name,
                    pstates[i].plane->name,
                    pstates[i].state->stage);
        }

        return 0;
}

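/*
 * Worked example (assumed numbers): three visible planes with zpos 4/1/2
 * sort to 1/2/4; with start == STAGE_BASE they get STAGE_BASE, STAGE0 and
 * STAGE1 in that order, while a cursor plane, if present, is pinned to the
 * top-most stage (hw_cfg->lm.nb_stages) regardless of its zpos.
 */
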
static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        DBG("%s: begin", crtc->name);
}

static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
                                   struct drm_crtc_state *old_crtc_state)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct drm_device *dev = crtc->dev;
        unsigned long flags;

        DBG("%s: event: %p", crtc->name, crtc->state->event);

        WARN_ON(mdp5_crtc->event);

        spin_lock_irqsave(&dev->event_lock, flags);
        mdp5_crtc->event = crtc->state->event;
        crtc->state->event = NULL;
        spin_unlock_irqrestore(&dev->event_lock, flags);

        /*
         * If no CTL has been allocated in mdp5_crtc_atomic_check(),
         * it means we are trying to flush a CRTC whose state is disabled:
         * nothing else needs to be done.
         */
        /* XXX: Can this happen now ? */
        if (unlikely(!mdp5_cstate->ctl))
                return;

        blend_setup(crtc);

        /* PP_DONE irq is only used by command mode for now.
         * It is better to request pending before FLUSH and START trigger
         * to make sure no pp_done irq missed.
         * This is safe because no pp_done will happen before SW trigger
         * in command mode.
         */
        if (mdp5_cstate->cmd_mode)
                request_pp_done_pending(crtc);

        mdp5_crtc->flushed_mask = crtc_flush_all(crtc);

        /* XXX are we leaking out state here? */
        mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
        mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
        mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;

        request_pending(crtc, PENDING_FLIP);
}

static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        uint32_t xres = crtc->mode.hdisplay;
        uint32_t yres = crtc->mode.vdisplay;

        /*
         * Cursor Region Of Interest (ROI) is a plane read from cursor
         * buffer to render. The ROI region is determined by the visibility of
         * the cursor point. In the default Cursor image the cursor point will
         * be at the top left of the cursor image.
         *
         * Without rotation:
         * If the cursor point reaches the right (xres - x < cursor.width) or
         * bottom (yres - y < cursor.height) boundary of the screen, then ROI
         * width and ROI height need to be evaluated to crop the cursor image
         * accordingly.
         * (xres-x) will be new cursor width when x > (xres - cursor.width)
         * (yres-y) will be new cursor height when y > (yres - cursor.height)
         *
         * With rotation:
         * We get negative x and/or y coordinates.
         * (cursor.width - abs(x)) will be new cursor width when x < 0
         * (cursor.height - abs(y)) will be new cursor height when y < 0
         */
        if (mdp5_crtc->cursor.x >= 0)
                *roi_w = min(mdp5_crtc->cursor.width, xres -
                        mdp5_crtc->cursor.x);
        else
                *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
        if (mdp5_crtc->cursor.y >= 0)
                *roi_h = min(mdp5_crtc->cursor.height, yres -
                        mdp5_crtc->cursor.y);
        else
                *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}

static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
        uint32_t blendcfg, stride;
        uint32_t x, y, src_x, src_y, width, height;
        uint32_t roi_w, roi_h;
        int lm;

        assert_spin_locked(&mdp5_crtc->cursor.lock);

        lm = mdp5_cstate->pipeline.mixer->lm;

        x = mdp5_crtc->cursor.x;
        y = mdp5_crtc->cursor.y;
        width = mdp5_crtc->cursor.width;
        height = mdp5_crtc->cursor.height;

        stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);

        get_roi(crtc, &roi_w, &roi_h);

        /* If the cursor buffer overlaps due to rotation on the
         * upper or left screen border, the pixel offset inside
         * the cursor buffer of the ROI is the positive overlap
         * distance.
         */
        if (mdp5_crtc->cursor.x < 0) {
                src_x = abs(mdp5_crtc->cursor.x);
                x = 0;
        } else {
                src_x = 0;
        }
        if (mdp5_crtc->cursor.y < 0) {
                src_y = abs(mdp5_crtc->cursor.y);
                y = 0;
        } else {
                src_y = 0;
        }
        DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
            crtc->name, x, y, roi_w, roi_h, src_x, src_y);

        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
                   MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
                   MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
                   MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
                   MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
                   MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
                   MDP5_LM_CURSOR_START_XY_Y_START(y) |
                   MDP5_LM_CURSOR_START_XY_X_START(x));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
                   MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
                   MDP5_LM_CURSOR_XY_SRC_X(src_x));
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
                   mdp5_crtc->cursor.iova);

        blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
        blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
        mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
}

static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
                                struct drm_file *file, uint32_t handle,
                                uint32_t width, uint32_t height)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct drm_device *dev = crtc->dev;
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct platform_device *pdev = mdp5_kms->pdev;
        struct msm_kms *kms = &mdp5_kms->base.base;
        struct drm_gem_object *cursor_bo, *old_bo = NULL;
        struct mdp5_ctl *ctl;
        int ret;
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        bool cursor_enable = true;
        unsigned long flags;

        if (!mdp5_crtc->lm_cursor_enabled) {
                dev_warn(dev->dev,
                         "cursor_set is deprecated with cursor planes\n");
                return -EINVAL;
        }

        if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
                DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
                return -EINVAL;
        }

        ctl = mdp5_cstate->ctl;
        if (!ctl)
                return -EINVAL;

        /* don't support LM cursors when we have source split enabled */
        if (mdp5_cstate->pipeline.r_mixer)
                return -EINVAL;

        if (!handle) {
                DBG("Cursor off");
                cursor_enable = false;
                mdp5_crtc->cursor.iova = 0;
                pm_runtime_get_sync(&pdev->dev);
                goto set_cursor;
        }

        cursor_bo = drm_gem_object_lookup(file, handle);
        if (!cursor_bo)
                return -ENOENT;

        ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace,
                                       &mdp5_crtc->cursor.iova);
        if (ret)
                return -EINVAL;

        pm_runtime_get_sync(&pdev->dev);

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        old_bo = mdp5_crtc->cursor.scanout_bo;

        mdp5_crtc->cursor.scanout_bo = cursor_bo;
        mdp5_crtc->cursor.width = width;
        mdp5_crtc->cursor.height = height;

        mdp5_crtc_restore_cursor(crtc);

        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

set_cursor:
        ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
        if (ret) {
                DRM_DEV_ERROR(dev->dev, "failed to %sable cursor: %d\n",
                              cursor_enable ? "en" : "dis", ret);
                goto end;
        }

        crtc_flush(crtc, flush_mask);

end:
        pm_runtime_put_sync(&pdev->dev);
        if (old_bo) {
                drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
                /* enable vblank to complete cursor work: */
                request_pending(crtc, PENDING_CURSOR);
        }
        return ret;
}

static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct mdp5_kms *mdp5_kms = get_kms(crtc);
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
        struct drm_device *dev = crtc->dev;
        uint32_t roi_w;
        uint32_t roi_h;
        unsigned long flags;

        if (!mdp5_crtc->lm_cursor_enabled) {
                dev_warn(dev->dev,
                         "cursor_move is deprecated with cursor planes\n");
                return -EINVAL;
        }

        /* don't support LM cursors when we have source split enabled */
        if (mdp5_cstate->pipeline.r_mixer)
                return -EINVAL;

        /* In case the CRTC is disabled, just drop the cursor update */
        if (unlikely(!crtc->state->enable))
                return 0;

        /* accept negative x/y coordinates up to maximum cursor overlap */
        mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
        mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);

        get_roi(crtc, &roi_w, &roi_h);

        pm_runtime_get_sync(&mdp5_kms->pdev->dev);

        spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
        mdp5_crtc_restore_cursor(crtc);
        spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);

        crtc_flush(crtc, flush_mask);

        pm_runtime_put_sync(&mdp5_kms->pdev->dev);

        return 0;
}

static void
mdp5_crtc_atomic_print_state(struct drm_printer *p,
                             const struct drm_crtc_state *state)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
        struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
        struct mdp5_kms *mdp5_kms = get_kms(state->crtc);

        if (WARN_ON(!pipeline))
                return;

        if (mdp5_cstate->ctl)
                drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));

        drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
                   pipeline->mixer->name : "(null)");

        if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
                drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
                           pipeline->r_mixer->name : "(null)");

        drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}

static void mdp5_crtc_reset(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (crtc->state) {
                __drm_atomic_helper_crtc_destroy_state(crtc->state);
                kfree(to_mdp5_crtc_state(crtc->state));
        }

        mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);

        if (mdp5_cstate) {
                mdp5_cstate->base.crtc = crtc;
                crtc->state = &mdp5_cstate->base;
        }
}

static struct drm_crtc_state *
mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc->state))
                return NULL;

        mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
                              sizeof(*mdp5_cstate), GFP_KERNEL);
        if (!mdp5_cstate)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);

        return &mdp5_cstate->base;
}

static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);

        __drm_atomic_helper_crtc_destroy_state(state);

        kfree(mdp5_cstate);
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
        .set_config = drm_atomic_helper_set_config,
        .destroy = mdp5_crtc_destroy,
        .page_flip = drm_atomic_helper_page_flip,
        .reset = mdp5_crtc_reset,
        .atomic_duplicate_state = mdp5_crtc_duplicate_state,
        .atomic_destroy_state = mdp5_crtc_destroy_state,
        .cursor_set = mdp5_crtc_cursor_set,
        .cursor_move = mdp5_crtc_cursor_move,
        .atomic_print_state = mdp5_crtc_atomic_print_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .mode_set_nofb = mdp5_crtc_mode_set_nofb,
        .atomic_check = mdp5_crtc_atomic_check,
        .atomic_begin = mdp5_crtc_atomic_begin,
        .atomic_flush = mdp5_crtc_atomic_flush,
        .atomic_enable = mdp5_crtc_atomic_enable,
        .atomic_disable = mdp5_crtc_atomic_disable,
};

static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
        struct drm_crtc *crtc = &mdp5_crtc->base;
        struct msm_drm_private *priv = crtc->dev->dev_private;
        unsigned pending;

        mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

        pending = atomic_xchg(&mdp5_crtc->pending, 0);

        if (pending & PENDING_FLIP) {
                complete_flip(crtc, NULL);
        }

        if (pending & PENDING_CURSOR)
                drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

        DBG("%s: error: %08x", mdp5_crtc->base.name, irqstatus);
}

static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
        struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
                                                   pp_done);

        complete(&mdp5_crtc->pp_completion);
}

static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        int ret;

        ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
                                          msecs_to_jiffies(50));
        if (ret == 0)
                dev_warn(dev->dev, "pp done time out, lm=%d\n",
                         mdp5_cstate->pipeline.mixer->lm);
}

static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_ctl *ctl = mdp5_cstate->ctl;
        int ret;

        /* Should not call this function if crtc is disabled. */
        if (!ctl)
                return;

        ret = drm_crtc_vblank_get(crtc);
        if (ret)
                return;

        ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
                ((mdp5_ctl_get_commit_status(ctl) &
                mdp5_crtc->flushed_mask) == 0),
                msecs_to_jiffies(50));
        if (ret <= 0)
                dev_warn(dev->dev, "vblank time out, crtc=%d\n", mdp5_crtc->id);

        mdp5_crtc->flushed_mask = 0;

        drm_crtc_vblank_put(crtc);
}

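/*
 * Rough picture of the flush handshake: crtc_flush_all() records the bits
 * requested from the CTL in mdp5_crtc->flushed_mask, and the wait above
 * completes once mdp5_ctl_get_commit_status() no longer reports any of
 * those bits, i.e. (presumably) the double-buffered registers have been
 * taken over by the hardware.
 */
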
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
        struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
        return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
        struct mdp5_kms *mdp5_kms = get_kms(crtc);

        /* should this be done elsewhere ? */
        mdp_irq_update(&mdp5_kms->base);

        mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}

struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return mdp5_cstate->ctl;
}

struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc))
                return ERR_PTR(-EINVAL);

        mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
                ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
}

struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate;

        if (WARN_ON(!crtc))
                return ERR_PTR(-EINVAL);

        mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        return &mdp5_cstate->pipeline;
}

void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
        struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);

        if (mdp5_cstate->cmd_mode)
                mdp5_crtc_wait_for_pp_done(crtc);
        else
                mdp5_crtc_wait_for_flush_done(crtc);
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
                                struct drm_plane *plane,
                                struct drm_plane *cursor_plane, int id)
{
        struct drm_crtc *crtc = NULL;
        struct mdp5_crtc *mdp5_crtc;

        mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
        if (!mdp5_crtc)
                return ERR_PTR(-ENOMEM);

        crtc = &mdp5_crtc->base;

        mdp5_crtc->id = id;

        spin_lock_init(&mdp5_crtc->lm_lock);
        spin_lock_init(&mdp5_crtc->cursor.lock);
        init_completion(&mdp5_crtc->pp_completion);

        mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
        mdp5_crtc->err.irq = mdp5_crtc_err_irq;
        mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;

        mdp5_crtc->lm_cursor_enabled = cursor_plane ? false : true;

        drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
                                  &mdp5_crtc_funcs, NULL);

        drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
                        "unref cursor", unref_cursor_worker);

        drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);

        return crtc;
}