/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
19 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
20 #include <linux/sort.h>
21 #include <linux/debugfs.h>
22 #include <linux/ktime.h>
23 #include <drm/drm_mode.h>
24 #include <drm/drm_crtc.h>
25 #include <drm/drm_crtc_helper.h>
26 #include <drm/drm_flip_work.h>
27 #include <drm/drm_rect.h>
30 #include "dpu_hw_lm.h"
31 #include "dpu_hw_ctl.h"
33 #include "dpu_plane.h"
34 #include "dpu_encoder.h"
36 #include "dpu_core_perf.h"
37 #include "dpu_trace.h"
39 #define DPU_DRM_BLEND_OP_NOT_DEFINED 0
40 #define DPU_DRM_BLEND_OP_OPAQUE 1
41 #define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
42 #define DPU_DRM_BLEND_OP_COVERAGE 3
43 #define DPU_DRM_BLEND_OP_MAX 4
45 /* layer mixer index on dpu_crtc */
49 static struct dpu_kms
*_dpu_crtc_get_kms(struct drm_crtc
*crtc
)
51 struct msm_drm_private
*priv
= crtc
->dev
->dev_private
;
53 return to_dpu_kms(priv
->kms
);
/* CRTC destroy hook: tear down the DRM core state and free our wrapper. */
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	DPU_DEBUG("\n");

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}
69 static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer
*mixer
,
70 struct dpu_plane_state
*pstate
, struct dpu_format
*format
)
72 struct dpu_hw_mixer
*lm
= mixer
->hw_lm
;
74 struct drm_format_name_buf format_name
;
76 /* default to opaque blending */
77 blend_op
= DPU_BLEND_FG_ALPHA_FG_CONST
|
78 DPU_BLEND_BG_ALPHA_BG_CONST
;
80 if (format
->alpha_enable
) {
81 /* coverage blending */
82 blend_op
= DPU_BLEND_FG_ALPHA_FG_PIXEL
|
83 DPU_BLEND_BG_ALPHA_FG_PIXEL
|
84 DPU_BLEND_BG_INV_ALPHA
;
87 lm
->ops
.setup_blend_config(lm
, pstate
->stage
,
90 DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
91 drm_get_format_name(format
->base
.pixel_format
, &format_name
),
92 format
->alpha_enable
, blend_op
);
95 static void _dpu_crtc_program_lm_output_roi(struct drm_crtc
*crtc
)
97 struct dpu_crtc
*dpu_crtc
;
98 struct dpu_crtc_state
*crtc_state
;
99 int lm_idx
, lm_horiz_position
;
101 dpu_crtc
= to_dpu_crtc(crtc
);
102 crtc_state
= to_dpu_crtc_state(crtc
->state
);
104 lm_horiz_position
= 0;
105 for (lm_idx
= 0; lm_idx
< crtc_state
->num_mixers
; lm_idx
++) {
106 const struct drm_rect
*lm_roi
= &crtc_state
->lm_bounds
[lm_idx
];
107 struct dpu_hw_mixer
*hw_lm
= crtc_state
->mixers
[lm_idx
].hw_lm
;
108 struct dpu_hw_mixer_cfg cfg
;
110 if (!lm_roi
|| !drm_rect_visible(lm_roi
))
113 cfg
.out_width
= drm_rect_width(lm_roi
);
114 cfg
.out_height
= drm_rect_height(lm_roi
);
115 cfg
.right_mixer
= lm_horiz_position
++;
117 hw_lm
->ops
.setup_mixer_out(hw_lm
, &cfg
);
121 static void _dpu_crtc_blend_setup_mixer(struct drm_crtc
*crtc
,
122 struct dpu_crtc
*dpu_crtc
, struct dpu_crtc_mixer
*mixer
)
124 struct drm_plane
*plane
;
125 struct drm_framebuffer
*fb
;
126 struct drm_plane_state
*state
;
127 struct dpu_crtc_state
*cstate
= to_dpu_crtc_state(crtc
->state
);
128 struct dpu_plane_state
*pstate
= NULL
;
129 struct dpu_format
*format
;
130 struct dpu_hw_ctl
*ctl
= mixer
->lm_ctl
;
131 struct dpu_hw_stage_cfg
*stage_cfg
= &dpu_crtc
->stage_cfg
;
134 uint32_t stage_idx
, lm_idx
;
135 int zpos_cnt
[DPU_STAGE_MAX
+ 1] = { 0 };
136 bool bg_alpha_enable
= false;
138 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
139 state
= plane
->state
;
143 pstate
= to_dpu_plane_state(state
);
146 dpu_plane_get_ctl_flush(plane
, ctl
, &flush_mask
);
148 DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
152 dpu_plane_pipe(plane
) - SSPP_VIG0
,
153 state
->fb
? state
->fb
->base
.id
: -1);
155 format
= to_dpu_format(msm_framebuffer_format(pstate
->base
.fb
));
157 if (pstate
->stage
== DPU_STAGE_BASE
&& format
->alpha_enable
)
158 bg_alpha_enable
= true;
160 stage_idx
= zpos_cnt
[pstate
->stage
]++;
161 stage_cfg
->stage
[pstate
->stage
][stage_idx
] =
162 dpu_plane_pipe(plane
);
163 stage_cfg
->multirect_index
[pstate
->stage
][stage_idx
] =
164 pstate
->multirect_index
;
166 trace_dpu_crtc_setup_mixer(DRMID(crtc
), DRMID(plane
),
167 state
, pstate
, stage_idx
,
168 dpu_plane_pipe(plane
) - SSPP_VIG0
,
169 format
->base
.pixel_format
,
170 fb
? fb
->modifier
: 0);
172 /* blend config update */
173 for (lm_idx
= 0; lm_idx
< cstate
->num_mixers
; lm_idx
++) {
174 _dpu_crtc_setup_blend_cfg(mixer
+ lm_idx
,
177 mixer
[lm_idx
].flush_mask
|= flush_mask
;
179 if (bg_alpha_enable
&& !format
->alpha_enable
)
180 mixer
[lm_idx
].mixer_op_mode
= 0;
182 mixer
[lm_idx
].mixer_op_mode
|=
187 _dpu_crtc_program_lm_output_roi(crtc
);
191 * _dpu_crtc_blend_setup - configure crtc mixers
192 * @crtc: Pointer to drm crtc structure
194 static void _dpu_crtc_blend_setup(struct drm_crtc
*crtc
)
196 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
197 struct dpu_crtc_state
*cstate
= to_dpu_crtc_state(crtc
->state
);
198 struct dpu_crtc_mixer
*mixer
= cstate
->mixers
;
199 struct dpu_hw_ctl
*ctl
;
200 struct dpu_hw_mixer
*lm
;
203 DPU_DEBUG("%s\n", dpu_crtc
->name
);
205 for (i
= 0; i
< cstate
->num_mixers
; i
++) {
206 if (!mixer
[i
].hw_lm
|| !mixer
[i
].lm_ctl
) {
207 DPU_ERROR("invalid lm or ctl assigned to mixer\n");
210 mixer
[i
].mixer_op_mode
= 0;
211 mixer
[i
].flush_mask
= 0;
212 if (mixer
[i
].lm_ctl
->ops
.clear_all_blendstages
)
213 mixer
[i
].lm_ctl
->ops
.clear_all_blendstages(
217 /* initialize stage cfg */
218 memset(&dpu_crtc
->stage_cfg
, 0, sizeof(struct dpu_hw_stage_cfg
));
220 _dpu_crtc_blend_setup_mixer(crtc
, dpu_crtc
, mixer
);
222 for (i
= 0; i
< cstate
->num_mixers
; i
++) {
223 ctl
= mixer
[i
].lm_ctl
;
226 lm
->ops
.setup_alpha_out(lm
, mixer
[i
].mixer_op_mode
);
228 mixer
[i
].flush_mask
|= ctl
->ops
.get_bitmask_mixer(ctl
,
229 mixer
[i
].hw_lm
->idx
);
231 /* stage config flush mask */
232 ctl
->ops
.update_pending_flush(ctl
, mixer
[i
].flush_mask
);
234 DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
235 mixer
[i
].hw_lm
->idx
- LM_0
,
236 mixer
[i
].mixer_op_mode
,
238 mixer
[i
].flush_mask
);
240 ctl
->ops
.setup_blendstage(ctl
, mixer
[i
].hw_lm
->idx
,
241 &dpu_crtc
->stage_cfg
);
246 * _dpu_crtc_complete_flip - signal pending page_flip events
247 * Any pending vblank events are added to the vblank_event_list
248 * so that the next vblank interrupt shall signal them.
249 * However PAGE_FLIP events are not handled through the vblank_event_list.
250 * This API signals any pending PAGE_FLIP events requested through
251 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event.
252 * @crtc: Pointer to drm crtc structure
254 static void _dpu_crtc_complete_flip(struct drm_crtc
*crtc
)
256 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
257 struct drm_device
*dev
= crtc
->dev
;
260 spin_lock_irqsave(&dev
->event_lock
, flags
);
261 if (dpu_crtc
->event
) {
262 DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc
->name
,
264 trace_dpu_crtc_complete_flip(DRMID(crtc
));
265 drm_crtc_send_vblank_event(crtc
, dpu_crtc
->event
);
266 dpu_crtc
->event
= NULL
;
268 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
271 enum dpu_intf_mode
dpu_crtc_get_intf_mode(struct drm_crtc
*crtc
)
273 struct drm_encoder
*encoder
;
275 if (!crtc
|| !crtc
->dev
) {
276 DPU_ERROR("invalid crtc\n");
277 return INTF_MODE_NONE
;
280 WARN_ON(!drm_modeset_is_locked(&crtc
->mutex
));
282 /* TODO: Returns the first INTF_MODE, could there be multiple values? */
283 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
284 return dpu_encoder_get_intf_mode(encoder
);
286 return INTF_MODE_NONE
;
289 void dpu_crtc_vblank_callback(struct drm_crtc
*crtc
)
291 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
293 /* keep statistics on vblank callback - with auto reset via debugfs */
294 if (ktime_compare(dpu_crtc
->vblank_cb_time
, ktime_set(0, 0)) == 0)
295 dpu_crtc
->vblank_cb_time
= ktime_get();
297 dpu_crtc
->vblank_cb_count
++;
298 _dpu_crtc_complete_flip(crtc
);
299 drm_crtc_handle_vblank(crtc
);
300 trace_dpu_crtc_vblank_cb(DRMID(crtc
));
303 static void dpu_crtc_release_bw_unlocked(struct drm_crtc
*crtc
)
306 struct drm_modeset_acquire_ctx ctx
;
308 DRM_MODESET_LOCK_ALL_BEGIN(crtc
->dev
, ctx
, 0, ret
);
309 dpu_core_perf_crtc_release_bw(crtc
);
310 DRM_MODESET_LOCK_ALL_END(ctx
, ret
);
312 DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
316 static void dpu_crtc_frame_event_work(struct kthread_work
*work
)
318 struct dpu_crtc_frame_event
*fevent
= container_of(work
,
319 struct dpu_crtc_frame_event
, work
);
320 struct drm_crtc
*crtc
= fevent
->crtc
;
321 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
323 bool frame_done
= false;
325 DPU_ATRACE_BEGIN("crtc_frame_event");
327 DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc
->base
.id
, fevent
->event
,
328 ktime_to_ns(fevent
->ts
));
330 if (fevent
->event
& (DPU_ENCODER_FRAME_EVENT_DONE
331 | DPU_ENCODER_FRAME_EVENT_ERROR
332 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD
)) {
334 if (atomic_read(&dpu_crtc
->frame_pending
) < 1) {
335 /* this should not happen */
336 DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
339 ktime_to_ns(fevent
->ts
),
340 atomic_read(&dpu_crtc
->frame_pending
));
341 } else if (atomic_dec_return(&dpu_crtc
->frame_pending
) == 0) {
342 /* release bandwidth and other resources */
343 trace_dpu_crtc_frame_event_done(DRMID(crtc
),
345 dpu_crtc_release_bw_unlocked(crtc
);
347 trace_dpu_crtc_frame_event_more_pending(DRMID(crtc
),
351 if (fevent
->event
& DPU_ENCODER_FRAME_EVENT_DONE
)
352 dpu_core_perf_crtc_update(crtc
, 0, false);
354 if (fevent
->event
& (DPU_ENCODER_FRAME_EVENT_DONE
355 | DPU_ENCODER_FRAME_EVENT_ERROR
))
359 if (fevent
->event
& DPU_ENCODER_FRAME_EVENT_PANEL_DEAD
)
360 DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
361 crtc
->base
.id
, ktime_to_ns(fevent
->ts
));
364 complete_all(&dpu_crtc
->frame_done_comp
);
366 spin_lock_irqsave(&dpu_crtc
->spin_lock
, flags
);
367 list_add_tail(&fevent
->list
, &dpu_crtc
->frame_event_list
);
368 spin_unlock_irqrestore(&dpu_crtc
->spin_lock
, flags
);
369 DPU_ATRACE_END("crtc_frame_event");
373 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
374 * registers this API to encoder for all frame event callbacks like
375 * frame_error, frame_done, idle_timeout, etc. Encoder may call different events
376 * from different context - IRQ, user thread, commit_thread, etc. Each event
377 * should be carefully reviewed and should be processed in proper task context
378 * to avoid schedulin delay or properly manage the irq context's bottom half
381 static void dpu_crtc_frame_event_cb(void *data
, u32 event
)
383 struct drm_crtc
*crtc
= (struct drm_crtc
*)data
;
384 struct dpu_crtc
*dpu_crtc
;
385 struct msm_drm_private
*priv
;
386 struct dpu_crtc_frame_event
*fevent
;
390 /* Nothing to do on idle event */
391 if (event
& DPU_ENCODER_FRAME_EVENT_IDLE
)
394 dpu_crtc
= to_dpu_crtc(crtc
);
395 priv
= crtc
->dev
->dev_private
;
396 crtc_id
= drm_crtc_index(crtc
);
398 trace_dpu_crtc_frame_event_cb(DRMID(crtc
), event
);
400 spin_lock_irqsave(&dpu_crtc
->spin_lock
, flags
);
401 fevent
= list_first_entry_or_null(&dpu_crtc
->frame_event_list
,
402 struct dpu_crtc_frame_event
, list
);
404 list_del_init(&fevent
->list
);
405 spin_unlock_irqrestore(&dpu_crtc
->spin_lock
, flags
);
408 DRM_ERROR("crtc%d event %d overflow\n", crtc
->base
.id
, event
);
412 fevent
->event
= event
;
414 fevent
->ts
= ktime_get();
415 kthread_queue_work(&priv
->event_thread
[crtc_id
].worker
, &fevent
->work
);
418 void dpu_crtc_complete_commit(struct drm_crtc
*crtc
,
419 struct drm_crtc_state
*old_state
)
421 if (!crtc
|| !crtc
->state
) {
422 DPU_ERROR("invalid crtc\n");
425 trace_dpu_crtc_complete_commit(DRMID(crtc
));
428 static void _dpu_crtc_setup_mixer_for_encoder(
429 struct drm_crtc
*crtc
,
430 struct drm_encoder
*enc
)
432 struct dpu_crtc_state
*cstate
= to_dpu_crtc_state(crtc
->state
);
433 struct dpu_kms
*dpu_kms
= _dpu_crtc_get_kms(crtc
);
434 struct dpu_rm
*rm
= &dpu_kms
->rm
;
435 struct dpu_crtc_mixer
*mixer
;
436 struct dpu_hw_ctl
*last_valid_ctl
= NULL
;
438 struct dpu_rm_hw_iter lm_iter
, ctl_iter
;
440 dpu_rm_init_hw_iter(&lm_iter
, enc
->base
.id
, DPU_HW_BLK_LM
);
441 dpu_rm_init_hw_iter(&ctl_iter
, enc
->base
.id
, DPU_HW_BLK_CTL
);
443 /* Set up all the mixers and ctls reserved by this encoder */
444 for (i
= cstate
->num_mixers
; i
< ARRAY_SIZE(cstate
->mixers
); i
++) {
445 mixer
= &cstate
->mixers
[i
];
447 if (!dpu_rm_get_hw(rm
, &lm_iter
))
449 mixer
->hw_lm
= (struct dpu_hw_mixer
*)lm_iter
.hw
;
451 /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
452 if (!dpu_rm_get_hw(rm
, &ctl_iter
)) {
453 DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
454 mixer
->hw_lm
->idx
- LM_0
);
455 mixer
->lm_ctl
= last_valid_ctl
;
457 mixer
->lm_ctl
= (struct dpu_hw_ctl
*)ctl_iter
.hw
;
458 last_valid_ctl
= mixer
->lm_ctl
;
461 /* Shouldn't happen, mixers are always >= ctls */
462 if (!mixer
->lm_ctl
) {
463 DPU_ERROR("no valid ctls found for lm %d\n",
464 mixer
->hw_lm
->idx
- LM_0
);
468 mixer
->encoder
= enc
;
470 cstate
->num_mixers
++;
471 DPU_DEBUG("setup mixer %d: lm %d\n",
472 i
, mixer
->hw_lm
->idx
- LM_0
);
473 DPU_DEBUG("setup mixer %d: ctl %d\n",
474 i
, mixer
->lm_ctl
->idx
- CTL_0
);
478 static void _dpu_crtc_setup_mixers(struct drm_crtc
*crtc
)
480 struct drm_encoder
*enc
;
482 WARN_ON(!drm_modeset_is_locked(&crtc
->mutex
));
484 /* Check for mixers on all encoders attached to this crtc */
485 drm_for_each_encoder_mask(enc
, crtc
->dev
, crtc
->state
->encoder_mask
)
486 _dpu_crtc_setup_mixer_for_encoder(crtc
, enc
);
489 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc
*crtc
,
490 struct drm_crtc_state
*state
)
492 struct dpu_crtc_state
*cstate
= to_dpu_crtc_state(state
);
493 struct drm_display_mode
*adj_mode
= &state
->adjusted_mode
;
494 u32 crtc_split_width
= adj_mode
->hdisplay
/ cstate
->num_mixers
;
497 for (i
= 0; i
< cstate
->num_mixers
; i
++) {
498 struct drm_rect
*r
= &cstate
->lm_bounds
[i
];
499 r
->x1
= crtc_split_width
* i
;
501 r
->x2
= r
->x1
+ crtc_split_width
;
502 r
->y2
= adj_mode
->vdisplay
;
504 trace_dpu_crtc_setup_lm_bounds(DRMID(crtc
), i
, r
);
507 drm_mode_debug_printmodeline(adj_mode
);
510 static void dpu_crtc_atomic_begin(struct drm_crtc
*crtc
,
511 struct drm_crtc_state
*old_state
)
513 struct dpu_crtc
*dpu_crtc
;
514 struct dpu_crtc_state
*cstate
;
515 struct drm_encoder
*encoder
;
516 struct drm_device
*dev
;
518 struct dpu_crtc_smmu_state_data
*smmu_state
;
521 DPU_ERROR("invalid crtc\n");
525 if (!crtc
->state
->enable
) {
526 DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
527 crtc
->base
.id
, crtc
->state
->enable
);
531 DPU_DEBUG("crtc%d\n", crtc
->base
.id
);
533 dpu_crtc
= to_dpu_crtc(crtc
);
534 cstate
= to_dpu_crtc_state(crtc
->state
);
536 smmu_state
= &dpu_crtc
->smmu_state
;
538 if (!cstate
->num_mixers
) {
539 _dpu_crtc_setup_mixers(crtc
);
540 _dpu_crtc_setup_lm_bounds(crtc
, crtc
->state
);
543 if (dpu_crtc
->event
) {
544 WARN_ON(dpu_crtc
->event
);
546 spin_lock_irqsave(&dev
->event_lock
, flags
);
547 dpu_crtc
->event
= crtc
->state
->event
;
548 crtc
->state
->event
= NULL
;
549 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
552 /* encoder will trigger pending mask now */
553 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
554 dpu_encoder_trigger_kickoff_pending(encoder
);
557 * If no mixers have been allocated in dpu_crtc_atomic_check(),
558 * it means we are trying to flush a CRTC whose state is disabled:
559 * nothing else needs to be done.
561 if (unlikely(!cstate
->num_mixers
))
564 _dpu_crtc_blend_setup(crtc
);
567 * PP_DONE irq is only used by command mode for now.
568 * It is better to request pending before FLUSH and START trigger
569 * to make sure no pp_done irq missed.
570 * This is safe because no pp_done will happen before SW trigger
575 static void dpu_crtc_atomic_flush(struct drm_crtc
*crtc
,
576 struct drm_crtc_state
*old_crtc_state
)
578 struct dpu_crtc
*dpu_crtc
;
579 struct drm_device
*dev
;
580 struct drm_plane
*plane
;
581 struct msm_drm_private
*priv
;
582 struct msm_drm_thread
*event_thread
;
584 struct dpu_crtc_state
*cstate
;
586 if (!crtc
->state
->enable
) {
587 DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
588 crtc
->base
.id
, crtc
->state
->enable
);
592 DPU_DEBUG("crtc%d\n", crtc
->base
.id
);
594 dpu_crtc
= to_dpu_crtc(crtc
);
595 cstate
= to_dpu_crtc_state(crtc
->state
);
597 priv
= dev
->dev_private
;
599 if (crtc
->index
>= ARRAY_SIZE(priv
->event_thread
)) {
600 DPU_ERROR("invalid crtc index[%d]\n", crtc
->index
);
604 event_thread
= &priv
->event_thread
[crtc
->index
];
606 if (dpu_crtc
->event
) {
607 DPU_DEBUG("already received dpu_crtc->event\n");
609 spin_lock_irqsave(&dev
->event_lock
, flags
);
610 dpu_crtc
->event
= crtc
->state
->event
;
611 crtc
->state
->event
= NULL
;
612 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
616 * If no mixers has been allocated in dpu_crtc_atomic_check(),
617 * it means we are trying to flush a CRTC whose state is disabled:
618 * nothing else needs to be done.
620 if (unlikely(!cstate
->num_mixers
))
624 * For planes without commit update, drm framework will not add
625 * those planes to current state since hardware update is not
626 * required. However, if those planes were power collapsed since
627 * last commit cycle, driver has to restore the hardware state
628 * of those planes explicitly here prior to plane flush.
630 drm_atomic_crtc_for_each_plane(plane
, crtc
)
631 dpu_plane_restore(plane
);
633 /* update performance setting before crtc kickoff */
634 dpu_core_perf_crtc_update(crtc
, 1, false);
637 * Final plane updates: Give each plane a chance to complete all
638 * required writes/flushing before crtc's "flush
639 * everything" call below.
641 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
642 if (dpu_crtc
->smmu_state
.transition_error
)
643 dpu_plane_set_error(plane
, true);
644 dpu_plane_flush(plane
);
647 /* Kickoff will be scheduled by outer layer */
651 * dpu_crtc_destroy_state - state destroy hook
653 * @state: CRTC state object to release
655 static void dpu_crtc_destroy_state(struct drm_crtc
*crtc
,
656 struct drm_crtc_state
*state
)
658 struct dpu_crtc
*dpu_crtc
;
659 struct dpu_crtc_state
*cstate
;
661 if (!crtc
|| !state
) {
662 DPU_ERROR("invalid argument(s)\n");
666 dpu_crtc
= to_dpu_crtc(crtc
);
667 cstate
= to_dpu_crtc_state(state
);
669 DPU_DEBUG("crtc%d\n", crtc
->base
.id
);
671 __drm_atomic_helper_crtc_destroy_state(state
);
676 static int _dpu_crtc_wait_for_frame_done(struct drm_crtc
*crtc
)
678 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
681 if (!atomic_read(&dpu_crtc
->frame_pending
)) {
682 DPU_DEBUG("no frames pending\n");
686 DPU_ATRACE_BEGIN("frame done completion wait");
687 ret
= wait_for_completion_timeout(&dpu_crtc
->frame_done_comp
,
688 msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT
));
690 DRM_ERROR("frame done wait timed out, ret:%d\n", ret
);
693 DPU_ATRACE_END("frame done completion wait");
698 void dpu_crtc_commit_kickoff(struct drm_crtc
*crtc
, bool async
)
700 struct drm_encoder
*encoder
;
701 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
702 struct dpu_kms
*dpu_kms
= _dpu_crtc_get_kms(crtc
);
703 struct dpu_crtc_state
*cstate
= to_dpu_crtc_state(crtc
->state
);
707 * If no mixers has been allocated in dpu_crtc_atomic_check(),
708 * it means we are trying to start a CRTC whose state is disabled:
709 * nothing else needs to be done.
711 if (unlikely(!cstate
->num_mixers
))
714 DPU_ATRACE_BEGIN("crtc_commit");
717 * Encoder will flush/start now, unless it has a tx pending. If so, it
718 * may delay and flush at an irq event (e.g. ppdone)
720 drm_for_each_encoder_mask(encoder
, crtc
->dev
,
721 crtc
->state
->encoder_mask
) {
722 struct dpu_encoder_kickoff_params params
= { 0 };
723 dpu_encoder_prepare_for_kickoff(encoder
, ¶ms
, async
);
728 /* wait for frame_event_done completion */
729 DPU_ATRACE_BEGIN("wait_for_frame_done_event");
730 ret
= _dpu_crtc_wait_for_frame_done(crtc
);
731 DPU_ATRACE_END("wait_for_frame_done_event");
733 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
735 atomic_read(&dpu_crtc
->frame_pending
));
739 if (atomic_inc_return(&dpu_crtc
->frame_pending
) == 1) {
740 /* acquire bandwidth and other resources */
741 DPU_DEBUG("crtc%d first commit\n", crtc
->base
.id
);
743 DPU_DEBUG("crtc%d commit\n", crtc
->base
.id
);
745 dpu_crtc
->play_count
++;
748 dpu_vbif_clear_errors(dpu_kms
);
750 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
751 dpu_encoder_kickoff(encoder
, async
);
755 reinit_completion(&dpu_crtc
->frame_done_comp
);
756 DPU_ATRACE_END("crtc_commit");
759 static void dpu_crtc_reset(struct drm_crtc
*crtc
)
761 struct dpu_crtc_state
*cstate
;
764 dpu_crtc_destroy_state(crtc
, crtc
->state
);
766 crtc
->state
= kzalloc(sizeof(*cstate
), GFP_KERNEL
);
768 crtc
->state
->crtc
= crtc
;
772 * dpu_crtc_duplicate_state - state duplicate hook
773 * @crtc: Pointer to drm crtc structure
774 * @Returns: Pointer to new drm_crtc_state structure
776 static struct drm_crtc_state
*dpu_crtc_duplicate_state(struct drm_crtc
*crtc
)
778 struct dpu_crtc
*dpu_crtc
;
779 struct dpu_crtc_state
*cstate
, *old_cstate
;
781 if (!crtc
|| !crtc
->state
) {
782 DPU_ERROR("invalid argument(s)\n");
786 dpu_crtc
= to_dpu_crtc(crtc
);
787 old_cstate
= to_dpu_crtc_state(crtc
->state
);
788 cstate
= kmemdup(old_cstate
, sizeof(*old_cstate
), GFP_KERNEL
);
790 DPU_ERROR("failed to allocate state\n");
794 /* duplicate base helper */
795 __drm_atomic_helper_crtc_duplicate_state(crtc
, &cstate
->base
);
797 return &cstate
->base
;
800 static void dpu_crtc_disable(struct drm_crtc
*crtc
,
801 struct drm_crtc_state
*old_crtc_state
)
803 struct dpu_crtc
*dpu_crtc
;
804 struct dpu_crtc_state
*cstate
;
805 struct drm_display_mode
*mode
;
806 struct drm_encoder
*encoder
;
807 struct msm_drm_private
*priv
;
810 if (!crtc
|| !crtc
->dev
|| !crtc
->dev
->dev_private
|| !crtc
->state
) {
811 DPU_ERROR("invalid crtc\n");
814 dpu_crtc
= to_dpu_crtc(crtc
);
815 cstate
= to_dpu_crtc_state(crtc
->state
);
816 mode
= &cstate
->base
.adjusted_mode
;
817 priv
= crtc
->dev
->dev_private
;
819 DRM_DEBUG_KMS("crtc%d\n", crtc
->base
.id
);
821 /* Disable/save vblank irq handling */
822 drm_crtc_vblank_off(crtc
);
824 drm_for_each_encoder_mask(encoder
, crtc
->dev
,
825 old_crtc_state
->encoder_mask
)
826 dpu_encoder_assign_crtc(encoder
, NULL
);
828 /* wait for frame_event_done completion */
829 if (_dpu_crtc_wait_for_frame_done(crtc
))
830 DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
832 atomic_read(&dpu_crtc
->frame_pending
));
834 trace_dpu_crtc_disable(DRMID(crtc
), false, dpu_crtc
);
835 dpu_crtc
->enabled
= false;
837 if (atomic_read(&dpu_crtc
->frame_pending
)) {
838 trace_dpu_crtc_disable_frame_pending(DRMID(crtc
),
839 atomic_read(&dpu_crtc
->frame_pending
));
840 dpu_core_perf_crtc_release_bw(crtc
);
841 atomic_set(&dpu_crtc
->frame_pending
, 0);
844 dpu_core_perf_crtc_update(crtc
, 0, true);
846 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
847 dpu_encoder_register_frame_event_callback(encoder
, NULL
, NULL
);
849 memset(cstate
->mixers
, 0, sizeof(cstate
->mixers
));
850 cstate
->num_mixers
= 0;
852 /* disable clk & bw control until clk & bw properties are set */
853 cstate
->bw_control
= false;
854 cstate
->bw_split_vote
= false;
856 if (crtc
->state
->event
&& !crtc
->state
->active
) {
857 spin_lock_irqsave(&crtc
->dev
->event_lock
, flags
);
858 drm_crtc_send_vblank_event(crtc
, crtc
->state
->event
);
859 crtc
->state
->event
= NULL
;
860 spin_unlock_irqrestore(&crtc
->dev
->event_lock
, flags
);
863 pm_runtime_put_sync(crtc
->dev
->dev
);
866 static void dpu_crtc_enable(struct drm_crtc
*crtc
,
867 struct drm_crtc_state
*old_crtc_state
)
869 struct dpu_crtc
*dpu_crtc
;
870 struct drm_encoder
*encoder
;
871 struct msm_drm_private
*priv
;
873 if (!crtc
|| !crtc
->dev
|| !crtc
->dev
->dev_private
) {
874 DPU_ERROR("invalid crtc\n");
877 priv
= crtc
->dev
->dev_private
;
879 pm_runtime_get_sync(crtc
->dev
->dev
);
881 DRM_DEBUG_KMS("crtc%d\n", crtc
->base
.id
);
882 dpu_crtc
= to_dpu_crtc(crtc
);
884 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
885 dpu_encoder_register_frame_event_callback(encoder
,
886 dpu_crtc_frame_event_cb
, (void *)crtc
);
888 trace_dpu_crtc_enable(DRMID(crtc
), true, dpu_crtc
);
889 dpu_crtc
->enabled
= true;
891 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
)
892 dpu_encoder_assign_crtc(encoder
, crtc
);
894 /* Enable/restore vblank irq handling */
895 drm_crtc_vblank_on(crtc
);
899 struct dpu_plane_state
*dpu_pstate
;
900 const struct drm_plane_state
*drm_pstate
;
905 static int dpu_crtc_atomic_check(struct drm_crtc
*crtc
,
906 struct drm_crtc_state
*state
)
908 struct dpu_crtc
*dpu_crtc
;
909 struct plane_state
*pstates
;
910 struct dpu_crtc_state
*cstate
;
912 const struct drm_plane_state
*pstate
;
913 struct drm_plane
*plane
;
914 struct drm_display_mode
*mode
;
916 int cnt
= 0, rc
= 0, mixer_width
, i
, z_pos
;
918 struct dpu_multirect_plane_states multirect_plane
[DPU_STAGE_MAX
* 2];
919 int multirect_count
= 0;
920 const struct drm_plane_state
*pipe_staged
[SSPP_MAX
];
921 int left_zpos_cnt
= 0, right_zpos_cnt
= 0;
922 struct drm_rect crtc_rect
= { 0 };
925 DPU_ERROR("invalid crtc\n");
929 pstates
= kzalloc(sizeof(*pstates
) * DPU_STAGE_MAX
* 4, GFP_KERNEL
);
931 dpu_crtc
= to_dpu_crtc(crtc
);
932 cstate
= to_dpu_crtc_state(state
);
934 if (!state
->enable
|| !state
->active
) {
935 DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
936 crtc
->base
.id
, state
->enable
, state
->active
);
940 mode
= &state
->adjusted_mode
;
941 DPU_DEBUG("%s: check", dpu_crtc
->name
);
943 /* force a full mode set if active state changed */
944 if (state
->active_changed
)
945 state
->mode_changed
= true;
947 memset(pipe_staged
, 0, sizeof(pipe_staged
));
949 mixer_width
= mode
->hdisplay
/ cstate
->num_mixers
;
951 _dpu_crtc_setup_lm_bounds(crtc
, state
);
953 crtc_rect
.x2
= mode
->hdisplay
;
954 crtc_rect
.y2
= mode
->vdisplay
;
956 /* get plane state for all drm planes associated with crtc state */
957 drm_atomic_crtc_state_for_each_plane_state(plane
, pstate
, state
) {
958 struct drm_rect dst
, clip
= crtc_rect
;
960 if (IS_ERR_OR_NULL(pstate
)) {
961 rc
= PTR_ERR(pstate
);
962 DPU_ERROR("%s: failed to get plane%d state, %d\n",
963 dpu_crtc
->name
, plane
->base
.id
, rc
);
966 if (cnt
>= DPU_STAGE_MAX
* 4)
969 pstates
[cnt
].dpu_pstate
= to_dpu_plane_state(pstate
);
970 pstates
[cnt
].drm_pstate
= pstate
;
971 pstates
[cnt
].stage
= pstate
->normalized_zpos
;
972 pstates
[cnt
].pipe_id
= dpu_plane_pipe(plane
);
974 if (pipe_staged
[pstates
[cnt
].pipe_id
]) {
975 multirect_plane
[multirect_count
].r0
=
976 pipe_staged
[pstates
[cnt
].pipe_id
];
977 multirect_plane
[multirect_count
].r1
= pstate
;
980 pipe_staged
[pstates
[cnt
].pipe_id
] = NULL
;
982 pipe_staged
[pstates
[cnt
].pipe_id
] = pstate
;
987 dst
= drm_plane_state_dest(pstate
);
988 if (!drm_rect_intersect(&clip
, &dst
)) {
989 DPU_ERROR("invalid vertical/horizontal destination\n");
990 DPU_ERROR("display: " DRM_RECT_FMT
" plane: "
991 DRM_RECT_FMT
"\n", DRM_RECT_ARG(&crtc_rect
),
998 for (i
= 1; i
< SSPP_MAX
; i
++) {
999 if (pipe_staged
[i
]) {
1000 dpu_plane_clear_multirect(pipe_staged
[i
]);
1002 if (is_dpu_plane_virtual(pipe_staged
[i
]->plane
)) {
1004 "r1 only virt plane:%d not supported\n",
1005 pipe_staged
[i
]->plane
->base
.id
);
1013 for (i
= 0; i
< cnt
; i
++) {
1014 /* reset counts at every new blend stage */
1015 if (pstates
[i
].stage
!= z_pos
) {
1018 z_pos
= pstates
[i
].stage
;
1021 /* verify z_pos setting before using it */
1022 if (z_pos
>= DPU_STAGE_MAX
- DPU_STAGE_0
) {
1023 DPU_ERROR("> %d plane stages assigned\n",
1024 DPU_STAGE_MAX
- DPU_STAGE_0
);
1027 } else if (pstates
[i
].drm_pstate
->crtc_x
< mixer_width
) {
1028 if (left_zpos_cnt
== 2) {
1029 DPU_ERROR("> 2 planes @ stage %d on left\n",
1037 if (right_zpos_cnt
== 2) {
1038 DPU_ERROR("> 2 planes @ stage %d on right\n",
1046 pstates
[i
].dpu_pstate
->stage
= z_pos
+ DPU_STAGE_0
;
1047 DPU_DEBUG("%s: zpos %d", dpu_crtc
->name
, z_pos
);
1050 for (i
= 0; i
< multirect_count
; i
++) {
1051 if (dpu_plane_validate_multirect_v2(&multirect_plane
[i
])) {
1053 "multirect validation failed for planes (%d - %d)\n",
1054 multirect_plane
[i
].r0
->plane
->base
.id
,
1055 multirect_plane
[i
].r1
->plane
->base
.id
);
1061 rc
= dpu_core_perf_crtc_check(crtc
, state
);
1063 DPU_ERROR("crtc%d failed performance check %d\n",
1068 /* validate source split:
1069 * use pstates sorted by stage to check planes on same stage
1070 * we assume that all pipes are in source split so its valid to compare
1071 * without taking into account left/right mixer placement
1073 for (i
= 1; i
< cnt
; i
++) {
1074 struct plane_state
*prv_pstate
, *cur_pstate
;
1075 struct drm_rect left_rect
, right_rect
;
1076 int32_t left_pid
, right_pid
;
1079 prv_pstate
= &pstates
[i
- 1];
1080 cur_pstate
= &pstates
[i
];
1081 if (prv_pstate
->stage
!= cur_pstate
->stage
)
1084 stage
= cur_pstate
->stage
;
1086 left_pid
= prv_pstate
->dpu_pstate
->base
.plane
->base
.id
;
1087 left_rect
= drm_plane_state_dest(prv_pstate
->drm_pstate
);
1089 right_pid
= cur_pstate
->dpu_pstate
->base
.plane
->base
.id
;
1090 right_rect
= drm_plane_state_dest(cur_pstate
->drm_pstate
);
1092 if (right_rect
.x1
< left_rect
.x1
) {
1093 swap(left_pid
, right_pid
);
1094 swap(left_rect
, right_rect
);
1098 * - planes are enumerated in pipe-priority order such that
1099 * planes with lower drm_id must be left-most in a shared
1100 * blend-stage when using source split.
1101 * - planes in source split must be contiguous in width
1102 * - planes in source split must have same dest yoff and height
1104 if (right_pid
< left_pid
) {
1106 "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
1107 stage
, left_pid
, right_pid
);
1110 } else if (right_rect
.x1
!= drm_rect_width(&left_rect
)) {
1111 DPU_ERROR("non-contiguous coordinates for src split. "
1112 "stage: %d left: " DRM_RECT_FMT
" right: "
1113 DRM_RECT_FMT
"\n", stage
,
1114 DRM_RECT_ARG(&left_rect
),
1115 DRM_RECT_ARG(&right_rect
));
1118 } else if (left_rect
.y1
!= right_rect
.y1
||
1119 drm_rect_height(&left_rect
) != drm_rect_height(&right_rect
)) {
1120 DPU_ERROR("source split at stage: %d. invalid "
1121 "yoff/height: left: " DRM_RECT_FMT
" right: "
1122 DRM_RECT_FMT
"\n", stage
,
1123 DRM_RECT_ARG(&left_rect
),
1124 DRM_RECT_ARG(&right_rect
));
1135 int dpu_crtc_vblank(struct drm_crtc
*crtc
, bool en
)
1137 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
1138 struct drm_encoder
*enc
;
1140 trace_dpu_crtc_vblank(DRMID(&dpu_crtc
->base
), en
, dpu_crtc
);
1143 * Normally we would iterate through encoder_mask in crtc state to find
1144 * attached encoders. In this case, we might be disabling vblank _after_
1145 * encoder_mask has been cleared.
1147 * Instead, we "assign" a crtc to the encoder in enable and clear it in
1148 * disable (which is also after encoder_mask is cleared). So instead of
1149 * using encoder mask, we'll ask the encoder to toggle itself iff it's
1150 * currently assigned to our crtc.
1152 * Note also that this function cannot be called while crtc is disabled
1153 * since we use drm_crtc_vblank_on/off. So we don't need to worry
1154 * about the assigned crtcs being inconsistent with the current state
1155 * (which means no need to worry about modeset locks).
1157 list_for_each_entry(enc
, &crtc
->dev
->mode_config
.encoder_list
, head
) {
1158 trace_dpu_crtc_vblank_enable(DRMID(crtc
), DRMID(enc
), en
,
1161 dpu_encoder_toggle_vblank_for_crtc(enc
, crtc
, en
);
1167 #ifdef CONFIG_DEBUG_FS
1168 static int _dpu_debugfs_status_show(struct seq_file
*s
, void *data
)
1170 struct dpu_crtc
*dpu_crtc
;
1171 struct dpu_plane_state
*pstate
= NULL
;
1172 struct dpu_crtc_mixer
*m
;
1174 struct drm_crtc
*crtc
;
1175 struct drm_plane
*plane
;
1176 struct drm_display_mode
*mode
;
1177 struct drm_framebuffer
*fb
;
1178 struct drm_plane_state
*state
;
1179 struct dpu_crtc_state
*cstate
;
1183 dpu_crtc
= s
->private;
1184 crtc
= &dpu_crtc
->base
;
1186 drm_modeset_lock_all(crtc
->dev
);
1187 cstate
= to_dpu_crtc_state(crtc
->state
);
1189 mode
= &crtc
->state
->adjusted_mode
;
1190 out_width
= mode
->hdisplay
/ cstate
->num_mixers
;
1192 seq_printf(s
, "crtc:%d width:%d height:%d\n", crtc
->base
.id
,
1193 mode
->hdisplay
, mode
->vdisplay
);
1197 for (i
= 0; i
< cstate
->num_mixers
; ++i
) {
1198 m
= &cstate
->mixers
[i
];
1200 seq_printf(s
, "\tmixer[%d] has no lm\n", i
);
1201 else if (!m
->lm_ctl
)
1202 seq_printf(s
, "\tmixer[%d] has no ctl\n", i
);
1204 seq_printf(s
, "\tmixer:%d ctl:%d width:%d height:%d\n",
1205 m
->hw_lm
->idx
- LM_0
, m
->lm_ctl
->idx
- CTL_0
,
1206 out_width
, mode
->vdisplay
);
1211 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
1212 pstate
= to_dpu_plane_state(plane
->state
);
1213 state
= plane
->state
;
1215 if (!pstate
|| !state
)
1218 seq_printf(s
, "\tplane:%u stage:%d\n", plane
->base
.id
,
1221 if (plane
->state
->fb
) {
1222 fb
= plane
->state
->fb
;
1224 seq_printf(s
, "\tfb:%d image format:%4.4s wxh:%ux%u ",
1225 fb
->base
.id
, (char *) &fb
->format
->format
,
1226 fb
->width
, fb
->height
);
1227 for (i
= 0; i
< ARRAY_SIZE(fb
->format
->cpp
); ++i
)
1228 seq_printf(s
, "cpp[%d]:%u ",
1229 i
, fb
->format
->cpp
[i
]);
1230 seq_puts(s
, "\n\t");
1232 seq_printf(s
, "modifier:%8llu ", fb
->modifier
);
1236 for (i
= 0; i
< ARRAY_SIZE(fb
->pitches
); i
++)
1237 seq_printf(s
, "pitches[%d]:%8u ", i
,
1242 for (i
= 0; i
< ARRAY_SIZE(fb
->offsets
); i
++)
1243 seq_printf(s
, "offsets[%d]:%8u ", i
,
1248 seq_printf(s
, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
1249 state
->src_x
, state
->src_y
, state
->src_w
, state
->src_h
);
1251 seq_printf(s
, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
1252 state
->crtc_x
, state
->crtc_y
, state
->crtc_w
,
1254 seq_printf(s
, "\tmultirect: mode: %d index: %d\n",
1255 pstate
->multirect_mode
, pstate
->multirect_index
);
1259 if (dpu_crtc
->vblank_cb_count
) {
1260 ktime_t diff
= ktime_sub(ktime_get(), dpu_crtc
->vblank_cb_time
);
1261 s64 diff_ms
= ktime_to_ms(diff
);
1262 s64 fps
= diff_ms
? div_s64(
1263 dpu_crtc
->vblank_cb_count
* 1000, diff_ms
) : 0;
1266 "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
1267 fps
, dpu_crtc
->vblank_cb_count
,
1268 ktime_to_ms(diff
), dpu_crtc
->play_count
);
1270 /* reset time & count for next measurement */
1271 dpu_crtc
->vblank_cb_count
= 0;
1272 dpu_crtc
->vblank_cb_time
= ktime_set(0, 0);
1275 drm_modeset_unlock_all(crtc
->dev
);
1280 static int _dpu_debugfs_status_open(struct inode
*inode
, struct file
*file
)
1282 return single_open(file
, _dpu_debugfs_status_show
, inode
->i_private
);
/*
 * DEFINE_DPU_DEBUGFS_SEQ_FOPS - generate single_open()-based debugfs fops
 * @__prefix: base name; expects a __prefix##_show(struct seq_file *, void *)
 *            and emits __prefix##_open plus a __prefix##_fops table.
 *
 * NOTE: the fops must supply .read = seq_read — without it, userspace
 * read() on the debugfs file fails even though open() succeeds.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
1298 static int dpu_crtc_debugfs_state_show(struct seq_file
*s
, void *v
)
1300 struct drm_crtc
*crtc
= (struct drm_crtc
*) s
->private;
1301 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
1304 seq_printf(s
, "client type: %d\n", dpu_crtc_get_client_type(crtc
));
1305 seq_printf(s
, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc
));
1306 seq_printf(s
, "core_clk_rate: %llu\n",
1307 dpu_crtc
->cur_perf
.core_clk_rate
);
1308 for (i
= DPU_CORE_PERF_DATA_BUS_ID_MNOC
;
1309 i
< DPU_CORE_PERF_DATA_BUS_ID_MAX
; i
++) {
1310 seq_printf(s
, "bw_ctl[%d]: %llu\n", i
,
1311 dpu_crtc
->cur_perf
.bw_ctl
[i
]);
1312 seq_printf(s
, "max_per_pipe_ib[%d]: %llu\n", i
,
1313 dpu_crtc
->cur_perf
.max_per_pipe_ib
[i
]);
1318 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state
);
1320 static int _dpu_crtc_init_debugfs(struct drm_crtc
*crtc
)
1322 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
1324 static const struct file_operations debugfs_status_fops
= {
1325 .open
= _dpu_debugfs_status_open
,
1327 .llseek
= seq_lseek
,
1328 .release
= single_release
,
1331 dpu_crtc
->debugfs_root
= debugfs_create_dir(dpu_crtc
->name
,
1332 crtc
->dev
->primary
->debugfs_root
);
1333 if (!dpu_crtc
->debugfs_root
)
1336 /* don't error check these */
1337 debugfs_create_file("status", 0400,
1338 dpu_crtc
->debugfs_root
,
1339 dpu_crtc
, &debugfs_status_fops
);
1340 debugfs_create_file("state", 0600,
1341 dpu_crtc
->debugfs_root
,
1343 &dpu_crtc_debugfs_state_fops
);
1348 static int _dpu_crtc_init_debugfs(struct drm_crtc
*crtc
)
1352 #endif /* CONFIG_DEBUG_FS */
/*
 * dpu_crtc_late_register - drm_crtc_funcs.late_register hook
 *
 * Called once userspace-visible registration is done; the only late
 * setup needed here is the debugfs hierarchy.
 */
static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}
1359 static void dpu_crtc_early_unregister(struct drm_crtc
*crtc
)
1361 struct dpu_crtc
*dpu_crtc
= to_dpu_crtc(crtc
);
1363 debugfs_remove_recursive(dpu_crtc
->debugfs_root
);
1366 static const struct drm_crtc_funcs dpu_crtc_funcs
= {
1367 .set_config
= drm_atomic_helper_set_config
,
1368 .destroy
= dpu_crtc_destroy
,
1369 .page_flip
= drm_atomic_helper_page_flip
,
1370 .reset
= dpu_crtc_reset
,
1371 .atomic_duplicate_state
= dpu_crtc_duplicate_state
,
1372 .atomic_destroy_state
= dpu_crtc_destroy_state
,
1373 .late_register
= dpu_crtc_late_register
,
1374 .early_unregister
= dpu_crtc_early_unregister
,
1377 static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs
= {
1378 .atomic_disable
= dpu_crtc_disable
,
1379 .atomic_enable
= dpu_crtc_enable
,
1380 .atomic_check
= dpu_crtc_atomic_check
,
1381 .atomic_begin
= dpu_crtc_atomic_begin
,
1382 .atomic_flush
= dpu_crtc_atomic_flush
,
1385 /* initialize crtc */
1386 struct drm_crtc
*dpu_crtc_init(struct drm_device
*dev
, struct drm_plane
*plane
,
1387 struct drm_plane
*cursor
)
1389 struct drm_crtc
*crtc
= NULL
;
1390 struct dpu_crtc
*dpu_crtc
= NULL
;
1391 struct msm_drm_private
*priv
= NULL
;
1392 struct dpu_kms
*kms
= NULL
;
1395 priv
= dev
->dev_private
;
1396 kms
= to_dpu_kms(priv
->kms
);
1398 dpu_crtc
= kzalloc(sizeof(*dpu_crtc
), GFP_KERNEL
);
1400 return ERR_PTR(-ENOMEM
);
1402 crtc
= &dpu_crtc
->base
;
1405 spin_lock_init(&dpu_crtc
->spin_lock
);
1406 atomic_set(&dpu_crtc
->frame_pending
, 0);
1408 init_completion(&dpu_crtc
->frame_done_comp
);
1410 INIT_LIST_HEAD(&dpu_crtc
->frame_event_list
);
1412 for (i
= 0; i
< ARRAY_SIZE(dpu_crtc
->frame_events
); i
++) {
1413 INIT_LIST_HEAD(&dpu_crtc
->frame_events
[i
].list
);
1414 list_add(&dpu_crtc
->frame_events
[i
].list
,
1415 &dpu_crtc
->frame_event_list
);
1416 kthread_init_work(&dpu_crtc
->frame_events
[i
].work
,
1417 dpu_crtc_frame_event_work
);
1420 drm_crtc_init_with_planes(dev
, crtc
, plane
, cursor
, &dpu_crtc_funcs
,
1423 drm_crtc_helper_add(crtc
, &dpu_crtc_helper_funcs
);
1425 /* save user friendly CRTC name for later */
1426 snprintf(dpu_crtc
->name
, DPU_CRTC_NAME_SIZE
, "crtc%u", crtc
->base
.id
);
1428 /* initialize event handling */
1429 spin_lock_init(&dpu_crtc
->event_lock
);
1431 DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc
->name
);