/*
 * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <drm/drm_mode.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_rect.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

#define DPU_DRM_BLEND_OP_NOT_DEFINED	0
#define DPU_DRM_BLEND_OP_OPAQUE		1
#define DPU_DRM_BLEND_OP_PREMULTIPLIED	2
#define DPU_DRM_BLEND_OP_COVERAGE	3
#define DPU_DRM_BLEND_OP_MAX		4

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER	0
#define RIGHT_MIXER	1

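/**
 * _dpu_crtc_get_kms - get the dpu_kms instance backing a crtc
 * @crtc: Pointer to drm crtc structure
 */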
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

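/**
 * dpu_crtc_destroy - destroy hook, cleans up the crtc and frees its memory
 * @crtc: Pointer to drm crtc structure
 */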
static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;

	DPU_DEBUG("\n");

	if (!crtc)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

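/**
 * _dpu_crtc_setup_blend_cfg - configure blending for one staged plane
 * @mixer: Pointer to the crtc mixer being programmed
 * @pstate: Plane state staged on this mixer
 * @format: Framebuffer format of the staged plane
 */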
static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	struct drm_format_name_buf format_name;

	/* default to opaque blending */
	blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
		DPU_BLEND_BG_ALPHA_BG_CONST;

	if (format->alpha_enable) {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_INV_ALPHA;
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				0xFF, 0, blend_op);

	DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
		drm_get_format_name(format->base.pixel_format, &format_name),
		format->alpha_enable, blend_op);
}

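/**
 * _dpu_crtc_program_lm_output_roi - program the output ROI on each visible
 *	layer mixer assigned to this crtc
 * @crtc: Pointer to drm crtc structure
 */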
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	dpu_crtc = to_dpu_crtc(crtc);
	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

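/**
 * _dpu_crtc_blend_setup_mixer - stage all active planes into the stage
 *	config and update per-mixer blend and flush settings
 * @crtc: Pointer to drm crtc structure
 * @dpu_crtc: Pointer to dpu crtc structure
 * @mixer: Pointer to the array of mixers assigned to this crtc
 */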
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;
	struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);

		DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	int i;

	DPU_DEBUG("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		if (!mixer[i].hw_lm || !mixer[i].lm_ctl) {
			DPU_ERROR("invalid lm or ctl assigned to mixer\n");
			return;
		}
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&dpu_crtc->stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * @crtc: Pointer to drm crtc structure
 *
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt shall signal them.
 * However, PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events that were requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and are cached in dpu_crtc->event.
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	if (!crtc || !crtc->dev) {
		DPU_ERROR("invalid crtc\n");
		return INTF_MODE_NONE;
	}

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

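/**
 * dpu_crtc_vblank_callback - vblank interrupt callback; updates the vblank
 *	statistics, completes any pending page flip and forwards the vblank
 *	to the drm core
 * @crtc: Pointer to drm crtc structure
 */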
void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;
	_dpu_crtc_complete_flip(crtc);
	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_release_bw_unlocked(struct drm_crtc *crtc)
{
	int ret = 0;
	struct drm_modeset_acquire_ctx ctx;

	DRM_MODESET_LOCK_ALL_BEGIN(crtc->dev, ctx, 0, ret);
	dpu_core_perf_crtc_release_bw(crtc);
	DRM_MODESET_LOCK_ALL_END(ctx, ret);
	if (ret)
		DRM_ERROR("Failed to acquire modeset locks to release bw, %d\n",
			  ret);
}

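/**
 * dpu_crtc_frame_event_work - kthread worker that processes one queued frame
 *	event; releases bandwidth once all pending frames are done and returns
 *	the event to the free list
 * @work: Pointer to the kthread work embedded in a dpu_crtc_frame_event
 */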
static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* this should not happen */
			DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
					crtc->base.id,
					fevent->event,
					ktime_to_ns(fevent->ts),
					atomic_read(&dpu_crtc->frame_pending));
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_crtc_release_bw_unlocked(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
			dpu_core_perf_crtc_update(crtc, 0, false);

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * events from different contexts - IRQ, user thread, commit_thread, etc. Each
 * event should be carefully reviewed and should be processed in the proper
 * task context to avoid scheduling delay or to properly manage the IRQ
 * context's bottom half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	trace_dpu_crtc_complete_commit(DRMID(crtc));
}

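/**
 * _dpu_crtc_setup_mixer_for_encoder - collect the layer mixers and ctls that
 *	the resource manager reserved for this encoder and attach them to the
 *	crtc state
 * @crtc: Pointer to drm crtc structure
 * @enc: Pointer to the drm encoder whose reservations are collected
 */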
static void _dpu_crtc_setup_mixer_for_encoder(
		struct drm_crtc *crtc,
		struct drm_encoder *enc)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_rm *rm = &dpu_kms->rm;
	struct dpu_crtc_mixer *mixer;
	struct dpu_hw_ctl *last_valid_ctl = NULL;
	int i;
	struct dpu_rm_hw_iter lm_iter, ctl_iter;

	dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
	dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);

	/* Set up all the mixers and ctls reserved by this encoder */
	for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
		mixer = &cstate->mixers[i];

		if (!dpu_rm_get_hw(rm, &lm_iter))
			break;
		mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;

		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
		if (!dpu_rm_get_hw(rm, &ctl_iter)) {
			DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
					mixer->hw_lm->idx - LM_0);
			mixer->lm_ctl = last_valid_ctl;
		} else {
			mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
			last_valid_ctl = mixer->lm_ctl;
		}

		/* Shouldn't happen, mixers are always >= ctls */
		if (!mixer->lm_ctl) {
			DPU_ERROR("no valid ctls found for lm %d\n",
					mixer->hw_lm->idx - LM_0);
			return;
		}

		mixer->encoder = enc;

		cstate->num_mixers++;
		DPU_DEBUG("setup mixer %d: lm %d\n",
				i, mixer->hw_lm->idx - LM_0);
		DPU_DEBUG("setup mixer %d: ctl %d\n",
				i, mixer->lm_ctl->idx - CTL_0);
	}
}

static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
{
	struct drm_encoder *enc;

	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* Check for mixers on all encoders attached to this crtc */
	drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
		_dpu_crtc_setup_mixer_for_encoder(crtc, enc);
}

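/**
 * _dpu_crtc_setup_lm_bounds - split the adjusted mode horizontally across
 *	the assigned mixers and record each mixer's bounds rectangle
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to the crtc state being checked or applied
 */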
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];

		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}

	drm_mode_debug_printmodeline(adj_mode);
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_crtc_state *old_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	unsigned long flags;
	struct dpu_crtc_smmu_state_data *smmu_state;

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	smmu_state = &dpu_crtc->smmu_state;

	if (!cstate->num_mixers) {
		_dpu_crtc_setup_mixers(crtc);
		_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
	}

	if (dpu_crtc->event) {
		WARN_ON(dpu_crtc->event);
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before FLUSH and START trigger
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	struct msm_drm_thread *event_thread;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

	event_thread = &priv->event_thread[crtc->index];

	if (dpu_crtc->event) {
		DPU_DEBUG("already received dpu_crtc->event\n");
	} else {
		spin_lock_irqsave(&dev->event_lock, flags);
		dpu_crtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/*
	 * For planes without commit update, the drm framework will not add
	 * those planes to the current state since hardware update is not
	 * required. However, if those planes were power collapsed since the
	 * last commit cycle, the driver has to restore the hardware state
	 * of those planes explicitly here prior to plane flush.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		dpu_plane_restore(plane);

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before the crtc's "flush everything"
	 * call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by the outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;

	if (!crtc || !state) {
		DPU_ERROR("invalid argument(s)\n");
		return;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	DPU_DEBUG("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

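/**
 * _dpu_crtc_wait_for_frame_done - block until the frame done event is
 *	signaled or DPU_FRAME_DONE_TIMEOUT expires
 * @crtc: Pointer to drm crtc structure
 */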
static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DPU_DEBUG("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

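/**
 * dpu_crtc_commit_kickoff - trigger the hardware flush/start for a commit
 * @crtc: Pointer to drm crtc structure
 * @async: true if the kickoff should not wait for the previous frame's
 *	frame done event before starting the next one
 */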
void dpu_crtc_commit_kickoff(struct drm_crtc *crtc, bool async)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	int ret;

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		struct dpu_encoder_kickoff_params params = { 0 };

		dpu_encoder_prepare_for_kickoff(encoder, &params, async);
	}

	if (!async) {
		/* wait for frame_event_done completion */
		DPU_ATRACE_BEGIN("wait_for_frame_done_event");
		ret = _dpu_crtc_wait_for_frame_done(crtc);
		DPU_ATRACE_END("wait_for_frame_done_event");
		if (ret) {
			DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
					crtc->base.id,
					atomic_read(&dpu_crtc->frame_pending));
			goto end;
		}

		if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
			/* acquire bandwidth and other resources */
			DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
		} else {
			DPU_DEBUG("crtc%d commit\n", crtc->base.id);
		}

		dpu_crtc->play_count++;
	}

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder, async);

end:
	if (!async)
		reinit_completion(&dpu_crtc->frame_done_comp);
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate;

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	crtc->state = kzalloc(sizeof(*cstate), GFP_KERNEL);
	if (crtc->state)
		crtc->state->crtc = crtc;
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 *
 * Returns: Pointer to new drm_crtc_state structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate, *old_cstate;

	if (!crtc || !crtc->state) {
		DPU_ERROR("invalid argument(s)\n");
		return NULL;
	}

	dpu_crtc = to_dpu_crtc(crtc);
	old_cstate = to_dpu_crtc_state(crtc->state);
	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_crtc_state *cstate;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	unsigned long flags;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	mode = &cstate->base.adjusted_mode;
	priv = crtc->dev->dev_private;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, NULL);

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
		struct drm_crtc_state *old_crtc_state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;

	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	priv = crtc->dev->dev_private;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
	dpu_crtc = to_dpu_crtc(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

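/**
 * dpu_crtc_atomic_check - atomic check hook; validates that staged planes
 *	fit within the crtc, that no more than two planes share a blend stage
 *	per mixer half, that multirect pairings and source split placement
 *	are legal, and that the crtc passes its performance check
 * @crtc: Pointer to drm crtc structure
 * @state: Pointer to the new crtc state to validate
 */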
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct plane_state *pstates;
	struct dpu_crtc_state *cstate;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };

	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
	if (!pstates)
		return -ENOMEM;

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(state);

	if (!state->enable || !state->active) {
		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, state->enable, state->active);
		goto end;
	}

	mode = &state->adjusted_mode;
	DPU_DEBUG("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (state->active_changed)
		state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	mixer_width = mode->hdisplay / cstate->num_mixers;

	_dpu_crtc_setup_lm_bounds(crtc, state);

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
					DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DPU_DEBUG("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	rc = dpu_core_perf_crtc_check(crtc, state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/*
	 * validate source split:
	 * use pstates sorted by stage to check planes on the same stage;
	 * we assume that all pipes are in source split, so it's valid to
	 * compare without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/*
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
			mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		if (!m->hw_lm)
			seq_printf(s, "\tmixer[%d] has no lm\n", i);
		else if (!m->lm_ctl)
			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
		else
			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
				m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
				out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
						fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
						fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
{
	return single_open(file, _dpu_debugfs_status_show, inode->i_private);
}

#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int i;

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	for (i = DPU_CORE_PERF_DATA_BUS_ID_MNOC;
			i < DPU_CORE_PERF_DATA_BUS_ID_MAX; i++) {
		seq_printf(s, "bw_ctl[%d]: %llu\n", i,
				dpu_crtc->cur_perf.bw_ctl[i]);
		seq_printf(s, "max_per_pipe_ib[%d]: %llu\n", i,
				dpu_crtc->cur_perf.max_per_pipe_ib[i]);
	}

	return 0;
}
DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	static const struct file_operations debugfs_status_fops = {
		.open =		_dpu_debugfs_status_open,
		.read =		seq_read,
		.llseek =	seq_lseek,
		.release =	single_release,
	};

	dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);
	if (!dpu_crtc->debugfs_root)
		return -ENOMEM;

	/* don't error check these */
	debugfs_create_file("status", 0400,
			dpu_crtc->debugfs_root,
			dpu_crtc, &debugfs_status_fops);
	debugfs_create_file("state", 0600,
			dpu_crtc->debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	debugfs_remove_recursive(dpu_crtc->debugfs_root);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.late_register = dpu_crtc_late_register,
	.early_unregister = dpu_crtc_early_unregister,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}