1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
8 #include <linux/delay.h>
9 #include <linux/interconnect.h>
10 #include <linux/of_irq.h>
12 #include <drm/drm_debugfs.h>
13 #include <drm/drm_drv.h>
14 #include <drm/drm_file.h>
15 #include <drm/drm_vblank.h>
22 static int mdp5_hw_init(struct msm_kms
*kms
)
24 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
25 struct device
*dev
= &mdp5_kms
->pdev
->dev
;
28 pm_runtime_get_sync(dev
);
30 /* Magic unknown register writes:
32 * W VBIF:0x004 00000001 (mdss_mdp.c:839)
33 * W MDP5:0x2e0 0xe9 (mdss_mdp.c:839)
34 * W MDP5:0x2e4 0x55 (mdss_mdp.c:839)
35 * W MDP5:0x3ac 0xc0000ccc (mdss_mdp.c:839)
36 * W MDP5:0x3b4 0xc0000ccc (mdss_mdp.c:839)
37 * W MDP5:0x3bc 0xcccccc (mdss_mdp.c:839)
38 * W MDP5:0x4a8 0xcccc0c0 (mdss_mdp.c:839)
39 * W MDP5:0x4b0 0xccccc0c0 (mdss_mdp.c:839)
40 * W MDP5:0x4b8 0xccccc000 (mdss_mdp.c:839)
42 * Downstream fbdev driver gets these register offsets/values
43 * from DT.. not really sure what these registers are or if
44 * different values for different boards/SoC's, etc. I guess
45 * they are the golden registers.
47 * Not setting these does not seem to cause any problem. But
48 * we may be getting lucky with the bootloader initializing
49 * them for us. OTOH, if we can always count on the bootloader
50 * setting the golden registers, then perhaps we don't need to
54 spin_lock_irqsave(&mdp5_kms
->resource_lock
, flags
);
55 mdp5_write(mdp5_kms
, REG_MDP5_DISP_INTF_SEL
, 0);
56 spin_unlock_irqrestore(&mdp5_kms
->resource_lock
, flags
);
58 mdp5_ctlm_hw_reset(mdp5_kms
->ctlm
);
60 pm_runtime_put_sync(dev
);
65 /* Global/shared object state funcs */
68 * This is a helper that returns the private state currently in operation.
69 * Note that this would return the "old_state" if called in the atomic check
70 * path, and the "new_state" after the atomic swap has been done.
72 struct mdp5_global_state
*
73 mdp5_get_existing_global_state(struct mdp5_kms
*mdp5_kms
)
75 return to_mdp5_global_state(mdp5_kms
->glob_state
.state
);
79 * This acquires the modeset lock set aside for global state, creates
80 * a new duplicated private object state.
82 struct mdp5_global_state
*mdp5_get_global_state(struct drm_atomic_state
*s
)
84 struct msm_drm_private
*priv
= s
->dev
->dev_private
;
85 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(priv
->kms
));
86 struct drm_private_state
*priv_state
;
89 ret
= drm_modeset_lock(&mdp5_kms
->glob_state_lock
, s
->acquire_ctx
);
93 priv_state
= drm_atomic_get_private_obj_state(s
, &mdp5_kms
->glob_state
);
94 if (IS_ERR(priv_state
))
95 return ERR_CAST(priv_state
);
97 return to_mdp5_global_state(priv_state
);
100 static struct drm_private_state
*
101 mdp5_global_duplicate_state(struct drm_private_obj
*obj
)
103 struct mdp5_global_state
*state
;
105 state
= kmemdup(obj
->state
, sizeof(*state
), GFP_KERNEL
);
109 __drm_atomic_helper_private_obj_duplicate_state(obj
, &state
->base
);
/* drm_private_state_funcs.atomic_destroy_state: free a duplicated state. */
static void mdp5_global_destroy_state(struct drm_private_obj *obj,
				      struct drm_private_state *state)
{
	struct mdp5_global_state *mdp5_state = to_mdp5_global_state(state);

	kfree(mdp5_state);
}
122 static const struct drm_private_state_funcs mdp5_global_state_funcs
= {
123 .atomic_duplicate_state
= mdp5_global_duplicate_state
,
124 .atomic_destroy_state
= mdp5_global_destroy_state
,
127 static int mdp5_global_obj_init(struct mdp5_kms
*mdp5_kms
)
129 struct mdp5_global_state
*state
;
131 drm_modeset_lock_init(&mdp5_kms
->glob_state_lock
);
133 state
= kzalloc(sizeof(*state
), GFP_KERNEL
);
137 state
->mdp5_kms
= mdp5_kms
;
139 drm_atomic_private_obj_init(mdp5_kms
->dev
, &mdp5_kms
->glob_state
,
141 &mdp5_global_state_funcs
);
145 static void mdp5_enable_commit(struct msm_kms
*kms
)
147 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
148 pm_runtime_get_sync(&mdp5_kms
->pdev
->dev
);
151 static void mdp5_disable_commit(struct msm_kms
*kms
)
153 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
154 pm_runtime_put_sync(&mdp5_kms
->pdev
->dev
);
157 static void mdp5_prepare_commit(struct msm_kms
*kms
, struct drm_atomic_state
*state
)
159 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
160 struct mdp5_global_state
*global_state
;
162 global_state
= mdp5_get_existing_global_state(mdp5_kms
);
165 mdp5_smp_prepare_commit(mdp5_kms
->smp
, &global_state
->smp
);
/* Flush hook: intentionally empty — MDP5 CRTCs issue their own flushes. */
static void mdp5_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
{
	/* TODO */
}
173 static void mdp5_wait_flush(struct msm_kms
*kms
, unsigned crtc_mask
)
175 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
176 struct drm_crtc
*crtc
;
178 for_each_crtc_mask(mdp5_kms
->dev
, crtc
, crtc_mask
)
179 mdp5_crtc_wait_for_commit_done(crtc
);
182 static void mdp5_complete_commit(struct msm_kms
*kms
, unsigned crtc_mask
)
184 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
185 struct mdp5_global_state
*global_state
;
187 global_state
= mdp5_get_existing_global_state(mdp5_kms
);
190 mdp5_smp_complete_commit(mdp5_kms
->smp
, &global_state
->smp
);
/* MDP5 places no constraint on the pixel clock: any requested rate is
 * accepted unchanged.
 */
static long mdp5_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
199 static int mdp5_set_split_display(struct msm_kms
*kms
,
200 struct drm_encoder
*encoder
,
201 struct drm_encoder
*slave_encoder
,
205 return mdp5_cmd_encoder_set_split_display(encoder
,
208 return mdp5_vid_encoder_set_split_display(encoder
,
212 static void mdp5_set_encoder_mode(struct msm_kms
*kms
,
213 struct drm_encoder
*encoder
,
216 mdp5_encoder_set_intf_mode(encoder
, cmd_mode
);
219 static void mdp5_kms_destroy(struct msm_kms
*kms
)
221 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
222 struct msm_gem_address_space
*aspace
= kms
->aspace
;
225 for (i
= 0; i
< mdp5_kms
->num_hwmixers
; i
++)
226 mdp5_mixer_destroy(mdp5_kms
->hwmixers
[i
]);
228 for (i
= 0; i
< mdp5_kms
->num_hwpipes
; i
++)
229 mdp5_pipe_destroy(mdp5_kms
->hwpipes
[i
]);
232 aspace
->mmu
->funcs
->detach(aspace
->mmu
);
233 msm_gem_address_space_put(aspace
);
237 #ifdef CONFIG_DEBUG_FS
238 static int smp_show(struct seq_file
*m
, void *arg
)
240 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
241 struct drm_device
*dev
= node
->minor
->dev
;
242 struct msm_drm_private
*priv
= dev
->dev_private
;
243 struct mdp5_kms
*mdp5_kms
= to_mdp5_kms(to_mdp_kms(priv
->kms
));
244 struct drm_printer p
= drm_seq_file_printer(m
);
246 if (!mdp5_kms
->smp
) {
247 drm_printf(&p
, "no SMP pool\n");
251 mdp5_smp_dump(mdp5_kms
->smp
, &p
);
256 static struct drm_info_list mdp5_debugfs_list
[] = {
260 static int mdp5_kms_debugfs_init(struct msm_kms
*kms
, struct drm_minor
*minor
)
262 drm_debugfs_create_files(mdp5_debugfs_list
,
263 ARRAY_SIZE(mdp5_debugfs_list
),
264 minor
->debugfs_root
, minor
);
270 static const struct mdp_kms_funcs kms_funcs
= {
272 .hw_init
= mdp5_hw_init
,
273 .irq_preinstall
= mdp5_irq_preinstall
,
274 .irq_postinstall
= mdp5_irq_postinstall
,
275 .irq_uninstall
= mdp5_irq_uninstall
,
277 .enable_vblank
= mdp5_enable_vblank
,
278 .disable_vblank
= mdp5_disable_vblank
,
279 .flush_commit
= mdp5_flush_commit
,
280 .enable_commit
= mdp5_enable_commit
,
281 .disable_commit
= mdp5_disable_commit
,
282 .prepare_commit
= mdp5_prepare_commit
,
283 .wait_flush
= mdp5_wait_flush
,
284 .complete_commit
= mdp5_complete_commit
,
285 .get_format
= mdp_get_format
,
286 .round_pixclk
= mdp5_round_pixclk
,
287 .set_split_display
= mdp5_set_split_display
,
288 .set_encoder_mode
= mdp5_set_encoder_mode
,
289 .destroy
= mdp5_kms_destroy
,
290 #ifdef CONFIG_DEBUG_FS
291 .debugfs_init
= mdp5_kms_debugfs_init
,
294 .set_irqmask
= mdp5_set_irqmask
,
297 int mdp5_disable(struct mdp5_kms
*mdp5_kms
)
301 mdp5_kms
->enable_count
--;
302 WARN_ON(mdp5_kms
->enable_count
< 0);
304 if (mdp5_kms
->tbu_rt_clk
)
305 clk_disable_unprepare(mdp5_kms
->tbu_rt_clk
);
306 if (mdp5_kms
->tbu_clk
)
307 clk_disable_unprepare(mdp5_kms
->tbu_clk
);
308 clk_disable_unprepare(mdp5_kms
->ahb_clk
);
309 clk_disable_unprepare(mdp5_kms
->axi_clk
);
310 clk_disable_unprepare(mdp5_kms
->core_clk
);
311 if (mdp5_kms
->lut_clk
)
312 clk_disable_unprepare(mdp5_kms
->lut_clk
);
317 int mdp5_enable(struct mdp5_kms
*mdp5_kms
)
321 mdp5_kms
->enable_count
++;
323 clk_prepare_enable(mdp5_kms
->ahb_clk
);
324 clk_prepare_enable(mdp5_kms
->axi_clk
);
325 clk_prepare_enable(mdp5_kms
->core_clk
);
326 if (mdp5_kms
->lut_clk
)
327 clk_prepare_enable(mdp5_kms
->lut_clk
);
328 if (mdp5_kms
->tbu_clk
)
329 clk_prepare_enable(mdp5_kms
->tbu_clk
);
330 if (mdp5_kms
->tbu_rt_clk
)
331 clk_prepare_enable(mdp5_kms
->tbu_rt_clk
);
336 static struct drm_encoder
*construct_encoder(struct mdp5_kms
*mdp5_kms
,
337 struct mdp5_interface
*intf
,
338 struct mdp5_ctl
*ctl
)
340 struct drm_device
*dev
= mdp5_kms
->dev
;
341 struct msm_drm_private
*priv
= dev
->dev_private
;
342 struct drm_encoder
*encoder
;
344 encoder
= mdp5_encoder_init(dev
, intf
, ctl
);
345 if (IS_ERR(encoder
)) {
346 DRM_DEV_ERROR(dev
->dev
, "failed to construct encoder\n");
350 priv
->encoders
[priv
->num_encoders
++] = encoder
;
355 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw
*hw_cfg
, int intf_num
)
357 const enum mdp5_intf_type
*intfs
= hw_cfg
->intf
.connect
;
358 const int intf_cnt
= ARRAY_SIZE(hw_cfg
->intf
.connect
);
361 for (i
= 0; i
< intf_cnt
; i
++) {
362 if (intfs
[i
] == INTF_DSI
) {
373 static int modeset_init_intf(struct mdp5_kms
*mdp5_kms
,
374 struct mdp5_interface
*intf
)
376 struct drm_device
*dev
= mdp5_kms
->dev
;
377 struct msm_drm_private
*priv
= dev
->dev_private
;
378 struct mdp5_ctl_manager
*ctlm
= mdp5_kms
->ctlm
;
379 struct mdp5_ctl
*ctl
;
380 struct drm_encoder
*encoder
;
383 switch (intf
->type
) {
388 ctl
= mdp5_ctlm_request(ctlm
, intf
->num
);
394 encoder
= construct_encoder(mdp5_kms
, intf
, ctl
);
395 if (IS_ERR(encoder
)) {
396 ret
= PTR_ERR(encoder
);
400 ret
= msm_edp_modeset_init(priv
->edp
, dev
, encoder
);
406 ctl
= mdp5_ctlm_request(ctlm
, intf
->num
);
412 encoder
= construct_encoder(mdp5_kms
, intf
, ctl
);
413 if (IS_ERR(encoder
)) {
414 ret
= PTR_ERR(encoder
);
418 ret
= msm_hdmi_modeset_init(priv
->hdmi
, dev
, encoder
);
422 const struct mdp5_cfg_hw
*hw_cfg
=
423 mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
424 int dsi_id
= get_dsi_id_from_intf(hw_cfg
, intf
->num
);
426 if ((dsi_id
>= ARRAY_SIZE(priv
->dsi
)) || (dsi_id
< 0)) {
427 DRM_DEV_ERROR(dev
->dev
, "failed to find dsi from intf %d\n",
433 if (!priv
->dsi
[dsi_id
])
436 ctl
= mdp5_ctlm_request(ctlm
, intf
->num
);
442 encoder
= construct_encoder(mdp5_kms
, intf
, ctl
);
443 if (IS_ERR(encoder
)) {
444 ret
= PTR_ERR(encoder
);
448 ret
= msm_dsi_modeset_init(priv
->dsi
[dsi_id
], dev
, encoder
);
452 DRM_DEV_ERROR(dev
->dev
, "unknown intf: %d\n", intf
->type
);
460 static int modeset_init(struct mdp5_kms
*mdp5_kms
)
462 struct drm_device
*dev
= mdp5_kms
->dev
;
463 struct msm_drm_private
*priv
= dev
->dev_private
;
464 unsigned int num_crtcs
;
465 int i
, ret
, pi
= 0, ci
= 0;
466 struct drm_plane
*primary
[MAX_BASES
] = { NULL
};
467 struct drm_plane
*cursor
[MAX_BASES
] = { NULL
};
470 * Construct encoders and modeset initialize connector devices
471 * for each external display interface.
473 for (i
= 0; i
< mdp5_kms
->num_intfs
; i
++) {
474 ret
= modeset_init_intf(mdp5_kms
, mdp5_kms
->intfs
[i
]);
480 * We should ideally have less number of encoders (set up by parsing
481 * the MDP5 interfaces) than the number of layer mixers present in HW,
482 * but let's be safe here anyway
484 num_crtcs
= min(priv
->num_encoders
, mdp5_kms
->num_hwmixers
);
487 * Construct planes equaling the number of hw pipes, and CRTCs for the
488 * N encoders set up by the driver. The first N planes become primary
489 * planes for the CRTCs, with the remainder as overlay planes:
491 for (i
= 0; i
< mdp5_kms
->num_hwpipes
; i
++) {
492 struct mdp5_hw_pipe
*hwpipe
= mdp5_kms
->hwpipes
[i
];
493 struct drm_plane
*plane
;
494 enum drm_plane_type type
;
497 type
= DRM_PLANE_TYPE_PRIMARY
;
498 else if (hwpipe
->caps
& MDP_PIPE_CAP_CURSOR
)
499 type
= DRM_PLANE_TYPE_CURSOR
;
501 type
= DRM_PLANE_TYPE_OVERLAY
;
503 plane
= mdp5_plane_init(dev
, type
);
505 ret
= PTR_ERR(plane
);
506 DRM_DEV_ERROR(dev
->dev
, "failed to construct plane %d (%d)\n", i
, ret
);
509 priv
->planes
[priv
->num_planes
++] = plane
;
511 if (type
== DRM_PLANE_TYPE_PRIMARY
)
512 primary
[pi
++] = plane
;
513 if (type
== DRM_PLANE_TYPE_CURSOR
)
514 cursor
[ci
++] = plane
;
517 for (i
= 0; i
< num_crtcs
; i
++) {
518 struct drm_crtc
*crtc
;
520 crtc
= mdp5_crtc_init(dev
, primary
[i
], cursor
[i
], i
);
523 DRM_DEV_ERROR(dev
->dev
, "failed to construct crtc %d (%d)\n", i
, ret
);
526 priv
->crtcs
[priv
->num_crtcs
++] = crtc
;
530 * Now that we know the number of crtcs we've created, set the possible
531 * crtcs for the encoders
533 for (i
= 0; i
< priv
->num_encoders
; i
++) {
534 struct drm_encoder
*encoder
= priv
->encoders
[i
];
536 encoder
->possible_crtcs
= (1 << priv
->num_crtcs
) - 1;
545 static void read_mdp_hw_revision(struct mdp5_kms
*mdp5_kms
,
546 u32
*major
, u32
*minor
)
548 struct device
*dev
= &mdp5_kms
->pdev
->dev
;
551 pm_runtime_get_sync(dev
);
552 version
= mdp5_read(mdp5_kms
, REG_MDP5_HW_VERSION
);
553 pm_runtime_put_sync(dev
);
555 *major
= FIELD(version
, MDP5_HW_VERSION_MAJOR
);
556 *minor
= FIELD(version
, MDP5_HW_VERSION_MINOR
);
558 DRM_DEV_INFO(dev
, "MDP5 version v%d.%d", *major
, *minor
);
561 static int get_clk(struct platform_device
*pdev
, struct clk
**clkp
,
562 const char *name
, bool mandatory
)
564 struct device
*dev
= &pdev
->dev
;
565 struct clk
*clk
= msm_clk_get(pdev
, name
);
566 if (IS_ERR(clk
) && mandatory
) {
567 DRM_DEV_ERROR(dev
, "failed to get %s (%ld)\n", name
, PTR_ERR(clk
));
571 DBG("skipping %s", name
);
578 struct msm_kms
*mdp5_kms_init(struct drm_device
*dev
)
580 struct msm_drm_private
*priv
= dev
->dev_private
;
581 struct platform_device
*pdev
;
582 struct mdp5_kms
*mdp5_kms
;
583 struct mdp5_cfg
*config
;
585 struct msm_gem_address_space
*aspace
;
587 struct device
*iommu_dev
;
589 /* priv->kms would have been populated by the MDP5 driver */
594 mdp5_kms
= to_mdp5_kms(to_mdp_kms(kms
));
596 mdp_kms_init(&mdp5_kms
->base
, &kms_funcs
);
598 pdev
= mdp5_kms
->pdev
;
600 irq
= irq_of_parse_and_map(pdev
->dev
.of_node
, 0);
603 DRM_DEV_ERROR(&pdev
->dev
, "failed to get irq: %d\n", ret
);
609 config
= mdp5_cfg_get_config(mdp5_kms
->cfg
);
611 /* make sure things are off before attaching iommu (bootloader could
612 * have left things on, in which case we'll start getting faults if
615 pm_runtime_get_sync(&pdev
->dev
);
616 for (i
= 0; i
< MDP5_INTF_NUM_MAX
; i
++) {
617 if (mdp5_cfg_intf_is_virtual(config
->hw
->intf
.connect
[i
]) ||
618 !config
->hw
->intf
.base
[i
])
620 mdp5_write(mdp5_kms
, REG_MDP5_INTF_TIMING_ENGINE_EN(i
), 0);
622 mdp5_write(mdp5_kms
, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i
), 0x3);
626 if (config
->platform
.iommu
) {
627 iommu_dev
= &pdev
->dev
;
628 if (!dev_iommu_fwspec_get(iommu_dev
))
629 iommu_dev
= iommu_dev
->parent
;
631 aspace
= msm_gem_address_space_create(iommu_dev
,
632 config
->platform
.iommu
, "mdp5");
633 if (IS_ERR(aspace
)) {
634 ret
= PTR_ERR(aspace
);
638 kms
->aspace
= aspace
;
640 ret
= aspace
->mmu
->funcs
->attach(aspace
->mmu
);
642 DRM_DEV_ERROR(&pdev
->dev
, "failed to attach iommu: %d\n",
647 DRM_DEV_INFO(&pdev
->dev
,
648 "no iommu, fallback to phys contig buffers for scanout\n");
652 pm_runtime_put_sync(&pdev
->dev
);
654 ret
= modeset_init(mdp5_kms
);
656 DRM_DEV_ERROR(&pdev
->dev
, "modeset_init failed: %d\n", ret
);
660 dev
->mode_config
.min_width
= 0;
661 dev
->mode_config
.min_height
= 0;
662 dev
->mode_config
.max_width
= 0xffff;
663 dev
->mode_config
.max_height
= 0xffff;
665 dev
->max_vblank_count
= 0; /* max_vblank_count is set on each CRTC */
666 dev
->vblank_disable_immediate
= true;
671 mdp5_kms_destroy(kms
);
675 static void mdp5_destroy(struct platform_device
*pdev
)
677 struct mdp5_kms
*mdp5_kms
= platform_get_drvdata(pdev
);
681 mdp5_ctlm_destroy(mdp5_kms
->ctlm
);
683 mdp5_smp_destroy(mdp5_kms
->smp
);
685 mdp5_cfg_destroy(mdp5_kms
->cfg
);
687 for (i
= 0; i
< mdp5_kms
->num_intfs
; i
++)
688 kfree(mdp5_kms
->intfs
[i
]);
690 if (mdp5_kms
->rpm_enabled
)
691 pm_runtime_disable(&pdev
->dev
);
693 drm_atomic_private_obj_fini(&mdp5_kms
->glob_state
);
694 drm_modeset_lock_fini(&mdp5_kms
->glob_state_lock
);
697 static int construct_pipes(struct mdp5_kms
*mdp5_kms
, int cnt
,
698 const enum mdp5_pipe
*pipes
, const uint32_t *offsets
,
701 struct drm_device
*dev
= mdp5_kms
->dev
;
704 for (i
= 0; i
< cnt
; i
++) {
705 struct mdp5_hw_pipe
*hwpipe
;
707 hwpipe
= mdp5_pipe_init(pipes
[i
], offsets
[i
], caps
);
708 if (IS_ERR(hwpipe
)) {
709 ret
= PTR_ERR(hwpipe
);
710 DRM_DEV_ERROR(dev
->dev
, "failed to construct pipe for %s (%d)\n",
711 pipe2name(pipes
[i
]), ret
);
714 hwpipe
->idx
= mdp5_kms
->num_hwpipes
;
715 mdp5_kms
->hwpipes
[mdp5_kms
->num_hwpipes
++] = hwpipe
;
721 static int hwpipe_init(struct mdp5_kms
*mdp5_kms
)
723 static const enum mdp5_pipe rgb_planes
[] = {
724 SSPP_RGB0
, SSPP_RGB1
, SSPP_RGB2
, SSPP_RGB3
,
726 static const enum mdp5_pipe vig_planes
[] = {
727 SSPP_VIG0
, SSPP_VIG1
, SSPP_VIG2
, SSPP_VIG3
,
729 static const enum mdp5_pipe dma_planes
[] = {
730 SSPP_DMA0
, SSPP_DMA1
,
732 static const enum mdp5_pipe cursor_planes
[] = {
733 SSPP_CURSOR0
, SSPP_CURSOR1
,
735 const struct mdp5_cfg_hw
*hw_cfg
;
738 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
740 /* Construct RGB pipes: */
741 ret
= construct_pipes(mdp5_kms
, hw_cfg
->pipe_rgb
.count
, rgb_planes
,
742 hw_cfg
->pipe_rgb
.base
, hw_cfg
->pipe_rgb
.caps
);
746 /* Construct video (VIG) pipes: */
747 ret
= construct_pipes(mdp5_kms
, hw_cfg
->pipe_vig
.count
, vig_planes
,
748 hw_cfg
->pipe_vig
.base
, hw_cfg
->pipe_vig
.caps
);
752 /* Construct DMA pipes: */
753 ret
= construct_pipes(mdp5_kms
, hw_cfg
->pipe_dma
.count
, dma_planes
,
754 hw_cfg
->pipe_dma
.base
, hw_cfg
->pipe_dma
.caps
);
758 /* Construct cursor pipes: */
759 ret
= construct_pipes(mdp5_kms
, hw_cfg
->pipe_cursor
.count
,
760 cursor_planes
, hw_cfg
->pipe_cursor
.base
,
761 hw_cfg
->pipe_cursor
.caps
);
768 static int hwmixer_init(struct mdp5_kms
*mdp5_kms
)
770 struct drm_device
*dev
= mdp5_kms
->dev
;
771 const struct mdp5_cfg_hw
*hw_cfg
;
774 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
776 for (i
= 0; i
< hw_cfg
->lm
.count
; i
++) {
777 struct mdp5_hw_mixer
*mixer
;
779 mixer
= mdp5_mixer_init(&hw_cfg
->lm
.instances
[i
]);
781 ret
= PTR_ERR(mixer
);
782 DRM_DEV_ERROR(dev
->dev
, "failed to construct LM%d (%d)\n",
787 mixer
->idx
= mdp5_kms
->num_hwmixers
;
788 mdp5_kms
->hwmixers
[mdp5_kms
->num_hwmixers
++] = mixer
;
794 static int interface_init(struct mdp5_kms
*mdp5_kms
)
796 struct drm_device
*dev
= mdp5_kms
->dev
;
797 const struct mdp5_cfg_hw
*hw_cfg
;
798 const enum mdp5_intf_type
*intf_types
;
801 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
802 intf_types
= hw_cfg
->intf
.connect
;
804 for (i
= 0; i
< ARRAY_SIZE(hw_cfg
->intf
.connect
); i
++) {
805 struct mdp5_interface
*intf
;
807 if (intf_types
[i
] == INTF_DISABLED
)
810 intf
= kzalloc(sizeof(*intf
), GFP_KERNEL
);
812 DRM_DEV_ERROR(dev
->dev
, "failed to construct INTF%d\n", i
);
817 intf
->type
= intf_types
[i
];
818 intf
->mode
= MDP5_INTF_MODE_NONE
;
819 intf
->idx
= mdp5_kms
->num_intfs
;
820 mdp5_kms
->intfs
[mdp5_kms
->num_intfs
++] = intf
;
826 static int mdp5_init(struct platform_device
*pdev
, struct drm_device
*dev
)
828 struct msm_drm_private
*priv
= dev
->dev_private
;
829 struct mdp5_kms
*mdp5_kms
;
830 struct mdp5_cfg
*config
;
834 mdp5_kms
= devm_kzalloc(&pdev
->dev
, sizeof(*mdp5_kms
), GFP_KERNEL
);
840 platform_set_drvdata(pdev
, mdp5_kms
);
842 spin_lock_init(&mdp5_kms
->resource_lock
);
845 mdp5_kms
->pdev
= pdev
;
847 ret
= mdp5_global_obj_init(mdp5_kms
);
851 mdp5_kms
->mmio
= msm_ioremap(pdev
, "mdp_phys", "MDP5");
852 if (IS_ERR(mdp5_kms
->mmio
)) {
853 ret
= PTR_ERR(mdp5_kms
->mmio
);
857 /* mandatory clocks: */
858 ret
= get_clk(pdev
, &mdp5_kms
->axi_clk
, "bus", true);
861 ret
= get_clk(pdev
, &mdp5_kms
->ahb_clk
, "iface", true);
864 ret
= get_clk(pdev
, &mdp5_kms
->core_clk
, "core", true);
867 ret
= get_clk(pdev
, &mdp5_kms
->vsync_clk
, "vsync", true);
871 /* optional clocks: */
872 get_clk(pdev
, &mdp5_kms
->lut_clk
, "lut", false);
873 get_clk(pdev
, &mdp5_kms
->tbu_clk
, "tbu", false);
874 get_clk(pdev
, &mdp5_kms
->tbu_rt_clk
, "tbu_rt", false);
876 /* we need to set a default rate before enabling. Set a safe
877 * rate first, then figure out hw revision, and then set a
880 clk_set_rate(mdp5_kms
->core_clk
, 200000000);
882 pm_runtime_enable(&pdev
->dev
);
883 mdp5_kms
->rpm_enabled
= true;
885 read_mdp_hw_revision(mdp5_kms
, &major
, &minor
);
887 mdp5_kms
->cfg
= mdp5_cfg_init(mdp5_kms
, major
, minor
);
888 if (IS_ERR(mdp5_kms
->cfg
)) {
889 ret
= PTR_ERR(mdp5_kms
->cfg
);
890 mdp5_kms
->cfg
= NULL
;
894 config
= mdp5_cfg_get_config(mdp5_kms
->cfg
);
895 mdp5_kms
->caps
= config
->hw
->mdp
.caps
;
897 /* TODO: compute core clock rate at runtime */
898 clk_set_rate(mdp5_kms
->core_clk
, config
->hw
->max_clk
);
901 * Some chipsets have a Shared Memory Pool (SMP), while others
902 * have dedicated latency buffering per source pipe instead;
903 * this section initializes the SMP:
905 if (mdp5_kms
->caps
& MDP_CAP_SMP
) {
906 mdp5_kms
->smp
= mdp5_smp_init(mdp5_kms
, &config
->hw
->smp
);
907 if (IS_ERR(mdp5_kms
->smp
)) {
908 ret
= PTR_ERR(mdp5_kms
->smp
);
909 mdp5_kms
->smp
= NULL
;
914 mdp5_kms
->ctlm
= mdp5_ctlm_init(dev
, mdp5_kms
->mmio
, mdp5_kms
->cfg
);
915 if (IS_ERR(mdp5_kms
->ctlm
)) {
916 ret
= PTR_ERR(mdp5_kms
->ctlm
);
917 mdp5_kms
->ctlm
= NULL
;
921 ret
= hwpipe_init(mdp5_kms
);
925 ret
= hwmixer_init(mdp5_kms
);
929 ret
= interface_init(mdp5_kms
);
933 /* set uninit-ed kms */
934 priv
->kms
= &mdp5_kms
->base
.base
;
/* Component bind callback: run first-stage init against the master's
 * drm_device.
 */
static int mdp5_bind(struct device *dev, struct device *master, void *data)
{
	struct drm_device *ddev = dev_get_drvdata(master);
	struct platform_device *pdev = to_platform_device(dev);

	DBG("");

	return mdp5_init(pdev, ddev);
}
/* Component unbind callback: tear down everything mdp5_bind() created. */
static void mdp5_unbind(struct device *dev, struct device *master,
			void *data)
{
	struct platform_device *pdev = to_platform_device(dev);

	mdp5_destroy(pdev);
}
960 static const struct component_ops mdp5_ops
= {
962 .unbind
= mdp5_unbind
,
965 static int mdp5_setup_interconnect(struct platform_device
*pdev
)
967 struct icc_path
*path0
= of_icc_get(&pdev
->dev
, "mdp0-mem");
968 struct icc_path
*path1
= of_icc_get(&pdev
->dev
, "mdp1-mem");
969 struct icc_path
*path_rot
= of_icc_get(&pdev
->dev
, "rotator-mem");
972 return PTR_ERR(path0
);
975 /* no interconnect support is not necessarily a fatal
976 * condition, the platform may simply not have an
977 * interconnect driver yet. But warn about it in case
978 * bootloader didn't setup bus clocks high enough for
981 dev_warn(&pdev
->dev
, "No interconnect support may cause display underflows!\n");
985 icc_set_bw(path0
, 0, MBps_to_icc(6400));
987 if (!IS_ERR_OR_NULL(path1
))
988 icc_set_bw(path1
, 0, MBps_to_icc(6400));
989 if (!IS_ERR_OR_NULL(path_rot
))
990 icc_set_bw(path_rot
, 0, MBps_to_icc(6400));
995 static int mdp5_dev_probe(struct platform_device
*pdev
)
1001 ret
= mdp5_setup_interconnect(pdev
);
1005 return component_add(&pdev
->dev
, &mdp5_ops
);
1008 static int mdp5_dev_remove(struct platform_device
*pdev
)
1011 component_del(&pdev
->dev
, &mdp5_ops
);
1015 static __maybe_unused
int mdp5_runtime_suspend(struct device
*dev
)
1017 struct platform_device
*pdev
= to_platform_device(dev
);
1018 struct mdp5_kms
*mdp5_kms
= platform_get_drvdata(pdev
);
1022 return mdp5_disable(mdp5_kms
);
1025 static __maybe_unused
int mdp5_runtime_resume(struct device
*dev
)
1027 struct platform_device
*pdev
= to_platform_device(dev
);
1028 struct mdp5_kms
*mdp5_kms
= platform_get_drvdata(pdev
);
1032 return mdp5_enable(mdp5_kms
);
1035 static const struct dev_pm_ops mdp5_pm_ops
= {
1036 SET_RUNTIME_PM_OPS(mdp5_runtime_suspend
, mdp5_runtime_resume
, NULL
)
1039 static const struct of_device_id mdp5_dt_match
[] = {
1040 { .compatible
= "qcom,mdp5", },
1041 /* to support downstream DT files */
1042 { .compatible
= "qcom,mdss_mdp", },
1045 MODULE_DEVICE_TABLE(of
, mdp5_dt_match
);
1047 static struct platform_driver mdp5_driver
= {
1048 .probe
= mdp5_dev_probe
,
1049 .remove
= mdp5_dev_remove
,
1052 .of_match_table
= mdp5_dt_match
,
1057 void __init
msm_mdp_register(void)
1060 platform_driver_register(&mdp5_driver
);
1063 void __exit
msm_mdp_unregister(void)
1066 platform_driver_unregister(&mdp5_driver
);