]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
990a4007 AT |
2 | /* |
3 | * Copyright (c) 2016, The Linux Foundation. All rights reserved. | |
990a4007 AT |
4 | */ |
5 | ||
6 | #include <linux/irqdomain.h> | |
7 | #include <linux/irq.h> | |
8 | ||
9 | #include "msm_drv.h" | |
10 | #include "mdp5_kms.h" | |
11 | ||
/* Upcast from the generic msm_mdss embedded as the first member. */
#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)

/*
 * Per-device state for the MDP5 MDSS wrapper block: register mappings,
 * the GDSC regulator, bus/interface clocks, and the cascaded interrupt
 * controller that demuxes MDSS interrupts to sub-blocks.
 */
struct mdp5_mdss {
	struct msm_mdss base;		/* must stay first: see to_mdp5_mdss() */

	void __iomem *mmio, *vbif;	/* MDSS and VBIF register windows */

	struct regulator *vdd;		/* GDSC supply (downstream kernels) */

	struct clk *ahb_clk;		/* "iface" - register access */
	struct clk *axi_clk;		/* "bus" - memory traffic (optional) */
	struct clk *vsync_clk;		/* vsync source (optional) */

	struct {
		/* bitmask of hwirqs currently unmasked by sub-blocks */
		volatile unsigned long enabled_mask;
		/* linear domain translating MDSS hwirqs to Linux virqs */
		struct irq_domain *domain;
	} irqcontroller;
};
30 | ||
bc3220be | 31 | static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data) |
990a4007 | 32 | { |
bc3220be | 33 | msm_writel(data, mdp5_mdss->mmio + reg); |
990a4007 AT |
34 | } |
35 | ||
bc3220be | 36 | static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg) |
990a4007 | 37 | { |
bc3220be | 38 | return msm_readl(mdp5_mdss->mmio + reg); |
990a4007 AT |
39 | } |
40 | ||
41 | static irqreturn_t mdss_irq(int irq, void *arg) | |
42 | { | |
bc3220be | 43 | struct mdp5_mdss *mdp5_mdss = arg; |
990a4007 AT |
44 | u32 intr; |
45 | ||
bc3220be | 46 | intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS); |
990a4007 AT |
47 | |
48 | VERB("intr=%08x", intr); | |
49 | ||
50 | while (intr) { | |
51 | irq_hw_number_t hwirq = fls(intr) - 1; | |
52 | ||
53 | generic_handle_irq(irq_find_mapping( | |
bc3220be | 54 | mdp5_mdss->irqcontroller.domain, hwirq)); |
990a4007 AT |
55 | intr &= ~(1 << hwirq); |
56 | } | |
57 | ||
58 | return IRQ_HANDLED; | |
59 | } | |
60 | ||
/*
 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
 * can register to get their irq's delivered
 */

/* hwirq bits that sub-blocks are allowed to map through our irq domain */
#define VALID_IRQS  (MDSS_HW_INTR_STATUS_INTR_MDP | \
		MDSS_HW_INTR_STATUS_INTR_DSI0 | \
		MDSS_HW_INTR_STATUS_INTR_DSI1 | \
		MDSS_HW_INTR_STATUS_INTR_HDMI | \
		MDSS_HW_INTR_STATUS_INTR_EDP)
71 | ||
72 | static void mdss_hw_mask_irq(struct irq_data *irqd) | |
73 | { | |
bc3220be | 74 | struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd); |
990a4007 AT |
75 | |
76 | smp_mb__before_atomic(); | |
bc3220be | 77 | clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask); |
990a4007 AT |
78 | smp_mb__after_atomic(); |
79 | } | |
80 | ||
81 | static void mdss_hw_unmask_irq(struct irq_data *irqd) | |
82 | { | |
bc3220be | 83 | struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd); |
990a4007 AT |
84 | |
85 | smp_mb__before_atomic(); | |
bc3220be | 86 | set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask); |
990a4007 AT |
87 | smp_mb__after_atomic(); |
88 | } | |
89 | ||
/* irq_chip for the cascaded MDSS interrupt controller */
static struct irq_chip mdss_hw_irq_chip = {
	.name		= "mdss",
	.irq_mask	= mdss_hw_mask_irq,
	.irq_unmask	= mdss_hw_unmask_irq,
};
95 | ||
96 | static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, | |
97 | irq_hw_number_t hwirq) | |
98 | { | |
bc3220be | 99 | struct mdp5_mdss *mdp5_mdss = d->host_data; |
990a4007 AT |
100 | |
101 | if (!(VALID_IRQS & (1 << hwirq))) | |
102 | return -EPERM; | |
103 | ||
104 | irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq); | |
bc3220be | 105 | irq_set_chip_data(irq, mdp5_mdss); |
990a4007 AT |
106 | |
107 | return 0; | |
108 | } | |
109 | ||
/* one-cell DT translation; .map restricts which hwirqs are accepted */
static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
	.map = mdss_hw_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
114 | ||
115 | ||
bc3220be | 116 | static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss) |
990a4007 | 117 | { |
bc3220be | 118 | struct device *dev = mdp5_mdss->base.dev->dev; |
990a4007 AT |
119 | struct irq_domain *d; |
120 | ||
121 | d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops, | |
bc3220be | 122 | mdp5_mdss); |
990a4007 | 123 | if (!d) { |
6a41da17 | 124 | DRM_DEV_ERROR(dev, "mdss irq domain add failed\n"); |
990a4007 AT |
125 | return -ENXIO; |
126 | } | |
127 | ||
bc3220be RY |
128 | mdp5_mdss->irqcontroller.enabled_mask = 0; |
129 | mdp5_mdss->irqcontroller.domain = d; | |
990a4007 AT |
130 | |
131 | return 0; | |
132 | } | |
133 | ||
bc3220be | 134 | static int mdp5_mdss_enable(struct msm_mdss *mdss) |
774e39ee | 135 | { |
bc3220be | 136 | struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss); |
774e39ee AT |
137 | DBG(""); |
138 | ||
bc3220be RY |
139 | clk_prepare_enable(mdp5_mdss->ahb_clk); |
140 | if (mdp5_mdss->axi_clk) | |
141 | clk_prepare_enable(mdp5_mdss->axi_clk); | |
142 | if (mdp5_mdss->vsync_clk) | |
143 | clk_prepare_enable(mdp5_mdss->vsync_clk); | |
774e39ee AT |
144 | |
145 | return 0; | |
146 | } | |
147 | ||
bc3220be | 148 | static int mdp5_mdss_disable(struct msm_mdss *mdss) |
774e39ee | 149 | { |
bc3220be | 150 | struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss); |
774e39ee AT |
151 | DBG(""); |
152 | ||
bc3220be RY |
153 | if (mdp5_mdss->vsync_clk) |
154 | clk_disable_unprepare(mdp5_mdss->vsync_clk); | |
155 | if (mdp5_mdss->axi_clk) | |
156 | clk_disable_unprepare(mdp5_mdss->axi_clk); | |
157 | clk_disable_unprepare(mdp5_mdss->ahb_clk); | |
774e39ee AT |
158 | |
159 | return 0; | |
160 | } | |
161 | ||
bc3220be | 162 | static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss) |
774e39ee | 163 | { |
bc3220be RY |
164 | struct platform_device *pdev = |
165 | to_platform_device(mdp5_mdss->base.dev->dev); | |
774e39ee | 166 | |
bc3220be RY |
167 | mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface"); |
168 | if (IS_ERR(mdp5_mdss->ahb_clk)) | |
169 | mdp5_mdss->ahb_clk = NULL; | |
774e39ee | 170 | |
bc3220be RY |
171 | mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus"); |
172 | if (IS_ERR(mdp5_mdss->axi_clk)) | |
173 | mdp5_mdss->axi_clk = NULL; | |
774e39ee | 174 | |
bc3220be RY |
175 | mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync"); |
176 | if (IS_ERR(mdp5_mdss->vsync_clk)) | |
177 | mdp5_mdss->vsync_clk = NULL; | |
774e39ee AT |
178 | |
179 | return 0; | |
180 | } | |
181 | ||
bc3220be | 182 | static void mdp5_mdss_destroy(struct drm_device *dev) |
990a4007 AT |
183 | { |
184 | struct msm_drm_private *priv = dev->dev_private; | |
bc3220be | 185 | struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss); |
990a4007 | 186 | |
bc3220be | 187 | if (!mdp5_mdss) |
990a4007 AT |
188 | return; |
189 | ||
bc3220be RY |
190 | irq_domain_remove(mdp5_mdss->irqcontroller.domain); |
191 | mdp5_mdss->irqcontroller.domain = NULL; | |
990a4007 | 192 | |
bc3220be | 193 | regulator_disable(mdp5_mdss->vdd); |
cd792726 | 194 | |
cd792726 | 195 | pm_runtime_disable(dev->dev); |
990a4007 AT |
196 | } |
197 | ||
/* msm_mdss vtable handed to the core via priv->mdss */
static const struct msm_mdss_funcs mdss_funcs = {
	.enable	= mdp5_mdss_enable,
	.disable = mdp5_mdss_disable,
	.destroy = mdp5_mdss_destroy,
};
203 | ||
204 | int mdp5_mdss_init(struct drm_device *dev) | |
990a4007 | 205 | { |
76adb460 | 206 | struct platform_device *pdev = to_platform_device(dev->dev); |
990a4007 | 207 | struct msm_drm_private *priv = dev->dev_private; |
bc3220be | 208 | struct mdp5_mdss *mdp5_mdss; |
990a4007 AT |
209 | int ret; |
210 | ||
211 | DBG(""); | |
212 | ||
213 | if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss")) | |
214 | return 0; | |
215 | ||
bc3220be RY |
216 | mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL); |
217 | if (!mdp5_mdss) { | |
990a4007 AT |
218 | ret = -ENOMEM; |
219 | goto fail; | |
220 | } | |
221 | ||
bc3220be | 222 | mdp5_mdss->base.dev = dev; |
990a4007 | 223 | |
bc3220be RY |
224 | mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS"); |
225 | if (IS_ERR(mdp5_mdss->mmio)) { | |
226 | ret = PTR_ERR(mdp5_mdss->mmio); | |
990a4007 AT |
227 | goto fail; |
228 | } | |
229 | ||
bc3220be RY |
230 | mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); |
231 | if (IS_ERR(mdp5_mdss->vbif)) { | |
232 | ret = PTR_ERR(mdp5_mdss->vbif); | |
990a4007 AT |
233 | goto fail; |
234 | } | |
235 | ||
bc3220be | 236 | ret = msm_mdss_get_clocks(mdp5_mdss); |
774e39ee | 237 | if (ret) { |
6a41da17 | 238 | DRM_DEV_ERROR(dev->dev, "failed to get clocks: %d\n", ret); |
774e39ee AT |
239 | goto fail; |
240 | } | |
241 | ||
990a4007 | 242 | /* Regulator to enable GDSCs in downstream kernels */ |
bc3220be RY |
243 | mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd"); |
244 | if (IS_ERR(mdp5_mdss->vdd)) { | |
245 | ret = PTR_ERR(mdp5_mdss->vdd); | |
990a4007 AT |
246 | goto fail; |
247 | } | |
248 | ||
bc3220be | 249 | ret = regulator_enable(mdp5_mdss->vdd); |
990a4007 | 250 | if (ret) { |
6a41da17 | 251 | DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n", |
990a4007 AT |
252 | ret); |
253 | goto fail; | |
254 | } | |
255 | ||
256 | ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0), | |
bc3220be | 257 | mdss_irq, 0, "mdss_isr", mdp5_mdss); |
990a4007 | 258 | if (ret) { |
6a41da17 | 259 | DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret); |
990a4007 AT |
260 | goto fail_irq; |
261 | } | |
262 | ||
bc3220be | 263 | ret = mdss_irq_domain_init(mdp5_mdss); |
990a4007 | 264 | if (ret) { |
6a41da17 | 265 | DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret); |
990a4007 AT |
266 | goto fail_irq; |
267 | } | |
268 | ||
bc3220be RY |
269 | mdp5_mdss->base.funcs = &mdss_funcs; |
270 | priv->mdss = &mdp5_mdss->base; | |
990a4007 | 271 | |
cd792726 AT |
272 | pm_runtime_enable(dev->dev); |
273 | ||
990a4007 AT |
274 | return 0; |
275 | fail_irq: | |
bc3220be | 276 | regulator_disable(mdp5_mdss->vdd); |
990a4007 AT |
277 | fail: |
278 | return ret; | |
279 | } |