]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/video/msm/mdp.c
switch simple cases of fget_light to fdget
[thirdparty/linux.git] / drivers / video / msm / mdp.c
CommitLineData
d480ace0
PM
1/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
d480ace0 25#include <linux/major.h>
5a0e3ad6 26#include <linux/slab.h>
d480ace0
PM
27
28#include <mach/msm_iomap.h>
29#include <mach/msm_fb.h>
30#include <linux/platform_device.h>
a8a35931 31#include <linux/export.h>
d480ace0
PM
32
33#include "mdp_hw.h"
34
/* sysfs class under which mdp devices are registered (created in mdp_init()) */
struct class *mdp_class;

/* base offset of the hardware's debug/command register window */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* default colour-conversion coefficients: 9 matrix entries followed by
 * 3 offset values; loaded into the hardware by mdp_probe() */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* waiters for DMA2 completion and PPP (ROI) completion interrupts */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
/* single pending completion callback for the in-flight DMA2 transfer;
 * written under mdp_lock, consumed and cleared by mdp_isr() */
static struct msmfb_callback *dma_callback;
static struct clk *clk;
/* currently-enabled MDP interrupt sources; guarded by mdp_lock */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
/* serialises blit operations (taken by mdp_blit()) */
DEFINE_MUTEX(mdp_mutex);
51
52static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
53{
54 unsigned long irq_flags;
55 int ret = 0;
56
57 BUG_ON(!mask);
58
59 spin_lock_irqsave(&mdp_lock, irq_flags);
60 /* if the mask bits are already set return an error, this interrupt
61 * is already enabled */
62 if (mdp_irq_mask & mask) {
63 printk(KERN_ERR "mdp irq already on already on %x %x\n",
64 mdp_irq_mask, mask);
65 ret = -1;
66 }
67 /* if the mdp irq is not already enabled enable it */
68 if (!mdp_irq_mask) {
69 if (clk)
70 clk_enable(clk);
71 enable_irq(mdp->irq);
72 }
73
74 /* update the irq mask to reflect the fact that the interrupt is
75 * enabled */
76 mdp_irq_mask |= mask;
77 spin_unlock_irqrestore(&mdp_lock, irq_flags);
78 return ret;
79}
80
81static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
82{
83 /* this interrupt is already disabled! */
84 if (!(mdp_irq_mask & mask)) {
85 printk(KERN_ERR "mdp irq already off %x %x\n",
86 mdp_irq_mask, mask);
87 return -1;
88 }
89 /* update the irq mask to reflect the fact that the interrupt is
90 * disabled */
91 mdp_irq_mask &= ~(mask);
92 /* if no one is waiting on the interrupt, disable it */
93 if (!mdp_irq_mask) {
5ad43ff9 94 disable_irq_nosync(mdp->irq);
d480ace0
PM
95 if (clk)
96 clk_disable(clk);
97 }
98 return 0;
99}
100
101static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
102{
103 unsigned long irq_flags;
104 int ret;
105
106 spin_lock_irqsave(&mdp_lock, irq_flags);
107 ret = locked_disable_mdp_irq(mdp, mask);
108 spin_unlock_irqrestore(&mdp_lock, irq_flags);
109 return ret;
110}
111
112static irqreturn_t mdp_isr(int irq, void *data)
113{
114 uint32_t status;
115 unsigned long irq_flags;
116 struct mdp_info *mdp = data;
117
118 spin_lock_irqsave(&mdp_lock, irq_flags);
119
120 status = mdp_readl(mdp, MDP_INTR_STATUS);
121 mdp_writel(mdp, status, MDP_INTR_CLEAR);
122
123 status &= mdp_irq_mask;
124 if (status & DL0_DMA2_TERM_DONE) {
125 if (dma_callback) {
126 dma_callback->func(dma_callback);
127 dma_callback = NULL;
128 }
129 wake_up(&mdp_dma2_waitqueue);
130 }
131
132 if (status & DL0_ROI_DONE)
133 wake_up(&mdp_ppp_waitqueue);
134
135 if (status)
136 locked_disable_mdp_irq(mdp, status);
137
138 spin_unlock_irqrestore(&mdp_lock, irq_flags);
139 return IRQ_HANDLED;
140}
141
142static uint32_t mdp_check_mask(uint32_t mask)
143{
144 uint32_t ret;
145 unsigned long irq_flags;
146
147 spin_lock_irqsave(&mdp_lock, irq_flags);
148 ret = mdp_irq_mask & mask;
149 spin_unlock_irqrestore(&mdp_lock, irq_flags);
150 return ret;
151}
152
153static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
154{
155 int ret = 0;
156 unsigned long irq_flags;
157
158 wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
159
160 spin_lock_irqsave(&mdp_lock, irq_flags);
161 if (mdp_irq_mask & mask) {
162 locked_disable_mdp_irq(mdp, mask);
163 printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
164 mask);
165 ret = -ETIMEDOUT;
166 }
167 spin_unlock_irqrestore(&mdp_lock, irq_flags);
168
169 return ret;
170}
171
172void mdp_dma_wait(struct mdp_device *mdp_dev)
173{
174#define MDP_MAX_TIMEOUTS 20
175 static int timeout_count;
176 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
177
178 if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
179 timeout_count++;
180 else
181 timeout_count = 0;
182
183 if (timeout_count > MDP_MAX_TIMEOUTS) {
184 printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
185 MDP_MAX_TIMEOUTS);
186 BUG();
187 }
188}
189
190static int mdp_ppp_wait(struct mdp_info *mdp)
191{
192 return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
193}
194
195void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
196 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
197 struct msmfb_callback *callback)
198{
199 uint32_t dma2_cfg;
200 uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */
201
202 if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
203 printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
204 return;
205 }
206
207 dma_callback = callback;
208
209 dma2_cfg = DMA_PACK_TIGHT |
210 DMA_PACK_ALIGN_LSB |
211 DMA_PACK_PATTERN_RGB |
212 DMA_OUT_SEL_AHB |
213 DMA_IBUF_NONCONTIGUOUS;
214
215 dma2_cfg |= DMA_IBUF_FORMAT_RGB565;
216
217 dma2_cfg |= DMA_OUT_SEL_MDDI;
218
219 dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;
220
221 dma2_cfg |= DMA_DITHER_EN;
222
223 /* setup size, address, and stride */
224 mdp_writel(mdp, (height << 16) | (width),
225 MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
226 mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
227 mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);
228
229 /* 666 18BPP */
230 dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;
231
232 /* set y & x offset and MDDI transaction parameters */
233 mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
234 mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
235 mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
236 MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);
237
238 mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);
239
240 /* start DMA2 */
241 mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
242}
243
244void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
245 uint32_t width, uint32_t height, uint32_t x, uint32_t y,
246 struct msmfb_callback *callback, int interface)
247{
248 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
249
250 if (interface == MSM_MDDI_PMDH_INTERFACE) {
251 mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
252 callback);
253 }
254}
255
/*
 * get_img() - resolve an mdp_img's memory_id file descriptor to the
 * physical region backing it.
 *
 * Only framebuffer-device fds (major FB_MAJOR) are accepted; for those,
 * *start/*len are filled from @info's fixed screeninfo.  Returns 0 on
 * success, -1 on a bad fd or a non-framebuffer fd.
 *
 * NOTE(review): *filep is never written; callers appear to rely only on
 * the return code — confirm before depending on the out-pointer.
 * NOTE(review): f_dentry/d_inode chain is the pre-file_inode() idiom;
 * fine for this kernel vintage.
 */
int get_img(struct mdp_img *img, struct fb_info *info,
	    unsigned long *start, unsigned long *len,
	    struct file **filep)
{
	int ret = 0;
	struct fd f = fdget(img->memory_id);
	if (f.file == NULL)
		return -1;

	/* only a framebuffer device fd is a valid blit source/target */
	if (MAJOR(f.file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
		*start = info->fix.smem_start;
		*len = info->fix.smem_len;
	} else
		ret = -1;
	fdput(f);

	return ret;
}
274
/*
 * put_img() - release image references taken by get_img().
 * Intentionally empty: get_img() drops its fd reference itself via
 * fdput(), so there is nothing left to release here.
 */
void put_img(struct file *src_file, struct file *dst_file)
{
}
278
/*
 * mdp_blit() - perform one PPP blit described by @req between regions of
 * framebuffer memory.  Returns 0 on success or a negative errno.
 *
 * Serialised against concurrent blits by mdp_mutex.  Rotated narrow/tall
 * destinations with alpha are split into 16-pixel-high tiles to work
 * around a hardware bug in background tile fetch.
 */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
				"memory\n");
		return -EINVAL;
	}

	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
				"memory\n");
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* NOTE(review): transp_mask is forced to NOP just above, so the
	 * first leg of this condition is always false; only the alpha /
	 * alpha-format legs can actually trigger the tiled path. */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		/* scale the source width so each 16-high tile maps to a
		 * proportional slice of the source */
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			/* each tile is a separate blit: enable, blit, wait */
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* final partial tile */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	/* common path: single blit of the (possibly trimmed) request */
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* blit never started, so the irq we enabled must be taken down;
	 * after a failed wait, mdp_wait already disabled it */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
362
363void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
364{
365 struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
366
367 disp_id &= 0xf;
368 mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
369}
370
371int register_mdp_client(struct class_interface *cint)
372{
373 if (!mdp_class) {
374 pr_err("mdp: no mdp_class when registering mdp client\n");
375 return -ENODEV;
376 }
377 cint->class = mdp_class;
378 return class_interface_register(cint);
379}
380
381#include "mdp_csc_table.h"
382#include "mdp_scale_tables.h"
383
/*
 * mdp_probe() - map registers, acquire the clock and irq, bring the MDP
 * core into a known state, and register the mdp class device.
 *
 * Returns 0 on success or a negative errno; resources acquired before
 * a failure are released via the goto-based error paths at the bottom.
 *
 * NOTE(review): the global 'clk' is never clk_put() on the error paths
 * after a successful clk_get() — minor leak on probe failure.
 */
int mdp_probe(struct platform_device *pdev)
{
	struct resource *resource;
	int ret;
	int n;
	struct mdp_info *mdp;

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		pr_err("mdp: can not get mdp mem resource!\n");
		return -ENOMEM;
	}

	mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
	if (!mdp)
		return -ENOMEM;

	mdp->irq = platform_get_irq(pdev, 0);
	if (mdp->irq < 0) {
		pr_err("mdp: can not get mdp irq\n");
		ret = mdp->irq;
		goto error_get_irq;
	}

	mdp->base = ioremap(resource->start, resource_size(resource));
	if (mdp->base == 0) {
		printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
		ret = -ENOMEM;
		goto error_ioremap;
	}

	/* hook the operations the framebuffer core calls into */
	mdp->mdp_dev.dma = mdp_dma;
	mdp->mdp_dev.dma_wait = mdp_dma_wait;
	mdp->mdp_dev.blit = mdp_blit;
	mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;

	clk = clk_get(&pdev->dev, "mdp_clk");
	if (IS_ERR(clk)) {
		printk(KERN_INFO "mdp: failed to get mdp clk");
		ret = PTR_ERR(clk);
		goto error_get_clk;
	}

	ret = request_irq(mdp->irq, mdp_isr, 0, "msm_mdp", mdp);
	if (ret)
		goto error_request_irq;
	/* irq stays masked until a transfer calls enable_mdp_irq() */
	disable_irq(mdp->irq);
	mdp_irq_mask = 0;

	/* debug interface write access */
	mdp_writel(mdp, 1, 0x60);

	mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
	mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);

	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);

	/* load the colour-space-conversion table (from mdp_csc_table.h) */
	for (n = 0; n < ARRAY_SIZE(csc_table); n++)
		mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);

	/* clear up unused fg/main registers */
	/* comp.plane 2&3 ystride */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);

	/* unpacked pattern */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);

	/* comp.plane 2 & 3 */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);

	/* clear unused bg registers */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);

	/* load the upscale filter coefficients (from mdp_scale_tables.h) */
	for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
		mdp_writel(mdp, mdp_upscale_table[n].val,
			   mdp_upscale_table[n].reg);

	/* default colour-conversion: 9 matrix entries then 3 offsets */
	for (n = 0; n < 9; n++)
		mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
	/* NOTE(review): all three offset writes below target the same
	 * register (0x40500 + 4 * 0); 4 * 1 and 4 * 2 look intended for
	 * the last two — confirm against the MDP register map before
	 * changing, as this matches the shipped driver. */
	mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
	mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
	mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);

	/* register mdp device */
	mdp->mdp_dev.dev.parent = &pdev->dev;
	mdp->mdp_dev.dev.class = mdp_class;
	dev_set_name(&mdp->mdp_dev.dev, "mdp%d", pdev->id);

	/* if you can remove the platform device you'd have to implement
	 * this:
	mdp_dev.release = mdp_class; */

	ret = device_register(&mdp->mdp_dev.dev);
	if (ret)
		goto error_device_register;
	return 0;

error_device_register:
	free_irq(mdp->irq, mdp);
error_request_irq:
error_get_clk:
	iounmap(mdp->base);
error_get_irq:
error_ioremap:
	kfree(mdp);
	return ret;
}
505
/* probe-only platform driver: the device is never unbound (no .remove) */
static struct platform_driver msm_mdp_driver = {
	.probe = mdp_probe,
	.driver = {.name = "msm_mdp"},
};
510
/*
 * mdp_init() - subsystem init: create the mdp sysfs class (needed by
 * mdp_probe() and register_mdp_client()) then register the driver.
 */
static int __init mdp_init(void)
{
	mdp_class = class_create(THIS_MODULE, "msm_mdp");
	if (IS_ERR(mdp_class)) {
		printk(KERN_ERR "Error creating mdp class\n");
		return PTR_ERR(mdp_class);
	}
	return platform_driver_register(&msm_mdp_driver);
}
520
521subsys_initcall(mdp_init);