// SPDX-License-Identifier: GPL-2.0-only
/*
 * LocalPlus Bus FIFO driver for the Freescale MPC52xx.
 *
 * Copyright (C) 2009 Secret Lab Technologies Ltd.
 *
 * Todo:
 * - Add support for multiple requests to be queued.
 */

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/mpc52xx.h>
#include <asm/time.h>

#include <linux/fsl/bestcomm/bestcomm.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include <linux/fsl/bestcomm/gen_bd.h>

MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
MODULE_DESCRIPTION("MPC5200 LocalPlus FIFO device driver");
MODULE_LICENSE("GPL");

#define LPBFIFO_REG_PACKET_SIZE		(0x00)
#define LPBFIFO_REG_START_ADDRESS	(0x04)
#define LPBFIFO_REG_CONTROL		(0x08)
#define LPBFIFO_REG_ENABLE		(0x0C)
#define LPBFIFO_REG_BYTES_DONE_STATUS	(0x14)
#define LPBFIFO_REG_FIFO_DATA		(0x40)
#define LPBFIFO_REG_FIFO_STATUS		(0x44)
#define LPBFIFO_REG_FIFO_CONTROL	(0x48)
#define LPBFIFO_REG_FIFO_ALARM		(0x4C)

struct mpc52xx_lpbfifo {
	struct device *dev;
	phys_addr_t regs_phys;
	void __iomem *regs;
	int irq;
	spinlock_t lock;

	struct bcom_task *bcom_tx_task;
	struct bcom_task *bcom_rx_task;
	struct bcom_task *bcom_cur_task;

	/* Current state data */
	struct mpc52xx_lpbfifo_request *req;
	int dma_irqs_enabled;
};

/* The MPC5200 has only one FIFO, so we only need one instance structure */
static struct mpc52xx_lpbfifo lpbfifo;

/**
 * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred
 */
static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req)
{
	size_t transfer_size = req->size - req->pos;
	struct bcom_bd *bd;
	void __iomem *reg;
	u32 *data;
	int i;
	int bit_fields;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;
	/* Set and then clear the reset bits, as the User Manual recommends */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* set master enable bit */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000001);
	if (!dma) {
		/* While the FIFO can be set up for transfer sizes as large
		 * as 16M-1, the FIFO itself is only 512 bytes deep and it
		 * does not generate interrupts for FIFO full events (only
		 * transfer complete will raise an IRQ). Therefore when not
		 * using Bestcomm to drive the FIFO it needs to either be
		 * polled, or transfers need to be constrained to the size
		 * of the FIFO.
		 *
		 * This driver restricts the size of the transfer.
		 */
		if (transfer_size > 512)
			transfer_size = 512;

		/* Load the FIFO with data */
		if (write) {
			reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
			data = req->data + req->pos;
			for (i = 0; i < transfer_size; i += 4)
				out_be32(reg, *data++);
		}

		/* Unmask both error and completion irqs */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x00000301);
	} else {
		/* Choose the correct direction.
		 *
		 * Configure the watermarks so DMA will always complete
		 * correctly. It may be worth experimenting with the ALARM
		 * value to see if there is a performance impact. However,
		 * if it is wrong there is a risk of DMA not transferring
		 * the last chunk of data.
		 */
		if (write) {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1e4);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 7);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_tx_task;
		} else {
			out_be32(lpbfifo.regs + LPBFIFO_REG_FIFO_ALARM, 0x1ff);
			out_8(lpbfifo.regs + LPBFIFO_REG_FIFO_CONTROL, 0);
			lpbfifo.bcom_cur_task = lpbfifo.bcom_rx_task;

			if (poll_dma) {
				if (lpbfifo.dma_irqs_enabled) {
					disable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 0;
				}
			} else {
				if (!lpbfifo.dma_irqs_enabled) {
					enable_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task));
					lpbfifo.dma_irqs_enabled = 1;
				}
			}
		}

		bd = bcom_prepare_next_buffer(lpbfifo.bcom_cur_task);
		bd->status = transfer_size;
		if (!write) {
			/*
			 * In the DMA read case, the DMA doesn't complete,
			 * possibly due to incorrect watermarks in the ALARM
			 * and CONTROL regs. For now instead of trying to
			 * determine the right watermarks that will make this
			 * work, just increase the number of bytes the FIFO is
			 * expecting.
			 *
			 * When submitting another operation, the FIFO will get
			 * reset, so the condition of the FIFO waiting for a
			 * non-existent 4 bytes will get cleared.
			 */
			transfer_size += 4; /* BLECH! */
		}
		bd->data[0] = req->data_phys + req->pos;
		bcom_submit_next_buffer(lpbfifo.bcom_cur_task, NULL);

		/* error irq & master enabled bit */
		bit_fields = 0x00000201;

		/* Unmask irqs */
		if (write && (!poll_dma))
			bit_fields |= 0x00000100; /* completion irq too */
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, bit_fields);
	}

	/* Set transfer size, width, chip select and READ mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_START_ADDRESS,
		 req->offset + req->pos);
	out_be32(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, transfer_size);

	bit_fields = req->cs << 24 | 0x000008;
	if (!write)
		bit_fields |= 0x010000; /* read mode */
	out_be32(lpbfifo.regs + LPBFIFO_REG_CONTROL, bit_fields);

	/* Kick it off */
	if (!lpbfifo.req->defer_xfer_start)
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	if (dma)
		bcom_enable(lpbfifo.bcom_cur_task);
}

/**
 * mpc52xx_lpbfifo_irq - IRQ handler for LPB FIFO
 *
 * On transmit, the dma completion irq triggers before the fifo completion
 * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm
 * task completion irq because everything is not really done until the LPB FIFO
 * completion irq triggers.
 *
 * In other words:
 * For DMA, on receive, the "Fat Lady" is the Bestcomm completion irq. On
 * transmit, the FIFO completion irq is the "Fat Lady". The opera (or in this
 * case the DMA/FIFO operation) is not finished until the "Fat Lady" sings.
 *
 * Reasons for entering this routine:
 * 1) PIO mode rx and tx completion irq
 * 2) DMA interrupt mode tx completion irq
 * 3) DMA polled mode tx
 *
 * Exit conditions:
 * 1) Transfer aborted
 * 2) FIFO complete without DMA; more data to do
 * 3) FIFO complete without DMA; all data transferred
 * 4) FIFO complete using DMA
 *
 * Condition 1 can occur regardless of whether or not DMA is used.
 * It requires executing the callback to report the error and exiting
 * immediately.
 *
 * Condition 2 requires programming the FIFO with the next block of data
 *
 * Condition 3 requires executing the callback to report completion
 *
 * Condition 4 means the same as 3, except that we also retrieve the bcom
 * buffer so DMA doesn't get clogged up.
 *
 * To make things trickier, the spinlock must be dropped before
 * executing the callback, otherwise we could end up with a deadlock
 * or nested spinlock condition. The out path is non-trivial, so
 * extra fiddling is done to make sure all paths lead to the same
 * outbound code.
 */
static irqreturn_t mpc52xx_lpbfifo_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	u32 status = in_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	void __iomem *reg;
	u32 *data;
	int count, i;
	int do_callback = 0;
	u32 ts;
	unsigned long flags;
	int dma, write, poll_dma;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = get_tbl();

	req = lpbfifo.req;
	if (!req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ\n");
		return IRQ_HANDLED;
	}

	dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;
	poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA;

	if (dma && !write) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		pr_err("bogus LPBFIFO IRQ (dma and not writing)\n");
		return IRQ_HANDLED;
	}

	if ((status & 0x01) == 0) {
		goto out;
	}

	/* check abort bit */
	if (status & 0x10) {
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		do_callback = 1;
		goto out;
	}

	/* Read result from hardware */
	count = in_be32(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS);
	count &= 0x00ffffff;

	if (!dma && !write) {
		/* copy the data out of the FIFO */
		reg = lpbfifo.regs + LPBFIFO_REG_FIFO_DATA;
		data = req->data + req->pos;
		for (i = 0; i < count; i += 4)
			*data++ = in_be32(reg);
	}

	/* Update transfer position and count */
	req->pos += count;

	/* Decide what to do next */
	if (req->size - req->pos)
		mpc52xx_lpbfifo_kick(req); /* more work to do */
	else
		do_callback = 1;

 out:
	/* Clear the IRQ */
	out_8(lpbfifo.regs + LPBFIFO_REG_BYTES_DONE_STATUS, 0x01);

	if (dma && (status & 0x11)) {
		/*
		 * Count the DMA as complete only when the FIFO completion
		 * status or abort bits are set.
		 *
		 * (status & 0x01) should always be the case except sometimes
		 * when using polled DMA.
		 *
		 * (status & 0x10) {transfer aborted}: This case needs more
		 * testing.
		 */
		bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);
	}
	req->last_byte = ((u8 *)req->data)[req->size - 1];

	/* When the do_callback flag is set, the transfer is finished,
	 * so mark the FIFO as idle */
	if (do_callback)
		lpbfifo.req = NULL;

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	req->irq_ticks += get_tbl() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	/* Spinlock is released; it is now safe to call the callback */
	if (do_callback && req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_bcom_irq - IRQ handler for LPB FIFO Bestcomm task
 *
 * Only used when receiving data.
 */
static irqreturn_t mpc52xx_lpbfifo_bcom_irq(int irq, void *dev_id)
{
	struct mpc52xx_lpbfifo_request *req;
	unsigned long flags;
	u32 status;
	u32 ts;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	ts = get_tbl();

	req = lpbfifo.req;
	if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return IRQ_HANDLED;
	}

	if (irq != 0) /* don't increment on polled case */
		req->irq_count++;

	if (!bcom_buffer_done(lpbfifo.bcom_cur_task)) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);

		req->buffer_not_done_cnt++;
		if ((req->buffer_not_done_cnt % 1000) == 0)
			pr_err("transfer stalled\n");

		return IRQ_HANDLED;
	}

	bcom_retrieve_buffer(lpbfifo.bcom_cur_task, &status, NULL);

	req->last_byte = ((u8 *)req->data)[req->size - 1];

	req->pos = status & 0x00ffffff;

	/* Mark the FIFO as idle */
	lpbfifo.req = NULL;

	/* Release the lock before calling out to the callback. */
	req->irq_ticks += get_tbl() - ts;
	spin_unlock_irqrestore(&lpbfifo.lock, flags);

	if (req->callback)
		req->callback(req);

	return IRQ_HANDLED;
}

/**
 * mpc52xx_lpbfifo_poll - Poll for DMA completion
 */
void mpc52xx_lpbfifo_poll(void)
{
	struct mpc52xx_lpbfifo_request *req = lpbfifo.req;
	int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA);
	int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE;

	/*
	 * For more information, see comments on the "Fat Lady"
	 */
	if (dma && write)
		mpc52xx_lpbfifo_irq(0, NULL);
	else
		mpc52xx_lpbfifo_bcom_irq(0, NULL);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_poll);
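
/*
 * Illustrative sketch (not part of the driver): a client that submits a
 * request with MPC52XX_LPBFIFO_FLAG_POLL_DMA set is expected to drive the
 * transfer itself by calling mpc52xx_lpbfifo_poll() until its completion
 * callback has run, e.g.:
 *
 *	while (!example_poll_done)
 *		mpc52xx_lpbfifo_poll();
 *
 * where 'example_poll_done' is a flag set by the request's callback; both
 * the flag and the loop are assumptions made for this sketch. See the
 * submit example below for how a request and its callback are wired up.
 */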

/**
 * mpc52xx_lpbfifo_submit - Submit an LPB FIFO transfer request.
 * @req: Pointer to request structure
 */
int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/* If the req pointer is already set, then a transfer is in progress */
	if (lpbfifo.req) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/* Setup the transfer */
	lpbfifo.req = req;
	req->irq_count = 0;
	req->irq_ticks = 0;
	req->buffer_not_done_cnt = 0;
	req->pos = 0;

	mpc52xx_lpbfifo_kick(req);
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_submit);
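
/*
 * Illustrative sketch (not part of the driver) of how a client might use
 * this API. The field names below are taken from how this file uses
 * struct mpc52xx_lpbfifo_request; 'buf', 'buf_phys', 'len' and the callback
 * body are placeholders assumed for the sake of the example.
 *
 *	static void example_done_callback(struct mpc52xx_lpbfifo_request *req)
 *	{
 *		// hypothetical completion handler; req->irq_count and
 *		// req->irq_ticks carry the statistics accumulated above
 *	}
 *
 *	struct mpc52xx_lpbfifo_request req = {
 *		.cs = 2,			// LocalPlus chip select (example value)
 *		.offset = 0,			// offset from the chip select base
 *		.size = len,			// total number of bytes to transfer
 *		.data = buf,			// kernel virtual address of the buffer
 *		.data_phys = buf_phys,		// bus address, used by Bestcomm DMA
 *		.flags = MPC52XX_LPBFIFO_FLAG_WRITE,
 *		.callback = example_done_callback,
 *	};
 *
 *	if (mpc52xx_lpbfifo_submit(&req))
 *		;	// -EBUSY (transfer already in flight) or -ENODEV
 *
 * Setting MPC52XX_LPBFIFO_FLAG_NO_DMA selects PIO mode (512-byte chunks per
 * interrupt); MPC52XX_LPBFIFO_FLAG_POLL_DMA selects DMA without Bestcomm
 * interrupts, in which case the caller must call mpc52xx_lpbfifo_poll().
 */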

int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	if (!lpbfifo.regs)
		return -ENODEV;

	spin_lock_irqsave(&lpbfifo.lock, flags);

	/*
	 * If the req pointer is already set and a transfer was
	 * started on submit, then this transfer is in progress
	 */
	if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) {
		spin_unlock_irqrestore(&lpbfifo.lock, flags);
		return -EBUSY;
	}

	/*
	 * If the req was previously submitted but not
	 * started, start it now
	 */
	if (lpbfifo.req && lpbfifo.req == req &&
	    lpbfifo.req->defer_xfer_start) {
		out_8(lpbfifo.regs + LPBFIFO_REG_PACKET_SIZE, 0x01);
	}

	spin_unlock_irqrestore(&lpbfifo.lock, flags);
	return 0;
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_start_xfer);
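
/*
 * Illustrative sketch (not part of the driver): the defer_xfer_start field
 * lets a client program the FIFO with mpc52xx_lpbfifo_submit() first and
 * trigger the actual transfer later, once some external condition is met.
 * The request setup mirrors the submit example above and is an assumption
 * made for illustration only:
 *
 *	req.defer_xfer_start = 1;
 *	if (!mpc52xx_lpbfifo_submit(&req)) {
 *		// ... wait until the transfer should actually begin ...
 *		mpc52xx_lpbfifo_start_xfer(&req);	// writes the start bit
 *	}
 */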

void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req)
{
	unsigned long flags;

	spin_lock_irqsave(&lpbfifo.lock, flags);
	if (lpbfifo.req == req) {
		/* Put it into reset and clear the state */
		bcom_gen_bd_rx_reset(lpbfifo.bcom_rx_task);
		bcom_gen_bd_tx_reset(lpbfifo.bcom_tx_task);
		out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);
		lpbfifo.req = NULL;
	}
	spin_unlock_irqrestore(&lpbfifo.lock, flags);
}
EXPORT_SYMBOL(mpc52xx_lpbfifo_abort);
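
/*
 * Illustrative sketch (not part of the driver): a client that gives up on a
 * transfer, for example after a timeout of its own choosing, can abort it;
 * because mpc52xx_lpbfifo_abort() compares against lpbfifo.req, aborting a
 * request that has already completed is harmless:
 *
 *	if (!wait_for_completion_timeout(&example_done, HZ))
 *		mpc52xx_lpbfifo_abort(&req);
 *
 * where 'example_done' is a struct completion signalled from the request's
 * callback; the completion and the timeout are assumptions for this sketch.
 */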

static int mpc52xx_lpbfifo_probe(struct platform_device *op)
{
	struct resource res;
	int rc = -ENOMEM;

	if (lpbfifo.dev != NULL)
		return -ENOSPC;

	lpbfifo.irq = irq_of_parse_and_map(op->dev.of_node, 0);
	if (!lpbfifo.irq)
		return -ENODEV;

	if (of_address_to_resource(op->dev.of_node, 0, &res))
		return -ENODEV;
	lpbfifo.regs_phys = res.start;
	lpbfifo.regs = of_iomap(op->dev.of_node, 0);
	if (!lpbfifo.regs)
		return -ENOMEM;

	spin_lock_init(&lpbfifo.lock);

	/* Put FIFO into reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Register the interrupt handler */
	rc = request_irq(lpbfifo.irq, mpc52xx_lpbfifo_irq, 0,
			 "mpc52xx-lpbfifo", &lpbfifo);
	if (rc)
		goto err_irq;

	/* Request the Bestcomm receive (fifo --> memory) task and IRQ */
	lpbfifo.bcom_rx_task =
		bcom_gen_bd_rx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC,
				    16*1024*1024);
	if (!lpbfifo.bcom_rx_task)
		goto err_bcom_rx;

	rc = request_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task),
			 mpc52xx_lpbfifo_bcom_irq, 0,
			 "mpc52xx-lpbfifo-rx", &lpbfifo);
	if (rc)
		goto err_bcom_rx_irq;

	lpbfifo.dma_irqs_enabled = 1;

	/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
	lpbfifo.bcom_tx_task =
		bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
				    BCOM_INITIATOR_SCLPC, BCOM_IPR_SCLPC);
	if (!lpbfifo.bcom_tx_task)
		goto err_bcom_tx;

	lpbfifo.dev = &op->dev;
	return 0;

 err_bcom_tx:
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
 err_bcom_rx_irq:
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);
 err_bcom_rx:
 err_irq:
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;

	dev_err(&op->dev, "mpc52xx_lpbfifo_probe() failed\n");
	return -ENODEV;
}


static int mpc52xx_lpbfifo_remove(struct platform_device *op)
{
	if (lpbfifo.dev != &op->dev)
		return 0;

	/* Put FIFO in reset */
	out_be32(lpbfifo.regs + LPBFIFO_REG_ENABLE, 0x01010000);

	/* Release the bestcomm transmit task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_tx_task), &lpbfifo);
	bcom_gen_bd_tx_release(lpbfifo.bcom_tx_task);

	/* Release the bestcomm receive task */
	free_irq(bcom_get_task_irq(lpbfifo.bcom_rx_task), &lpbfifo);
	bcom_gen_bd_rx_release(lpbfifo.bcom_rx_task);

	free_irq(lpbfifo.irq, &lpbfifo);
	iounmap(lpbfifo.regs);
	lpbfifo.regs = NULL;
	lpbfifo.dev = NULL;

	return 0;
}

static const struct of_device_id mpc52xx_lpbfifo_match[] = {
	{ .compatible = "fsl,mpc5200-lpbfifo", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_lpbfifo_match);

static struct platform_driver mpc52xx_lpbfifo_driver = {
	.driver = {
		.name = "mpc52xx-lpbfifo",
		.of_match_table = mpc52xx_lpbfifo_match,
	},
	.probe = mpc52xx_lpbfifo_probe,
	.remove = mpc52xx_lpbfifo_remove,
};
module_platform_driver(mpc52xx_lpbfifo_driver);