/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2013 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "config.h"

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

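/* MMR accesses below are serviced by pointing straight into the struct
   above: the register's offset within the channel's MMR window is added to
   mmr_base(), so the struct layout doubles as the register backing store.
   mmr_name() maps an offset back to a printable name for tracing.  */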
static const char * const mmr_names[] =
{
73 "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
74 "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
75 "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
76};
77#define mmr_name(off) mmr_names[(off) / 4]
78
79static bool
80bfin_dma_enabled (struct bfin_dma *dma)
81{
82 return (dma->config & DMAEN);
83}
84
85static bool
86bfin_dma_running (struct bfin_dma *dma)
87{
88 return (dma->irq_status & DMA_RUN);
89}
90
91static struct hw *
92bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
93{
94 if (dma->hw_peer)
95 return dma->hw_peer;
96 return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
97}
98
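/* Start a new work unit: pull in the next descriptor as dictated by the
   DMAFLOW mode in CONFIG, then latch the start address and transfer counts
   into the CURR_* registers.  */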
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* Address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
              dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
        hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
        hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
        hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
        hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, (void *)flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
        &dma->sal,
        &dma->sah,
        &dma->config,
        &dma->x_count,
        (void *) &dma->x_modify,
        &dma->y_count,
        (void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
        {
        case DMAFLOW_LARGE:
          dma->ndph = _flows[1];
          --ndsize;
          ++flows;
          /* Fall through -- large descriptors also carry the low pointer.  */
        case DMAFLOW_SMALL:
          dma->ndpl = _flows[0];
          --ndsize;
          ++flows;
          break;
        }

      for (idx = 0; idx < ndsize; ++idx)
        *stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  /* A count of zero is treated as the maximum transfer count.  */
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}

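/* Called when CURR_X_COUNT reaches zero: advance to the next row of a 2D
   transfer, raise the DI interrupt if DI_EN is set, and either stop
   (DMAFLOW_STOP) or fetch the next descriptor.  Returns non-zero when more
   work remains.  */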
static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);

static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
                                          bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = MIN (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled?  Abort for now.  */
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers?  */
      if (ret % dma->ele_size)
        goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
                 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
        goto reschedule;
      /* XXX: How to handle partial DMA transfers?  */
      if (ret % dma->ele_size)
        goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
        goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}

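/* Handle 16-bit and 32-bit writes to the channel MMRs.  While the channel
   is running, most registers are read-only and the write is discarded;
   IRQ_STATUS is write-1-to-clear, and writing CONFIG with DMAEN set kicks
   off a new transfer.  */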
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
                          address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
        {
          if (nr_bytes == 4)
            *value32p = value;
          else
            *value16p = value;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
        *value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
        {
          *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
          /* Clear peripheral peer so it gets looked up again.  */
          dma->hw_peer = NULL;
        }
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel?  */
      if (nr_bytes == 4)
        *value32p = value;
      else
        *value16p = value;

      if (bfin_dma_enabled (dma))
        {
          dma->irq_status |= DMA_RUN;
          bfin_dma_process_desc (me, dma);
          /* The writer is the master.  */
          if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
            bfin_dma_reschedule (me, 1);
        }
      else
        {
          dma->irq_status &= ~DMA_RUN;
          bfin_dma_reschedule (me, 0);
        }
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
        *value16p = value;
      else
        HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      break;
    }

  return nr_bytes;
}

static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
                         address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  mmr_off = addr % dma->base;
  valuep = (void *)((unsigned long)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

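/* Slave side of the engine: these two handlers are invoked by a peer
   master (e.g. the other half of an MDMA pair) to pull data out of, or
   push data into, this channel's memory stream.  */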
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
                          unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
                           int space, unsigned_word addr,
                           unsigned nr_bytes,
                           int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] =
{
500 { "di", 0, 0, output_port, }, /* DMA Interrupt */
501 { NULL, 0, 0, 0, },
502};
503
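/* Attach the channel's MMR window (taken from the "reg" property) to the
   parent bus and remember its base address for later offset math.  */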
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}

static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

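/* Register the model with the hw framework under the name "bfin_dma" so
   that board descriptions can instantiate DMA channels.  */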
const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
554 {"bfin_dma", bfin_dma_finish,},
555 {NULL, NULL},
556};