/* Blackfin Direct Memory Access (DMA) Channel model.

   Copyright (C) 2010-2024 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* This must come before any other includes.  */
#include "defs.h"

#include <stdlib.h>

#include "sim-main.h"
#include "devices.h"
#include "hw-device.h"
#include "dv-bfin_dma.h"
#include "dv-bfin_dmac.h"

/* Note: This DMA implementation requires the producer to be the master when
         the peer is MDMA.  The source is always a slave.  This way we don't
         have the two DMA devices thrashing each other with one trying to
         write and the other trying to read.  */

struct bfin_dma
{
  /* This top portion matches common dv_bfin struct.  */
  bu32 base;
  struct hw *dma_master;
  bool acked;

  struct hw_event *handler;
  unsigned ele_size;
  struct hw *hw_peer;

  /* Order after here is important -- matches hardware MMR layout.  */
  union {
    struct { bu16 ndpl, ndph; };
    bu32 next_desc_ptr;
  };
  union {
    struct { bu16 sal, sah; };
    bu32 start_addr;
  };
  bu16 BFIN_MMR_16 (config);
  bu32 _pad0;
  bu16 BFIN_MMR_16 (x_count);
  bs16 BFIN_MMR_16 (x_modify);
  bu16 BFIN_MMR_16 (y_count);
  bs16 BFIN_MMR_16 (y_modify);
  bu32 curr_desc_ptr, curr_addr;
  bu16 BFIN_MMR_16 (irq_status);
  bu16 BFIN_MMR_16 (peripheral_map);
  bu16 BFIN_MMR_16 (curr_x_count);
  bu32 _pad1;
  bu16 BFIN_MMR_16 (curr_y_count);
  bu32 _pad2;
};
#define mmr_base()      offsetof(struct bfin_dma, next_desc_ptr)
#define mmr_offset(mmr) (offsetof(struct bfin_dma, mmr) - mmr_base())

static const char * const mmr_names[] =
{
  "NEXT_DESC_PTR", "START_ADDR", "CONFIG", "<INV>", "X_COUNT", "X_MODIFY",
  "Y_COUNT", "Y_MODIFY", "CURR_DESC_PTR", "CURR_ADDR", "IRQ_STATUS",
  "PERIPHERAL_MAP", "CURR_X_COUNT", "<INV>", "CURR_Y_COUNT", "<INV>",
};
#define mmr_name(off) mmr_names[(off) / 4]

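/* Return true when software has set the DMAEN bit in CONFIG.  */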
static bool
bfin_dma_enabled (struct bfin_dma *dma)
{
  return (dma->config & DMAEN);
}

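/* Return true while a transfer is in flight (DMA_RUN set in IRQ_STATUS).  */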
static bool
bfin_dma_running (struct bfin_dma *dma)
{
  return (dma->irq_status & DMA_RUN);
}

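/* Look up, and cache, the peer device selected by PERIPHERAL_MAP.  */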
static struct hw *
bfin_dma_get_peer (struct hw *me, struct bfin_dma *dma)
{
  if (dma->hw_peer)
    return dma->hw_peer;
  return dma->hw_peer = bfin_dmac_get_peer (me, dma->peripheral_map);
}

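/* Latch a new work unit: pick the element size from CONFIG, fetch the next
   descriptor as dictated by the DMAFLOW mode, and reload the CURR_* copies
   of the descriptor registers.  */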
static void
bfin_dma_process_desc (struct hw *me, struct bfin_dma *dma)
{
  bu8 ndsize = (dma->config & NDSIZE) >> NDSIZE_SHIFT;
  bu16 _flows[9], *flows = _flows;

  HW_TRACE ((me, "dma starting up %#x", dma->config));

  switch (dma->config & WDSIZE)
    {
    case WDSIZE_32:
      dma->ele_size = 4;
      break;
    case WDSIZE_16:
      dma->ele_size = 2;
      break;
    default:
      dma->ele_size = 1;
      break;
    }

  /* The address has to be a multiple of the transfer size.  */
  if (dma->start_addr & (dma->ele_size - 1))
    dma->irq_status |= DMA_ERR;

  if (dma->ele_size != (unsigned) abs (dma->x_modify))
    hw_abort (me, "DMA config (striding) %#x not supported (x_modify: %d)",
	      dma->config, dma->x_modify);

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_AUTO:
    case DMAFLOW_STOP:
      if (ndsize)
	hw_abort (me, "DMA config error: DMAFLOW_{AUTO,STOP} requires NDSIZE_0");
      break;
    case DMAFLOW_ARRAY:
      if (ndsize == 0 || ndsize > 7)
	hw_abort (me, "DMA config error: DMAFLOW_ARRAY requires NDSIZE 1...7");
      sim_read (hw_system (me), dma->curr_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_SMALL:
      if (ndsize == 0 || ndsize > 8)
	hw_abort (me, "DMA config error: DMAFLOW_SMALL requires NDSIZE 1...8");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    case DMAFLOW_LARGE:
      if (ndsize == 0 || ndsize > 9)
	hw_abort (me, "DMA config error: DMAFLOW_LARGE requires NDSIZE 1...9");
      sim_read (hw_system (me), dma->next_desc_ptr, flows, ndsize * 2);
      break;
    default:
      hw_abort (me, "DMA config error: invalid DMAFLOW %#x", dma->config);
    }

  if (ndsize)
    {
      bu8 idx;
      bu16 *stores[] = {
	&dma->sal,
	&dma->sah,
	&dma->config,
	&dma->x_count,
	(void *) &dma->x_modify,
	&dma->y_count,
	(void *) &dma->y_modify,
      };

      switch (dma->config & DMAFLOW)
	{
	case DMAFLOW_LARGE:
	  dma->ndph = _flows[1];
	  --ndsize;
	  ++flows;
	  ATTRIBUTE_FALLTHROUGH;
	case DMAFLOW_SMALL:
	  dma->ndpl = _flows[0];
	  --ndsize;
	  ++flows;
	  break;
	}

      for (idx = 0; idx < ndsize; ++idx)
	*stores[idx] = flows[idx];
    }

  dma->curr_desc_ptr = dma->next_desc_ptr;
  dma->curr_addr = dma->start_addr;
  dma->curr_x_count = dma->x_count ? : 0xffff;
  dma->curr_y_count = dma->y_count ? : 0xffff;
}

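/* The current X run has finished: raise the data interrupt if DI_EN is set,
   step the Y dimension for 2D transfers, and otherwise either stop the
   channel or chain to the next descriptor.  Returns non-zero when there is
   more work to do.  */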
static int
bfin_dma_finish_x (struct hw *me, struct bfin_dma *dma)
{
  /* XXX: This would be the time to process the next descriptor.  */
  /* XXX: Should this toggle Enable in dma->config ?  */

  if (dma->config & DI_EN)
    hw_port_event (me, 0, 1);

  if ((dma->config & DMA2D) && dma->curr_y_count > 1)
    {
      dma->curr_y_count -= 1;
      dma->curr_x_count = dma->x_count;

      /* With 2D, last X transfer does not modify curr_addr.  */
      dma->curr_addr = dma->curr_addr - dma->x_modify + dma->y_modify;

      return 1;
    }

  switch (dma->config & DMAFLOW)
    {
    case DMAFLOW_STOP:
      HW_TRACE ((me, "dma is complete"));
      dma->irq_status = (dma->irq_status & ~DMA_RUN) | DMA_DONE;
      return 0;
    default:
      bfin_dma_process_desc (me, dma);
      return 1;
    }
}

static void bfin_dma_hw_event_callback (struct hw *, void *);

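/* (Re)schedule the transfer callback DELAY ticks from now; a DELAY of 0
   simply cancels any pending event.  */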
static void
bfin_dma_reschedule (struct hw *me, unsigned delay)
{
  struct bfin_dma *dma = hw_data (me);
  if (dma->handler)
    {
      hw_event_queue_deschedule (me, dma->handler);
      dma->handler = NULL;
    }
  if (!delay)
    return;
  HW_TRACE ((me, "scheduling next process in %u", delay));
  dma->handler = hw_event_queue_schedule (me, delay,
					  bfin_dma_hw_event_callback, dma);
}

/* Chew through the DMA over and over.  */
static void
bfin_dma_hw_event_callback (struct hw *me, void *data)
{
  struct bfin_dma *dma = data;
  struct hw *peer;
  struct dv_bfin *bfin_peer;
  bu8 buf[4096];
  unsigned ret, nr_bytes, ele_count;

  dma->handler = NULL;
  peer = bfin_dma_get_peer (me, dma);
  bfin_peer = hw_data (peer);
  ret = 0;
  if (dma->x_modify < 0)
    /* XXX: This sucks performance wise.  */
    nr_bytes = dma->ele_size;
  else
    nr_bytes = min (sizeof (buf), dma->curr_x_count * dma->ele_size);

  /* Pumping a chunk!  */
  bfin_peer->dma_master = me;
  bfin_peer->acked = false;
  if (dma->config & WNR)
    {
      HW_TRACE ((me, "dma transfer to 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = hw_dma_read_buffer (peer, buf, 0, dma->curr_addr, nr_bytes);
      /* Has the DMA stalled ?  Abort for now.  */
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = sim_write (hw_system (me), dma->curr_addr, buf, ret);
    }
  else
    {
      HW_TRACE ((me, "dma transfer from 0x%08lx length %u",
		 (unsigned long) dma->curr_addr, nr_bytes));

      ret = sim_read (hw_system (me), dma->curr_addr, buf, nr_bytes);
      if (ret == 0)
	goto reschedule;
      /* XXX: How to handle partial DMA transfers ?  */
      if (ret % dma->ele_size)
	goto error;
      ret = hw_dma_write_buffer (peer, buf, 0, dma->curr_addr, ret, 0);
      if (ret == 0)
	goto reschedule;
    }

  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if ((!dma->acked && dma->curr_x_count) || bfin_dma_finish_x (me, dma))
    /* Still got work to do, so schedule again.  */
 reschedule:
    bfin_dma_reschedule (me, ret ? 1 : 5000);

  return;

 error:
  /* Don't reschedule on errors ...  */
  dma->irq_status |= DMA_ERR;
}

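/* Handle writes to this channel's MMRs.  */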
static unsigned
bfin_dma_io_write_buffer (struct hw *me, const void *source, int space,
			  address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu32 value;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, true))
    return 0;

  if (nr_bytes == 4)
    value = dv_load_4 (source);
  else
    value = dv_load_2 (source);

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_WRITE ();

  /* XXX: All registers are RO when DMA is enabled (except IRQ_STATUS).
          But does the HW discard writes or send up IVGHW ?  The sim
          simply discards atm ...  */
  switch (mmr_off)
    {
    case mmr_offset(next_desc_ptr):
    case mmr_offset(start_addr):
    case mmr_offset(curr_desc_ptr):
    case mmr_offset(curr_addr):
      /* Don't require 32bit access as all DMA MMRs can be used as 16bit.  */
      if (!bfin_dma_running (dma))
	{
	  if (nr_bytes == 4)
	    *value32p = value;
	  else
	    *value16p = value;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(x_count):
    case mmr_offset(x_modify):
    case mmr_offset(y_count):
    case mmr_offset(y_modify):
      if (!bfin_dma_running (dma))
	*value16p = value;
      break;
    case mmr_offset(peripheral_map):
      if (!bfin_dma_running (dma))
	{
	  *value16p = (*value16p & CTYPE) | (value & ~CTYPE);
	  /* Clear peripheral peer so it gets looked up again.  */
	  dma->hw_peer = NULL;
	}
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    case mmr_offset(config):
      /* XXX: How to handle updating CONFIG of a running channel ?  */
      if (nr_bytes == 4)
	*value32p = value;
      else
	*value16p = value;

      if (bfin_dma_enabled (dma))
	{
	  dma->irq_status |= DMA_RUN;
	  bfin_dma_process_desc (me, dma);
	  /* The writer is the master.  */
	  if (!(dma->peripheral_map & CTYPE) || (dma->config & WNR))
	    bfin_dma_reschedule (me, 1);
	}
      else
	{
	  dma->irq_status &= ~DMA_RUN;
	  bfin_dma_reschedule (me, 0);
	}
      break;
    case mmr_offset(irq_status):
      dv_w1c_2 (value16p, value, DMA_DONE | DMA_ERR);
      break;
    case mmr_offset(curr_x_count):
    case mmr_offset(curr_y_count):
      if (!bfin_dma_running (dma))
	*value16p = value;
      else
	HW_TRACE ((me, "discarding write while dma running"));
      break;
    default:
      /* XXX: The HW lets the pad regions be read/written ...  */
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);
      return 0;
    }

  return nr_bytes;
}

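/* Handle reads of this channel's MMRs.  */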
static unsigned
bfin_dma_io_read_buffer (struct hw *me, void *dest, int space,
			 address_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  bu32 mmr_off;
  bu16 *value16p;
  bu32 *value32p;
  void *valuep;

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_16_32 (me, addr, nr_bytes, false))
    return 0;

  mmr_off = addr % dma->base;
  valuep = (void *)((uintptr_t)dma + mmr_base() + mmr_off);
  value16p = valuep;
  value32p = valuep;

  HW_TRACE_READ ();

  /* Hardware lets you read all MMRs as 16 or 32 bits, even reserved.  */
  if (nr_bytes == 4)
    dv_store_4 (dest, *value32p);
  else
    dv_store_2 (dest, *value16p);

  return nr_bytes;
}

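/* DMA read issued by our peer: copy up to NR_BYTES from this channel's
   current address in system memory into DEST and advance the transfer
   state.  */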
static unsigned
bfin_dma_dma_read_buffer (struct hw *me, void *dest, int space,
			  unsigned_word addr, unsigned nr_bytes)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_READ ();

  /* If someone is trying to read from me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_read (hw_system (me), dma->curr_addr, dest, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

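/* DMA write issued by our peer: copy up to NR_BYTES from SOURCE into system
   memory at this channel's current address and advance the transfer
   state.  */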
static unsigned
bfin_dma_dma_write_buffer (struct hw *me, const void *source,
			   int space, unsigned_word addr,
			   unsigned nr_bytes,
			   int violate_read_only_section)
{
  struct bfin_dma *dma = hw_data (me);
  unsigned ret, ele_count;

  HW_TRACE_DMA_WRITE ();

  /* If someone is trying to write to me, I have to be enabled.  */
  if (!bfin_dma_enabled (dma) && !bfin_dma_running (dma))
    return 0;

  /* XXX: handle x_modify ...  */
  ret = sim_write (hw_system (me), dma->curr_addr, source, nr_bytes);
  /* Ignore partial writes.  */
  ele_count = ret / dma->ele_size;
  /* Has the DMA stalled ?  Abort for now.  */
  if (!ele_count)
    return 0;

  dma->curr_addr += ele_count * dma->x_modify;
  dma->curr_x_count -= ele_count;

  if (dma->curr_x_count == 0)
    bfin_dma_finish_x (me, dma);

  return ret;
}

static const struct hw_port_descriptor bfin_dma_ports[] =
{
  { "di", 0, 0, output_port, },	/* DMA Interrupt */
  { NULL, 0, 0, 0, },
};

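/* Map this channel's MMR block into the parent address space using the
   "reg" property.  */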
static void
attach_bfin_dma_regs (struct hw *me, struct bfin_dma *dma)
{
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;

  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");

  hw_unit_address_to_attach_address (hw_parent (me),
				     &reg.address,
				     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_MMR_DMA_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_MMR_DMA_SIZE);

  hw_attach_address (hw_parent (me),
		     0, attach_space, attach_address, attach_size, me);

  dma->base = attach_address;
}

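/* Instantiate the device: allocate the per-channel state, register the I/O
   and DMA handlers and the interrupt port, attach the MMRs, and set the
   reset value of PERIPHERAL_MAP.  */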
static void
bfin_dma_finish (struct hw *me)
{
  struct bfin_dma *dma;

  dma = HW_ZALLOC (me, struct bfin_dma);

  set_hw_data (me, dma);
  set_hw_io_read_buffer (me, bfin_dma_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_dma_io_write_buffer);
  set_hw_dma_read_buffer (me, bfin_dma_dma_read_buffer);
  set_hw_dma_write_buffer (me, bfin_dma_dma_write_buffer);
  set_hw_ports (me, bfin_dma_ports);

  attach_bfin_dma_regs (me, dma);

  /* Initialize the DMA Channel.  */
  dma->peripheral_map = bfin_dmac_default_pmap (me);
}

const struct hw_descriptor dv_bfin_dma_descriptor[] =
{
  {"bfin_dma", bfin_dma_finish,},
  {NULL, NULL},
};