/*
 * Intel XScale PXA255/270 DMA controller.
 *
 * Copyright (c) 2006 Openedhand Ltd.
 * Copyright (c) 2006 Thorsten Zitterell
 * Written by Andrzej Zaborowski <balrog@zabor.org>
 *
 * This code is licensed under the GPL.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "hw/arm/pxa.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
#include "qemu/module.h"

#define PXA255_DMA_NUM_CHANNELS 16
#define PXA27X_DMA_NUM_CHANNELS 32

#define PXA2XX_DMA_NUM_REQUESTS 75

typedef struct {
    uint32_t descr;
    uint32_t src;
    uint32_t dest;
    uint32_t cmd;
    uint32_t state;
    int request;
} PXA2xxDMAChannel;

#define TYPE_PXA2XX_DMA "pxa2xx-dma"
#define PXA2XX_DMA(obj) OBJECT_CHECK(PXA2xxDMAState, (obj), TYPE_PXA2XX_DMA)

typedef struct PXA2xxDMAState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    qemu_irq irq;

    uint32_t stopintr;
    uint32_t eorintr;
    uint32_t rasintr;
    uint32_t startintr;
    uint32_t endintr;

    uint32_t align;
    uint32_t pio;

    int channels;
    PXA2xxDMAChannel *chan;

    uint8_t req[PXA2XX_DMA_NUM_REQUESTS];

    /* Flag to avoid recursive DMA invocations. */
    int running;
} PXA2xxDMAState;

#define DCSR0 0x0000 /* DMA Control / Status register for Channel 0 */
#define DCSR31 0x007c /* DMA Control / Status register for Channel 31 */
#define DALGN 0x00a0 /* DMA Alignment register */
#define DPCSR 0x00a4 /* DMA Programmed I/O Control Status register */
#define DRQSR0 0x00e0 /* DMA DREQ<0> Status register */
#define DRQSR1 0x00e4 /* DMA DREQ<1> Status register */
#define DRQSR2 0x00e8 /* DMA DREQ<2> Status register */
#define DINT 0x00f0 /* DMA Interrupt register */
#define DRCMR0 0x0100 /* Request to Channel Map register 0 */
#define DRCMR63 0x01fc /* Request to Channel Map register 63 */
#define D_CH0 0x0200 /* Channel 0 Descriptor start */
#define DRCMR64 0x1100 /* Request to Channel Map register 64 */
#define DRCMR74 0x1128 /* Request to Channel Map register 74 */

/* Per-channel register */
#define DDADR 0x00
#define DSADR 0x01
#define DTADR 0x02
#define DCMD 0x03

/* Bit-field masks */
#define DRCMR_CHLNUM 0x1f
#define DRCMR_MAPVLD (1 << 7)
#define DDADR_STOP (1 << 0)
#define DDADR_BREN (1 << 1)
#define DCMD_LEN 0x1fff
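/*
 * DCMD_WIDTH decodes the peripheral transfer width from DCMD[15:14]
 * (field values 1/2/3 select 1/2/4 bytes) and DCMD_SIZE decodes the
 * burst size from DCMD[17:16] (4/8/16/32 bytes).
 */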
#define DCMD_WIDTH(x) (1 << ((((x) >> 14) & 3) - 1))
#define DCMD_SIZE(x) (4 << (((x) >> 16) & 3))
#define DCMD_FLYBYT (1 << 19)
#define DCMD_FLYBYS (1 << 20)
#define DCMD_ENDIRQEN (1 << 21)
#define DCMD_STARTIRQEN (1 << 22)
#define DCMD_CMPEN (1 << 25)
#define DCMD_FLOWTRG (1 << 28)
#define DCMD_FLOWSRC (1 << 29)
#define DCMD_INCTRGADDR (1 << 30)
#define DCMD_INCSRCADDR (1 << 31)
#define DCSR_BUSERRINTR (1 << 0)
#define DCSR_STARTINTR (1 << 1)
#define DCSR_ENDINTR (1 << 2)
#define DCSR_STOPINTR (1 << 3)
#define DCSR_RASINTR (1 << 4)
#define DCSR_REQPEND (1 << 8)
#define DCSR_EORINT (1 << 9)
#define DCSR_CMPST (1 << 10)
#define DCSR_MASKRUN (1 << 22)
#define DCSR_RASIRQEN (1 << 23)
#define DCSR_CLRCMPST (1 << 24)
#define DCSR_SETCMPST (1 << 25)
#define DCSR_EORSTOPEN (1 << 26)
#define DCSR_EORJMPEN (1 << 27)
#define DCSR_EORIRQEN (1 << 28)
#define DCSR_STOPIRQEN (1 << 29)
#define DCSR_NODESCFETCH (1 << 30)
#define DCSR_RUN (1 << 31)

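/*
 * Recompute the interrupt summary bits for channel 'ch' (skipped when
 * ch < 0) and raise or lower the shared DMA interrupt line depending on
 * whether any channel has a pending, enabled interrupt.
 */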
static inline void pxa2xx_dma_update(PXA2xxDMAState *s, int ch)
{
    if (ch >= 0) {
        if ((s->chan[ch].state & DCSR_STOPIRQEN) &&
            (s->chan[ch].state & DCSR_STOPINTR))
            s->stopintr |= 1 << ch;
        else
            s->stopintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_EORIRQEN) &&
            (s->chan[ch].state & DCSR_EORINT))
            s->eorintr |= 1 << ch;
        else
            s->eorintr &= ~(1 << ch);

        if ((s->chan[ch].state & DCSR_RASIRQEN) &&
            (s->chan[ch].state & DCSR_RASINTR))
            s->rasintr |= 1 << ch;
        else
            s->rasintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_STARTINTR)
            s->startintr |= 1 << ch;
        else
            s->startintr &= ~(1 << ch);

        if (s->chan[ch].state & DCSR_ENDINTR)
            s->endintr |= 1 << ch;
        else
            s->endintr &= ~(1 << ch);
    }

    if (s->stopintr | s->eorintr | s->rasintr | s->startintr | s->endintr)
        qemu_irq_raise(s->irq);
    else
        qemu_irq_lower(s->irq);
}

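/*
 * Load the next 16-byte descriptor (DDADR, DSADR, DTADR, DCMD) for channel
 * 'ch' from guest memory at the address held in the channel's DDADR
 * register.  When descriptor branching is enabled (DDADR_BREN) and the
 * compare status bit is set, the alternate descriptor 32 bytes further on
 * is fetched instead.
 */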
static inline void pxa2xx_dma_descriptor_fetch(
                PXA2xxDMAState *s, int ch)
{
    uint32_t desc[4];
    hwaddr daddr = s->chan[ch].descr & ~0xf;
    if ((s->chan[ch].descr & DDADR_BREN) && (s->chan[ch].state & DCSR_CMPST))
        daddr += 32;

    cpu_physical_memory_read(daddr, desc, 16);
    s->chan[ch].descr = desc[DDADR];
    s->chan[ch].src = desc[DSADR];
    s->chan[ch].dest = desc[DTADR];
    s->chan[ch].cmd = desc[DCMD];

    if (s->chan[ch].cmd & DCMD_FLOWSRC)
        s->chan[ch].src &= ~3;
    if (s->chan[ch].cmd & DCMD_FLOWTRG)
        s->chan[ch].dest &= ~3;

    if (s->chan[ch].cmd & (DCMD_CMPEN | DCMD_FLYBYS | DCMD_FLYBYT))
        printf("%s: unsupported mode in channel %i\n", __func__, ch);

    if (s->chan[ch].cmd & DCMD_STARTIRQEN)
        s->chan[ch].state |= DCSR_STARTINTR;
}

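/*
 * Run all channels that are enabled and not stopped until no further
 * progress can be made.  Transfers complete synchronously within this
 * call; 's->running' guards against re-entry when a transfer itself
 * triggers another DMA invocation.
 */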
static void pxa2xx_dma_run(PXA2xxDMAState *s)
{
    int c, srcinc, destinc;
    uint32_t n, size;
    uint32_t width;
    uint32_t length;
    uint8_t buffer[32];
    PXA2xxDMAChannel *ch;

    if (s->running ++)
        return;

    while (s->running) {
        s->running = 1;
        for (c = 0; c < s->channels; c ++) {
            ch = &s->chan[c];

            while ((ch->state & DCSR_RUN) && !(ch->state & DCSR_STOPINTR)) {
                /* Test for pending requests */
                if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) && !ch->request)
                    break;

                length = ch->cmd & DCMD_LEN;
                size = DCMD_SIZE(ch->cmd);
                width = DCMD_WIDTH(ch->cmd);

                srcinc = (ch->cmd & DCMD_INCSRCADDR) ? width : 0;
                destinc = (ch->cmd & DCMD_INCTRGADDR) ? width : 0;

                while (length) {
                    size = MIN(length, size);

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_read(ch->src, buffer + n, width);
                        ch->src += srcinc;
                    }

                    for (n = 0; n < size; n += width) {
                        cpu_physical_memory_write(ch->dest, buffer + n, width);
                        ch->dest += destinc;
                    }

                    length -= size;

                    if ((ch->cmd & (DCMD_FLOWSRC | DCMD_FLOWTRG)) &&
                        !ch->request) {
                        ch->state |= DCSR_EORINT;
                        if (ch->state & DCSR_EORSTOPEN)
                            ch->state |= DCSR_STOPINTR;
                        if ((ch->state & DCSR_EORJMPEN) &&
                            !(ch->state & DCSR_NODESCFETCH))
                            pxa2xx_dma_descriptor_fetch(s, c);
                        break;
                    }
                }

                ch->cmd = (ch->cmd & ~DCMD_LEN) | length;

                /* Is the transfer complete now? */
                if (!length) {
                    if (ch->cmd & DCMD_ENDIRQEN)
                        ch->state |= DCSR_ENDINTR;

                    if ((ch->state & DCSR_NODESCFETCH) ||
                        (ch->descr & DDADR_STOP) ||
                        (ch->state & DCSR_EORSTOPEN)) {
                        ch->state |= DCSR_STOPINTR;
                        ch->state &= ~DCSR_RUN;

                        break;
                    }

                    ch->state |= DCSR_STOPINTR;
                    break;
                }
            }
        }

        s->running --;
    }
}

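/*
 * MMIO read handler.  DRCMR0-63 sit in one register window and DRCMR64-74
 * in a separate window at 0x1100; the offset adjustment below folds the
 * second window onto the same req[] array.
 */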
static uint64_t pxa2xx_dma_read(void *opaque, hwaddr offset,
                                unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return 5;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;
        return s->req[channel];

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        return 0;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        if (s->chan[channel].request)
            return s->chan[channel].state | DCSR_REQPEND;
        return s->chan[channel].state;

    case DINT:
        return s->stopintr | s->eorintr | s->rasintr |
               s->startintr | s->endintr;

    case DALGN:
        return s->align;

    case DPCSR:
        return s->pio;
    }

    if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
        channel = (offset - D_CH0) >> 4;
        switch ((offset & 0x0f) >> 2) {
        case DDADR:
            return s->chan[channel].descr;
        case DSADR:
            return s->chan[channel].src;
        case DTADR:
            return s->chan[channel].dest;
        case DCMD:
            return s->chan[channel].cmd;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                  __func__, offset);
    return 7;
}

static void pxa2xx_dma_write(void *opaque, hwaddr offset,
                             uint64_t value, unsigned size)
{
    PXA2xxDMAState *s = (PXA2xxDMAState *) opaque;
    unsigned int channel;

    if (size != 4) {
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad access width %u\n",
                      __func__, size);
        return;
    }

    switch (offset) {
    case DRCMR64 ... DRCMR74:
        offset -= DRCMR64 - DRCMR0 - (64 << 2);
        /* Fall through */
    case DRCMR0 ... DRCMR63:
        channel = (offset - DRCMR0) >> 2;

        if (value & DRCMR_MAPVLD)
            if ((value & DRCMR_CHLNUM) > s->channels)
                hw_error("%s: Bad DMA channel %i\n",
                         __func__, (unsigned)value & DRCMR_CHLNUM);

        s->req[channel] = value;
        break;

    case DRQSR0:
    case DRQSR1:
    case DRQSR2:
        /* Nothing to do */
        break;

    case DCSR0 ... DCSR31:
        channel = offset >> 2;
        s->chan[channel].state &= 0x0000071f & ~(value &
                        (DCSR_EORINT | DCSR_ENDINTR |
                         DCSR_STARTINTR | DCSR_BUSERRINTR));
        s->chan[channel].state |= value & 0xfc800000;

        if (s->chan[channel].state & DCSR_STOPIRQEN)
            s->chan[channel].state &= ~DCSR_STOPINTR;

        if (value & DCSR_NODESCFETCH) {
            /* No-descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_run(s);
            }
        } else {
            /* Descriptor-fetch mode */
            if (value & DCSR_RUN) {
                s->chan[channel].state &= ~DCSR_STOPINTR;
                pxa2xx_dma_descriptor_fetch(s, channel);
                pxa2xx_dma_run(s);
            }
        }

        /* Shouldn't matter as our DMA is synchronous. */
        if (!(value & (DCSR_RUN | DCSR_MASKRUN)))
            s->chan[channel].state |= DCSR_STOPINTR;

        if (value & DCSR_CLRCMPST)
            s->chan[channel].state &= ~DCSR_CMPST;
        if (value & DCSR_SETCMPST)
            s->chan[channel].state |= DCSR_CMPST;

        pxa2xx_dma_update(s, channel);
        break;

    case DALGN:
        s->align = value;
        break;

    case DPCSR:
        s->pio = value & 0x80000001;
        break;

    default:
        if (offset >= D_CH0 && offset < D_CH0 + (s->channels << 4)) {
            channel = (offset - D_CH0) >> 4;
            switch ((offset & 0x0f) >> 2) {
            case DDADR:
                s->chan[channel].descr = value;
                break;
            case DSADR:
                s->chan[channel].src = value;
                break;
            case DTADR:
                s->chan[channel].dest = value;
                break;
            case DCMD:
                s->chan[channel].cmd = value;
                break;
            default:
                goto fail;
            }

            break;
        }
    fail:
        qemu_log_mask(LOG_GUEST_ERROR, "%s: Bad offset 0x%" HWADDR_PRIX "\n",
                      __func__, offset);
    }
}

static const MemoryRegionOps pxa2xx_dma_ops = {
    .read = pxa2xx_dma_read,
    .write = pxa2xx_dma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

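/*
 * GPIO-style input handler: peripheral DMA request line 'req_num' changed
 * level.  If the request is mapped to a channel (DRCMR_MAPVLD), update that
 * channel's status bits and, on assertion, kick the DMA engine.
 */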
static void pxa2xx_dma_request(void *opaque, int req_num, int on)
{
    PXA2xxDMAState *s = opaque;
    int ch;
    if (req_num < 0 || req_num >= PXA2XX_DMA_NUM_REQUESTS)
        hw_error("%s: Bad DMA request %i\n", __func__, req_num);

    if (!(s->req[req_num] & DRCMR_MAPVLD))
        return;
    ch = s->req[req_num] & DRCMR_CHLNUM;

    if (!s->chan[ch].request && on)
        s->chan[ch].state |= DCSR_RASINTR;
    else
        s->chan[ch].state &= ~DCSR_RASINTR;
    if (s->chan[ch].request && !on)
        s->chan[ch].state |= DCSR_EORINT;

    s->chan[ch].request = on;
    if (on) {
        pxa2xx_dma_run(s);
        pxa2xx_dma_update(s, ch);
    }
}

static void pxa2xx_dma_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    PXA2xxDMAState *s = PXA2XX_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memset(s->req, 0, sizeof(uint8_t) * PXA2XX_DMA_NUM_REQUESTS);

    qdev_init_gpio_in(dev, pxa2xx_dma_request, PXA2XX_DMA_NUM_REQUESTS);

    memory_region_init_io(&s->iomem, obj, &pxa2xx_dma_ops, s,
                          "pxa2xx.dma", 0x00010000);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq);
}

static void pxa2xx_dma_realize(DeviceState *dev, Error **errp)
{
    PXA2xxDMAState *s = PXA2XX_DMA(dev);
    int i;

    if (s->channels <= 0) {
        error_setg(errp, "channels value invalid");
        return;
    }

    s->chan = g_new0(PXA2xxDMAChannel, s->channels);

    for (i = 0; i < s->channels; i ++)
        s->chan[i].state = DCSR_STOPINTR;
}

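/*
 * Board-level helpers: create, realize and map a DMA controller with the
 * channel count of the respective SoC, and wire up its interrupt line.
 */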
DeviceState *pxa27x_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA27X_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

DeviceState *pxa255_dma_init(hwaddr base, qemu_irq irq)
{
    DeviceState *dev;

    dev = qdev_new("pxa2xx-dma");
    qdev_prop_set_int32(dev, "channels", PXA255_DMA_NUM_CHANNELS);
    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);

    return dev;
}

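/*
 * Version 0 of the migration stream carried four extra bytes that are no
 * longer used; VMSTATE_UNUSED_TEST skips them when loading such a stream.
 */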
static bool is_version_0(void *opaque, int version_id)
{
    return version_id == 0;
}

static VMStateDescription vmstate_pxa2xx_dma_chan = {
    .name = "pxa2xx_dma_chan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(descr, PXA2xxDMAChannel),
        VMSTATE_UINT32(src, PXA2xxDMAChannel),
        VMSTATE_UINT32(dest, PXA2xxDMAChannel),
        VMSTATE_UINT32(cmd, PXA2xxDMAChannel),
        VMSTATE_UINT32(state, PXA2xxDMAChannel),
        VMSTATE_INT32(request, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static VMStateDescription vmstate_pxa2xx_dma = {
    .name = "pxa2xx_dma",
    .version_id = 1,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED_TEST(is_version_0, 4),
        VMSTATE_UINT32(stopintr, PXA2xxDMAState),
        VMSTATE_UINT32(eorintr, PXA2xxDMAState),
        VMSTATE_UINT32(rasintr, PXA2xxDMAState),
        VMSTATE_UINT32(startintr, PXA2xxDMAState),
        VMSTATE_UINT32(endintr, PXA2xxDMAState),
        VMSTATE_UINT32(align, PXA2xxDMAState),
        VMSTATE_UINT32(pio, PXA2xxDMAState),
        VMSTATE_BUFFER(req, PXA2xxDMAState),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(chan, PXA2xxDMAState, channels,
                vmstate_pxa2xx_dma_chan, PXA2xxDMAChannel),
        VMSTATE_END_OF_LIST(),
    },
};

static Property pxa2xx_dma_properties[] = {
    DEFINE_PROP_INT32("channels", PXA2xxDMAState, channels, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void pxa2xx_dma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->desc = "PXA2xx DMA controller";
    dc->vmsd = &vmstate_pxa2xx_dma;
    device_class_set_props(dc, pxa2xx_dma_properties);
    dc->realize = pxa2xx_dma_realize;
}

static const TypeInfo pxa2xx_dma_info = {
    .name = TYPE_PXA2XX_DMA,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PXA2xxDMAState),
    .instance_init = pxa2xx_dma_init,
    .class_init = pxa2xx_dma_class_init,
};

static void pxa2xx_dma_register_types(void)
{
    type_register_static(&pxa2xx_dma_info);
}

type_init(pxa2xx_dma_register_types)