1 /*
2 * i.MX Fast Ethernet Controller emulation.
3 *
4 * Copyright (c) 2013 Jean-Christophe Dubois. <jcd@tribudubois.net>
5 *
6 * Based on Coldfire Fast Ethernet Controller emulation.
7 *
8 * Copyright (c) 2007 CodeSourcery.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but WITHOUT
16 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 * for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #include "qemu/osdep.h"
25 #include "hw/irq.h"
26 #include "hw/net/imx_fec.h"
27 #include "sysemu/dma.h"
28 #include "qemu/log.h"
29 #include "qemu/module.h"
30 #include "net/checksum.h"
31 #include "net/eth.h"
32
33 /* For crc32 */
34 #include <zlib.h>
35
36 #ifndef DEBUG_IMX_FEC
37 #define DEBUG_IMX_FEC 0
38 #endif
39
40 #define FEC_PRINTF(fmt, args...) \
41 do { \
42 if (DEBUG_IMX_FEC) { \
43 fprintf(stderr, "[%s]%s: " fmt , TYPE_IMX_FEC, \
44 __func__, ##args); \
45 } \
46 } while (0)
47
48 #ifndef DEBUG_IMX_PHY
49 #define DEBUG_IMX_PHY 0
50 #endif
51
52 #define PHY_PRINTF(fmt, args...) \
53 do { \
54 if (DEBUG_IMX_PHY) { \
55 fprintf(stderr, "[%s.phy]%s: " fmt , TYPE_IMX_FEC, \
56 __func__, ##args); \
57 } \
58 } while (0)
59
60 #define IMX_MAX_DESC 1024
61
62 static const char *imx_default_reg_name(IMXFECState *s, uint32_t index)
63 {
64 static char tmp[20];
65 sprintf(tmp, "index %d", index);
66 return tmp;
67 }
68
69 static const char *imx_fec_reg_name(IMXFECState *s, uint32_t index)
70 {
71 switch (index) {
72 case ENET_FRBR:
73 return "FRBR";
74 case ENET_FRSR:
75 return "FRSR";
76 case ENET_MIIGSK_CFGR:
77 return "MIIGSK_CFGR";
78 case ENET_MIIGSK_ENR:
79 return "MIIGSK_ENR";
80 default:
81 return imx_default_reg_name(s, index);
82 }
83 }
84
85 static const char *imx_enet_reg_name(IMXFECState *s, uint32_t index)
86 {
87 switch (index) {
88 case ENET_RSFL:
89 return "RSFL";
90 case ENET_RSEM:
91 return "RSEM";
92 case ENET_RAEM:
93 return "RAEM";
94 case ENET_RAFL:
95 return "RAFL";
96 case ENET_TSEM:
97 return "TSEM";
98 case ENET_TAEM:
99 return "TAEM";
100 case ENET_TAFL:
101 return "TAFL";
102 case ENET_TIPG:
103 return "TIPG";
104 case ENET_FTRL:
105 return "FTRL";
106 case ENET_TACC:
107 return "TACC";
108 case ENET_RACC:
109 return "RACC";
110 case ENET_ATCR:
111 return "ATCR";
112 case ENET_ATVR:
113 return "ATVR";
114 case ENET_ATOFF:
115 return "ATOFF";
116 case ENET_ATPER:
117 return "ATPER";
118 case ENET_ATCOR:
119 return "ATCOR";
120 case ENET_ATINC:
121 return "ATINC";
122 case ENET_ATSTMP:
123 return "ATSTMP";
124 case ENET_TGSR:
125 return "TGSR";
126 case ENET_TCSR0:
127 return "TCSR0";
128 case ENET_TCCR0:
129 return "TCCR0";
130 case ENET_TCSR1:
131 return "TCSR1";
132 case ENET_TCCR1:
133 return "TCCR1";
134 case ENET_TCSR2:
135 return "TCSR2";
136 case ENET_TCCR2:
137 return "TCCR2";
138 case ENET_TCSR3:
139 return "TCSR3";
140 case ENET_TCCR3:
141 return "TCCR3";
142 default:
143 return imx_default_reg_name(s, index);
144 }
145 }
146
147 static const char *imx_eth_reg_name(IMXFECState *s, uint32_t index)
148 {
149 switch (index) {
150 case ENET_EIR:
151 return "EIR";
152 case ENET_EIMR:
153 return "EIMR";
154 case ENET_RDAR:
155 return "RDAR";
156 case ENET_TDAR:
157 return "TDAR";
158 case ENET_ECR:
159 return "ECR";
160 case ENET_MMFR:
161 return "MMFR";
162 case ENET_MSCR:
163 return "MSCR";
164 case ENET_MIBC:
165 return "MIBC";
166 case ENET_RCR:
167 return "RCR";
168 case ENET_TCR:
169 return "TCR";
170 case ENET_PALR:
171 return "PALR";
172 case ENET_PAUR:
173 return "PAUR";
174 case ENET_OPD:
175 return "OPD";
176 case ENET_IAUR:
177 return "IAUR";
178 case ENET_IALR:
179 return "IALR";
180 case ENET_GAUR:
181 return "GAUR";
182 case ENET_GALR:
183 return "GALR";
184 case ENET_TFWR:
185 return "TFWR";
186 case ENET_RDSR:
187 return "RDSR";
188 case ENET_TDSR:
189 return "TDSR";
190 case ENET_MRBR:
191 return "MRBR";
192 default:
193 if (s->is_fec) {
194 return imx_fec_reg_name(s, index);
195 } else {
196 return imx_enet_reg_name(s, index);
197 }
198 }
199 }
200
201 /*
202 * Versions of this device with more than one TX descriptor save the
203 * 2nd and 3rd descriptors in a subsection, to maintain migration
204 * compatibility with previous versions of the device that only
205 * supported a single descriptor.
206 */
207 static bool imx_eth_is_multi_tx_ring(void *opaque)
208 {
209 IMXFECState *s = IMX_FEC(opaque);
210
211 return s->tx_ring_num > 1;
212 }
213
214 static const VMStateDescription vmstate_imx_eth_txdescs = {
215 .name = "imx.fec/txdescs",
216 .version_id = 1,
217 .minimum_version_id = 1,
218 .needed = imx_eth_is_multi_tx_ring,
219 .fields = (VMStateField[]) {
220 VMSTATE_UINT32(tx_descriptor[1], IMXFECState),
221 VMSTATE_UINT32(tx_descriptor[2], IMXFECState),
222 VMSTATE_END_OF_LIST()
223 }
224 };
225
226 static const VMStateDescription vmstate_imx_eth = {
227 .name = TYPE_IMX_FEC,
228 .version_id = 2,
229 .minimum_version_id = 2,
230 .fields = (VMStateField[]) {
231 VMSTATE_UINT32_ARRAY(regs, IMXFECState, ENET_MAX),
232 VMSTATE_UINT32(rx_descriptor, IMXFECState),
233 VMSTATE_UINT32(tx_descriptor[0], IMXFECState),
234 VMSTATE_UINT32(phy_status, IMXFECState),
235 VMSTATE_UINT32(phy_control, IMXFECState),
236 VMSTATE_UINT32(phy_advertise, IMXFECState),
237 VMSTATE_UINT32(phy_int, IMXFECState),
238 VMSTATE_UINT32(phy_int_mask, IMXFECState),
239 VMSTATE_END_OF_LIST()
240 },
241 .subsections = (const VMStateDescription * []) {
242 &vmstate_imx_eth_txdescs,
243 NULL
244 },
245 };
246
247 #define PHY_INT_ENERGYON (1 << 7)
248 #define PHY_INT_AUTONEG_COMPLETE (1 << 6)
249 #define PHY_INT_FAULT (1 << 5)
250 #define PHY_INT_DOWN (1 << 4)
251 #define PHY_INT_AUTONEG_LP (1 << 3)
252 #define PHY_INT_PARFAULT (1 << 2)
253 #define PHY_INT_AUTONEG_PAGE (1 << 1)
254
255 static void imx_eth_update(IMXFECState *s);
256
257 /*
258  * The MII PHY could raise a GPIO line to the processor, which in turn
259  * could be handled as an interrupt by the OS.
260 * For now we don't handle any GPIO/interrupt line, so the OS will
261 * have to poll for the PHY status.
262 */
263 static void phy_update_irq(IMXFECState *s)
264 {
265 imx_eth_update(s);
266 }
267
268 static void phy_update_link(IMXFECState *s)
269 {
270 /* Autonegotiation status mirrors link status. */
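/* 0x0024 covers BMSR bit 2 (link status) and bit 5 (auto-negotiation complete). */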
271 if (qemu_get_queue(s->nic)->link_down) {
272 PHY_PRINTF("link is down\n");
273 s->phy_status &= ~0x0024;
274 s->phy_int |= PHY_INT_DOWN;
275 } else {
276 PHY_PRINTF("link is up\n");
277 s->phy_status |= 0x0024;
278 s->phy_int |= PHY_INT_ENERGYON;
279 s->phy_int |= PHY_INT_AUTONEG_COMPLETE;
280 }
281 phy_update_irq(s);
282 }
283
284 static void imx_eth_set_link(NetClientState *nc)
285 {
286 phy_update_link(IMX_FEC(qemu_get_nic_opaque(nc)));
287 }
288
289 static void phy_reset(IMXFECState *s)
290 {
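/* Generic 10/100 PHY defaults: BMSR 0x7809 (10/100 capable, able to auto-negotiate),
 * BMCR 0x3000 (auto-negotiation enabled, 100 Mb/s), ANAR 0x01e1 (advertise 10/100, half/full duplex). */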
291 s->phy_status = 0x7809;
292 s->phy_control = 0x3000;
293 s->phy_advertise = 0x01e1;
294 s->phy_int_mask = 0;
295 s->phy_int = 0;
296 phy_update_link(s);
297 }
298
299 static uint32_t do_phy_read(IMXFECState *s, int reg)
300 {
301 uint32_t val;
302
303 if (reg > 31) {
304 /* we only advertise one phy */
305 return 0;
306 }
307
308 switch (reg) {
309 case 0: /* Basic Control */
310 val = s->phy_control;
311 break;
312 case 1: /* Basic Status */
313 val = s->phy_status;
314 break;
315 case 2: /* ID1 */
316 val = 0x0007;
317 break;
318 case 3: /* ID2 */
319 val = 0xc0d1;
320 break;
321 case 4: /* Auto-neg advertisement */
322 val = s->phy_advertise;
323 break;
324 case 5: /* Auto-neg Link Partner Ability */
325 val = 0x0f71;
326 break;
327 case 6: /* Auto-neg Expansion */
328 val = 1;
329 break;
330 case 29: /* Interrupt source. */
331 val = s->phy_int;
332 s->phy_int = 0;
333 phy_update_irq(s);
334 break;
335 case 30: /* Interrupt mask */
336 val = s->phy_int_mask;
337 break;
338 case 17:
339 case 18:
340 case 27:
341 case 31:
342 qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
343 TYPE_IMX_FEC, __func__, reg);
344 val = 0;
345 break;
346 default:
347 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
348 TYPE_IMX_FEC, __func__, reg);
349 val = 0;
350 break;
351 }
352
353 PHY_PRINTF("read 0x%04x @ %d\n", val, reg);
354
355 return val;
356 }
357
358 static void do_phy_write(IMXFECState *s, int reg, uint32_t val)
359 {
360 PHY_PRINTF("write 0x%04x @ %d\n", val, reg);
361
362 if (reg > 31) {
363 /* we only advertise one phy */
364 return;
365 }
366
367 switch (reg) {
368 case 0: /* Basic Control */
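/* BMCR: bit 15 is a soft reset; bit 12 enables auto-negotiation, which this model completes immediately. */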
369 if (val & 0x8000) {
370 phy_reset(s);
371 } else {
372 s->phy_control = val & 0x7980;
373 /* Complete autonegotiation immediately. */
374 if (val & 0x1000) {
375 s->phy_status |= 0x0020;
376 }
377 }
378 break;
379 case 4: /* Auto-neg advertisement */
380 s->phy_advertise = (val & 0x2d7f) | 0x80;
381 break;
382 case 30: /* Interrupt mask */
383 s->phy_int_mask = val & 0xff;
384 phy_update_irq(s);
385 break;
386 case 17:
387 case 18:
388 case 27:
389 case 31:
390 qemu_log_mask(LOG_UNIMP, "[%s.phy]%s: reg %d not implemented\n",
391 TYPE_IMX_FEC, __func__, reg);
392 break;
393 default:
394 qemu_log_mask(LOG_GUEST_ERROR, "[%s.phy]%s: Bad address at offset %d\n",
395 TYPE_IMX_FEC, __func__, reg);
396 break;
397 }
398 }
399
400 static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr)
401 {
402 dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
403 }
404
405 static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr)
406 {
407 dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
408 }
409
410 static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr)
411 {
412 dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd));
413 }
414
415 static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr)
416 {
417 dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd));
418 }
419
420 static void imx_eth_update(IMXFECState *s)
421 {
422 /*
423 * Previous versions of qemu had the ENET_INT_MAC and ENET_INT_TS_TIMER
424 * interrupts swapped. This worked with older versions of Linux (4.14
425 * and older) since Linux associated both interrupt lines with Ethernet
426 * MAC interrupts. Specifically,
427 * - Linux 4.15 and later have separate interrupt handlers for the MAC and
428 * timer interrupts. Those versions of Linux fail with versions of QEMU
429 * with swapped interrupt assignments.
430  * - In Linux 4.14, both interrupt lines were registered with the Ethernet
431 * MAC interrupt handler. As a result, all versions of qemu happen to
432 * work, though that is accidental.
433 * - In Linux 4.9 and older, the timer interrupt was registered directly
434 * with the Ethernet MAC interrupt handler. The MAC interrupt was
435 * redirected to a GPIO interrupt to work around erratum ERR006687.
436 * This was implemented using the SOC's IOMUX block. In qemu, this GPIO
437 * interrupt never fired since IOMUX is currently not supported in qemu.
438 * Linux instead received MAC interrupts on the timer interrupt.
439 * As a result, qemu versions with the swapped interrupt assignment work,
440 * albeit accidentally, but qemu versions with the correct interrupt
441 * assignment fail.
442 *
443 * To ensure that all versions of Linux work, generate ENET_INT_MAC
444  * interrupts on both interrupt lines. This should be changed if and when
445 * qemu supports IOMUX.
446 */
447 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] &
448 (ENET_INT_MAC | ENET_INT_TS_TIMER)) {
449 qemu_set_irq(s->irq[1], 1);
450 } else {
451 qemu_set_irq(s->irq[1], 0);
452 }
453
454 if (s->regs[ENET_EIR] & s->regs[ENET_EIMR] & ENET_INT_MAC) {
455 qemu_set_irq(s->irq[0], 1);
456 } else {
457 qemu_set_irq(s->irq[0], 0);
458 }
459 }
460
461 static void imx_fec_do_tx(IMXFECState *s)
462 {
463 int frame_size = 0, descnt = 0;
464 uint8_t *ptr = s->frame;
465 uint32_t addr = s->tx_descriptor[0];
466
467 while (descnt++ < IMX_MAX_DESC) {
468 IMXFECBufDesc bd;
469 int len;
470
471 imx_fec_read_bd(&bd, addr);
472 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x\n",
473 addr, bd.flags, bd.length, bd.data);
474 if ((bd.flags & ENET_BD_R) == 0) {
475 /* Ran out of descriptors to transmit. */
476 FEC_PRINTF("tx_bd ran out of descriptors to transmit\n");
477 break;
478 }
479 len = bd.length;
480 if (frame_size + len > ENET_MAX_FRAME_SIZE) {
481 len = ENET_MAX_FRAME_SIZE - frame_size;
482 s->regs[ENET_EIR] |= ENET_INT_BABT;
483 }
484 dma_memory_read(&address_space_memory, bd.data, ptr, len);
485 ptr += len;
486 frame_size += len;
487 if (bd.flags & ENET_BD_L) {
488 /* Last buffer in frame. */
489 qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
490 ptr = s->frame;
491 frame_size = 0;
492 s->regs[ENET_EIR] |= ENET_INT_TXF;
493 }
494 s->regs[ENET_EIR] |= ENET_INT_TXB;
495 bd.flags &= ~ENET_BD_R;
496 /* Write back the modified descriptor. */
497 imx_fec_write_bd(&bd, addr);
498 /* Advance to the next descriptor. */
499 if ((bd.flags & ENET_BD_W) != 0) {
500 addr = s->regs[ENET_TDSR];
501 } else {
502 addr += sizeof(bd);
503 }
504 }
505
506 s->tx_descriptor[0] = addr;
507
508 imx_eth_update(s);
509 }
510
511 static void imx_enet_do_tx(IMXFECState *s, uint32_t index)
512 {
513 int frame_size = 0, descnt = 0;
514
515 uint8_t *ptr = s->frame;
516 uint32_t addr, int_txb, int_txf, tdsr;
517 size_t ring;
518
519 switch (index) {
520 case ENET_TDAR:
521 ring = 0;
522 int_txb = ENET_INT_TXB;
523 int_txf = ENET_INT_TXF;
524 tdsr = ENET_TDSR;
525 break;
526 case ENET_TDAR1:
527 ring = 1;
528 int_txb = ENET_INT_TXB1;
529 int_txf = ENET_INT_TXF1;
530 tdsr = ENET_TDSR1;
531 break;
532 case ENET_TDAR2:
533 ring = 2;
534 int_txb = ENET_INT_TXB2;
535 int_txf = ENET_INT_TXF2;
536 tdsr = ENET_TDSR2;
537 break;
538 default:
539 qemu_log_mask(LOG_GUEST_ERROR,
540 "%s: bogus value for index %x\n",
541 __func__, index);
542 abort();
543 break;
544 }
545
546 addr = s->tx_descriptor[ring];
547
548 while (descnt++ < IMX_MAX_DESC) {
549 IMXENETBufDesc bd;
550 int len;
551
552 imx_enet_read_bd(&bd, addr);
553 FEC_PRINTF("tx_bd %x flags %04x len %d data %08x option %04x "
554 "status %04x\n", addr, bd.flags, bd.length, bd.data,
555 bd.option, bd.status);
556 if ((bd.flags & ENET_BD_R) == 0) {
557 /* Ran out of descriptors to transmit. */
558 break;
559 }
560 len = bd.length;
561 if (frame_size + len > ENET_MAX_FRAME_SIZE) {
562 len = ENET_MAX_FRAME_SIZE - frame_size;
563 s->regs[ENET_EIR] |= ENET_INT_BABT;
564 }
565 dma_memory_read(&address_space_memory, bd.data, ptr, len);
566 ptr += len;
567 frame_size += len;
568 if (bd.flags & ENET_BD_L) {
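/* PINS requests insertion of the protocol (TCP/UDP) checksum, IINS insertion of the IPv4 header checksum. */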
569 if (bd.option & ENET_BD_PINS) {
570 struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
571 if (IP_HEADER_VERSION(ip_hd) == 4) {
572 net_checksum_calculate(s->frame, frame_size);
573 }
574 }
575 if (bd.option & ENET_BD_IINS) {
576 struct ip_header *ip_hd = PKT_GET_IP_HDR(s->frame);
577 /* We compute checksum only for IPv4 frames */
578 if (IP_HEADER_VERSION(ip_hd) == 4) {
579 uint16_t csum;
580 ip_hd->ip_sum = 0;
581 csum = net_raw_checksum((uint8_t *)ip_hd, sizeof(*ip_hd));
582 ip_hd->ip_sum = cpu_to_be16(csum);
583 }
584 }
585 /* Last buffer in frame. */
586
587 qemu_send_packet(qemu_get_queue(s->nic), s->frame, frame_size);
588 ptr = s->frame;
589
590 frame_size = 0;
591 if (bd.option & ENET_BD_TX_INT) {
592 s->regs[ENET_EIR] |= int_txf;
593 }
594 }
595 if (bd.option & ENET_BD_TX_INT) {
596 s->regs[ENET_EIR] |= int_txb;
597 }
598 bd.flags &= ~ENET_BD_R;
599 /* Write back the modified descriptor. */
600 imx_enet_write_bd(&bd, addr);
601 /* Advance to the next descriptor. */
602 if ((bd.flags & ENET_BD_W) != 0) {
603 addr = s->regs[tdsr];
604 } else {
605 addr += sizeof(bd);
606 }
607 }
608
609 s->tx_descriptor[ring] = addr;
610
611 imx_eth_update(s);
612 }
613
614 static void imx_eth_do_tx(IMXFECState *s, uint32_t index)
615 {
616 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
617 imx_enet_do_tx(s, index);
618 } else {
619 imx_fec_do_tx(s);
620 }
621 }
622
623 static void imx_eth_enable_rx(IMXFECState *s, bool flush)
624 {
625 IMXFECBufDesc bd;
626
627 imx_fec_read_bd(&bd, s->rx_descriptor);
628
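/* RDAR stays asserted only while the current RX descriptor is still empty (ENET_BD_E), i.e. the guest owns a receive buffer. */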
629 s->regs[ENET_RDAR] = (bd.flags & ENET_BD_E) ? ENET_RDAR_RDAR : 0;
630
631 if (!s->regs[ENET_RDAR]) {
632 FEC_PRINTF("RX buffer full\n");
633 } else if (flush) {
634 qemu_flush_queued_packets(qemu_get_queue(s->nic));
635 }
636 }
637
638 static void imx_eth_reset(DeviceState *d)
639 {
640 IMXFECState *s = IMX_FEC(d);
641
642 /* Reset the Device */
643 memset(s->regs, 0, sizeof(s->regs));
644 s->regs[ENET_ECR] = 0xf0000000;
645 s->regs[ENET_MIBC] = 0xc0000000;
646 s->regs[ENET_RCR] = 0x05ee0001;
647 s->regs[ENET_OPD] = 0x00010000;
648
649 s->regs[ENET_PALR] = (s->conf.macaddr.a[0] << 24)
650 | (s->conf.macaddr.a[1] << 16)
651 | (s->conf.macaddr.a[2] << 8)
652 | s->conf.macaddr.a[3];
653 s->regs[ENET_PAUR] = (s->conf.macaddr.a[4] << 24)
654 | (s->conf.macaddr.a[5] << 16)
655 | 0x8808;
656
657 if (s->is_fec) {
658 s->regs[ENET_FRBR] = 0x00000600;
659 s->regs[ENET_FRSR] = 0x00000500;
660 s->regs[ENET_MIIGSK_ENR] = 0x00000006;
661 } else {
662 s->regs[ENET_RAEM] = 0x00000004;
663 s->regs[ENET_RAFL] = 0x00000004;
664 s->regs[ENET_TAEM] = 0x00000004;
665 s->regs[ENET_TAFL] = 0x00000008;
666 s->regs[ENET_TIPG] = 0x0000000c;
667 s->regs[ENET_FTRL] = 0x000007ff;
668 s->regs[ENET_ATPER] = 0x3b9aca00;
669 }
670
671 s->rx_descriptor = 0;
672 memset(s->tx_descriptor, 0, sizeof(s->tx_descriptor));
673
674 /* We also reset the PHY */
675 phy_reset(s);
676 }
677
678 static uint32_t imx_default_read(IMXFECState *s, uint32_t index)
679 {
680 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad register at offset 0x%"
681 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
682 return 0;
683 }
684
685 static uint32_t imx_fec_read(IMXFECState *s, uint32_t index)
686 {
687 switch (index) {
688 case ENET_FRBR:
689 case ENET_FRSR:
690 case ENET_MIIGSK_CFGR:
691 case ENET_MIIGSK_ENR:
692 return s->regs[index];
693 default:
694 return imx_default_read(s, index);
695 }
696 }
697
698 static uint32_t imx_enet_read(IMXFECState *s, uint32_t index)
699 {
700 switch (index) {
701 case ENET_RSFL:
702 case ENET_RSEM:
703 case ENET_RAEM:
704 case ENET_RAFL:
705 case ENET_TSEM:
706 case ENET_TAEM:
707 case ENET_TAFL:
708 case ENET_TIPG:
709 case ENET_FTRL:
710 case ENET_TACC:
711 case ENET_RACC:
712 case ENET_ATCR:
713 case ENET_ATVR:
714 case ENET_ATOFF:
715 case ENET_ATPER:
716 case ENET_ATCOR:
717 case ENET_ATINC:
718 case ENET_ATSTMP:
719 case ENET_TGSR:
720 case ENET_TCSR0:
721 case ENET_TCCR0:
722 case ENET_TCSR1:
723 case ENET_TCCR1:
724 case ENET_TCSR2:
725 case ENET_TCCR2:
726 case ENET_TCSR3:
727 case ENET_TCCR3:
728 return s->regs[index];
729 default:
730 return imx_default_read(s, index);
731 }
732 }
733
734 static uint64_t imx_eth_read(void *opaque, hwaddr offset, unsigned size)
735 {
736 uint32_t value = 0;
737 IMXFECState *s = IMX_FEC(opaque);
738 uint32_t index = offset >> 2;
739
740 switch (index) {
741 case ENET_EIR:
742 case ENET_EIMR:
743 case ENET_RDAR:
744 case ENET_TDAR:
745 case ENET_ECR:
746 case ENET_MMFR:
747 case ENET_MSCR:
748 case ENET_MIBC:
749 case ENET_RCR:
750 case ENET_TCR:
751 case ENET_PALR:
752 case ENET_PAUR:
753 case ENET_OPD:
754 case ENET_IAUR:
755 case ENET_IALR:
756 case ENET_GAUR:
757 case ENET_GALR:
758 case ENET_TFWR:
759 case ENET_RDSR:
760 case ENET_TDSR:
761 case ENET_MRBR:
762 value = s->regs[index];
763 break;
764 default:
765 if (s->is_fec) {
766 value = imx_fec_read(s, index);
767 } else {
768 value = imx_enet_read(s, index);
769 }
770 break;
771 }
772
773 FEC_PRINTF("reg[%s] => 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
774 value);
775
776 return value;
777 }
778
779 static void imx_default_write(IMXFECState *s, uint32_t index, uint32_t value)
780 {
781 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Bad address at offset 0x%"
782 PRIx32 "\n", TYPE_IMX_FEC, __func__, index * 4);
783 return;
784 }
785
786 static void imx_fec_write(IMXFECState *s, uint32_t index, uint32_t value)
787 {
788 switch (index) {
789 case ENET_FRBR:
790 /* FRBR is read only */
791 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register FRBR is read only\n",
792 TYPE_IMX_FEC, __func__);
793 break;
794 case ENET_FRSR:
795 s->regs[index] = (value & 0x000003fc) | 0x00000400;
796 break;
797 case ENET_MIIGSK_CFGR:
798 s->regs[index] = value & 0x00000053;
799 break;
800 case ENET_MIIGSK_ENR:
801 s->regs[index] = (value & 0x00000002) ? 0x00000006 : 0;
802 break;
803 default:
804 imx_default_write(s, index, value);
805 break;
806 }
807 }
808
809 static void imx_enet_write(IMXFECState *s, uint32_t index, uint32_t value)
810 {
811 switch (index) {
812 case ENET_RSFL:
813 case ENET_RSEM:
814 case ENET_RAEM:
815 case ENET_RAFL:
816 case ENET_TSEM:
817 case ENET_TAEM:
818 case ENET_TAFL:
819 s->regs[index] = value & 0x000001ff;
820 break;
821 case ENET_TIPG:
822 s->regs[index] = value & 0x0000001f;
823 break;
824 case ENET_FTRL:
825 s->regs[index] = value & 0x00003fff;
826 break;
827 case ENET_TACC:
828 s->regs[index] = value & 0x00000019;
829 break;
830 case ENET_RACC:
831 s->regs[index] = value & 0x000000C7;
832 break;
833 case ENET_ATCR:
834 s->regs[index] = value & 0x00002a9d;
835 break;
836 case ENET_ATVR:
837 case ENET_ATOFF:
838 case ENET_ATPER:
839 s->regs[index] = value;
840 break;
841 case ENET_ATSTMP:
842 /* ATSTMP is read only */
843 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Register ATSTMP is read only\n",
844 TYPE_IMX_FEC, __func__);
845 break;
846 case ENET_ATCOR:
847 s->regs[index] = value & 0x7fffffff;
848 break;
849 case ENET_ATINC:
850 s->regs[index] = value & 0x00007f7f;
851 break;
852 case ENET_TGSR:
853 /* implement clear timer flag */
854 value = value & 0x0000000f;
855 break;
856 case ENET_TCSR0:
857 case ENET_TCSR1:
858 case ENET_TCSR2:
859 case ENET_TCSR3:
860 value = value & 0x000000fd;
861 break;
862 case ENET_TCCR0:
863 case ENET_TCCR1:
864 case ENET_TCCR2:
865 case ENET_TCCR3:
866 s->regs[index] = value;
867 break;
868 default:
869 imx_default_write(s, index, value);
870 break;
871 }
872 }
873
874 static void imx_eth_write(void *opaque, hwaddr offset, uint64_t value,
875 unsigned size)
876 {
877 IMXFECState *s = IMX_FEC(opaque);
878 const bool single_tx_ring = !imx_eth_is_multi_tx_ring(s);
879 uint32_t index = offset >> 2;
880
881 FEC_PRINTF("reg[%s] <= 0x%" PRIx32 "\n", imx_eth_reg_name(s, index),
882 (uint32_t)value);
883
884 switch (index) {
885 case ENET_EIR:
886 s->regs[index] &= ~value;
887 break;
888 case ENET_EIMR:
889 s->regs[index] = value;
890 break;
891 case ENET_RDAR:
892 if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
893 if (!s->regs[index]) {
894 imx_eth_enable_rx(s, true);
895 }
896 } else {
897 s->regs[index] = 0;
898 }
899 break;
900 case ENET_TDAR1: /* FALLTHROUGH */
901 case ENET_TDAR2: /* FALLTHROUGH */
902 if (unlikely(single_tx_ring)) {
903 qemu_log_mask(LOG_GUEST_ERROR,
904 "[%s]%s: trying to access TDAR2 or TDAR1\n",
905 TYPE_IMX_FEC, __func__);
906 return;
907 }
908 case ENET_TDAR: /* FALLTHROUGH */
909 if (s->regs[ENET_ECR] & ENET_ECR_ETHEREN) {
910 s->regs[index] = ENET_TDAR_TDAR;
911 imx_eth_do_tx(s, index);
912 }
913 s->regs[index] = 0;
914 break;
915 case ENET_ECR:
916 if (value & ENET_ECR_RESET) {
917 return imx_eth_reset(DEVICE(s));
918 }
919 s->regs[index] = value;
920 if ((s->regs[index] & ENET_ECR_ETHEREN) == 0) {
921 s->regs[ENET_RDAR] = 0;
922 s->rx_descriptor = s->regs[ENET_RDSR];
923 s->regs[ENET_TDAR] = 0;
924 s->regs[ENET_TDAR1] = 0;
925 s->regs[ENET_TDAR2] = 0;
926 s->tx_descriptor[0] = s->regs[ENET_TDSR];
927 s->tx_descriptor[1] = s->regs[ENET_TDSR1];
928 s->tx_descriptor[2] = s->regs[ENET_TDSR2];
929 }
930 break;
931 case ENET_MMFR:
932 s->regs[index] = value;
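/* MMFR: bit 29 selects a read (1) or write (0) operation; bits 27:18 hold the
 * PHY and register addresses (decoded as a single 10-bit value below); bits 15:0 carry the data. */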
933 if (extract32(value, 29, 1)) {
934 /* This is a read operation */
935 s->regs[ENET_MMFR] = deposit32(s->regs[ENET_MMFR], 0, 16,
936 do_phy_read(s,
937 extract32(value,
938 18, 10)));
939 } else {
940 /* This is a write operation */
941 do_phy_write(s, extract32(value, 18, 10), extract32(value, 0, 16));
942 }
943 /* raise the interrupt as the PHY operation is done */
944 s->regs[ENET_EIR] |= ENET_INT_MII;
945 break;
946 case ENET_MSCR:
947 s->regs[index] = value & 0xfe;
948 break;
949 case ENET_MIBC:
950 /* TODO: Implement MIB. */
951 s->regs[index] = (value & 0x80000000) ? 0xc0000000 : 0;
952 break;
953 case ENET_RCR:
954 s->regs[index] = value & 0x07ff003f;
955 /* TODO: Implement LOOP mode. */
956 break;
957 case ENET_TCR:
958 /* We transmit immediately, so raise GRA immediately. */
959 s->regs[index] = value;
960 if (value & 1) {
961 s->regs[ENET_EIR] |= ENET_INT_GRA;
962 }
963 break;
964 case ENET_PALR:
965 s->regs[index] = value;
966 s->conf.macaddr.a[0] = value >> 24;
967 s->conf.macaddr.a[1] = value >> 16;
968 s->conf.macaddr.a[2] = value >> 8;
969 s->conf.macaddr.a[3] = value;
970 break;
971 case ENET_PAUR:
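/* The lower half of PAUR reads back as the PAUSE frame type field (0x8808). */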
972 s->regs[index] = (value | 0x0000ffff) & 0xffff8808;
973 s->conf.macaddr.a[4] = value >> 24;
974 s->conf.macaddr.a[5] = value >> 16;
975 break;
976 case ENET_OPD:
977 s->regs[index] = (value & 0x0000ffff) | 0x00010000;
978 break;
979 case ENET_IAUR:
980 case ENET_IALR:
981 case ENET_GAUR:
982 case ENET_GALR:
983 /* TODO: implement MAC hash filtering. */
984 break;
985 case ENET_TFWR:
986 if (s->is_fec) {
987 s->regs[index] = value & 0x3;
988 } else {
989 s->regs[index] = value & 0x13f;
990 }
991 break;
992 case ENET_RDSR:
993 if (s->is_fec) {
994 s->regs[index] = value & ~3;
995 } else {
996 s->regs[index] = value & ~7;
997 }
998 s->rx_descriptor = s->regs[index];
999 break;
1000 case ENET_TDSR:
1001 if (s->is_fec) {
1002 s->regs[index] = value & ~3;
1003 } else {
1004 s->regs[index] = value & ~7;
1005 }
1006 s->tx_descriptor[0] = s->regs[index];
1007 break;
1008 case ENET_TDSR1:
1009 if (unlikely(single_tx_ring)) {
1010 qemu_log_mask(LOG_GUEST_ERROR,
1011 "[%s]%s: trying to access TDSR1\n",
1012 TYPE_IMX_FEC, __func__);
1013 return;
1014 }
1015
1016 s->regs[index] = value & ~7;
1017 s->tx_descriptor[1] = s->regs[index];
1018 break;
1019 case ENET_TDSR2:
1020 if (unlikely(single_tx_ring)) {
1021 qemu_log_mask(LOG_GUEST_ERROR,
1022 "[%s]%s: trying to access TDSR2\n",
1023 TYPE_IMX_FEC, __func__);
1024 return;
1025 }
1026
1027 s->regs[index] = value & ~7;
1028 s->tx_descriptor[2] = s->regs[index];
1029 break;
1030 case ENET_MRBR:
1031 s->regs[index] = value & 0x00003ff0;
1032 break;
1033 default:
1034 if (s->is_fec) {
1035 imx_fec_write(s, index, value);
1036 } else {
1037 imx_enet_write(s, index, value);
1038 }
1039 return;
1040 }
1041
1042 imx_eth_update(s);
1043 }
1044
1045 static int imx_eth_can_receive(NetClientState *nc)
1046 {
1047 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1048
1049 FEC_PRINTF("\n");
1050
1051 return !!s->regs[ENET_RDAR];
1052 }
1053
1054 static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf,
1055 size_t len)
1056 {
1057 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1058 IMXFECBufDesc bd;
1059 uint32_t flags = 0;
1060 uint32_t addr;
1061 uint32_t crc;
1062 uint32_t buf_addr;
1063 uint8_t *crc_ptr;
1064 unsigned int buf_len;
1065 size_t size = len;
1066
1067 FEC_PRINTF("len %d\n", (int)size);
1068
1069 if (!s->regs[ENET_RDAR]) {
1070 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1071 TYPE_IMX_FEC, __func__);
1072 return 0;
1073 }
1074
1075 /* 4 bytes for the CRC. */
1076 size += 4;
1077 crc = cpu_to_be32(crc32(~0, buf, size));
1078 crc_ptr = (uint8_t *) &crc;
1079
1080 /* Huge frames are truncated. */
1081 if (size > ENET_MAX_FRAME_SIZE) {
1082 size = ENET_MAX_FRAME_SIZE;
1083 flags |= ENET_BD_TR | ENET_BD_LG;
1084 }
1085
1086 /* Frames larger than the user limit just set error flags. */
1087 if (size > (s->regs[ENET_RCR] >> 16)) {
1088 flags |= ENET_BD_LG;
1089 }
1090
1091 addr = s->rx_descriptor;
1092 while (size > 0) {
1093 imx_fec_read_bd(&bd, addr);
1094 if ((bd.flags & ENET_BD_E) == 0) {
1095 /* No descriptors available. Bail out. */
1096 /*
1097 * FIXME: This is wrong. We should probably either
1098 * save the remainder for when more RX buffers are
1099 * available, or flag an error.
1100 */
1101 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1102 TYPE_IMX_FEC, __func__);
1103 break;
1104 }
1105 buf_len = (size <= s->regs[ENET_MRBR]) ? size : s->regs[ENET_MRBR];
1106 bd.length = buf_len;
1107 size -= buf_len;
1108
1109 FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
1110
1111 /* The last 4 bytes are the CRC. */
1112 if (size < 4) {
1113 buf_len += size - 4;
1114 }
1115 buf_addr = bd.data;
1116 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1117 buf += buf_len;
1118 if (size < 4) {
1119 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1120 crc_ptr, 4 - size);
1121 crc_ptr += 4 - size;
1122 }
1123 bd.flags &= ~ENET_BD_E;
1124 if (size == 0) {
1125 /* Last buffer in frame. */
1126 bd.flags |= flags | ENET_BD_L;
1127 FEC_PRINTF("rx frame flags %04x\n", bd.flags);
1128 s->regs[ENET_EIR] |= ENET_INT_RXF;
1129 } else {
1130 s->regs[ENET_EIR] |= ENET_INT_RXB;
1131 }
1132 imx_fec_write_bd(&bd, addr);
1133 /* Advance to the next descriptor. */
1134 if ((bd.flags & ENET_BD_W) != 0) {
1135 addr = s->regs[ENET_RDSR];
1136 } else {
1137 addr += sizeof(bd);
1138 }
1139 }
1140 s->rx_descriptor = addr;
1141 imx_eth_enable_rx(s, false);
1142 imx_eth_update(s);
1143 return len;
1144 }
1145
1146 static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf,
1147 size_t len)
1148 {
1149 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1150 IMXENETBufDesc bd;
1151 uint32_t flags = 0;
1152 uint32_t addr;
1153 uint32_t crc;
1154 uint32_t buf_addr;
1155 uint8_t *crc_ptr;
1156 unsigned int buf_len;
1157 size_t size = len;
1158 bool shift16 = s->regs[ENET_RACC] & ENET_RACC_SHIFT16;
1159
1160 FEC_PRINTF("len %d\n", (int)size);
1161
1162 if (!s->regs[ENET_RDAR]) {
1163 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Unexpected packet\n",
1164 TYPE_IMX_FEC, __func__);
1165 return 0;
1166 }
1167
1168 /* 4 bytes for the CRC. */
1169 size += 4;
1170 crc = cpu_to_be32(crc32(~0, buf, size));
1171 crc_ptr = (uint8_t *) &crc;
1172
1173 if (shift16) {
1174 size += 2;
1175 }
1176
1177 /* Huge frames are truncated. */
1178 if (size > s->regs[ENET_FTRL]) {
1179 size = s->regs[ENET_FTRL];
1180 flags |= ENET_BD_TR | ENET_BD_LG;
1181 }
1182
1183 /* Frames larger than the user limit just set error flags. */
1184 if (size > (s->regs[ENET_RCR] >> 16)) {
1185 flags |= ENET_BD_LG;
1186 }
1187
1188 addr = s->rx_descriptor;
1189 while (size > 0) {
1190 imx_enet_read_bd(&bd, addr);
1191 if ((bd.flags & ENET_BD_E) == 0) {
1192 /* No descriptors available. Bail out. */
1193 /*
1194 * FIXME: This is wrong. We should probably either
1195 * save the remainder for when more RX buffers are
1196 * available, or flag an error.
1197 */
1198 qemu_log_mask(LOG_GUEST_ERROR, "[%s]%s: Lost end of frame\n",
1199 TYPE_IMX_FEC, __func__);
1200 break;
1201 }
1202 buf_len = MIN(size, s->regs[ENET_MRBR]);
1203 bd.length = buf_len;
1204 size -= buf_len;
1205
1206 FEC_PRINTF("rx_bd 0x%x length %d\n", addr, bd.length);
1207
1208 /* The last 4 bytes are the CRC. */
1209 if (size < 4) {
1210 buf_len += size - 4;
1211 }
1212 buf_addr = bd.data;
1213
1214 if (shift16) {
1215 /*
1216  * If the SHIFT16 bit of the ENETx_RACC register is set, we need to
1217  * align the payload to a 4-byte boundary.
1218 */
1219 const uint8_t zeros[2] = { 0 };
1220
1221 dma_memory_write(&address_space_memory, buf_addr,
1222 zeros, sizeof(zeros));
1223
1224 buf_addr += sizeof(zeros);
1225 buf_len -= sizeof(zeros);
1226
1227 /* We only do this once per Ethernet frame */
1228 shift16 = false;
1229 }
1230
1231 dma_memory_write(&address_space_memory, buf_addr, buf, buf_len);
1232 buf += buf_len;
1233 if (size < 4) {
1234 dma_memory_write(&address_space_memory, buf_addr + buf_len,
1235 crc_ptr, 4 - size);
1236 crc_ptr += 4 - size;
1237 }
1238 bd.flags &= ~ENET_BD_E;
1239 if (size == 0) {
1240 /* Last buffer in frame. */
1241 bd.flags |= flags | ENET_BD_L;
1242 FEC_PRINTF("rx frame flags %04x\n", bd.flags);
1243 if (bd.option & ENET_BD_RX_INT) {
1244 s->regs[ENET_EIR] |= ENET_INT_RXF;
1245 }
1246 } else {
1247 if (bd.option & ENET_BD_RX_INT) {
1248 s->regs[ENET_EIR] |= ENET_INT_RXB;
1249 }
1250 }
1251 imx_enet_write_bd(&bd, addr);
1252 /* Advance to the next descriptor. */
1253 if ((bd.flags & ENET_BD_W) != 0) {
1254 addr = s->regs[ENET_RDSR];
1255 } else {
1256 addr += sizeof(bd);
1257 }
1258 }
1259 s->rx_descriptor = addr;
1260 imx_eth_enable_rx(s, false);
1261 imx_eth_update(s);
1262 return len;
1263 }
1264
1265 static ssize_t imx_eth_receive(NetClientState *nc, const uint8_t *buf,
1266 size_t len)
1267 {
1268 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1269
1270 if (!s->is_fec && (s->regs[ENET_ECR] & ENET_ECR_EN1588)) {
1271 return imx_enet_receive(nc, buf, len);
1272 } else {
1273 return imx_fec_receive(nc, buf, len);
1274 }
1275 }
1276
1277 static const MemoryRegionOps imx_eth_ops = {
1278 .read = imx_eth_read,
1279 .write = imx_eth_write,
1280 .valid.min_access_size = 4,
1281 .valid.max_access_size = 4,
1282 .endianness = DEVICE_NATIVE_ENDIAN,
1283 };
1284
1285 static void imx_eth_cleanup(NetClientState *nc)
1286 {
1287 IMXFECState *s = IMX_FEC(qemu_get_nic_opaque(nc));
1288
1289 s->nic = NULL;
1290 }
1291
1292 static NetClientInfo imx_eth_net_info = {
1293 .type = NET_CLIENT_DRIVER_NIC,
1294 .size = sizeof(NICState),
1295 .can_receive = imx_eth_can_receive,
1296 .receive = imx_eth_receive,
1297 .cleanup = imx_eth_cleanup,
1298 .link_status_changed = imx_eth_set_link,
1299 };
1300
1301
1302 static void imx_eth_realize(DeviceState *dev, Error **errp)
1303 {
1304 IMXFECState *s = IMX_FEC(dev);
1305 SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1306
1307 memory_region_init_io(&s->iomem, OBJECT(dev), &imx_eth_ops, s,
1308 TYPE_IMX_FEC, FSL_IMX25_FEC_SIZE);
1309 sysbus_init_mmio(sbd, &s->iomem);
1310 sysbus_init_irq(sbd, &s->irq[0]);
1311 sysbus_init_irq(sbd, &s->irq[1]);
1312
1313 qemu_macaddr_default_if_unset(&s->conf.macaddr);
1314
1315 s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf,
1316 object_get_typename(OBJECT(dev)),
1317 DEVICE(dev)->id, s);
1318
1319 qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a);
1320 }
1321
1322 static Property imx_eth_properties[] = {
1323 DEFINE_NIC_PROPERTIES(IMXFECState, conf),
1324 DEFINE_PROP_UINT32("tx-ring-num", IMXFECState, tx_ring_num, 1),
1325 DEFINE_PROP_END_OF_LIST(),
1326 };
1327
1328 static void imx_eth_class_init(ObjectClass *klass, void *data)
1329 {
1330 DeviceClass *dc = DEVICE_CLASS(klass);
1331
1332 dc->vmsd = &vmstate_imx_eth;
1333 dc->reset = imx_eth_reset;
1334 dc->props = imx_eth_properties;
1335 dc->realize = imx_eth_realize;
1336 dc->desc = "i.MX FEC/ENET Ethernet Controller";
1337 }
1338
1339 static void imx_fec_init(Object *obj)
1340 {
1341 IMXFECState *s = IMX_FEC(obj);
1342
1343 s->is_fec = true;
1344 }
1345
1346 static void imx_enet_init(Object *obj)
1347 {
1348 IMXFECState *s = IMX_FEC(obj);
1349
1350 s->is_fec = false;
1351 }
1352
1353 static const TypeInfo imx_fec_info = {
1354 .name = TYPE_IMX_FEC,
1355 .parent = TYPE_SYS_BUS_DEVICE,
1356 .instance_size = sizeof(IMXFECState),
1357 .instance_init = imx_fec_init,
1358 .class_init = imx_eth_class_init,
1359 };
1360
1361 static const TypeInfo imx_enet_info = {
1362 .name = TYPE_IMX_ENET,
1363 .parent = TYPE_IMX_FEC,
1364 .instance_init = imx_enet_init,
1365 };
1366
1367 static void imx_eth_register_types(void)
1368 {
1369 type_register_static(&imx_fec_info);
1370 type_register_static(&imx_enet_info);
1371 }
1372
1373 type_init(imx_eth_register_types)