]> git.ipfire.org Git - people/ms/u-boot.git/blob - drivers/net/ldpaa_eth/ldpaa_eth.c
driver/ldpaa_eth: Add LDPAA Ethernet driver
[people/ms/u-boot.git] / drivers / net / ldpaa_eth / ldpaa_eth.c
1 /*
2 * Copyright (C) 2014 Freescale Semiconductor
3 *
4 * SPDX-License-Identifier: GPL-2.0+
5 */
6
7 #include <common.h>
8 #include <asm/io.h>
9 #include <asm/types.h>
10 #include <malloc.h>
11 #include <net.h>
12 #include <hwconfig.h>
13 #include <phy.h>
14 #include <linux/compat.h>
15
16 #include "ldpaa_eth.h"
17
/*
 * PHY initialisation hook.
 *
 * External PHY configuration is not implemented yet (TODO); the
 * function currently reports success unconditionally.
 */
static int init_phy(struct eth_device *dev)
{
	return 0;
}
24
25 static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
26 const struct dpaa_fd *fd)
27 {
28 u64 fd_addr;
29 uint16_t fd_offset;
30 uint32_t fd_length;
31 struct ldpaa_fas *fas;
32 uint32_t status, err;
33 struct qbman_release_desc releasedesc;
34 struct qbman_swp *swp = dflt_dpio->sw_portal;
35
36 invalidate_dcache_all();
37
38 fd_addr = ldpaa_fd_get_addr(fd);
39 fd_offset = ldpaa_fd_get_offset(fd);
40 fd_length = ldpaa_fd_get_len(fd);
41
42 debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
43
44 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
45 /* Read the frame annotation status word and check for errors */
46 fas = (struct ldpaa_fas *)
47 ((uint8_t *)(fd_addr) +
48 priv->buf_layout.private_data_size);
49 status = le32_to_cpu(fas->status);
50 if (status & LDPAA_ETH_RX_ERR_MASK) {
51 printf("Rx frame error(s): 0x%08x\n",
52 status & LDPAA_ETH_RX_ERR_MASK);
53 goto error;
54 } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
55 printf("Unsupported feature in bitmask: 0x%08x\n",
56 status & LDPAA_ETH_RX_UNSUPP_MASK);
57 goto error;
58 }
59 }
60
61 debug("Rx frame: To Upper layer\n");
62 net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
63 fd_length);
64
65 error:
66 qbman_release_desc_clear(&releasedesc);
67 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
68 do {
69 /* Release buffer into the QBMAN */
70 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
71 } while (err == -EBUSY);
72 return;
73 }
74
/*
 * Poll the default Rx frame queue once and deliver any received frame
 * to the network stack.
 *
 * A volatile dequeue (pull) command is issued to the QBMAN software
 * portal; `while (--i)` with i = 5 gives at most four attempts.  A
 * dequeued DQRR entry is only treated as a frame when the VALIDFRAME
 * flag is set; in either case the entry is consumed so the portal can
 * reuse the ring slot.
 *
 * Returns 0, or the last (negative) qbman_swp_pull() error code.
 */
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status;
	/* NOTE(review): static, yet fully re-initialised below on every
	 * call - confirm the static qualifier is intentional.
	 */
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

	while (--i) {
		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}

		dq = qbman_swp_dqrr_next(swp);
		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				break;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}

	return err;
}
122
123 static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
124 const struct dpaa_fd *fd)
125 {
126 uint64_t fd_addr;
127 struct ldpaa_fas *fas;
128 uint32_t status, err;
129 struct qbman_release_desc releasedesc;
130 struct qbman_swp *swp = dflt_dpio->sw_portal;
131
132 invalidate_dcache_all();
133 fd_addr = ldpaa_fd_get_addr(fd);
134
135
136 debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
137
138 /* Check the status from the Frame Annotation */
139 if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
140 fas = (struct ldpaa_fas *)
141 ((uint8_t *)(fd_addr) +
142 priv->buf_layout.private_data_size);
143 status = le32_to_cpu(fas->status);
144 if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
145 printf("TxConf frame error(s): 0x%08x\n",
146 status & LDPAA_ETH_TXCONF_ERR_MASK);
147 }
148 }
149
150 qbman_release_desc_clear(&releasedesc);
151 qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
152 do {
153 /* Release buffer into the QBMAN */
154 err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
155 } while (err == -EBUSY);
156 }
157
/*
 * Poll the Tx confirmation frame queue once and process the
 * confirmation for a previously enqueued frame.
 *
 * Mirror of ldpaa_eth_pull_dequeue_rx(): a pull command is issued to
 * the QBMAN portal with at most four attempts (`while (--i)` from 5);
 * the DQRR entry is consumed whether or not it carries a valid frame.
 *
 * Returns 0, or the last (negative) qbman_swp_pull() error code.
 */
static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
{
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int err = 0;
	int i = 5, status;
	/* NOTE(review): static, yet fully re-initialised below on every
	 * call - confirm the static qualifier is intentional.
	 */
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, 1);
	qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);

	while (--i) {
		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue TX conf frames error:0x%08x\n", err);
			continue;
		}

		dq = qbman_swp_dqrr_next(swp);
		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue TX conf frames:");
				debug("No frame is delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				break;
			}
			fd = ldpaa_dq_fd(dq);

			ldpaa_eth_tx_conf(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}

	return err;
}
203
204 static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
205 {
206 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
207 struct dpaa_fd fd;
208 u64 buffer_start;
209 int data_offset, err;
210 struct qbman_swp *swp = dflt_dpio->sw_portal;
211 struct qbman_eq_desc ed;
212
213 /* Setup the FD fields */
214 memset(&fd, 0, sizeof(fd));
215
216 data_offset = priv->tx_data_offset;
217
218 do {
219 err = qbman_swp_acquire(dflt_dpio->sw_portal,
220 dflt_dpbp->dpbp_attr.bpid,
221 &buffer_start, 1);
222 } while (err == -EBUSY);
223
224 if (err < 0) {
225 printf("qbman_swp_acquire() failed\n");
226 return -ENOMEM;
227 }
228
229 debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);
230
231 memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
232
233 flush_dcache_range(buffer_start, LDPAA_ETH_RX_BUFFER_SIZE);
234
235 ldpaa_fd_set_addr(&fd, (u64)buffer_start);
236 ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
237 ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
238 ldpaa_fd_set_len(&fd, len);
239
240 fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
241 LDPAA_FD_CTRL_PTV1;
242
243 qbman_eq_desc_clear(&ed);
244 qbman_eq_desc_set_no_orp(&ed, 0);
245 qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
246 err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
247 if (err < 0)
248 printf("error enqueueing Tx frame\n");
249
250 mdelay(1);
251
252 err = ldpaa_eth_pull_dequeue_tx_conf(priv);
253 if (err < 0)
254 printf("error Tx Conf frame\n");
255
256 return err;
257 }
258
259 static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
260 {
261 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
262 struct dpni_queue_attr rx_queue_attr;
263 struct dpni_tx_flow_attr tx_flow_attr;
264 uint8_t mac_addr[6];
265 int err;
266
267 if (net_dev->state == ETH_STATE_ACTIVE)
268 return 0;
269
270 /* DPNI initialization */
271 err = ldpaa_dpni_setup(priv);
272 if (err < 0)
273 goto err_dpni_setup;
274
275 err = ldpaa_dpbp_setup();
276 if (err < 0)
277 goto err_dpbp_setup;
278
279 /* DPNI binding DPBP */
280 err = ldpaa_dpni_bind(priv);
281 if (err)
282 goto err_bind;
283
284 err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
285 mac_addr);
286 if (err) {
287 printf("dpni_get_primary_mac_addr() failed\n");
288 return err;
289 }
290
291 memcpy(net_dev->enetaddr, mac_addr, 0x6);
292
293 /* setup the MAC address */
294 if (net_dev->enetaddr[0] & 0x01) {
295 printf("%s: MacAddress is multcast address\n", __func__);
296 return 1;
297 }
298
299 #ifdef CONFIG_PHYLIB
300 /* TODO Check this path */
301 ret = phy_startup(priv->phydev);
302 if (ret) {
303 printf("%s: Could not initialize\n", priv->phydev->dev->name);
304 return ret;
305 }
306 #else
307 priv->phydev->speed = SPEED_1000;
308 priv->phydev->link = 1;
309 priv->phydev->duplex = DUPLEX_FULL;
310 #endif
311
312 err = dpni_enable(dflt_mc_io, priv->dpni_handle);
313 if (err < 0) {
314 printf("dpni_enable() failed\n");
315 return err;
316 }
317
318 /* TODO: support multiple Rx flows */
319 err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
320 &rx_queue_attr);
321 if (err) {
322 printf("dpni_get_rx_flow() failed\n");
323 goto err_rx_flow;
324 }
325
326 priv->rx_dflt_fqid = rx_queue_attr.fqid;
327
328 err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
329 if (err) {
330 printf("dpni_get_qdid() failed\n");
331 goto err_qdid;
332 }
333
334 err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
335 &tx_flow_attr);
336 if (err) {
337 printf("dpni_get_tx_flow() failed\n");
338 goto err_tx_flow;
339 }
340
341 priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
342
343 if (!priv->phydev->link)
344 printf("%s: No link.\n", priv->phydev->dev->name);
345
346 return priv->phydev->link ? 0 : -1;
347
348 err_tx_flow:
349 err_qdid:
350 err_rx_flow:
351 dpni_disable(dflt_mc_io, priv->dpni_handle);
352 err_bind:
353 ldpaa_dpbp_free();
354 err_dpbp_setup:
355 dpni_close(dflt_mc_io, priv->dpni_handle);
356 err_dpni_setup:
357 return err;
358 }
359
360 static void ldpaa_eth_stop(struct eth_device *net_dev)
361 {
362 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
363 int err = 0;
364
365 if (net_dev->state == ETH_STATE_PASSIVE)
366 return;
367 /* Stop Tx and Rx traffic */
368 err = dpni_disable(dflt_mc_io, priv->dpni_handle);
369 if (err < 0)
370 printf("dpni_disable() failed\n");
371
372 #ifdef CONFIG_PHYLIB
373 phy_shutdown(priv->phydev);
374 #endif
375
376 ldpaa_dpbp_free();
377 dpni_reset(dflt_mc_io, priv->dpni_handle);
378 dpni_close(dflt_mc_io, priv->dpni_handle);
379 }
380
381 static void ldpaa_dpbp_drain_cnt(int count)
382 {
383 uint64_t buf_array[7];
384 void *addr;
385 int ret, i;
386
387 BUG_ON(count > 7);
388
389 do {
390 ret = qbman_swp_acquire(dflt_dpio->sw_portal,
391 dflt_dpbp->dpbp_attr.bpid,
392 buf_array, count);
393 if (ret < 0) {
394 printf("qbman_swp_acquire() failed\n");
395 return;
396 }
397 for (i = 0; i < ret; i++) {
398 addr = (void *)buf_array[i];
399 debug("Free: buffer addr =0x%p\n", addr);
400 free(addr);
401 }
402 } while (ret);
403 }
404
405 static void ldpaa_dpbp_drain(void)
406 {
407 int i;
408 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
409 ldpaa_dpbp_drain_cnt(7);
410 }
411
/*
 * Allocate up to 7 Rx buffers and release them into the buffer pool
 * identified by @bpid (7 is the most a single qbman_swp_release()
 * command can carry).
 *
 * If an allocation fails part-way, the buffers obtained so far are
 * still released into the pool.
 *
 * Returns the number of buffers actually handed to the pool (0..7).
 */
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		/* Cache-line aligned so cache maintenance on one buffer
		 * never clips a neighbouring allocation.
		 */
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr =0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */

	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	/* Partial success: release the i buffers allocated so far */
	if (i)
		goto release_bufs;

	return 0;
}
451
452 static int ldpaa_dpbp_seed(uint16_t bpid)
453 {
454 int i;
455 int count;
456
457 for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
458 count = ldpaa_bp_add_7(bpid);
459 if (count < 7)
460 printf("Buffer Seed= %d\n", count);
461 }
462
463 return 0;
464 }
465
466 static int ldpaa_dpbp_setup(void)
467 {
468 int err;
469
470 err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
471 &dflt_dpbp->dpbp_handle);
472 if (err) {
473 printf("dpbp_open() failed\n");
474 goto err_open;
475 }
476
477 err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
478 if (err) {
479 printf("dpbp_enable() failed\n");
480 goto err_enable;
481 }
482
483 err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
484 &dflt_dpbp->dpbp_attr);
485 if (err) {
486 printf("dpbp_get_attributes() failed\n");
487 goto err_get_attr;
488 }
489
490 err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
491 if (err) {
492 printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
493 dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
494 goto err_seed;
495 }
496
497 return 0;
498
499 err_seed:
500 err_get_attr:
501 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
502 err_enable:
503 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
504 err_open:
505 return err;
506 }
507
508 static void ldpaa_dpbp_free(void)
509 {
510 ldpaa_dpbp_drain();
511 dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
512 dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
513 dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
514 }
515
516 static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
517 {
518 int err;
519
520 /* and get a handle for the DPNI this interface is associate with */
521 err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
522 if (err) {
523 printf("dpni_open() failed\n");
524 goto err_open;
525 }
526
527 err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
528 &priv->dpni_attrs);
529 if (err) {
530 printf("dpni_get_attributes() failed (err=%d)\n", err);
531 goto err_get_attr;
532 }
533
534 /* Configure our buffers' layout */
535 priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
536 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
537 DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
538 priv->buf_layout.pass_parser_result = true;
539 priv->buf_layout.pass_frame_status = true;
540 priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
541 /* ...rx, ... */
542 err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
543 &priv->buf_layout);
544 if (err) {
545 printf("dpni_set_rx_buffer_layout() failed");
546 goto err_buf_layout;
547 }
548
549 /* ... tx, ... */
550 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
551 err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
552 &priv->buf_layout);
553 if (err) {
554 printf("dpni_set_tx_buffer_layout() failed");
555 goto err_buf_layout;
556 }
557
558 /* ... tx-confirm. */
559 priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
560 err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
561 &priv->buf_layout);
562 if (err) {
563 printf("dpni_set_tx_conf_buffer_layout() failed");
564 goto err_buf_layout;
565 }
566
567 /* Now that we've set our tx buffer layout, retrieve the minimum
568 * required tx data offset.
569 */
570 err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
571 &priv->tx_data_offset);
572 if (err) {
573 printf("dpni_get_tx_data_offset() failed\n");
574 goto err_data_offset;
575 }
576
577 /* Warn in case TX data offset is not multiple of 64 bytes. */
578 WARN_ON(priv->tx_data_offset % 64);
579
580 /* Accomodate SWA space. */
581 priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
582 debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);
583
584 return 0;
585
586 err_data_offset:
587 err_buf_layout:
588 err_get_attr:
589 dpni_close(dflt_mc_io, priv->dpni_handle);
590 err_open:
591 return err;
592 }
593
594 static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
595 {
596 struct dpni_pools_cfg pools_params;
597 struct dpni_tx_flow_cfg dflt_tx_flow;
598 int err = 0;
599
600 pools_params.num_dpbp = 1;
601 pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
602 pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
603 err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
604 if (err) {
605 printf("dpni_set_pools() failed\n");
606 return err;
607 }
608
609 priv->tx_flow_id = DPNI_NEW_FLOW_ID;
610 memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
611
612 err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
613 &priv->tx_flow_id, &dflt_tx_flow);
614 if (err) {
615 printf("dpni_set_tx_flow() failed\n");
616 return err;
617 }
618
619 return 0;
620 }
621
622 static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
623 {
624 int err;
625 struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
626
627 if (priv->type == LDPAA_ETH_1G_E)
628 sprintf(net_dev->name, "DTSEC%d", priv->dpni_id);
629 else
630 sprintf(net_dev->name, "TGEC%d", priv->dpni_id);
631
632 net_dev->iobase = 0;
633 net_dev->init = ldpaa_eth_open;
634 net_dev->halt = ldpaa_eth_stop;
635 net_dev->send = ldpaa_eth_tx;
636 net_dev->recv = ldpaa_eth_pull_dequeue_rx;
637 /*
638 TODO: PHY MDIO information
639 priv->bus = info->bus;
640 priv->phyaddr = info->phy_addr;
641 priv->enet_if = info->enet_if;
642 */
643
644 if (init_phy(net_dev))
645 return 0;
646
647 err = eth_register(net_dev);
648 if (err < 0) {
649 printf("eth_register() = %d\n", err);
650 return err;
651 }
652
653 return 0;
654 }
655
656 int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
657 {
658 struct eth_device *net_dev = NULL;
659 struct ldpaa_eth_priv *priv = NULL;
660 int err = 0;
661
662
663 /* Net device */
664 net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
665 if (!net_dev) {
666 printf("eth_device malloc() failed\n");
667 return -ENOMEM;
668 }
669 memset(net_dev, 0, sizeof(struct eth_device));
670
671 /* alloc the ldpaa ethernet private struct */
672 priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
673 if (!priv) {
674 printf("ldpaa_eth_priv malloc() failed\n");
675 return -ENOMEM;
676 }
677 memset(priv, 0, sizeof(struct ldpaa_eth_priv));
678
679 net_dev->priv = (void *)priv;
680 priv->net_dev = (struct eth_device *)net_dev;
681 priv->dpni_id = obj_desc.id;
682
683 err = ldpaa_eth_netdev_init(net_dev);
684 if (err)
685 goto err_netdev_init;
686
687 debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
688 return 0;
689
690 err_netdev_init:
691 free(priv);
692 net_dev->priv = NULL;
693 free(net_dev);
694
695 return err;
696 }