/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"
#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2
static const struct of_device_id xgene_enet_of_match[];
static const struct acpi_device_id xgene_enet_acpi_match[];
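
/* The buffer pool supplies the hardware with free 2KB Rx buffers as
 * 16-byte descriptors.  Each slot is seeded with its own index (USERINFO)
 * and the pool's destination ring number (FPQNUM), so a buffer handed
 * back by the hardware can be matched to its rx_skb[] entry.
 */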
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_raw_desc16 *raw_desc;
	int i;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
				SET_VAL(STASH, 3));
	}
}
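
/* Refill the pool with nbuf freshly allocated, DMA-mapped skbs and
 * publish them to the hardware in a single wr_cmd batch; the software
 * tail is only advanced once the command has been issued.
 */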
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
				     u32 nbuf)
{
	struct sk_buff *skb;
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	struct device *dev;
	dma_addr_t dma_addr;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;
	u16 bufdatalen, len;
	int i;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);
	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_MAX_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];

		skb = netdev_alloc_skb_ip_align(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;
		buf_pool->rx_skb[tail] = skb;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* Hardware expects descriptor in little endian format */
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
					   SET_BIT(COHERENT));
		tail = (tail + 1) & slots;
	}

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;

	return 0;
}
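
/* A destination ring number combines the ring manager select (rm) in the
 * upper bits with the 10-bit ring number below it.
 */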
static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	return ((u16)pdata->rm << 10) | ring->num;
}
static u8 xgene_enet_hdr_len(const void *data)
{
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;
}
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
	struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;
	u32 slots = buf_pool->slots - 1;
	u32 tail = buf_pool->tail;
	u32 userinfo;
	int i, len;

	len = pdata->ring_ops->len(buf_pool);
	for (i = 0; i < len; i++) {
		tail = (tail - 1) & slots;
		raw_desc = &buf_pool->raw_desc16[tail];

		/* Hardware stores descriptor in little endian format */
		userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
		dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
	}

	pdata->ring_ops->wr_cmd(buf_pool, -len);
	buf_pool->tail = tail;
}
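
/* Rx and Tx-completion queues share this handler: mask the line and hand
 * the ring to NAPI.  enable_irq() is issued from xgene_enet_napi() once
 * the poll budget is no longer exhausted.
 */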
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
{
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
	}

	return IRQ_HANDLED;
}
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
{
	struct sk_buff *skb;
	struct device *dev;
	skb_frag_t *frag;
	dma_addr_t *frag_dma_addr;
	u16 skb_index;
	u8 status;
	int i, ret = 0;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 skb_headlen(skb), DMA_TO_DEVICE);

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),
			       DMA_TO_DEVICE);
	}

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		xgene_enet_parse_error(cp_ring, netdev_priv(cp_ring->ndev),
				       status);
		ret = -EIO;
	}

	if (likely(skb)) {
		dev_kfree_skb_any(skb);
	} else {
		netdev_err(cp_ring->ndev, "completion skb is NULL\n");
		ret = -EIO;
	}

	return ret;
}
static u64 xgene_enet_work_msg(struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct iphdr *iph;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u64 hopinfo = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))
		goto out;

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))
		goto out;

	iph = ip_hdr(skb);
	if (unlikely(ip_is_fragment(iph)))
		goto out;

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		csum_enable = 1;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_shinfo(skb)->frags[i].size;

				/* HW requires the header to reside in the
				 * first 3 buffers
				 */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))
						return 0;
				}
			}

			if (!mss || ((skb->len - hdr_len) <= mss))
				goto out;

			if (mss != pdata->mss) {
				pdata->mss = mss;
				pdata->mac_ops->set_mss(pdata);
			}
			hopinfo |= SET_BIT(ET);
		}
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;
		csum_enable = 1;
	}
out:
	l3hlen = ip_hdrlen(skb) >> 2;
	hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		   SET_VAL(IPHDR, l3hlen) |
		   SET_VAL(ETHHDR, ethhdr) |
		   SET_VAL(EC, csum_enable) |
		   SET_VAL(IS, proto) |
		   SET_BIT(IC) |
		   SET_BIT(TYPE_ETH_WORK_MESSAGE);

	return hopinfo;
}
static u16 xgene_enet_encode_len(u16 len)
{
	return (len == BUFLEN_16K) ? 0 : len;
}
static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
{
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));
}
static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
{
	__le64 *exp_bufs;

	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

	return exp_bufs;
}
static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
{
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
}
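
/* One work message addresses up to four buffers: the linear part in the
 * primary descriptor and up to three fragments in the extended (NV)
 * descriptor.  Longer scatter lists spill into the per-slot exp_bufs
 * array (the LL, "linked list", path), and any fragment larger than
 * BUFLEN_16K is split because the buffer length field tops out at 16K.
 */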
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
{
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	skb_frag_t *frag;
	u16 tail = tx_ring->tail;
	u64 hopinfo;
	u32 len, hw_len;
	u8 ll = 0, nv = 0, idx = 0;
	bool split = false;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	hopinfo = xgene_enet_work_msg(skb);
	if (!hopinfo)
		return -EINVAL;
	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |
				   hopinfo);

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");
		return -EINVAL;
	}

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |
				   SET_BIT(COHERENT));

	if (!skb_is_nonlinear(skb))
		goto out;

	/* scatter gather */
	nv = 1;
	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4 ; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		if (!split) {
			frag = &skb_shinfo(skb)->frags[fidx];
			size = skb_frag_size(frag);
			offset = 0;

			pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(dev, pbuf_addr))
				return -EINVAL;

			frag_dma_addr[fidx] = pbuf_addr;
			fidx++;

			if (size > BUFLEN_16K)
				split = true;
		}

		if (size > BUFLEN_16K) {
			len = BUFLEN_16K;
			size -= BUFLEN_16K;
		} else {
			len = size;
			split = false;
		}

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

		switch (i) {
		case 0:
		case 1:
		case 2:
			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);
			break;
		case 3:
			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
						   hw_len);
				idx++;
				ell_bytes += len;
			} else {
				xgene_set_addr_len(exp_desc, i, dma_addr,
						   hw_len);
			}
			break;
		default:
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);
			idx++;
			ell_bytes += len;
			break;
		}

		if (split)
			offset += BUFLEN_16K;
	}
	count++;

	if (idx) {
		ll = 1;
		dma_addr = dma_map_single(dev, exp_bufs,
					  sizeof(u64) * MAX_EXP_BUFFS,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}
		i = ell_bytes >> LL_BYTES_LSB_LEN;
		exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					  SET_VAL(LL_BYTES_MSB, i) |
					  SET_VAL(LL_LEN, idx));
		raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));
	}

out:
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	tx_ring->tail = tail;

	return count;
}
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
	u32 tx_level, cq_level;
	int count;

	tx_level = pdata->ring_ops->len(tx_ring);
	cq_level = pdata->ring_ops->len(cp_ring);
	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
		     cq_level > pdata->cp_qcnt_hi)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))
		return NETDEV_TX_OK;

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
	if (count <= 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pdata->ring_ops->wr_cmd(tx_ring, count);
	skb_tx_timestamp(skb);

	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;
}
static void xgene_enet_skip_csum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	if (!ip_is_fragment(iph) ||
	    (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev;
	struct xgene_enet_desc_ring *buf_pool;
	u32 datalen, skb_index;
	struct sk_buff *skb;
	u8 status;
	int ret = 0;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];

	/* checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		dev_kfree_skb_any(skb);
		xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
				       status);
		pdata->stats.rx_dropped++;
		ret = -EIO;
		goto out;
	}

	/* strip off CRC as HW isn't doing this */
	datalen = GET_VAL(BUFDATALEN, le64_to_cpu(raw_desc->m1));
	datalen = (datalen & DATALEN_MASK) - 4;
	prefetch(skb->data - NET_IP_ALIGN);
	skb_put(skb, datalen);

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, ndev);
	if (likely((ndev->features & NETIF_F_IP_CSUM) &&
		   skb->protocol == htons(ETH_P_IP))) {
		xgene_enet_skip_csum(skb);
	}

	pdata->stats.rx_packets++;
	pdata->stats.rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);
out:
	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;
	}

	return ret;
}
static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
{
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
}
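
/* Rx frames and Tx completions arrive on the same queue; a non-zero
 * free-pool queue number marks a descriptor as an Rx frame.
 */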
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
				   int budget)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, count = 0, processed = 0;

	do {
		raw_desc = &ring->raw_desc[head];
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

		/* read fpqnum field after dataaddr field */
		dma_rmb();
		if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
			head = (head + 1) & slots;
			exp_desc = &ring->raw_desc[head];

			if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
				head = (head - 1) & slots;
				break;
			}
			dma_rmb();
			count++;
		}
		if (is_rx_desc(raw_desc))
			ret = xgene_enet_rx_frame(ring, raw_desc);
		else
			ret = xgene_enet_tx_completion(ring, raw_desc);
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
		processed++;

		if (ret)
			break;
	} while (--budget);

	if (likely(count)) {
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

		if (netif_queue_stopped(ring->ndev)) {
			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
				netif_wake_queue(ring->ndev);
		}
	}

	return processed;
}
static int xgene_enet_napi(struct napi_struct *napi, const int budget)
{
	struct xgene_enet_desc_ring *ring;
	int processed;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete(napi);
		enable_irq(ring->irq);
	}

	return processed;
}
static void xgene_enet_timeout(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	pdata->mac_ops->reset(pdata);
}
static int xgene_enet_register_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	int ret;

	ring = pdata->rx_ring;
	ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
			       IRQF_SHARED, ring->irq_name, ring);
	if (ret)
		netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name);

	if (pdata->cq_cnt) {
		ring = pdata->tx_ring->cp_ring;
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       IRQF_SHARED, ring->irq_name, ring);
		if (ret)
			netdev_err(ndev, "Failed to request irq %s\n",
				   ring->irq_name);
	}

	return ret;
}
static void xgene_enet_free_irq(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);

	if (pdata->cq_cnt) {
		devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
			      pdata->tx_ring->cp_ring);
	}
}
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_enable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_enable(napi);
	}
}
static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	napi_disable(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		napi_disable(napi);
	}
}
static int xgene_enet_open(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	int ret;

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);

	ret = xgene_enet_register_irq(ndev);
	if (ret)
		return ret;
	xgene_enet_napi_enable(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_start(pdata->phy_dev);
	else
		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);

	netif_carrier_off(ndev);
	netif_start_queue(ndev);

	return ret;
}
static int xgene_enet_close(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_stop_queue(ndev);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		phy_stop(pdata->phy_dev);
	else
		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_napi_disable(pdata);
	xgene_enet_free_irq(ndev);
	xgene_enet_process_ring(pdata->rx_ring, -1);

	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

	return 0;
}
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct xgene_enet_desc_ring *buf_pool;

	if (pdata->tx_ring) {
		xgene_enet_delete_ring(pdata->tx_ring);
		pdata->tx_ring = NULL;
	}

	if (pdata->rx_ring) {
		buf_pool = pdata->rx_ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		xgene_enet_delete_ring(pdata->rx_ring);
		pdata->rx_ring = NULL;
	}
}
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
{
	int size = -EINVAL;

	switch (cfgsize) {
	case RING_CFGSIZE_512B:
		size = 0x200;
		break;
	case RING_CFGSIZE_2KB:
		size = 0x800;
		break;
	case RING_CFGSIZE_16KB:
		size = 0x4000;
		break;
	case RING_CFGSIZE_64KB:
		size = 0x10000;
		break;
	case RING_CFGSIZE_512KB:
		size = 0x80000;
		break;
	default:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);
		break;
	}

	return size;
}
static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata;
	struct device *dev;

	if (!ring)
		return;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
	}
	devm_kfree(dev, ring);
}
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	ring = pdata->tx_ring;
	if (ring) {
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);
		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);
		xgene_enet_free_desc_ring(ring);
	}

	ring = pdata->rx_ring;
	if (ring) {
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);
			xgene_enet_free_desc_ring(ring->buf_pool);
		}
		xgene_enet_free_desc_ring(ring);
	}
}
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
{
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {
		return true;
	}

	return false;
}
static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
{
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);
}
static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
	struct xgene_enet_desc_ring *ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	int size;

	size = xgene_enet_get_ring_size(dev, cfgsize);
	if (size < 0)
		return NULL;

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),
			    GFP_KERNEL);
	if (!ring)
		return NULL;

	ring->ndev = ndev;
	ring->num = ring_num;
	ring->cfgsize = cfgsize;
	ring->id = ring_id;

	ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);
		return NULL;
	}
	ring->size = size;

	if (is_irq_mbox_required(pdata, ring)) {
		ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
				&ring->irq_mbox_dma, GFP_KERNEL);
		if (!ring->irq_mbox_addr) {
			dma_free_coherent(dev, size, ring->desc_addr,
					  ring->dma);
			devm_kfree(dev, ring);
			return NULL;
		}
	}

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);

	return ring;
}
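
/* A ring id encodes the owner (CPU or an Ethernet port) above a six-bit
 * per-owner buffer number.
 */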
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
{
	return (owner << 6) | (bufnum & GENMASK(5, 0));
}
static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
{
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			break;
		default:
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
						RING_OWNER_ETH1;
			break;
		}
	} else {
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;
	}

	return owner;
}
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;
	u8 cpu_bufnum = pdata->cpu_bufnum;
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	u16 ring_id;
	int size, ret;

	/* allocate rx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
	rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!rx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	/* allocate buffer pool for receiving packets */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
	buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,
					       RING_CFGSIZE_2KB, ring_id);
	if (!buf_pool) {
		ret = -ENOMEM;
		goto err;
	}

	rx_ring->nbufpool = NUM_BUFPOOL;
	rx_ring->buf_pool = buf_pool;
	rx_ring->irq = pdata->rx_irq;
	if (!pdata->cq_cnt) {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			 ndev->name);
	} else {
		snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name);
	}
	buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
					sizeof(struct sk_buff *), GFP_KERNEL);
	if (!buf_pool->rx_skb) {
		ret = -ENOMEM;
		goto err;
	}

	buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
	rx_ring->buf_pool = buf_pool;
	pdata->rx_ring = rx_ring;

	/* allocate tx descriptor ring */
	owner = xgene_derive_ring_owner(pdata);
	ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
	tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
					      RING_CFGSIZE_16KB, ring_id);
	if (!tx_ring) {
		ret = -ENOMEM;
		goto err;
	}

	size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
	tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs,
						GFP_KERNEL);
	if (!tx_ring->exp_bufs) {
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring = tx_ring;

	if (!pdata->cq_cnt) {
		cp_ring = pdata->rx_ring;
	} else {
		/* allocate tx completion descriptor ring */
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,
						      RING_CFGSIZE_16KB,
						      ring_id);
		if (!cp_ring) {
			ret = -ENOMEM;
			goto err;
		}
		cp_ring->irq = pdata->txc_irq;
		snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name);
	}

	cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
				       sizeof(struct sk_buff *), GFP_KERNEL);
	if (!cp_ring->cp_skb) {
		ret = -ENOMEM;
		goto err;
	}

	size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
	cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
					      size, GFP_KERNEL);
	if (!cp_ring->frag_dma_addr) {
		devm_kfree(dev, cp_ring->cp_skb);
		ret = -ENOMEM;
		goto err;
	}

	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;

	return 0;

err:
	xgene_enet_free_desc_rings(pdata);
	return ret;
}
static struct rtnl_link_stats64 *xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *storage)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct rtnl_link_stats64 *stats = &pdata->stats;

	stats->rx_errors += stats->rx_length_errors +
			    stats->rx_crc_errors +
			    stats->rx_frame_errors +
			    stats->rx_fifo_errors;
	memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));

	return storage;
}
static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		return ret;
	pdata->mac_ops->set_mac_addr(pdata);

	return ret;
}
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
};
static int xgene_get_port_id_acpi(struct device *dev,
				  struct xgene_enet_pdata *pdata)
{
	acpi_status status;
	u64 temp;

	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {
		pdata->port_id = 0;
	} else {
		pdata->port_id = temp;
	}

	return 0;
}
static int xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
{
	u32 id = 0;
	int ret;

	ret = of_property_read_u32(dev->of_node, "port-id", &id);
	if (ret) {
		pdata->port_id = 0;
		ret = 0;
	} else {
		pdata->port_id = id & BIT(0);
	}

	return ret;
}
static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "tx-delay", &delay);
	if (ret) {
		pdata->tx_delay = 4;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");
		return -EINVAL;
	}

	pdata->tx_delay = delay;

	return 0;
}
static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 delay;
	int ret;

	ret = of_property_read_u32(dev->of_node, "rx-delay", &delay);
	if (ret) {
		pdata->rx_delay = 2;
		return 0;
	}

	if (delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");
		return -EINVAL;
	}

	pdata->rx_delay = delay;

	return 0;
}
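
/* Map the three MEM resources in RES_* order (enet CSR, ring CSR, ring
 * command), then pick up port id, MAC address, phy-connection-type,
 * RGMII delays and IRQs, and finally derive the per-block CSR addresses
 * from the mapped base.
 */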
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
	struct platform_device *pdev;
	struct net_device *ndev;
	struct device *dev;
	struct resource *res;
	void __iomem *base_addr;
	u32 offset;
	int ret;

	pdev = pdata->pdev;
	dev = &pdev->dev;
	ndev = pdata->ndev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
	if (!res) {
		dev_err(dev, "Resource enet_csr not defined\n");
		return -ENODEV;
	}
	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
	if (!res) {
		dev_err(dev, "Resource ring_csr not defined\n");
		return -ENODEV;
	}
	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
	if (!res) {
		dev_err(dev, "Resource ring_cmd not defined\n");
		return -ENODEV;
	}
	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");
		return -ENOMEM;
	}

	if (dev->of_node)
		ret = xgene_get_port_id_dt(dev, pdata);
	else
		ret = xgene_get_port_id_acpi(dev, pdata);
	if (ret)
		return ret;

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;
	}
	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");
		return -ENODEV;
	}

	ret = xgene_get_tx_delay(pdata);
	if (ret)
		return ret;

	ret = xgene_get_rx_delay(pdata);
	if (ret)
		return ret;

	ret = platform_get_irq(pdev, 0);
	if (ret <= 0) {
		dev_err(dev, "Unable to get ENET Rx IRQ\n");
		ret = ret ? : -ENXIO;
		return ret;
	}
	pdata->rx_irq = ret;

	if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) {
		ret = platform_get_irq(pdev, 1);
		if (ret <= 0) {
			pdata->cq_cnt = 0;
			dev_info(dev, "Unable to get Tx completion IRQ, "
				 "using Rx IRQ instead\n");
		} else {
			pdata->cq_cnt = XGENE_MAX_TXC_RINGS;
			pdata->txc_irq = ret;
		}
	}

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		/* Firmware may have set up the clock already. */
		dev_info(dev, "clocks have been setup already\n");
	}

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
	else
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			  BLOCK_ETH_MAC_CSR_OFFSET :
			  X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
	} else {
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
	}
	pdata->rx_buff_cnt = NUM_PKT_BUF;

	return 0;
}
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num;
	int ret;

	ret = pdata->port_ops->reset(pdata);
	if (ret)
		return ret;

	ret = xgene_enet_create_desc_rings(ndev);
	if (ret) {
		netdev_err(ndev, "Error in ring configuration\n");
		return ret;
	}

	/* setup buffer pool */
	buf_pool = pdata->rx_ring->buf_pool;
	xgene_enet_init_bufpool(buf_pool);
	ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt);
	if (ret) {
		xgene_enet_delete_desc_rings(pdata);
		return ret;
	}

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring);
	pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
	pdata->mac_ops->init(pdata);

	return ret;
}
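
/* Choose MAC, port and ring ops from the PHY interface mode, then assign
 * this port's buffer-number and ring-number window, which differs between
 * ENET1 and ENET2 generation SoCs.
 */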
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
{
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
		pdata->rm = RM3;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->rm = RM1;
		break;
	default:
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->rm = RM0;
		break;
	}

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = START_CPU_BUFNUM_0;
			pdata->eth_bufnum = START_ETH_BUFNUM_0;
			pdata->bp_bufnum = START_BP_BUFNUM_0;
			pdata->ring_num = START_RING_NUM_0;
			break;
		case 1:
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
			} else {
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;
			}
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring1_ops;
	} else {
		switch (pdata->port_id) {
		case 0:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			break;
		case 1:
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;
			break;
		default:
			break;
		}
		pdata->ring_ops = &xgene_ring2_ops;
	}
}
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
			       NAPI_POLL_WEIGHT);
	}
}
static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
{
	struct napi_struct *napi;

	napi = &pdata->rx_ring->napi;
	netif_napi_del(napi);

	if (pdata->cq_cnt) {
		napi = &pdata->tx_ring->cp_ring->napi;
		netif_napi_del(napi);
	}
}
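
/* Probe order: identify the SoC generation from OF/ACPI match data, map
 * resources, select ops, register the netdev, then bring up rings and
 * buffer pools via xgene_enet_init_hw() before wiring up NAPI and the
 * MDIO bus or link-state poller.
 */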
static int xgene_enet_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	struct xgene_mac_ops *mac_ops;
	const struct of_device_id *of_id;
	int ret;

	ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata));
	if (!ndev)
		return -ENOMEM;

	pdata = netdev_priv(ndev);

	pdata->pdev = pdev;
	pdata->ndev = ndev;
	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |
			  NETIF_F_GSO |
			  NETIF_F_GRO |
			  NETIF_F_SG;

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
	if (of_id) {
		pdata->enet_id = (enum xgene_enet_id)of_id->data;
	} else {
		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
		if (acpi_id)
			pdata->enet_id = (enum xgene_enet_id) acpi_id->driver_data;
	}
	if (!pdata->enet_id) {
		free_netdev(ndev);
		return -ENODEV;
	}

	ret = xgene_enet_get_resources(pdata);
	if (ret)
		goto err;

	xgene_enet_setup_ops(pdata);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO;
		pdata->mss = XGENE_ENET_MSS;
	}
	ndev->hw_features = ndev->features;

	ret = register_netdev(ndev);
	if (ret) {
		netdev_err(ndev, "Failed to register netdev\n");
		goto err;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		netdev_err(ndev, "No usable DMA configuration\n");
		goto err;
	}

	ret = xgene_enet_init_hw(pdata);
	if (ret)
		goto err;

	xgene_enet_napi_add(pdata);
	mac_ops = pdata->mac_ops;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		ret = xgene_enet_mdio_config(pdata);
	else
		INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);

	return ret;
err:
	unregister_netdev(ndev);
	free_netdev(ndev);
	return ret;
}
static int xgene_enet_remove(struct platform_device *pdev)
{
	struct xgene_enet_pdata *pdata;
	struct xgene_mac_ops *mac_ops;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	mac_ops = pdata->mac_ops;
	ndev = pdata->ndev;

	mac_ops->rx_disable(pdata);
	mac_ops->tx_disable(pdata);

	xgene_enet_napi_del(pdata);
	if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
		xgene_enet_mdio_remove(pdata);
	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);
	free_netdev(ndev);

	return 0;
}
static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1},
	{ "APMC0D30", XGENE_ENET1},
	{ "APMC0D31", XGENE_ENET1},
	{ "APMC0D3F", XGENE_ENET1},
	{ "APMC0D26", XGENE_ENET2},
	{ "APMC0D25", XGENE_ENET2},
	{ }
};
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);
static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
	{},
};
MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static struct platform_driver xgene_enet_driver = {
	.driver = {
		   .name = "xgene-enet",
		   .of_match_table = of_match_ptr(xgene_enet_of_match),
		   .acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	},
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
};

module_platform_driver(xgene_enet_driver);
MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");