// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
 *
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "cpsw_ale.h"
#include "cpsw_sl.h"
#include "am65-cpsw-nuss.h"
#include "k3-cppi-desc-pool.h"

#define AM65_CPSW_SS_BASE		0x0
#define AM65_CPSW_SGMII_BASE		0x100
#define AM65_CPSW_XGMII_BASE		0x2100
#define AM65_CPSW_CPSW_NU_BASE		0x20000
#define AM65_CPSW_NU_PORTS_BASE		0x1000
#define AM65_CPSW_NU_STATS_BASE		0x1a000
#define AM65_CPSW_NU_ALE_BASE		0x1e000
#define AM65_CPSW_NU_CPTS_BASE		0x1d000

#define AM65_CPSW_NU_PORTS_OFFSET	0x1000
#define AM65_CPSW_NU_STATS_PORT_OFFSET	0x200

#define AM65_CPSW_MAX_PORTS	8

#define AM65_CPSW_MIN_PACKET_SIZE	VLAN_ETH_ZLEN
#define AM65_CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define AM65_CPSW_REG_CTL		0x004
#define AM65_CPSW_REG_STAT_PORT_EN	0x014
#define AM65_CPSW_REG_PTYPE		0x018

#define AM65_CPSW_P0_REG_CTL			0x004
#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET	0x008

#define AM65_CPSW_PORT_REG_PRI_CTL		0x01c
#define AM65_CPSW_PORT_REG_RX_PRI_MAP		0x020
#define AM65_CPSW_PORT_REG_RX_MAXLEN		0x024

#define AM65_CPSW_PORTN_REG_SA_L		0x308
#define AM65_CPSW_PORTN_REG_SA_H		0x30c
#define AM65_CPSW_PORTN_REG_TS_CTL		0x310
#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG	0x314
#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG	0x318
#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2	0x31C

#define AM65_CPSW_CTL_VLAN_AWARE		BIT(1)
#define AM65_CPSW_CTL_P0_ENABLE			BIT(2)
#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE		BIT(13)
#define AM65_CPSW_CTL_P0_RX_PAD			BIT(14)

/* AM65_CPSW_P0_REG_CTL */
#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN	BIT(0)

/* AM65_CPSW_PORT_REG_PRI_CTL */
#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN	BIT(8)

/* AM65_CPSW_PN_TS_CTL register fields */
#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN		BIT(4)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN	BIT(5)
#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN	BIT(6)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN		BIT(7)
#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN		BIT(10)
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN	BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT	16

/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107	BIT(16)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129	BIT(17)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130	BIT(18)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131	BIT(19)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132	BIT(20)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319	BIT(21)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320	BIT(22)
#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO	BIT(23)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))

#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)

#define AM65_CPSW_TS_TX_ANX_ALL_EN		\
	(AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN |	\
	 AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)

#define AM65_CPSW_ALE_AGEOUT_DEFAULT	30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC	500
#define AM65_CPSW_MAX_RX_DESC	500

#define AM65_CPSW_NAV_PS_DATA_SIZE 16
#define AM65_CPSW_NAV_SW_DATA_SIZE 16

#define AM65_CPSW_DEBUG	(NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
			 NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
				      const u8 *dev_addr)
{
	u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
		     (dev_addr[2] << 16) | (dev_addr[3] << 24);
	u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);

	writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
	writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
}

static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
{
	cpsw_sl_reset(port->slave.mac_sl, 100);
	/* Max length register has to be restored after MAC SL reset */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
}

static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
{
	common->nuss_ver = readl(common->ss_base);
	common->cpsw_ver = readl(common->cpsw_base);
	dev_info(common->dev,
		 "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u\n",
		 common->nuss_ver,
		 common->cpsw_ver,
		 common->port_num + 1);
}

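/* adjust_link callback for the slave port PHY. On link up, build the MAC SL
 * control value (GMII enable, gigabit, external/in-band mode for 10M RGMII,
 * duplex and pause bits), open the ALE port for forwarding and wake the TX
 * queues. On link down, disable forwarding, command the MAC SL to idle and
 * stop the TX queues.
 */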
void am65_cpsw_nuss_adjust_link(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct phy_device *phy = port->slave.phy;
	u32 mac_control = 0;

	if (!phy)
		return;

	if (phy->link) {
		mac_control = CPSW_SL_CTL_GMII_EN;

		if (phy->speed == 1000)
			mac_control |= CPSW_SL_CTL_GIG;
		if (phy->speed == 10 && phy_interface_is_rgmii(phy))
			/* Can be used with in-band mode only */
			mac_control |= CPSW_SL_CTL_EXT_EN;
		if (phy->duplex)
			mac_control |= CPSW_SL_CTL_FULLDUPLEX;

		/* RGMII speed is 100M if !CPSW_SL_CTL_GIG */

		/* rx_pause/tx_pause */
		if (port->slave.rx_pause)
			mac_control |= CPSW_SL_CTL_RX_FLOW_EN;

		if (port->slave.tx_pause)
			mac_control |= CPSW_SL_CTL_TX_FLOW_EN;

		cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);

		/* enable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		netif_tx_wake_all_queues(ndev);
	} else {
		int tmo;
		/* disable forwarding */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

		cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);

		tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
		dev_dbg(common->dev, "down mac_sl %08x tmo %d\n",
			cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS),
			tmo);

		cpsw_sl_ctl_reset(port->slave.mac_sl);

		netif_tx_stop_all_queues(ndev);
	}

	phy_print_status(phy);
}

static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
					    __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask, unreg_mcast = 0;
	int ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	if (!vid)
		unreg_mcast = port_mask;
	dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
	ret = cpsw_ale_add_vlan(common->ale, vid, port_mask,
				unreg_mcast, port_mask, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
					     __be16 proto, u16 vid)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	int ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
	ret = cpsw_ale_del_vlan(common->ale, vid, 0);

	pm_runtime_put(common->dev);
	return ret;
}

static void am65_cpsw_slave_set_promisc_2g(struct am65_cpsw_port *port,
					   bool promisc)
{
	struct am65_cpsw_common *common = port->common;

	if (promisc) {
		/* Enable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 1);
		dev_dbg(common->dev, "promisc enabled\n");
	} else {
		/* Disable promiscuous mode */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY_CAF, 0);
		dev_dbg(common->dev, "promisc disabled\n");
	}
}

static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	bool promisc;

	promisc = !!(ndev->flags & IFF_PROMISC);
	am65_cpsw_slave_set_promisc_2g(port, promisc);

	if (promisc)
		return;

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(common->ale,
			      ndev->flags & IFF_ALLMULTI, port->port_id);

	port_mask = ALE_PORT_HOST;
	/* Clear all mcast from ALE */
	cpsw_ale_flush_multicast(common->ale, port_mask, -1);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		/* program multicast address list into ALE register */
		netdev_for_each_mc_addr(ha, ndev) {
			cpsw_ale_add_mcast(common->ale, ha->addr,
					   port_mask, 0, 0, 0);
		}
	}
}

static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
					       unsigned int txqueue)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned long trans_start;

	netif_txq = netdev_get_tx_queue(ndev, txqueue);
	tx_chn = &common->tx_chns[txqueue];
	trans_start = netif_txq->trans_start;

	netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
		   txqueue,
		   netif_tx_queue_stopped(netif_txq),
		   jiffies_to_msecs(jiffies - trans_start),
		   dql_avail(&netif_txq->dql),
		   k3_cppi_desc_pool_avail(tx_chn->desc_pool));

	if (netif_tx_queue_stopped(netif_txq)) {
		/* try recover if stopped by us */
		txq_trans_update(netif_txq);
		netif_tx_wake_queue(netif_txq);
	}
}

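/* Refill one RX buffer: allocate a host descriptor from the pool, map the
 * skb data area for DMA, record the skb pointer in the descriptor swdata
 * and push the descriptor to the RX free-descriptor queue of the UDMA
 * glue channel.
 */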
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
				  struct sk_buff *skb)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	u32 pkt_len = skb_tailroom(skb);
	dma_addr_t desc_dma;
	dma_addr_t buf_dma;
	void *swdata;

	desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
	if (!desc_rx) {
		dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
		return -ENOMEM;
	}
	desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);

	buf_dma = dma_map_single(dev, skb->data, pkt_len, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
		dev_err(dev, "Failed to map rx skb buffer\n");
		return -EINVAL;
	}

	cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_hdesc_attach_buf(desc_rx, 0, 0, buf_dma, skb_tailroom(skb));
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	*((void **)swdata) = skb;

	return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}

void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	u32 val, pri_map;

	/* P0 set Receive Priority Type */
	val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);

	if (common->pf_p0_rx_ptype_rrobin) {
		val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* Enet Ports fifos work in fixed priority mode only, so
		 * reset P0_Rx_Pri_Map so all packets will go in Enet fifo 0
		 */
		pri_map = 0x0;
	} else {
		val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
		/* restore P0_Rx_Pri_Map */
		pri_map = 0x76543210;
	}

	writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
	writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
}

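/* One-time switch bring-up shared by all ports (runs only for the first
 * netdev opened): program the common control and max-length registers,
 * set the RX flow id base, enable per-port statistics, start the ALE,
 * pre-fill the RX free-descriptor queue with skbs and enable the TX/RX
 * DMA channels and their NAPI contexts.
 */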
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
				      netdev_features_t features)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);
	int port_idx, i, ret;
	struct sk_buff *skb;
	u32 val, port_mask;

	if (common->usage_count)
		return 0;

	/* Control register */
	writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
	       AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
	       common->cpsw_base + AM65_CPSW_REG_CTL);
	/* Max length register */
	writel(AM65_CPSW_MAX_PACKET_SIZE,
	       host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
	/* set base flow_id */
	writel(common->rx_flow_id_base,
	       host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
	/* enable tx checksum offload */
	if (features & NETIF_F_HW_CSUM)
		writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
		       host_p->port_base + AM65_CPSW_P0_REG_CTL);

	am65_cpsw_nuss_set_p0_ptype(common);

	/* enable statistics */
	val = BIT(HOST_PORT_NUM);
	for (port_idx = 0; port_idx < common->port_num; port_idx++) {
		struct am65_cpsw_port *port = &common->ports[port_idx];

		if (!port->disabled)
			val |= BIT(port->port_id);
	}
	writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	/* disable priority elevation */
	writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);

	cpsw_ale_start(common->ale);

	/* limit to one RX flow only */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ID, 0);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_DEFAULT_THREAD_ENABLE, 1);
	if (AM65_CPSW_IS_CPSW2G(common))
		cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
				     ALE_PORT_NOLEARN, 1);
	/* switch to vlan aware mode */
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

	/* default vlan cfg: create mask based on enabled ports */
	port_mask = GENMASK(common->port_num, 0) &
		    ~common->disabled_ports_mask;

	cpsw_ale_add_vlan(common->ale, 0, port_mask,
			  port_mask, port_mask,
			  port_mask & ~ALE_PORT_HOST);

	for (i = 0; i < common->rx_chns.descs_num; i++) {
		skb = __netdev_alloc_skb_ip_align(NULL,
						  AM65_CPSW_MAX_PACKET_SIZE,
						  GFP_KERNEL);
		if (!skb) {
			dev_err(common->dev, "cannot allocate skb\n");
			return -ENOMEM;
		}

		ret = am65_cpsw_nuss_rx_push(common, skb);
		if (ret < 0) {
			dev_err(common->dev,
				"cannot submit skb to channel rx, error %d\n",
				ret);
			kfree_skb(skb);
			return ret;
		}
		kmemleak_not_leak(skb);
	}
	k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);

	for (i = 0; i < common->tx_ch_num; i++) {
		ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
		if (ret)
			return ret;
		napi_enable(&common->tx_chns[i].napi_tx);
	}

	napi_enable(&common->napi_rx);

	dev_dbg(common->dev, "cpsw_nuss started\n");
	return 0;
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);

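/* Common switch shutdown, executed when the last opened netdev is closed:
 * tear down the TX channels (waiting for the teardown completion pushed
 * back by the DMA), disable NAPI, flush and disable the RX flows, stop
 * the ALE and clear the common control/statistics registers.
 */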
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
	int i;

	if (common->usage_count != 1)
		return 0;

	cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	/* shutdown tx channels */
	atomic_set(&common->tdown_cnt, common->tx_ch_num);
	/* ensure new tdown_cnt value is visible */
	smp_mb__after_atomic();
	reinit_completion(&common->tdown_complete);

	for (i = 0; i < common->tx_ch_num; i++)
		k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);

	i = wait_for_completion_timeout(&common->tdown_complete,
					msecs_to_jiffies(1000));
	if (!i)
		dev_err(common->dev, "tx timeout\n");
	for (i = 0; i < common->tx_ch_num; i++)
		napi_disable(&common->tx_chns[i].napi_tx);

	for (i = 0; i < common->tx_ch_num; i++) {
		k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
					  &common->tx_chns[i],
					  am65_cpsw_nuss_tx_cleanup);
		k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
	}

	k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
	napi_disable(&common->napi_rx);

	for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
		k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
					  &common->rx_chns,
					  am65_cpsw_nuss_rx_cleanup, !!i);

	k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);

	cpsw_ale_stop(common->ale);

	writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
	writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);

	dev_dbg(common->dev, "cpsw_nuss stopped\n");
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int ret;

	if (port->slave.phy)
		phy_stop(port->slave.phy);

	netif_tx_stop_all_queues(ndev);

	if (port->slave.phy) {
		phy_disconnect(port->slave.phy);
		port->slave.phy = NULL;
	}

	ret = am65_cpsw_nuss_common_stop(common);
	if (ret)
		return ret;

	common->usage_count--;
	pm_runtime_put(common->dev);
	return 0;
}

static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 port_mask;
	int ret, i;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	/* Notify the stack of the actual queue counts. */
	ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
	if (ret) {
		dev_err(common->dev, "cannot set real number of tx queues\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
	if (ret) {
		dev_err(common->dev, "cannot set real number of rx queues\n");
		return ret;
	}

	for (i = 0; i < common->tx_ch_num; i++)
		netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));

	ret = am65_cpsw_nuss_common_open(common, ndev->features);
	if (ret)
		return ret;

	common->usage_count++;

	am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);

	if (port->slave.mac_only)
		/* enable mac-only mode on port */
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_MACONLY, 1);
	if (AM65_CPSW_IS_CPSW2G(common))
		cpsw_ale_control_set(common->ale, port->port_id,
				     ALE_PORT_NOLEARN, 1);

	port_mask = BIT(port->port_id) | ALE_PORT_HOST;
	cpsw_ale_add_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, ALE_SECURE, 0);
	cpsw_ale_add_mcast(common->ale, ndev->broadcast,
			   port_mask, 0, 0, ALE_MCAST_FWD_2);

	/* mac_sl should be configured via phy-link interface */
	am65_cpsw_sl_ctl_reset(port);

	ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET,
			       port->slave.phy_if);
	if (ret)
		goto error_cleanup;

	if (port->slave.phy_node) {
		port->slave.phy = of_phy_connect(ndev,
						 port->slave.phy_node,
						 &am65_cpsw_nuss_adjust_link,
						 0, port->slave.phy_if);
		if (!port->slave.phy) {
			dev_err(common->dev, "phy %pOF not found on slave %d\n",
				port->slave.phy_node,
				port->port_id);
			ret = -ENODEV;
			goto error_cleanup;
		}
	}

	phy_attached_info(port->slave.phy);
	phy_start(port->slave.phy);

	return 0;

error_cleanup:
	am65_cpsw_nuss_ndo_slave_stop(ndev);
	return ret;
}

static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_rx_chn *rx_chn = data;
	struct cppi5_host_desc_t *desc_rx;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	u32 buf_dma_len;
	void **swdata;

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);

	dma_unmap_single(rx_chn->dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	dev_kfree_skb_any(skb);
}

/* RX psdata[2] word format - checksum information */
#define AM65_CPSW_RX_PSD_CSUM_ADD	GENMASK(15, 0)
#define AM65_CPSW_RX_PSD_CSUM_ERR	BIT(16)
#define AM65_CPSW_RX_PSD_IS_FRAGMENT	BIT(17)
#define AM65_CPSW_RX_PSD_IS_TCP		BIT(18)
#define AM65_CPSW_RX_PSD_IPV6_VALID	BIT(19)
#define AM65_CPSW_RX_PSD_IPV4_VALID	BIT(20)

static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
{
	/* HW can verify IPv4/IPv6 TCP/UDP packets checksum.
	 * csum information is provided in the psdata[2] word:
	 * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates csum error
	 * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
	 * bits - indicate IPv4/IPv6 packet
	 * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates fragmented packet
	 * AM65_CPSW_RX_PSD_CSUM_ADD has value 0xFFFF for non fragmented packets
	 * or csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
	 */
	skb_checksum_none_assert(skb);

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
		return;

	if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
			  AM65_CPSW_RX_PSD_IPV4_VALID)) &&
	    !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
		/* csum for fragmented packets is unsupported */
		if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}

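/* Process one received packet from the given RX flow: pop a completed
 * descriptor, recover the skb from swdata, unmap the buffer, pass the
 * packet (with hardware checksum status) to the network stack through
 * napi_gro_receive() and push a freshly allocated replacement buffer
 * back to the free-descriptor queue.
 */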
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
				     u32 flow_idx)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct am65_cpsw_ndev_stats *stats;
	struct cppi5_host_desc_t *desc_rx;
	struct device *dev = common->dev;
	struct sk_buff *skb, *new_skb;
	dma_addr_t desc_dma, buf_dma;
	struct am65_cpsw_port *port;
	struct net_device *ndev;
	void **swdata;
	u32 *psdata;
	int ret = 0;

	ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
	if (ret) {
		if (ret != -ENODATA)
			dev_err(dev, "RX: pop chn fail %d\n", ret);
		return ret;
	}

	if (desc_dma & 0x1) {
		dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
		return 0;
	}

	desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
	dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
		__func__, flow_idx, &desc_dma);

	swdata = cppi5_hdesc_get_swdata(desc_rx);
	skb = *swdata;
	cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
	pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
	cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
	dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
	port = am65_common_get_port(common, port_id);
	ndev = port->ndev;
	skb->dev = ndev;

	psdata = cppi5_hdesc_get_psdata(desc_rx);
	csum_info = psdata[2];
	dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);

	dma_unmap_single(dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);

	k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);

	new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
	if (new_skb) {
		skb_put(skb, pkt_len);
		skb->protocol = eth_type_trans(skb, ndev);
		am65_cpsw_nuss_rx_csum(skb, csum_info);
		napi_gro_receive(&common->napi_rx, skb);

		ndev_priv = netdev_priv(ndev);
		stats = this_cpu_ptr(ndev_priv->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_dropped++;
		return 0;
	}

	ret = am65_cpsw_nuss_rx_push(common, new_skb);
	if (WARN_ON(ret < 0)) {
		dev_kfree_skb_any(new_skb);
		ndev->stats.rx_errors++;
		ndev->stats.rx_dropped++;
	}

	return ret;
}

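/* RX NAPI poll handler: all RX flows are serviced from a single NAPI
 * context. Packets are drained flow by flow until the budget is consumed;
 * when less than the full budget is used the poll is completed and the
 * RX interrupt is re-enabled.
 */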
static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
	int flow = AM65_CPSW_MAX_RX_FLOWS;
	int cur_budget, ret;
	int num_rx = 0;

	/* process every flow */
	while (flow--) {
		cur_budget = budget - num_rx;

		while (cur_budget--) {
			ret = am65_cpsw_nuss_rx_packets(common, flow);
			if (ret)
				break;
			num_rx++;
		}

		if (num_rx >= budget)
			break;
	}

	dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);

	if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
		enable_irq(common->rx_chns.irq);

	return num_rx;
}

static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
				     struct device *dev,
				     struct cppi5_host_desc_t *desc)
{
	struct cppi5_host_desc_t *first_desc, *next_desc;
	dma_addr_t buf_dma, next_desc_dma;
	u32 buf_dma_len;

	first_desc = desc;
	next_desc = first_desc;

	cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);

	dma_unmap_single(dev, buf_dma, buf_dma_len,
			 DMA_TO_DEVICE);

	next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
	while (next_desc_dma) {
		next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						       next_desc_dma);
		cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);

		dma_unmap_page(dev, buf_dma, buf_dma_len,
			       DMA_TO_DEVICE);

		next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);

		k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
	}

	k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
}

static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
	struct am65_cpsw_tx_chn *tx_chn = data;
	struct cppi5_host_desc_t *desc_tx;
	struct sk_buff *skb;
	void **swdata;

	desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
	swdata = cppi5_hdesc_get_swdata(desc_tx);
	skb = *(swdata);
	am65_cpsw_nuss_xmit_free(tx_chn, tx_chn->common->dev, desc_tx);

	dev_kfree_skb_any(skb);
}

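/* Reap completed TX descriptors on one channel: for every popped
 * descriptor free the DMA mappings and descriptor chain, update the
 * per-cpu stats and BQL accounting, and wake the TX queue again if it
 * was stopped and enough descriptors are now available. A descriptor
 * address with bit 0 set marks the channel teardown completion.
 */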
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
					   int chn, unsigned int budget)
{
	struct cppi5_host_desc_t *desc_tx;
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	unsigned int total_bytes = 0;
	struct net_device *ndev;
	struct sk_buff *skb;
	dma_addr_t desc_dma;
	int res, num_tx = 0;
	void **swdata;

	tx_chn = &common->tx_chns[chn];

	while (true) {
		struct am65_cpsw_ndev_priv *ndev_priv;
		struct am65_cpsw_ndev_stats *stats;

		res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
		if (res == -ENODATA)
			break;

		if (desc_dma & 0x1) {
			if (atomic_dec_and_test(&common->tdown_cnt))
				complete(&common->tdown_complete);
			break;
		}

		desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
						     desc_dma);
		swdata = cppi5_hdesc_get_swdata(desc_tx);
		skb = *(swdata);
		am65_cpsw_nuss_xmit_free(tx_chn, dev, desc_tx);

		ndev = skb->dev;

		ndev_priv = netdev_priv(ndev);
		stats = this_cpu_ptr(ndev_priv->stats);
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += skb->len;
		u64_stats_update_end(&stats->syncp);

		total_bytes += skb->len;
		napi_consume_skb(skb, budget);
		num_tx++;
	}

	if (!num_tx)
		return 0;

	netif_txq = netdev_get_tx_queue(ndev, chn);

	netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);

	if (netif_tx_queue_stopped(netif_txq)) {
		/* If the queue was stopped because of a stalled tx dma,
		 * wake it now that free tx descriptors are available.
		 */
		__netif_tx_lock(netif_txq, smp_processor_id());
		if (netif_running(ndev) &&
		    (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		     MAX_SKB_FRAGS))
			netif_tx_wake_queue(netif_txq);

		__netif_tx_unlock(netif_txq);
	}
	dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);

	return num_tx;
}

static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
	int num_tx;

	num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id,
						 budget);
	num_tx = min(num_tx, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		enable_irq(tx_chn->irq);
	}

	return num_tx;
}

static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_common *common = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&common->napi_rx);

	return IRQ_HANDLED;
}

static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
{
	struct am65_cpsw_tx_chn *tx_chn = dev_id;

	disable_irq_nosync(irq);
	napi_schedule(&tx_chn->napi_tx);

	return IRQ_HANDLED;
}

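/* ndo_start_xmit: map the linear part of the skb, build a CPPI5 host
 * descriptor (pkt type, port tag and optional checksum-offload psdata),
 * chain one additional descriptor per page fragment and push the chain
 * to the selected TX DMA channel. The queue is stopped when fewer than
 * MAX_SKB_FRAGS descriptors remain in the pool.
 */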
static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
						 struct net_device *ndev)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct device *dev = common->dev;
	struct am65_cpsw_tx_chn *tx_chn;
	struct netdev_queue *netif_txq;
	dma_addr_t desc_dma, buf_dma;
	int ret, q_idx, i;
	void **swdata;
	u32 *psdata;
	u32 pkt_len;

	/* padding enabled in hw */
	pkt_len = skb_headlen(skb);

	q_idx = skb_get_queue_mapping(skb);
	dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);

	tx_chn = &common->tx_chns[q_idx];
	netif_txq = netdev_get_tx_queue(ndev, q_idx);

	/* Map the linear buffer */
	buf_dma = dma_map_single(dev, skb->data, pkt_len,
				 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, buf_dma))) {
		dev_err(dev, "Failed to map tx skb buffer\n");
		ndev->stats.tx_errors++;
		goto err_free_skb;
	}

	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
	if (!first_desc) {
		dev_dbg(dev, "Failed to allocate descriptor\n");
		dma_unmap_single(dev, buf_dma, pkt_len, DMA_TO_DEVICE);
		goto busy_stop_q;
	}

	cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
			 AM65_CPSW_NAV_PS_DATA_SIZE);
	cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
	cppi5_hdesc_set_pkttype(first_desc, 0x7);
	cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);

	cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
	swdata = cppi5_hdesc_get_swdata(first_desc);
	*(swdata) = skb;
	psdata = cppi5_hdesc_get_psdata(first_desc);

	/* HW csum offload if enabled */
	psdata[2] = 0;
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int cs_start, cs_offset;

		cs_start = skb_transport_offset(skb);
		cs_offset = cs_start + skb->csum_offset;
		/* HW counts bytes starting from 1 */
		psdata[2] = ((cs_offset + 1) << 24) |
			    ((cs_start + 1) << 16) | (skb->len - cs_start);
		dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
	}

	if (!skb_is_nonlinear(skb))
		goto done_tx;

	dev_dbg(dev, "fragmented SKB\n");

	/* Handle the case where skb is fragmented in pages */
	cur_desc = first_desc;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 frag_size = skb_frag_size(frag);

		next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
		if (!next_desc) {
			dev_err(dev, "Failed to allocate descriptor\n");
			goto busy_free_descs;
		}

		buf_dma = skb_frag_dma_map(dev, frag, 0, frag_size,
					   DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, buf_dma))) {
			dev_err(dev, "Failed to map tx skb page\n");
			k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
			ndev->stats.tx_errors++;
			goto err_free_descs;
		}

		cppi5_hdesc_reset_hbdesc(next_desc);
		cppi5_hdesc_attach_buf(next_desc,
				       buf_dma, frag_size, buf_dma, frag_size);

		desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
						      next_desc);
		cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);

		pkt_len += frag_size;
		cur_desc = next_desc;
	}
	WARN_ON(pkt_len != skb->len);

done_tx:
	skb_tx_timestamp(skb);

	/* report bql before sending packet */
	netdev_tx_sent_queue(netif_txq, pkt_len);

	cppi5_hdesc_set_pktlen(first_desc, pkt_len);
	desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
	ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
	if (ret) {
		dev_err(dev, "can't push desc %d\n", ret);
		/* inform bql */
		netdev_tx_completed_queue(netif_txq, 1, pkt_len);
		ndev->stats.tx_errors++;
		goto err_free_descs;
	}

	if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
		netif_tx_stop_queue(netif_txq);
		/* Barrier, so that stop_queue is visible to other cpus */
		smp_mb__after_atomic();
		dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);

		/* re-check for smp */
		if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
		    MAX_SKB_FRAGS) {
			netif_tx_wake_queue(netif_txq);
			dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
		}
	}

	return NETDEV_TX_OK;

err_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
err_free_skb:
	ndev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

busy_free_descs:
	am65_cpsw_nuss_xmit_free(tx_chn, dev, first_desc);
busy_stop_q:
	netif_tx_stop_queue(netif_txq);
	return NETDEV_TX_BUSY;
}

static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
						    void *addr)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct sockaddr *sockaddr = (struct sockaddr *)addr;
	int ret;

	ret = eth_prepare_mac_addr_change(ndev, addr);
	if (ret < 0)
		return ret;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
			   HOST_PORT_NUM, 0, 0);
	cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
			   HOST_PORT_NUM, ALE_SECURE, 0);

	am65_cpsw_port_set_sl_mac(port, addr);
	eth_commit_mac_addr_change(ndev, sockaddr);

	pm_runtime_put(common->dev);

	return 0;
}

static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
					  struct ifreq *req, int cmd)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!port->slave.phy)
		return -EOPNOTSUPP;

	return phy_mii_ioctl(port->slave.phy, req, cmd);
}

static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
					 struct rtnl_link_stats64 *stats)
{
	struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
	unsigned int start;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct am65_cpsw_ndev_stats *cpu_stats;
		u64 rx_packets;
		u64 rx_bytes;
		u64 tx_packets;
		u64 tx_bytes;

		cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			rx_packets = cpu_stats->rx_packets;
			rx_bytes = cpu_stats->rx_bytes;
			tx_packets = cpu_stats->tx_packets;
			tx_bytes = cpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	stats->rx_errors = dev->stats.rx_errors;
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}

static int am65_cpsw_nuss_ndo_slave_set_features(struct net_device *ndev,
						 netdev_features_t features)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	netdev_features_t changes = features ^ ndev->features;
	struct am65_cpsw_host *host_p;

	host_p = am65_common_get_host(common);

	if (changes & NETIF_F_HW_CSUM) {
		bool enable = !!(features & NETIF_F_HW_CSUM);

		dev_info(common->dev, "Turn %s tx-checksum-ip-generic\n",
			 enable ? "ON" : "OFF");
		if (enable)
			writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN,
			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
		else
			writel(0,
			       host_p->port_base + AM65_CPSW_P0_REG_CTL);
	}

	return 0;
}

static const struct net_device_ops am65_cpsw_nuss_netdev_ops_2g = {
	.ndo_open		= am65_cpsw_nuss_ndo_slave_open,
	.ndo_stop		= am65_cpsw_nuss_ndo_slave_stop,
	.ndo_start_xmit		= am65_cpsw_nuss_ndo_slave_xmit,
	.ndo_set_rx_mode	= am65_cpsw_nuss_ndo_slave_set_rx_mode,
	.ndo_get_stats64	= am65_cpsw_nuss_ndo_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= am65_cpsw_nuss_ndo_slave_set_mac_address,
	.ndo_tx_timeout		= am65_cpsw_nuss_ndo_host_tx_timeout,
	.ndo_vlan_rx_add_vid	= am65_cpsw_nuss_ndo_slave_add_vid,
	.ndo_vlan_rx_kill_vid	= am65_cpsw_nuss_ndo_slave_kill_vid,
	.ndo_do_ioctl		= am65_cpsw_nuss_ndo_slave_ioctl,
	.ndo_set_features	= am65_cpsw_nuss_ndo_slave_set_features,
};

static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
{
	struct am65_cpsw_common *common = port->common;

	if (!port->disabled)
		return;

	common->disabled_ports_mask |= BIT(port->port_id);
	cpsw_ale_control_set(common->ale, port->port_id,
			     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);

	cpsw_sl_reset(port->slave.mac_sl, 100);
	cpsw_sl_ctl_reset(port->slave.mac_sl);
}

static void am65_cpsw_nuss_free_tx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	int i;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
{
	struct device *dev = common->dev;
	int i;

	devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		if (tx_chn->irq)
			devm_free_irq(dev, tx_chn->irq, tx_chn);

		netif_napi_del(&tx_chn->napi_tx);

		if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
			k3_udma_glue_release_tx_chn(tx_chn->tx_chn);

		if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
			k3_cppi_desc_pool_destroy(tx_chn->desc_pool);

		memset(tx_chn, 0, sizeof(*tx_chn));
	}
}

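/* Allocate the TX DMA resources: for every TX channel create a named
 * CPPI5 host-descriptor pool, request the matching "tx0".."txN" UDMA glue
 * channel with ring-mode TX/completion rings and look up its interrupt.
 * A devm action is registered so the channels are released automatically
 * on driver removal or probe failure.
 */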
static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
{
	u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
	struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
	struct device *dev = common->dev;
	struct k3_ring_cfg ring_cfg = {
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0
	};
	u32 hdesc_size;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	tx_cfg.tx_cfg = ring_cfg;
	tx_cfg.txcq_cfg = ring_cfg;
	tx_cfg.tx_cfg.size = max_desc_num;
	tx_cfg.txcq_cfg.size = max_desc_num;

	for (i = 0; i < common->tx_ch_num; i++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "tx%d", i);

		tx_chn->common = common;
		tx_chn->id = i;
		tx_chn->descs_num = max_desc_num;
		tx_chn->desc_pool =
			k3_cppi_desc_pool_create_name(dev,
						      tx_chn->descs_num,
						      hdesc_size,
						      tx_chn->tx_chn_name);
		if (IS_ERR(tx_chn->desc_pool)) {
			ret = PTR_ERR(tx_chn->desc_pool);
			dev_err(dev, "Failed to create pool %d\n", ret);
			goto err;
		}

		tx_chn->tx_chn =
			k3_udma_glue_request_tx_chn(dev,
						    tx_chn->tx_chn_name,
						    &tx_cfg);
		if (IS_ERR(tx_chn->tx_chn)) {
			ret = PTR_ERR(tx_chn->tx_chn);
			dev_err(dev, "Failed to request tx dma channel %d\n",
				ret);
			goto err;
		}

		tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
		if (tx_chn->irq <= 0) {
			dev_err(dev, "Failed to get tx dma irq %d\n",
				tx_chn->irq);
			goto err;
		}

		snprintf(tx_chn->tx_chn_name,
			 sizeof(tx_chn->tx_chn_name), "%s-tx%d",
			 dev_name(dev), tx_chn->id);
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static void am65_cpsw_nuss_free_rx_chns(void *data)
{
	struct am65_cpsw_common *common = data;
	struct am65_cpsw_rx_chn *rx_chn;

	rx_chn = &common->rx_chns;

	if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
		k3_udma_glue_release_rx_chn(rx_chn->rx_chn);

	if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
		k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
}

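/* Allocate the single RX DMA channel and its descriptor pool, then set up
 * one RX flow per AM65_CPSW_MAX_RX_FLOWS: flow 0 creates the shared
 * free-descriptor queue ring and the remaining flows reuse it. The flow id
 * base reported by the UDMA glue layer is stored for programming into the
 * host port flow id register.
 */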
static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
{
	struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
	struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
	u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
	struct device *dev = common->dev;
	u32 hdesc_size;
	u32 fdqring_id;
	int i, ret = 0;

	hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
					   AM65_CPSW_NAV_SW_DATA_SIZE);

	rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
	rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
	rx_cfg.flow_id_base = common->rx_flow_id_base;

	/* init all flows */
	rx_chn->dev = dev;
	rx_chn->descs_num = max_desc_num;
	rx_chn->desc_pool = k3_cppi_desc_pool_create_name(dev,
							  rx_chn->descs_num,
							  hdesc_size, "rx");
	if (IS_ERR(rx_chn->desc_pool)) {
		ret = PTR_ERR(rx_chn->desc_pool);
		dev_err(dev, "Failed to create rx pool %d\n", ret);
		goto err;
	}

	rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
	if (IS_ERR(rx_chn->rx_chn)) {
		ret = PTR_ERR(rx_chn->rx_chn);
		dev_err(dev, "Failed to request rx dma channel %d\n", ret);
		goto err;
	}

	common->rx_flow_id_base =
			k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
	dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);

	fdqring_id = K3_RINGACC_RING_ID_ANY;
	for (i = 0; i < rx_cfg.flow_id_num; i++) {
		struct k3_ring_cfg rxring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_RING,
			.flags = 0,
		};
		struct k3_ring_cfg fdqring_cfg = {
			.elm_size = K3_RINGACC_RING_ELSIZE_8,
			.mode = K3_RINGACC_RING_MODE_MESSAGE,
			.flags = K3_RINGACC_RING_SHARED,
		};
		struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
			.rx_cfg = rxring_cfg,
			.rxfdq_cfg = fdqring_cfg,
			.ring_rxq_id = K3_RINGACC_RING_ID_ANY,
			.src_tag_lo_sel =
				K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
		};

		rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
		rx_flow_cfg.rx_cfg.size = max_desc_num;
		rx_flow_cfg.rxfdq_cfg.size = max_desc_num;

		ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
						i, &rx_flow_cfg);
		if (ret) {
			dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
			goto err;
		}
		if (!i)
			fdqring_id =
				k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
								i);

		rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);

		if (rx_chn->irq <= 0) {
			dev_err(dev, "Failed to get rx dma irq %d\n",
				rx_chn->irq);
			ret = -ENXIO;
			goto err;
		}
	}

err:
	i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
	if (i) {
		dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
		return i;
	}

	return ret;
}

static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host_p = am65_common_get_host(common);

	host_p->common = common;
	host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
	host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;

	return 0;
}

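/* Read the factory MAC address for a slave port from the SoC efuse area
 * referenced by the "ti,syscon-efuse" phandle (syscon regmap plus offset).
 * Returns 0 and leaves mac_addr untouched when no efuse reference is
 * present in the device tree.
 */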
static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
					   int slave, u8 *mac_addr)
{
	u32 mac_lo, mac_hi, offset;
	struct regmap *syscon;
	int ret;

	syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
	if (IS_ERR(syscon)) {
		if (PTR_ERR(syscon) == -ENODEV)
			return 0;
		return PTR_ERR(syscon);
	}

	ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
					 &offset);
	if (ret)
		return ret;

	regmap_read(syscon, offset, &mac_lo);
	regmap_read(syscon, offset + 4, &mac_hi);

	mac_addr[0] = (mac_hi >> 8) & 0xff;
	mac_addr[1] = mac_hi & 0xff;
	mac_addr[2] = (mac_lo >> 24) & 0xff;
	mac_addr[3] = (mac_lo >> 16) & 0xff;
	mac_addr[4] = (mac_lo >> 8) & 0xff;
	mac_addr[5] = mac_lo & 0xff;

	return 0;
}

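/* Parse the "ethernet-ports" device-tree node: for every enabled "port"
 * child fill in the per-port register bases, serdes/interface PHY, MAC-only
 * flag, PHY node (fixed-link or phy-handle), phy-mode and MAC SL instance,
 * and pick up the MAC address from DT, efuse or, as a last resort, a
 * randomly generated one.
 */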
static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
{
	struct device_node *node, *port_np;
	struct device *dev = common->dev;
	int ret;

	node = of_get_child_by_name(dev->of_node, "ethernet-ports");
	if (!node)
		return -ENOENT;

	for_each_child_of_node(node, port_np) {
		struct am65_cpsw_port *port;
		const void *mac_addr;
		u32 port_id;

		/* it is not a slave port node, continue */
		if (strcmp(port_np->name, "port"))
			continue;

		ret = of_property_read_u32(port_np, "reg", &port_id);
		if (ret < 0) {
			dev_err(dev, "%pOF error reading port_id %d\n",
				port_np, ret);
			return ret;
		}

		if (!port_id || port_id > common->port_num) {
			dev_err(dev, "%pOF has invalid port_id %u %s\n",
				port_np, port_id, port_np->name);
			return -EINVAL;
		}

		port = am65_common_get_port(common, port_id);
		port->port_id = port_id;
		port->common = common;
		port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
				  AM65_CPSW_NU_PORTS_OFFSET * (port_id);
		port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
				  (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
		port->name = of_get_property(port_np, "label", NULL);

		port->disabled = !of_device_is_available(port_np);
		if (port->disabled)
			continue;

		port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
		if (IS_ERR(port->slave.ifphy)) {
			ret = PTR_ERR(port->slave.ifphy);
			dev_err(dev, "%pOF error retrieving port phy: %d\n",
				port_np, ret);
			return ret;
		}

		port->slave.mac_only =
				of_property_read_bool(port_np, "ti,mac-only");

		/* get phy/link info */
		if (of_phy_is_fixed_link(port_np)) {
			ret = of_phy_register_fixed_link(port_np);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(dev, "%pOF failed to register fixed-link phy: %d\n",
						port_np, ret);
				return ret;
			}
			port->slave.phy_node = of_node_get(port_np);
		} else {
			port->slave.phy_node =
				of_parse_phandle(port_np, "phy-handle", 0);
		}

		if (!port->slave.phy_node) {
			dev_err(dev,
				"slave[%d] no phy found\n", port_id);
			return -ENODEV;
		}

		ret = of_get_phy_mode(port_np, &port->slave.phy_if);
		if (ret) {
			dev_err(dev, "%pOF read phy-mode err %d\n",
				port_np, ret);
			return ret;
		}

		port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
		if (IS_ERR(port->slave.mac_sl))
			return PTR_ERR(port->slave.mac_sl);

		mac_addr = of_get_mac_address(port_np);
		if (!IS_ERR(mac_addr)) {
			ether_addr_copy(port->slave.mac_addr, mac_addr);
		} else if (am65_cpsw_am654_get_efuse_macid(port_np,
							   port->port_id,
							   port->slave.mac_addr) ||
			   !is_valid_ether_addr(port->slave.mac_addr)) {
			random_ether_addr(port->slave.mac_addr);
			dev_err(dev, "Use random MAC address\n");
		}
	}
	of_node_put(node);

	return 0;
}

static void am65_cpsw_pcpu_stats_free(void *data)
{
	struct am65_cpsw_ndev_stats __percpu *stats = data;

	free_percpu(stats);
}

static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
{
	struct am65_cpsw_ndev_priv *ndev_priv;
	struct device *dev = common->dev;
	struct am65_cpsw_port *port;
	int ret;

	port = am65_common_get_port(common, 1);

	/* alloc netdev */
	port->ndev = devm_alloc_etherdev_mqs(common->dev,
					     sizeof(struct am65_cpsw_ndev_priv),
					     AM65_CPSW_MAX_TX_QUEUES,
					     AM65_CPSW_MAX_RX_QUEUES);
	if (!port->ndev) {
		dev_err(dev, "error allocating slave net_device %u\n",
			port->port_id);
		return -ENOMEM;
	}

	ndev_priv = netdev_priv(port->ndev);
	ndev_priv->port = port;
	ndev_priv->msg_enable = AM65_CPSW_DEBUG;
1668 | SET_NETDEV_DEV(port->ndev, dev); | |
1669 | ||
1670 | ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr); | |
1671 | ||
1672 | port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE; | |
1673 | port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE; | |
1674 | port->ndev->hw_features = NETIF_F_SG | | |
1675 | NETIF_F_RXCSUM | | |
1676 | NETIF_F_HW_CSUM; | |
1677 | port->ndev->features = port->ndev->hw_features | | |
1678 | NETIF_F_HW_VLAN_CTAG_FILTER; | |
1679 | port->ndev->vlan_features |= NETIF_F_SG; | |
1680 | port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops_2g; | |
1681 | port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave; | |
1682 | ||
1683 | /* Disable TX checksum offload by default due to HW bug */ | |
1684 | if (common->pdata->quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM) | |
1685 | port->ndev->features &= ~NETIF_F_HW_CSUM; | |
1686 | ||
1687 | ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats); | |
1688 | if (!ndev_priv->stats) | |
1689 | return -ENOMEM; | |
1690 | ||
1691 | ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free, | |
1692 | ndev_priv->stats); | |
1693 | if (ret) { | |
e6aaeafd | 1694 | dev_err(dev, "Failed to add percpu stat free action %d\n", ret); |
93a76530 GS |
1695 | return ret; |
1696 | } | |
1697 | ||
1698 | netif_napi_add(port->ndev, &common->napi_rx, | |
1699 | am65_cpsw_nuss_rx_poll, NAPI_POLL_WEIGHT); | |
1700 | ||
1701 | common->pf_p0_rx_ptype_rrobin = false; | |
1702 | ||
1703 | return ret; | |
1704 | } | |
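The min_mtu/max_mtu assignments above bound the netdev to VLAN-tagged frame sizes via AM65_CPSW_MIN_PACKET_SIZE and AM65_CPSW_MAX_PACKET_SIZE rather than the usual 68..1500 window. A small sketch expanding those limits from the standard Ethernet constants; the numeric values are assumed from mainline <linux/if_ether.h> and <linux/if_vlan.h>.

#include <stdio.h>

/* Assumed mainline header values; illustration only. */
#define ETH_ZLEN           60    /* min frame length without FCS */
#define ETH_FRAME_LEN      1514  /* max frame length without FCS */
#define ETH_FCS_LEN        4
#define VLAN_HLEN          4
#define VLAN_ETH_ZLEN      (ETH_ZLEN + VLAN_HLEN)
#define VLAN_ETH_FRAME_LEN (ETH_FRAME_LEN + VLAN_HLEN)

int main(void)
{
        /* corresponds to AM65_CPSW_MIN_PACKET_SIZE / AM65_CPSW_MAX_PACKET_SIZE */
        printf("min_mtu = %d\n", VLAN_ETH_ZLEN);                    /* 64   */
        printf("max_mtu = %d\n", VLAN_ETH_FRAME_LEN + ETH_FCS_LEN); /* 1522 */
        return 0;
}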
1705 | ||
1706 | static int am65_cpsw_nuss_ndev_add_napi_2g(struct am65_cpsw_common *common) | |
1707 | { | |
1708 | struct device *dev = common->dev; | |
1709 | struct am65_cpsw_port *port; | |
1710 | int i, ret = 0; | |
1711 | ||
1712 | port = am65_common_get_port(common, 1); | |
1713 | ||
1714 | for (i = 0; i < common->tx_ch_num; i++) { | |
1715 | struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i]; | |
1716 | ||
1717 | netif_tx_napi_add(port->ndev, &tx_chn->napi_tx, | |
1718 | am65_cpsw_nuss_tx_poll, NAPI_POLL_WEIGHT); | |
1719 | ||
1720 | ret = devm_request_irq(dev, tx_chn->irq, | |
1721 | am65_cpsw_nuss_tx_irq, | |
6f5c27f9 GS |
1722 | IRQF_TRIGGER_HIGH, |
1723 | tx_chn->tx_chn_name, tx_chn); | |
93a76530 GS |
1724 | if (ret) { |
1725 | dev_err(dev, "failure requesting tx%u irq %u, %d\n", | |
1726 | tx_chn->id, tx_chn->irq, ret); | |
1727 | goto err; | |
1728 | } | |
1729 | } | |
1730 | ||
1731 | err: | |
1732 | return ret; | |
1733 | } | |
1734 | ||
1735 | static int am65_cpsw_nuss_ndev_reg_2g(struct am65_cpsw_common *common) | |
1736 | { | |
1737 | struct device *dev = common->dev; | |
1738 | struct am65_cpsw_port *port; | |
1739 | int ret = 0; | |
1740 | ||
1741 | port = am65_common_get_port(common, 1); | |
1742 | ret = am65_cpsw_nuss_ndev_add_napi_2g(common); | |
1743 | if (ret) | |
1744 | goto err; | |
1745 | ||
1746 | ret = devm_request_irq(dev, common->rx_chns.irq, | |
1747 | am65_cpsw_nuss_rx_irq, | |
6f5c27f9 | 1748 | IRQF_TRIGGER_HIGH, dev_name(dev), common); |
93a76530 GS |
1749 | if (ret) { |
1750 | dev_err(dev, "failure requesting rx irq %u, %d\n", | |
1751 | common->rx_chns.irq, ret); | |
1752 | goto err; | |
1753 | } | |
1754 | ||
1755 | ret = register_netdev(port->ndev); | |
1756 | if (ret) | |
1757 | dev_err(dev, "error registering slave net device %d\n", ret); | |
1758 | ||
1759 | /* Can't auto-unregister the ndev via devm_add_action(): the driver | |
1760 | * core deconfigures DMA before releasing devres (see the remove path) | |
1761 | */ | |
1762 | err: | |
1763 | return ret; | |
1764 | } | |
1765 | ||
1766 | int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx) | |
1767 | { | |
1768 | int ret; | |
1769 | ||
1770 | common->tx_ch_num = num_tx; | |
1771 | ret = am65_cpsw_nuss_init_tx_chns(common); | |
1772 | if (ret) | |
1773 | return ret; | |
1774 | ||
1775 | return am65_cpsw_nuss_ndev_add_napi_2g(common); | |
1776 | } | |
1777 | ||
1778 | static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common) | |
1779 | { | |
1780 | struct am65_cpsw_port *port; | |
1781 | int i; | |
1782 | ||
1783 | for (i = 0; i < common->port_num; i++) { | |
1784 | port = &common->ports[i]; | |
1785 | if (port->ndev) | |
1786 | unregister_netdev(port->ndev); | |
1787 | } | |
1788 | } | |
1789 | ||
1790 | static const struct am65_cpsw_pdata am65x_sr1_0 = { | |
1791 | .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM, | |
1792 | }; | |
1793 | ||
1794 | static const struct am65_cpsw_pdata j721e_sr1_0 = { | |
1795 | .quirks = 0, | |
1796 | }; | |
1797 | ||
1798 | static const struct of_device_id am65_cpsw_nuss_of_mtable[] = { | |
1799 | { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0 }, | |
1800 | { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_sr1_0 }, | |
1801 | { /* sentinel */ }, | |
1802 | }; | |
1803 | MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable); | |
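Each compatible string above carries a per-SoC pdata whose quirks bitmap gates feature setup later in probe; for example the I2027 erratum clears NETIF_F_HW_CSUM in am65_cpsw_nuss_init_ndev_2g(). Below is a rough, self-contained model of that match-then-test flow. The table contents are taken from this file, but the bit value, structure layouts and helper names are stand-ins, not the kernel APIs.

#include <stdio.h>
#include <string.h>

/* Stand-in for the real quirk flag; the actual bit lives in am65-cpsw-nuss.h */
#define QUIRK_I2027_NO_TX_CSUM (1u << 0)

struct pdata { unsigned int quirks; };

struct of_match { const char *compatible; const struct pdata *data; };

static const struct pdata am65x_sr1_0 = { .quirks = QUIRK_I2027_NO_TX_CSUM };
static const struct pdata j721e_sr1_0 = { .quirks = 0 };

static const struct of_match mtable[] = {
        { "ti,am654-cpsw-nuss", &am65x_sr1_0 },
        { "ti,j721e-cpsw-nuss", &j721e_sr1_0 },
        { NULL, NULL },
};

/* Mock of the of_match_device() lookup done at the start of probe */
static const struct pdata *match_pdata(const char *compatible)
{
        for (const struct of_match *m = mtable; m->compatible; m++)
                if (!strcmp(m->compatible, compatible))
                        return m->data;
        return NULL;
}

int main(void)
{
        const struct pdata *pdata = match_pdata("ti,am654-cpsw-nuss");

        if (pdata && (pdata->quirks & QUIRK_I2027_NO_TX_CSUM))
                printf("TX checksum offload disabled by erratum quirk\n");
        return 0;
}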
1804 | ||
1805 | static int am65_cpsw_nuss_probe(struct platform_device *pdev) | |
1806 | { | |
1807 | struct cpsw_ale_params ale_params; | |
1808 | const struct of_device_id *of_id; | |
1809 | struct device *dev = &pdev->dev; | |
1810 | struct am65_cpsw_common *common; | |
1811 | struct device_node *node; | |
1812 | struct resource *res; | |
1813 | int ret, i; | |
1814 | ||
1815 | common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL); | |
1816 | if (!common) | |
1817 | return -ENOMEM; | |
1818 | common->dev = dev; | |
1819 | ||
1820 | of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev); | |
1821 | if (!of_id) | |
1822 | return -EINVAL; | |
1823 | common->pdata = of_id->data; | |
1824 | ||
1825 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss"); | |
1826 | common->ss_base = devm_ioremap_resource(&pdev->dev, res); | |
1827 | if (IS_ERR(common->ss_base)) | |
1828 | return PTR_ERR(common->ss_base); | |
1829 | common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE; | |
1830 | ||
1831 | node = of_get_child_by_name(dev->of_node, "ethernet-ports"); | |
1832 | if (!node) | |
1833 | return -ENOENT; | |
1834 | common->port_num = of_get_child_count(node); | |
1835 | of_node_put(node); | |
1836 | if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS) | |
1837 | return -ENOENT; | |
1838 | ||
1839 | if (common->port_num != 1) | |
1840 | return -EOPNOTSUPP; | |
1841 | ||
1842 | common->rx_flow_id_base = -1; | |
1843 | init_completion(&common->tdown_complete); | |
1844 | common->tx_ch_num = 1; | |
1845 | ||
1846 | ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); | |
1847 | if (ret) { | |
1848 | dev_err(dev, "error setting dma mask: %d\n", ret); | |
1849 | return ret; | |
1850 | } | |
1851 | ||
1852 | common->ports = devm_kcalloc(dev, common->port_num, | |
1853 | sizeof(*common->ports), | |
1854 | GFP_KERNEL); | |
1855 | if (!common->ports) | |
1856 | return -ENOMEM; | |
1857 | ||
1858 | pm_runtime_enable(dev); | |
1859 | ret = pm_runtime_get_sync(dev); | |
1860 | if (ret < 0) { | |
1861 | pm_runtime_put_noidle(dev); | |
1862 | pm_runtime_disable(dev); | |
1863 | return ret; | |
1864 | } | |
1865 | ||
1866 | ret = of_platform_populate(dev->of_node, NULL, NULL, dev); | |
1867 | /* Don't treat this as fatal; some boards have no child nodes to populate */ | |
1868 | if (ret) | |
1869 | dev_warn(dev, "populating child nodes err:%d\n", ret); | |
1870 | ||
1871 | am65_cpsw_nuss_get_ver(common); | |
1872 | ||
1873 | /* init tx and rx dma channels */ | |
1874 | ret = am65_cpsw_nuss_init_tx_chns(common); | |
1875 | if (ret) | |
1876 | goto err_of_clear; | |
1877 | ret = am65_cpsw_nuss_init_rx_chns(common); | |
1878 | if (ret) | |
1879 | goto err_of_clear; | |
1880 | ||
1881 | ret = am65_cpsw_nuss_init_host_p(common); | |
1882 | if (ret) | |
1883 | goto err_of_clear; | |
1884 | ||
1885 | ret = am65_cpsw_nuss_init_slave_ports(common); | |
1886 | if (ret) | |
1887 | goto err_of_clear; | |
1888 | ||
1889 | /* init common data */ | |
1890 | ale_params.dev = dev; | |
1891 | ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT; | |
1892 | ale_params.ale_entries = 0; | |
1893 | ale_params.ale_ports = common->port_num + 1; | |
1894 | ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE; | |
1895 | ale_params.nu_switch_ale = true; | |
1896 | ||
1897 | common->ale = cpsw_ale_create(&ale_params); | |
1898 | if (!common->ale) { | |
1899 | dev_err(dev, "error initializing ale engine\n"); | |
| ret = -ENODEV; | |
1900 | goto err_of_clear; | |
1901 | } | |
1902 | ||
1903 | /* init ports */ | |
1904 | for (i = 0; i < common->port_num; i++) | |
1905 | am65_cpsw_nuss_slave_disable_unused(&common->ports[i]); | |
1906 | ||
1907 | dev_set_drvdata(dev, common); | |
1908 | ||
1909 | ret = am65_cpsw_nuss_init_ndev_2g(common); | |
1910 | if (ret) | |
1911 | goto err_of_clear; | |
1912 | ||
1913 | ret = am65_cpsw_nuss_ndev_reg_2g(common); | |
1914 | if (ret) | |
1915 | goto err_of_clear; | |
1916 | ||
1917 | pm_runtime_put(dev); | |
1918 | return 0; | |
1919 | ||
1920 | err_of_clear: | |
1921 | of_platform_depopulate(dev); | |
1922 | pm_runtime_put_sync(dev); | |
1923 | pm_runtime_disable(dev); | |
1924 | return ret; | |
1925 | } | |
1926 | ||
1927 | static int am65_cpsw_nuss_remove(struct platform_device *pdev) | |
1928 | { | |
1929 | struct device *dev = &pdev->dev; | |
1930 | struct am65_cpsw_common *common; | |
1931 | int ret; | |
1932 | ||
1933 | common = dev_get_drvdata(dev); | |
1934 | ||
1935 | ret = pm_runtime_get_sync(&pdev->dev); | |
1936 | if (ret < 0) { | |
1937 | pm_runtime_put_noidle(&pdev->dev); | |
1938 | return ret; | |
1939 | } | |
1940 | ||
1941 | /* must unregister ndevs here because the driver core's release_driver | |
1942 | * path calls dma_deconfigure(dev) before devres_release_all(dev) | |
1943 | */ | |
1944 | am65_cpsw_nuss_cleanup_ndev(common); | |
1945 | ||
1946 | of_platform_depopulate(dev); | |
1947 | ||
1948 | pm_runtime_put_sync(&pdev->dev); | |
1949 | pm_runtime_disable(&pdev->dev); | |
1950 | return 0; | |
1951 | } | |
1952 | ||
1953 | static struct platform_driver am65_cpsw_nuss_driver = { | |
1954 | .driver = { | |
1955 | .name = AM65_CPSW_DRV_NAME, | |
1956 | .of_match_table = am65_cpsw_nuss_of_mtable, | |
1957 | }, | |
1958 | .probe = am65_cpsw_nuss_probe, | |
1959 | .remove = am65_cpsw_nuss_remove, | |
1960 | }; | |
1961 | ||
1962 | module_platform_driver(am65_cpsw_nuss_driver); | |
1963 | ||
1964 | MODULE_LICENSE("GPL v2"); | |
1965 | MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>"); | |
1966 | MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver"); |