]>
Commit | Line | Data |
---|---|---|
e0d1f481 DA |
1 | /* |
2 | * CAN bus driver for Bosch M_CAN controller | |
3 | * | |
4 | * Copyright (C) 2014 Freescale Semiconductor, Inc. | |
5 | * Dong Aisheng <b29396@freescale.com> | |
6 | * | |
7 | * Bosch M_CAN user manual can be obtained from: | |
8 | * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/ | |
9 | * mcan_users_manual_v302.pdf | |
10 | * | |
11 | * This file is licensed under the terms of the GNU General Public | |
12 | * License version 2. This program is licensed "as is" without any | |
13 | * warranty of any kind, whether express or implied. | |
14 | */ | |
15 | ||
16 | #include <linux/clk.h> | |
17 | #include <linux/delay.h> | |
18 | #include <linux/interrupt.h> | |
19 | #include <linux/io.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/module.h> | |
22 | #include <linux/netdevice.h> | |
23 | #include <linux/of.h> | |
24 | #include <linux/of_device.h> | |
25 | #include <linux/platform_device.h> | |
26 | ||
27 | #include <linux/can/dev.h> | |
28 | ||
/* NAPI poll weight (rx budget per poll cycle) */
#define M_CAN_NAPI_WEIGHT	64

/* number of u32 cells in the message ram configuration property */
#define MRAM_CFG_LEN	8

/* M_CAN register map: byte offsets from the controller base address */
enum m_can_reg {
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_FBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_BTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};

/* m_can last error code (LEC) values, as reported in PSR.LEC */
enum m_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
};

/* indices into the per-device Message RAM layout table (priv->mcfg[]) */
enum m_can_mram_cfg {
	MRAM_SIDF = 0,
	MRAM_XIDF,
	MRAM_RXF0,
	MRAM_RXF1,
	MRAM_RXB,
	MRAM_TXE,
	MRAM_TXB,
	MRAM_CFG_NUM,
};
107 | ||
108 | /* Test Register (TEST) */ | |
109 | #define TEST_LBCK BIT(4) | |
110 | ||
111 | /* CC Control Register(CCCR) */ | |
112 | #define CCCR_TEST BIT(7) | |
113 | #define CCCR_MON BIT(5) | |
114 | #define CCCR_CCE BIT(1) | |
115 | #define CCCR_INIT BIT(0) | |
116 | ||
117 | /* Bit Timing & Prescaler Register (BTP) */ | |
118 | #define BTR_BRP_MASK 0x3ff | |
119 | #define BTR_BRP_SHIFT 16 | |
120 | #define BTR_TSEG1_SHIFT 8 | |
121 | #define BTR_TSEG1_MASK (0x3f << BTR_TSEG1_SHIFT) | |
122 | #define BTR_TSEG2_SHIFT 4 | |
123 | #define BTR_TSEG2_MASK (0xf << BTR_TSEG2_SHIFT) | |
124 | #define BTR_SJW_SHIFT 0 | |
125 | #define BTR_SJW_MASK 0xf | |
126 | ||
127 | /* Error Counter Register(ECR) */ | |
128 | #define ECR_RP BIT(15) | |
129 | #define ECR_REC_SHIFT 8 | |
130 | #define ECR_REC_MASK (0x7f << ECR_REC_SHIFT) | |
131 | #define ECR_TEC_SHIFT 0 | |
132 | #define ECR_TEC_MASK 0xff | |
133 | ||
134 | /* Protocol Status Register(PSR) */ | |
135 | #define PSR_BO BIT(7) | |
136 | #define PSR_EW BIT(6) | |
137 | #define PSR_EP BIT(5) | |
138 | #define PSR_LEC_MASK 0x7 | |
139 | ||
140 | /* Interrupt Register(IR) */ | |
141 | #define IR_ALL_INT 0xffffffff | |
142 | #define IR_STE BIT(31) | |
143 | #define IR_FOE BIT(30) | |
144 | #define IR_ACKE BIT(29) | |
145 | #define IR_BE BIT(28) | |
146 | #define IR_CRCE BIT(27) | |
147 | #define IR_WDI BIT(26) | |
148 | #define IR_BO BIT(25) | |
149 | #define IR_EW BIT(24) | |
150 | #define IR_EP BIT(23) | |
151 | #define IR_ELO BIT(22) | |
152 | #define IR_BEU BIT(21) | |
153 | #define IR_BEC BIT(20) | |
154 | #define IR_DRX BIT(19) | |
155 | #define IR_TOO BIT(18) | |
156 | #define IR_MRAF BIT(17) | |
157 | #define IR_TSW BIT(16) | |
158 | #define IR_TEFL BIT(15) | |
159 | #define IR_TEFF BIT(14) | |
160 | #define IR_TEFW BIT(13) | |
161 | #define IR_TEFN BIT(12) | |
162 | #define IR_TFE BIT(11) | |
163 | #define IR_TCF BIT(10) | |
164 | #define IR_TC BIT(9) | |
165 | #define IR_HPM BIT(8) | |
166 | #define IR_RF1L BIT(7) | |
167 | #define IR_RF1F BIT(6) | |
168 | #define IR_RF1W BIT(5) | |
169 | #define IR_RF1N BIT(4) | |
170 | #define IR_RF0L BIT(3) | |
171 | #define IR_RF0F BIT(2) | |
172 | #define IR_RF0W BIT(1) | |
173 | #define IR_RF0N BIT(0) | |
174 | #define IR_ERR_STATE (IR_BO | IR_EW | IR_EP) | |
175 | #define IR_ERR_LEC (IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE) | |
176 | #define IR_ERR_BUS (IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \ | |
177 | IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \ | |
178 | IR_RF1L | IR_RF0L) | |
179 | #define IR_ERR_ALL (IR_ERR_STATE | IR_ERR_BUS) | |
180 | ||
181 | /* Interrupt Line Select (ILS) */ | |
182 | #define ILS_ALL_INT0 0x0 | |
183 | #define ILS_ALL_INT1 0xFFFFFFFF | |
184 | ||
185 | /* Interrupt Line Enable (ILE) */ | |
186 | #define ILE_EINT0 BIT(0) | |
187 | #define ILE_EINT1 BIT(1) | |
188 | ||
189 | /* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ | |
190 | #define RXFC_FWM_OFF 24 | |
191 | #define RXFC_FWM_MASK 0x7f | |
192 | #define RXFC_FWM_1 (1 << RXFC_FWM_OFF) | |
193 | #define RXFC_FS_OFF 16 | |
194 | #define RXFC_FS_MASK 0x7f | |
195 | ||
196 | /* Rx FIFO 0/1 Status (RXF0S/RXF1S) */ | |
197 | #define RXFS_RFL BIT(25) | |
198 | #define RXFS_FF BIT(24) | |
199 | #define RXFS_FPI_OFF 16 | |
200 | #define RXFS_FPI_MASK 0x3f0000 | |
201 | #define RXFS_FGI_OFF 8 | |
202 | #define RXFS_FGI_MASK 0x3f00 | |
203 | #define RXFS_FFL_MASK 0x7f | |
204 | ||
205 | /* Rx Buffer / FIFO Element Size Configuration (RXESC) */ | |
206 | #define M_CAN_RXESC_8BYTES 0x0 | |
207 | ||
208 | /* Tx Buffer Configuration(TXBC) */ | |
209 | #define TXBC_NDTB_OFF 16 | |
210 | #define TXBC_NDTB_MASK 0x3f | |
211 | ||
212 | /* Tx Buffer Element Size Configuration(TXESC) */ | |
213 | #define TXESC_TBDS_8BYTES 0x0 | |
214 | ||
215 | /* Tx Event FIFO Con.guration (TXEFC) */ | |
216 | #define TXEFC_EFS_OFF 16 | |
217 | #define TXEFC_EFS_MASK 0x3f | |
218 | ||
219 | /* Message RAM Configuration (in bytes) */ | |
220 | #define SIDF_ELEMENT_SIZE 4 | |
221 | #define XIDF_ELEMENT_SIZE 8 | |
222 | #define RXF0_ELEMENT_SIZE 16 | |
223 | #define RXF1_ELEMENT_SIZE 16 | |
224 | #define RXB_ELEMENT_SIZE 16 | |
225 | #define TXE_ELEMENT_SIZE 8 | |
226 | #define TXB_ELEMENT_SIZE 16 | |
227 | ||
228 | /* Message RAM Elements */ | |
229 | #define M_CAN_FIFO_ID 0x0 | |
230 | #define M_CAN_FIFO_DLC 0x4 | |
231 | #define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) | |
232 | ||
233 | /* Rx Buffer Element */ | |
234 | #define RX_BUF_ESI BIT(31) | |
235 | #define RX_BUF_XTD BIT(30) | |
236 | #define RX_BUF_RTR BIT(29) | |
237 | ||
238 | /* Tx Buffer Element */ | |
239 | #define TX_BUF_XTD BIT(30) | |
240 | #define TX_BUF_RTR BIT(29) | |
241 | ||
242 | /* address offset and element number for each FIFO/Buffer in the Message RAM */ | |
243 | struct mram_cfg { | |
244 | u16 off; | |
245 | u8 num; | |
246 | }; | |
247 | ||
248 | /* m_can private data structure */ | |
249 | struct m_can_priv { | |
250 | struct can_priv can; /* must be the first member */ | |
251 | struct napi_struct napi; | |
252 | struct net_device *dev; | |
253 | struct device *device; | |
254 | struct clk *hclk; | |
255 | struct clk *cclk; | |
256 | void __iomem *base; | |
257 | u32 irqstatus; | |
258 | ||
259 | /* message ram configuration */ | |
260 | void __iomem *mram_base; | |
261 | struct mram_cfg mcfg[MRAM_CFG_NUM]; | |
262 | }; | |
263 | ||
264 | static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg) | |
265 | { | |
266 | return readl(priv->base + reg); | |
267 | } | |
268 | ||
269 | static inline void m_can_write(const struct m_can_priv *priv, | |
270 | enum m_can_reg reg, u32 val) | |
271 | { | |
272 | writel(val, priv->base + reg); | |
273 | } | |
274 | ||
275 | static inline u32 m_can_fifo_read(const struct m_can_priv *priv, | |
276 | u32 fgi, unsigned int offset) | |
277 | { | |
278 | return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off + | |
279 | fgi * RXF0_ELEMENT_SIZE + offset); | |
280 | } | |
281 | ||
282 | static inline void m_can_fifo_write(const struct m_can_priv *priv, | |
283 | u32 fpi, unsigned int offset, u32 val) | |
284 | { | |
285 | return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off + | |
286 | fpi * TXB_ELEMENT_SIZE + offset); | |
287 | } | |
288 | ||
289 | static inline void m_can_config_endisable(const struct m_can_priv *priv, | |
290 | bool enable) | |
291 | { | |
292 | u32 cccr = m_can_read(priv, M_CAN_CCCR); | |
293 | u32 timeout = 10; | |
294 | u32 val = 0; | |
295 | ||
296 | if (enable) { | |
297 | /* enable m_can configuration */ | |
298 | m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); | |
299 | /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ | |
300 | m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); | |
301 | } else { | |
302 | m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); | |
303 | } | |
304 | ||
305 | /* there's a delay for module initialization */ | |
306 | if (enable) | |
307 | val = CCCR_INIT | CCCR_CCE; | |
308 | ||
309 | while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { | |
310 | if (timeout == 0) { | |
311 | netdev_warn(priv->dev, "Failed to init module\n"); | |
312 | return; | |
313 | } | |
314 | timeout--; | |
315 | udelay(1); | |
316 | } | |
317 | } | |
318 | ||
319 | static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv) | |
320 | { | |
321 | m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1); | |
322 | } | |
323 | ||
324 | static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv) | |
325 | { | |
326 | m_can_write(priv, M_CAN_ILE, 0x0); | |
327 | } | |
328 | ||
329 | static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, | |
330 | u32 rxfs) | |
331 | { | |
332 | struct m_can_priv *priv = netdev_priv(dev); | |
921f1681 | 333 | u32 id, fgi, dlc; |
e0d1f481 DA |
334 | |
335 | /* calculate the fifo get index for where to read data */ | |
336 | fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; | |
337 | id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); | |
338 | if (id & RX_BUF_XTD) | |
339 | cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; | |
340 | else | |
341 | cf->can_id = (id >> 18) & CAN_SFF_MASK; | |
342 | ||
921f1681 DA |
343 | dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); |
344 | cf->can_dlc = get_can_dlc((dlc >> 16) & 0x0F); | |
345 | ||
e0d1f481 DA |
346 | if (id & RX_BUF_RTR) { |
347 | cf->can_id |= CAN_RTR_FLAG; | |
348 | } else { | |
e0d1f481 DA |
349 | *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, |
350 | M_CAN_FIFO_DATA(0)); | |
351 | *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, | |
352 | M_CAN_FIFO_DATA(1)); | |
353 | } | |
354 | ||
355 | /* acknowledge rx fifo 0 */ | |
356 | m_can_write(priv, M_CAN_RXF0A, fgi); | |
357 | } | |
358 | ||
359 | static int m_can_do_rx_poll(struct net_device *dev, int quota) | |
360 | { | |
361 | struct m_can_priv *priv = netdev_priv(dev); | |
362 | struct net_device_stats *stats = &dev->stats; | |
363 | struct sk_buff *skb; | |
364 | struct can_frame *frame; | |
365 | u32 pkts = 0; | |
366 | u32 rxfs; | |
367 | ||
368 | rxfs = m_can_read(priv, M_CAN_RXF0S); | |
369 | if (!(rxfs & RXFS_FFL_MASK)) { | |
370 | netdev_dbg(dev, "no messages in fifo0\n"); | |
371 | return 0; | |
372 | } | |
373 | ||
374 | while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) { | |
375 | if (rxfs & RXFS_RFL) | |
376 | netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); | |
377 | ||
378 | skb = alloc_can_skb(dev, &frame); | |
379 | if (!skb) { | |
380 | stats->rx_dropped++; | |
381 | return pkts; | |
382 | } | |
383 | ||
384 | m_can_read_fifo(dev, frame, rxfs); | |
385 | ||
386 | stats->rx_packets++; | |
387 | stats->rx_bytes += frame->can_dlc; | |
388 | ||
389 | netif_receive_skb(skb); | |
390 | ||
391 | quota--; | |
392 | pkts++; | |
393 | rxfs = m_can_read(priv, M_CAN_RXF0S); | |
394 | } | |
395 | ||
396 | if (pkts) | |
397 | can_led_event(dev, CAN_LED_EVENT_RX); | |
398 | ||
399 | return pkts; | |
400 | } | |
401 | ||
402 | static int m_can_handle_lost_msg(struct net_device *dev) | |
403 | { | |
404 | struct net_device_stats *stats = &dev->stats; | |
405 | struct sk_buff *skb; | |
406 | struct can_frame *frame; | |
407 | ||
408 | netdev_err(dev, "msg lost in rxf0\n"); | |
409 | ||
410 | stats->rx_errors++; | |
411 | stats->rx_over_errors++; | |
412 | ||
413 | skb = alloc_can_err_skb(dev, &frame); | |
414 | if (unlikely(!skb)) | |
415 | return 0; | |
416 | ||
417 | frame->can_id |= CAN_ERR_CRTL; | |
418 | frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; | |
419 | ||
420 | netif_receive_skb(skb); | |
421 | ||
422 | return 1; | |
423 | } | |
424 | ||
425 | static int m_can_handle_lec_err(struct net_device *dev, | |
426 | enum m_can_lec_type lec_type) | |
427 | { | |
428 | struct m_can_priv *priv = netdev_priv(dev); | |
429 | struct net_device_stats *stats = &dev->stats; | |
430 | struct can_frame *cf; | |
431 | struct sk_buff *skb; | |
432 | ||
433 | priv->can.can_stats.bus_error++; | |
434 | stats->rx_errors++; | |
435 | ||
436 | /* propagate the error condition to the CAN stack */ | |
437 | skb = alloc_can_err_skb(dev, &cf); | |
438 | if (unlikely(!skb)) | |
439 | return 0; | |
440 | ||
441 | /* check for 'last error code' which tells us the | |
442 | * type of the last error to occur on the CAN bus | |
443 | */ | |
444 | cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; | |
445 | cf->data[2] |= CAN_ERR_PROT_UNSPEC; | |
446 | ||
447 | switch (lec_type) { | |
448 | case LEC_STUFF_ERROR: | |
449 | netdev_dbg(dev, "stuff error\n"); | |
450 | cf->data[2] |= CAN_ERR_PROT_STUFF; | |
451 | break; | |
452 | case LEC_FORM_ERROR: | |
453 | netdev_dbg(dev, "form error\n"); | |
454 | cf->data[2] |= CAN_ERR_PROT_FORM; | |
455 | break; | |
456 | case LEC_ACK_ERROR: | |
457 | netdev_dbg(dev, "ack error\n"); | |
458 | cf->data[3] |= (CAN_ERR_PROT_LOC_ACK | | |
459 | CAN_ERR_PROT_LOC_ACK_DEL); | |
460 | break; | |
461 | case LEC_BIT1_ERROR: | |
462 | netdev_dbg(dev, "bit1 error\n"); | |
463 | cf->data[2] |= CAN_ERR_PROT_BIT1; | |
464 | break; | |
465 | case LEC_BIT0_ERROR: | |
466 | netdev_dbg(dev, "bit0 error\n"); | |
467 | cf->data[2] |= CAN_ERR_PROT_BIT0; | |
468 | break; | |
469 | case LEC_CRC_ERROR: | |
470 | netdev_dbg(dev, "CRC error\n"); | |
471 | cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ | | |
472 | CAN_ERR_PROT_LOC_CRC_DEL); | |
473 | break; | |
474 | default: | |
475 | break; | |
476 | } | |
477 | ||
478 | stats->rx_packets++; | |
479 | stats->rx_bytes += cf->can_dlc; | |
480 | netif_receive_skb(skb); | |
481 | ||
482 | return 1; | |
483 | } | |
484 | ||
f6a99649 DA |
485 | static int __m_can_get_berr_counter(const struct net_device *dev, |
486 | struct can_berr_counter *bec) | |
487 | { | |
488 | struct m_can_priv *priv = netdev_priv(dev); | |
489 | unsigned int ecr; | |
490 | ||
491 | ecr = m_can_read(priv, M_CAN_ECR); | |
492 | bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT; | |
493 | bec->txerr = ecr & ECR_TEC_MASK; | |
494 | ||
495 | return 0; | |
496 | } | |
497 | ||
e0d1f481 DA |
498 | static int m_can_get_berr_counter(const struct net_device *dev, |
499 | struct can_berr_counter *bec) | |
500 | { | |
501 | struct m_can_priv *priv = netdev_priv(dev); | |
e0d1f481 DA |
502 | int err; |
503 | ||
504 | err = clk_prepare_enable(priv->hclk); | |
505 | if (err) | |
506 | return err; | |
507 | ||
508 | err = clk_prepare_enable(priv->cclk); | |
509 | if (err) { | |
510 | clk_disable_unprepare(priv->hclk); | |
511 | return err; | |
512 | } | |
513 | ||
f6a99649 | 514 | __m_can_get_berr_counter(dev, bec); |
e0d1f481 DA |
515 | |
516 | clk_disable_unprepare(priv->cclk); | |
517 | clk_disable_unprepare(priv->hclk); | |
518 | ||
519 | return 0; | |
520 | } | |
521 | ||
522 | static int m_can_handle_state_change(struct net_device *dev, | |
523 | enum can_state new_state) | |
524 | { | |
525 | struct m_can_priv *priv = netdev_priv(dev); | |
526 | struct net_device_stats *stats = &dev->stats; | |
527 | struct can_frame *cf; | |
528 | struct sk_buff *skb; | |
529 | struct can_berr_counter bec; | |
530 | unsigned int ecr; | |
531 | ||
532 | switch (new_state) { | |
533 | case CAN_STATE_ERROR_ACTIVE: | |
534 | /* error warning state */ | |
535 | priv->can.can_stats.error_warning++; | |
536 | priv->can.state = CAN_STATE_ERROR_WARNING; | |
537 | break; | |
538 | case CAN_STATE_ERROR_PASSIVE: | |
539 | /* error passive state */ | |
540 | priv->can.can_stats.error_passive++; | |
541 | priv->can.state = CAN_STATE_ERROR_PASSIVE; | |
542 | break; | |
543 | case CAN_STATE_BUS_OFF: | |
544 | /* bus-off state */ | |
545 | priv->can.state = CAN_STATE_BUS_OFF; | |
546 | m_can_disable_all_interrupts(priv); | |
547 | can_bus_off(dev); | |
548 | break; | |
549 | default: | |
550 | break; | |
551 | } | |
552 | ||
553 | /* propagate the error condition to the CAN stack */ | |
554 | skb = alloc_can_err_skb(dev, &cf); | |
555 | if (unlikely(!skb)) | |
556 | return 0; | |
557 | ||
f6a99649 | 558 | __m_can_get_berr_counter(dev, &bec); |
e0d1f481 DA |
559 | |
560 | switch (new_state) { | |
561 | case CAN_STATE_ERROR_ACTIVE: | |
562 | /* error warning state */ | |
563 | cf->can_id |= CAN_ERR_CRTL; | |
564 | cf->data[1] = (bec.txerr > bec.rxerr) ? | |
565 | CAN_ERR_CRTL_TX_WARNING : | |
566 | CAN_ERR_CRTL_RX_WARNING; | |
567 | cf->data[6] = bec.txerr; | |
568 | cf->data[7] = bec.rxerr; | |
569 | break; | |
570 | case CAN_STATE_ERROR_PASSIVE: | |
571 | /* error passive state */ | |
572 | cf->can_id |= CAN_ERR_CRTL; | |
573 | ecr = m_can_read(priv, M_CAN_ECR); | |
574 | if (ecr & ECR_RP) | |
575 | cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; | |
576 | if (bec.txerr > 127) | |
577 | cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; | |
578 | cf->data[6] = bec.txerr; | |
579 | cf->data[7] = bec.rxerr; | |
580 | break; | |
581 | case CAN_STATE_BUS_OFF: | |
582 | /* bus-off state */ | |
583 | cf->can_id |= CAN_ERR_BUSOFF; | |
584 | break; | |
585 | default: | |
586 | break; | |
587 | } | |
588 | ||
589 | stats->rx_packets++; | |
590 | stats->rx_bytes += cf->can_dlc; | |
591 | netif_receive_skb(skb); | |
592 | ||
593 | return 1; | |
594 | } | |
595 | ||
596 | static int m_can_handle_state_errors(struct net_device *dev, u32 psr) | |
597 | { | |
598 | struct m_can_priv *priv = netdev_priv(dev); | |
599 | int work_done = 0; | |
600 | ||
601 | if ((psr & PSR_EW) && | |
602 | (priv->can.state != CAN_STATE_ERROR_WARNING)) { | |
603 | netdev_dbg(dev, "entered error warning state\n"); | |
604 | work_done += m_can_handle_state_change(dev, | |
605 | CAN_STATE_ERROR_WARNING); | |
606 | } | |
607 | ||
608 | if ((psr & PSR_EP) && | |
609 | (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { | |
610 | netdev_dbg(dev, "entered error warning state\n"); | |
611 | work_done += m_can_handle_state_change(dev, | |
612 | CAN_STATE_ERROR_PASSIVE); | |
613 | } | |
614 | ||
615 | if ((psr & PSR_BO) && | |
616 | (priv->can.state != CAN_STATE_BUS_OFF)) { | |
617 | netdev_dbg(dev, "entered error warning state\n"); | |
618 | work_done += m_can_handle_state_change(dev, | |
619 | CAN_STATE_BUS_OFF); | |
620 | } | |
621 | ||
622 | return work_done; | |
623 | } | |
624 | ||
625 | static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus) | |
626 | { | |
627 | if (irqstatus & IR_WDI) | |
628 | netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); | |
629 | if (irqstatus & IR_BEU) | |
630 | netdev_err(dev, "Error Logging Overflow\n"); | |
631 | if (irqstatus & IR_BEU) | |
632 | netdev_err(dev, "Bit Error Uncorrected\n"); | |
633 | if (irqstatus & IR_BEC) | |
634 | netdev_err(dev, "Bit Error Corrected\n"); | |
635 | if (irqstatus & IR_TOO) | |
636 | netdev_err(dev, "Timeout reached\n"); | |
637 | if (irqstatus & IR_MRAF) | |
638 | netdev_err(dev, "Message RAM access failure occurred\n"); | |
639 | } | |
640 | ||
641 | static inline bool is_lec_err(u32 psr) | |
642 | { | |
643 | psr &= LEC_UNUSED; | |
644 | ||
645 | return psr && (psr != LEC_UNUSED); | |
646 | } | |
647 | ||
648 | static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus, | |
649 | u32 psr) | |
650 | { | |
651 | struct m_can_priv *priv = netdev_priv(dev); | |
652 | int work_done = 0; | |
653 | ||
654 | if (irqstatus & IR_RF0L) | |
655 | work_done += m_can_handle_lost_msg(dev); | |
656 | ||
657 | /* handle lec errors on the bus */ | |
658 | if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && | |
659 | is_lec_err(psr)) | |
660 | work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED); | |
661 | ||
662 | /* other unproccessed error interrupts */ | |
663 | m_can_handle_other_err(dev, irqstatus); | |
664 | ||
665 | return work_done; | |
666 | } | |
667 | ||
668 | static int m_can_poll(struct napi_struct *napi, int quota) | |
669 | { | |
670 | struct net_device *dev = napi->dev; | |
671 | struct m_can_priv *priv = netdev_priv(dev); | |
672 | int work_done = 0; | |
673 | u32 irqstatus, psr; | |
674 | ||
675 | irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR); | |
676 | if (!irqstatus) | |
677 | goto end; | |
678 | ||
679 | psr = m_can_read(priv, M_CAN_PSR); | |
680 | if (irqstatus & IR_ERR_STATE) | |
681 | work_done += m_can_handle_state_errors(dev, psr); | |
682 | ||
683 | if (irqstatus & IR_ERR_BUS) | |
684 | work_done += m_can_handle_bus_errors(dev, irqstatus, psr); | |
685 | ||
686 | if (irqstatus & IR_RF0N) | |
687 | work_done += m_can_do_rx_poll(dev, (quota - work_done)); | |
688 | ||
689 | if (work_done < quota) { | |
690 | napi_complete(napi); | |
691 | m_can_enable_all_interrupts(priv); | |
692 | } | |
693 | ||
694 | end: | |
695 | return work_done; | |
696 | } | |
697 | ||
698 | static irqreturn_t m_can_isr(int irq, void *dev_id) | |
699 | { | |
700 | struct net_device *dev = (struct net_device *)dev_id; | |
701 | struct m_can_priv *priv = netdev_priv(dev); | |
702 | struct net_device_stats *stats = &dev->stats; | |
703 | u32 ir; | |
704 | ||
705 | ir = m_can_read(priv, M_CAN_IR); | |
706 | if (!ir) | |
707 | return IRQ_NONE; | |
708 | ||
709 | /* ACK all irqs */ | |
710 | if (ir & IR_ALL_INT) | |
711 | m_can_write(priv, M_CAN_IR, ir); | |
712 | ||
713 | /* schedule NAPI in case of | |
714 | * - rx IRQ | |
715 | * - state change IRQ | |
716 | * - bus error IRQ and bus error reporting | |
717 | */ | |
718 | if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) { | |
719 | priv->irqstatus = ir; | |
720 | m_can_disable_all_interrupts(priv); | |
721 | napi_schedule(&priv->napi); | |
722 | } | |
723 | ||
724 | /* transmission complete interrupt */ | |
725 | if (ir & IR_TC) { | |
726 | stats->tx_bytes += can_get_echo_skb(dev, 0); | |
727 | stats->tx_packets++; | |
728 | can_led_event(dev, CAN_LED_EVENT_TX); | |
729 | netif_wake_queue(dev); | |
730 | } | |
731 | ||
732 | return IRQ_HANDLED; | |
733 | } | |
734 | ||
735 | static const struct can_bittiming_const m_can_bittiming_const = { | |
736 | .name = KBUILD_MODNAME, | |
737 | .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ | |
738 | .tseg1_max = 64, | |
739 | .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ | |
740 | .tseg2_max = 16, | |
741 | .sjw_max = 16, | |
742 | .brp_min = 1, | |
743 | .brp_max = 1024, | |
744 | .brp_inc = 1, | |
745 | }; | |
746 | ||
747 | static int m_can_set_bittiming(struct net_device *dev) | |
748 | { | |
749 | struct m_can_priv *priv = netdev_priv(dev); | |
750 | const struct can_bittiming *bt = &priv->can.bittiming; | |
751 | u16 brp, sjw, tseg1, tseg2; | |
752 | u32 reg_btp; | |
753 | ||
754 | brp = bt->brp - 1; | |
755 | sjw = bt->sjw - 1; | |
756 | tseg1 = bt->prop_seg + bt->phase_seg1 - 1; | |
757 | tseg2 = bt->phase_seg2 - 1; | |
758 | reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | | |
759 | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); | |
760 | m_can_write(priv, M_CAN_BTP, reg_btp); | |
761 | netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); | |
762 | ||
763 | return 0; | |
764 | } | |
765 | ||
766 | /* Configure M_CAN chip: | |
767 | * - set rx buffer/fifo element size | |
768 | * - configure rx fifo | |
769 | * - accept non-matching frame into fifo 0 | |
770 | * - configure tx buffer | |
771 | * - configure mode | |
772 | * - setup bittiming | |
773 | */ | |
774 | static void m_can_chip_config(struct net_device *dev) | |
775 | { | |
776 | struct m_can_priv *priv = netdev_priv(dev); | |
777 | u32 cccr, test; | |
778 | ||
779 | m_can_config_endisable(priv, true); | |
780 | ||
781 | /* RX Buffer/FIFO Element Size 8 bytes data field */ | |
782 | m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); | |
783 | ||
784 | /* Accept Non-matching Frames Into FIFO 0 */ | |
785 | m_can_write(priv, M_CAN_GFC, 0x0); | |
786 | ||
787 | /* only support one Tx Buffer currently */ | |
788 | m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | | |
789 | priv->mcfg[MRAM_TXB].off); | |
790 | ||
791 | /* only support 8 bytes firstly */ | |
792 | m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); | |
793 | ||
794 | m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | | |
795 | priv->mcfg[MRAM_TXE].off); | |
796 | ||
797 | /* rx fifo configuration, blocking mode, fifo size 1 */ | |
798 | m_can_write(priv, M_CAN_RXF0C, | |
799 | (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) | | |
800 | RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off); | |
801 | ||
802 | m_can_write(priv, M_CAN_RXF1C, | |
803 | (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) | | |
804 | RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); | |
805 | ||
806 | cccr = m_can_read(priv, M_CAN_CCCR); | |
807 | cccr &= ~(CCCR_TEST | CCCR_MON); | |
808 | test = m_can_read(priv, M_CAN_TEST); | |
809 | test &= ~TEST_LBCK; | |
810 | ||
811 | if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) | |
812 | cccr |= CCCR_MON; | |
813 | ||
814 | if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { | |
815 | cccr |= CCCR_TEST; | |
816 | test |= TEST_LBCK; | |
817 | } | |
818 | ||
819 | m_can_write(priv, M_CAN_CCCR, cccr); | |
820 | m_can_write(priv, M_CAN_TEST, test); | |
821 | ||
822 | /* enable interrupts */ | |
823 | m_can_write(priv, M_CAN_IR, IR_ALL_INT); | |
824 | if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) | |
825 | m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC); | |
826 | else | |
827 | m_can_write(priv, M_CAN_IE, IR_ALL_INT); | |
828 | ||
829 | /* route all interrupts to INT0 */ | |
830 | m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0); | |
831 | ||
832 | /* set bittiming params */ | |
833 | m_can_set_bittiming(dev); | |
834 | ||
835 | m_can_config_endisable(priv, false); | |
836 | } | |
837 | ||
838 | static void m_can_start(struct net_device *dev) | |
839 | { | |
840 | struct m_can_priv *priv = netdev_priv(dev); | |
841 | ||
842 | /* basic m_can configuration */ | |
843 | m_can_chip_config(dev); | |
844 | ||
845 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | |
846 | ||
847 | m_can_enable_all_interrupts(priv); | |
848 | } | |
849 | ||
850 | static int m_can_set_mode(struct net_device *dev, enum can_mode mode) | |
851 | { | |
852 | switch (mode) { | |
853 | case CAN_MODE_START: | |
854 | m_can_start(dev); | |
855 | netif_wake_queue(dev); | |
856 | break; | |
857 | default: | |
858 | return -EOPNOTSUPP; | |
859 | } | |
860 | ||
861 | return 0; | |
862 | } | |
863 | ||
/* counterpart of alloc_m_can_dev() */
static void free_m_can_dev(struct net_device *dev)
{
	free_candev(dev);
}
868 | ||
869 | static struct net_device *alloc_m_can_dev(void) | |
870 | { | |
871 | struct net_device *dev; | |
872 | struct m_can_priv *priv; | |
873 | ||
874 | dev = alloc_candev(sizeof(*priv), 1); | |
875 | if (!dev) | |
876 | return NULL; | |
877 | ||
878 | priv = netdev_priv(dev); | |
879 | netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT); | |
880 | ||
881 | priv->dev = dev; | |
882 | priv->can.bittiming_const = &m_can_bittiming_const; | |
883 | priv->can.do_set_mode = m_can_set_mode; | |
884 | priv->can.do_get_berr_counter = m_can_get_berr_counter; | |
885 | priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | | |
886 | CAN_CTRLMODE_LISTENONLY | | |
887 | CAN_CTRLMODE_BERR_REPORTING; | |
888 | ||
889 | return dev; | |
890 | } | |
891 | ||
892 | static int m_can_open(struct net_device *dev) | |
893 | { | |
894 | struct m_can_priv *priv = netdev_priv(dev); | |
895 | int err; | |
896 | ||
897 | err = clk_prepare_enable(priv->hclk); | |
898 | if (err) | |
899 | return err; | |
900 | ||
901 | err = clk_prepare_enable(priv->cclk); | |
902 | if (err) | |
903 | goto exit_disable_hclk; | |
904 | ||
905 | /* open the can device */ | |
906 | err = open_candev(dev); | |
907 | if (err) { | |
908 | netdev_err(dev, "failed to open can device\n"); | |
909 | goto exit_disable_cclk; | |
910 | } | |
911 | ||
912 | /* register interrupt handler */ | |
913 | err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name, | |
914 | dev); | |
915 | if (err < 0) { | |
916 | netdev_err(dev, "failed to request interrupt\n"); | |
917 | goto exit_irq_fail; | |
918 | } | |
919 | ||
920 | /* start the m_can controller */ | |
921 | m_can_start(dev); | |
922 | ||
923 | can_led_event(dev, CAN_LED_EVENT_OPEN); | |
924 | napi_enable(&priv->napi); | |
925 | netif_start_queue(dev); | |
926 | ||
927 | return 0; | |
928 | ||
929 | exit_irq_fail: | |
930 | close_candev(dev); | |
931 | exit_disable_cclk: | |
932 | clk_disable_unprepare(priv->cclk); | |
933 | exit_disable_hclk: | |
934 | clk_disable_unprepare(priv->hclk); | |
935 | return err; | |
936 | } | |
937 | ||
938 | static void m_can_stop(struct net_device *dev) | |
939 | { | |
940 | struct m_can_priv *priv = netdev_priv(dev); | |
941 | ||
942 | /* disable all interrupts */ | |
943 | m_can_disable_all_interrupts(priv); | |
944 | ||
945 | clk_disable_unprepare(priv->hclk); | |
946 | clk_disable_unprepare(priv->cclk); | |
947 | ||
948 | /* set the state as STOPPED */ | |
949 | priv->can.state = CAN_STATE_STOPPED; | |
950 | } | |
951 | ||
952 | static int m_can_close(struct net_device *dev) | |
953 | { | |
954 | struct m_can_priv *priv = netdev_priv(dev); | |
955 | ||
956 | netif_stop_queue(dev); | |
957 | napi_disable(&priv->napi); | |
958 | m_can_stop(dev); | |
959 | free_irq(dev->irq, dev); | |
960 | close_candev(dev); | |
961 | can_led_event(dev, CAN_LED_EVENT_STOP); | |
962 | ||
963 | return 0; | |
964 | } | |
965 | ||
966 | static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, | |
967 | struct net_device *dev) | |
968 | { | |
969 | struct m_can_priv *priv = netdev_priv(dev); | |
970 | struct can_frame *cf = (struct can_frame *)skb->data; | |
971 | u32 id; | |
972 | ||
973 | if (can_dropped_invalid_skb(dev, skb)) | |
974 | return NETDEV_TX_OK; | |
975 | ||
976 | netif_stop_queue(dev); | |
977 | ||
978 | if (cf->can_id & CAN_EFF_FLAG) { | |
979 | id = cf->can_id & CAN_EFF_MASK; | |
980 | id |= TX_BUF_XTD; | |
981 | } else { | |
982 | id = ((cf->can_id & CAN_SFF_MASK) << 18); | |
983 | } | |
984 | ||
985 | if (cf->can_id & CAN_RTR_FLAG) | |
986 | id |= TX_BUF_RTR; | |
987 | ||
988 | /* message ram configuration */ | |
989 | m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); | |
990 | m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); | |
991 | m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); | |
992 | m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); | |
993 | can_put_echo_skb(skb, dev, 0); | |
994 | ||
995 | /* enable first TX buffer to start transfer */ | |
996 | m_can_write(priv, M_CAN_TXBTIE, 0x1); | |
997 | m_can_write(priv, M_CAN_TXBAR, 0x1); | |
998 | ||
999 | return NETDEV_TX_OK; | |
1000 | } | |
1001 | ||
/* net_device callbacks; can_change_mtu restricts the MTU to valid
 * CAN frame sizes
 */
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
1008 | ||
1009 | static int register_m_can_dev(struct net_device *dev) | |
1010 | { | |
1011 | dev->flags |= IFF_ECHO; /* we support local echo */ | |
1012 | dev->netdev_ops = &m_can_netdev_ops; | |
1013 | ||
1014 | return register_candev(dev); | |
1015 | } | |
1016 | ||
1017 | static int m_can_of_parse_mram(struct platform_device *pdev, | |
1018 | struct m_can_priv *priv) | |
1019 | { | |
1020 | struct device_node *np = pdev->dev.of_node; | |
1021 | struct resource *res; | |
1022 | void __iomem *addr; | |
1023 | u32 out_val[MRAM_CFG_LEN]; | |
962845da | 1024 | int i, start, end, ret; |
e0d1f481 DA |
1025 | |
1026 | /* message ram could be shared */ | |
1027 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); | |
1028 | if (!res) | |
1029 | return -ENODEV; | |
1030 | ||
1031 | addr = devm_ioremap(&pdev->dev, res->start, resource_size(res)); | |
1032 | if (!addr) | |
1033 | return -ENOMEM; | |
1034 | ||
1035 | /* get message ram configuration */ | |
1036 | ret = of_property_read_u32_array(np, "bosch,mram-cfg", | |
1037 | out_val, sizeof(out_val) / 4); | |
1038 | if (ret) { | |
1039 | dev_err(&pdev->dev, "can not get message ram configuration\n"); | |
1040 | return -ENODEV; | |
1041 | } | |
1042 | ||
1043 | priv->mram_base = addr; | |
1044 | priv->mcfg[MRAM_SIDF].off = out_val[0]; | |
1045 | priv->mcfg[MRAM_SIDF].num = out_val[1]; | |
1046 | priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off + | |
1047 | priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE; | |
1048 | priv->mcfg[MRAM_XIDF].num = out_val[2]; | |
1049 | priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off + | |
1050 | priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE; | |
1051 | priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK; | |
1052 | priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off + | |
1053 | priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE; | |
1054 | priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK; | |
1055 | priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off + | |
1056 | priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE; | |
1057 | priv->mcfg[MRAM_RXB].num = out_val[5]; | |
1058 | priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off + | |
1059 | priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE; | |
1060 | priv->mcfg[MRAM_TXE].num = out_val[6]; | |
1061 | priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off + | |
1062 | priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE; | |
1063 | priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK; | |
1064 | ||
1065 | dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n", | |
1066 | priv->mram_base, | |
1067 | priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num, | |
1068 | priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num, | |
1069 | priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num, | |
1070 | priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num, | |
1071 | priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num, | |
1072 | priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, | |
1073 | priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); | |
1074 | ||
962845da DA |
1075 | /* initialize the entire Message RAM in use to avoid possible |
1076 | * ECC/parity checksum errors when reading an uninitialized buffer | |
1077 | */ | |
1078 | start = priv->mcfg[MRAM_SIDF].off; | |
1079 | end = priv->mcfg[MRAM_TXB].off + | |
1080 | priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE; | |
1081 | for (i = start; i < end; i += 4) | |
1082 | writel(0x0, priv->mram_base + i); | |
1083 | ||
e0d1f481 DA |
1084 | return 0; |
1085 | } | |
1086 | ||
1087 | static int m_can_plat_probe(struct platform_device *pdev) | |
1088 | { | |
1089 | struct net_device *dev; | |
1090 | struct m_can_priv *priv; | |
1091 | struct resource *res; | |
1092 | void __iomem *addr; | |
1093 | struct clk *hclk, *cclk; | |
1094 | int irq, ret; | |
1095 | ||
1096 | hclk = devm_clk_get(&pdev->dev, "hclk"); | |
1097 | cclk = devm_clk_get(&pdev->dev, "cclk"); | |
1098 | if (IS_ERR(hclk) || IS_ERR(cclk)) { | |
1099 | dev_err(&pdev->dev, "no clock find\n"); | |
1100 | return -ENODEV; | |
1101 | } | |
1102 | ||
1103 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can"); | |
1104 | addr = devm_ioremap_resource(&pdev->dev, res); | |
1105 | irq = platform_get_irq_byname(pdev, "int0"); | |
1106 | if (IS_ERR(addr) || irq < 0) | |
1107 | return -EINVAL; | |
1108 | ||
1109 | /* allocate the m_can device */ | |
1110 | dev = alloc_m_can_dev(); | |
1111 | if (!dev) | |
1112 | return -ENOMEM; | |
1113 | ||
1114 | priv = netdev_priv(dev); | |
1115 | dev->irq = irq; | |
1116 | priv->base = addr; | |
1117 | priv->device = &pdev->dev; | |
1118 | priv->hclk = hclk; | |
1119 | priv->cclk = cclk; | |
1120 | priv->can.clock.freq = clk_get_rate(cclk); | |
1121 | ||
1122 | ret = m_can_of_parse_mram(pdev, priv); | |
1123 | if (ret) | |
1124 | goto failed_free_dev; | |
1125 | ||
1126 | platform_set_drvdata(pdev, dev); | |
1127 | SET_NETDEV_DEV(dev, &pdev->dev); | |
1128 | ||
1129 | ret = register_m_can_dev(dev); | |
1130 | if (ret) { | |
1131 | dev_err(&pdev->dev, "registering %s failed (err=%d)\n", | |
1132 | KBUILD_MODNAME, ret); | |
1133 | goto failed_free_dev; | |
1134 | } | |
1135 | ||
1136 | devm_can_led_init(dev); | |
1137 | ||
1138 | dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", | |
1139 | KBUILD_MODNAME, priv->base, dev->irq); | |
1140 | ||
1141 | return 0; | |
1142 | ||
1143 | failed_free_dev: | |
1144 | free_m_can_dev(dev); | |
1145 | return ret; | |
1146 | } | |
1147 | ||
1148 | static __maybe_unused int m_can_suspend(struct device *dev) | |
1149 | { | |
1150 | struct net_device *ndev = dev_get_drvdata(dev); | |
1151 | struct m_can_priv *priv = netdev_priv(ndev); | |
1152 | ||
1153 | if (netif_running(ndev)) { | |
1154 | netif_stop_queue(ndev); | |
1155 | netif_device_detach(ndev); | |
1156 | } | |
1157 | ||
1158 | /* TODO: enter low power */ | |
1159 | ||
1160 | priv->can.state = CAN_STATE_SLEEPING; | |
1161 | ||
1162 | return 0; | |
1163 | } | |
1164 | ||
1165 | static __maybe_unused int m_can_resume(struct device *dev) | |
1166 | { | |
1167 | struct net_device *ndev = dev_get_drvdata(dev); | |
1168 | struct m_can_priv *priv = netdev_priv(ndev); | |
1169 | ||
1170 | /* TODO: exit low power */ | |
1171 | ||
1172 | priv->can.state = CAN_STATE_ERROR_ACTIVE; | |
1173 | ||
1174 | if (netif_running(ndev)) { | |
1175 | netif_device_attach(ndev); | |
1176 | netif_start_queue(ndev); | |
1177 | } | |
1178 | ||
1179 | return 0; | |
1180 | } | |
1181 | ||
/* Counterpart to register_m_can_dev(). */
static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}
1186 | ||
/* Remove: unregister the netdev and release it.
 * No need to clear drvdata here - the driver core does that after
 * ->remove() returns.
 */
static int m_can_plat_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);

	free_m_can_dev(dev);

	return 0;
}
1198 | ||
/* System sleep PM callbacks (suspend/resume only). */
static const struct dev_pm_ops m_can_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};
1202 | ||
/* Device tree match table. */
static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);
1208 | ||
/* Platform driver glue. */
static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,
};
1218 | ||
/* Register the driver with the platform bus at module load. */
module_platform_driver(m_can_plat_driver);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");