// SPDX-License-Identifier: GPL-2.0
/*
 * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 * Copyright (C) 2011 Weinmann Medical GmbH
 * Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 * Evolved from original work by:
 * Copyright (C) 2004 Rick Bronson
 * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 * Borrowed heavily from original work by:
 * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			  AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
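/*
 * Worked example of the formula above, with hypothetical numbers (not
 * taken from any datasheet): for a 66 MHz peripheral clock, a 100 kHz
 * bus and offset = 4:
 *   div   = DIV_ROUND_UP(66000000, 2 * 100000) - 4 = 326
 *   ckdiv = fls(326 >> 8) = 1
 *   cdiv  = 326 >> 1 = 163
 * so twi_clk = 66 MHz / (2 * (163 * 2 + 4)) = 100 kHz.
 */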
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;
		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}
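	/*
	 * Hypothetical example of the hold computation above: for
	 * sda_hold_ns = 300 and a 66 MHz peripheral clock,
	 * hold = DIV_ROUND_UP(300 * 66000, 1000000) - 3 = 20 - 3 = 17.
	 */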

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, the THR/TX FIFO is likely not to be
	 * empty yet. So we have to wait for the TXCOMP or NACK bits to be set
	 * in the Status Register to be sure that the STOP bit has been sent
	 * and the transfer is completed. The NACK interrupt has already been
	 * enabled, we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}
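		/*
		 * Hypothetical example of the split above: for
		 * dev->buf_len = 11, part1_len = 8 (the word-aligned part,
		 * first sg entry) and part2_len = 3 (the tail, transferred
		 * with single byte accesses from dma_addr + 8).
		 */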

		/*
		 * DMA controller is triggered when at least 4 bytes of data
		 * can be written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are in this case, it means there is garbage data in RHR, so
	 * discard it.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if the second to last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 bytes of data
		 * can be read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a data byte
	 * has been almost received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is the reason why we ask
	 * for sending the stop command not on the last data byte but on the
	 * second to last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data byte. It might
	 * happen when the i2c slave device sends data too quickly after
	 * receiving the ack from the master. The data byte has been almost
	 * received before having the order to send stop. In this case,
	 * sending the stop command could cause a RXRDY interrupt with a
	 * TXCOMP one. It is better to manage the RXRDY interrupt first in
	 * order to not keep garbage data in the Receive Holding Register for
	 * the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY usable w/
		 * and w/o FIFO. With FIFO enabled we could also read RXFL and
		 * avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such a case, we should not write the next byte into the Transmit
	 * Holding Register (THR), otherwise the I2C controller would start a
	 * new transfer and the I2C slave is likely to reply with another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply with another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets the NACK bit in the SR once again.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interruptions, especially the NACK interrupt.
	 * Hence, the NACK bit is left pending in the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus, hence a second NACK is
	 * not generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both the holding register and the internal
	 * shifter are empty and the STOP condition has been sent.
	 * Consequently, we should enable the NACK interrupt rather than TXCOMP
	 * to detect transmission failure.
	 * Indeed, let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together in the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * in the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * by writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

	if (!dev->buf_len) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
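		/*
		 * Hypothetical example: for a 32-byte DMA read without
		 * alternative command mode, 30 bytes are read by DMA, then
		 * the CPU reads the last two; the STOP command is sent once
		 * only the final byte remains.
		 */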
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	if (rinfo->get_sda && !(rinfo->get_sda(&dev->adapter))) {
		dev_dbg(dev->dev,
			"SDA is down; clear bus using gpio\n");
		i2c_recover_bus(&dev->adapter);
	}

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
		m_start = &msg[1];
		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}
		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}
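	/*
	 * Hypothetical example of the internal address packing above: for a
	 * first message with msg->buf = {0x12, 0x34} and msg->len = 2,
	 * internal_address ends up as 0x1234 and int_addr_flag selects a
	 * 2-byte IADR size.
	 */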

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

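/*
 * Example of a combined transfer these quirks accept (a sketch, not part
 * of the driver): a register-address write of at most 3 bytes, followed
 * by a repeated-start read from the same slave address.
 *
 *	u8 reg = 0x01, val;
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &reg },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
 *	};
 *	i2c_transfer(adapter, msgs, 2);
 */
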
static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer = at91_twi_xfer,
	.functionality = at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 bytes of data can be written into
	 * the TX FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
}

static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	dev->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(dev->pinctrl);
	}

	dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
							 PINCTRL_STATE_DEFAULT);
	dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
						      "gpio");
	if (IS_ERR(dev->pinctrl_pins_default) ||
	    IS_ERR(dev->pinctrl_pins_gpio)) {
		dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
		return -EINVAL;
	}

	/*
	 * pins will be taken as GPIO, so we might as well inform pinctrl about
	 * this and move the state to GPIO
	 */
	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);

	rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
	if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
					  GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (IS_ERR(rinfo->sda_gpiod) ||
	    IS_ERR(rinfo->scl_gpiod)) {
		dev_info(&pdev->dev, "recovery information incomplete\n");
		if (!IS_ERR(rinfo->sda_gpiod)) {
			gpiod_put(rinfo->sda_gpiod);
			rinfo->sda_gpiod = NULL;
		}
		if (!IS_ERR(rinfo->scl_gpiod)) {
			gpiod_put(rinfo->scl_gpiod);
			rinfo->scl_gpiod = NULL;
		}
		pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
		return -EINVAL;
	}

	/* change the state of the pins back to their default state */
	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);

	dev_info(&pdev->dev, "using scl, sda for recovery\n");

	rinfo->prepare_recovery = at91_prepare_twi_recovery;
	rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
	rinfo->recover_bus = i2c_generic_scl_recovery;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;

	return 0;
}