// SPDX-License-Identifier: GPL-2.0
/*
 *  i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
 *
 *  Copyright (C) 2011 Weinmann Medical GmbH
 *  Author: Nikolaus Voss <n.voss@weinmann.de>
 *
 *  Evolved from original work by:
 *  Copyright (C) 2004 Rick Bronson
 *  Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
 *
 *  Borrowed heavily from original work by:
 *  Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dma-atmel.h>
#include <linux/pm_runtime.h>

#include "i2c-at91.h"

void at91_init_twi_bus_master(struct at91_twi_dev *dev)
{
	struct at91_twi_pdata *pdata = dev->pdata;
	u32 filtr = 0;

	/* FIFO should be enabled immediately after the software reset */
	if (dev->fifo_size)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_FIFOEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);

	/* enable digital filter */
	if (pdata->has_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT;

	/* enable advanced digital filter */
	if (pdata->has_adv_dig_filtr && dev->enable_dig_filt)
		filtr |= AT91_TWI_FILTR_FILT |
			 (AT91_TWI_FILTR_THRES(dev->filter_width) &
			  AT91_TWI_FILTR_THRES_MASK);

	/* enable analog filter */
	if (pdata->has_ana_filtr && dev->enable_ana_filt)
		filtr |= AT91_TWI_FILTR_PADFEN;

	if (filtr)
		at91_twi_write(dev, AT91_TWI_FILTR, filtr);
}

/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev)
{
	int ckdiv, cdiv, div, hold = 0, filter_width = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;
	int max_ckdiv = pdata->clk_max_div;
	struct i2c_timings timings, *t = &timings;

	i2c_parse_fw_timings(dev->dev, t, true);

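	/*
	 * Note: fls(div >> 8) picks the smallest ckdiv for which the
	 * remaining divider (cdiv = div >> ckdiv) still fits into the
	 * 8-bit CLDIV/CHDIV fields; larger ckdiv values trade clock
	 * resolution for range.
	 */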
	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * t->bus_freq_hz) - offset);
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	if (pdata->has_hold_field) {
		/*
		 * hold time = HOLD + 3 x T_peripheral_clock
		 * Use clk rate in kHz to prevent overflows when computing
		 * hold.
		 */
		hold = DIV_ROUND_UP(t->sda_hold_ns
				    * (clk_get_rate(dev->clk) / 1000), 1000000);
		hold -= 3;
		if (hold < 0)
			hold = 0;

		if (hold > AT91_TWI_CWGR_HOLD_MAX) {
			dev_warn(dev->dev,
				 "HOLD field set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_CWGR_HOLD_MAX, hold);
			hold = AT91_TWI_CWGR_HOLD_MAX;
		}
	}

	if (pdata->has_adv_dig_filtr) {
		/*
		 * filter width = 0 to AT91_TWI_FILTR_THRES_MAX
		 * peripheral clocks
		 */
		filter_width = DIV_ROUND_UP(t->digital_filter_width_ns
				* (clk_get_rate(dev->clk) / 1000), 1000000);
		if (filter_width > AT91_TWI_FILTR_THRES_MAX) {
			dev_warn(dev->dev,
				 "Filter threshold set to its maximum value (%d instead of %d)\n",
				 AT91_TWI_FILTR_THRES_MAX, filter_width);
			filter_width = AT91_TWI_FILTR_THRES_MAX;
		}
	}

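	/*
	 * CWGR layout: ckdiv is shifted into bits 16 and up, while the same
	 * cdiv value is programmed into both CHDIV (bits 15:8) and CLDIV
	 * (bits 7:0) to get a symmetric clock; HOLD lengthens the SDA hold
	 * time.
	 */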
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv
			    | AT91_TWI_CWGR_HOLD(hold);

	dev->filter_width = filter_width;

	dev_dbg(dev->dev, "cdiv %d ckdiv %d hold %d (%d ns), filter_width %d (%d ns)\n",
		cdiv, ckdiv, hold, t->sda_hold_ns, filter_width,
		t->digital_filter_width_ns);
}

static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg[0]),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}

static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (!dev->buf_len)
		return;

	/* 8bit write works with and without FIFO */
	writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0) {
		if (!dev->use_alt_cmd)
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
		at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
	}

	dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_write_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_TO_DEVICE);

	/*
	 * When this callback is called, THR/TX FIFO is likely not to be empty
	 * yet. So we have to wait for TXCOMP or NACK bits to be set into the
	 * Status Register to be sure that the STOP bit has been sent and the
	 * transfer is completed. The NACK interrupt has already been enabled,
	 * we just have to enable the TXCOMP one.
	 */
	at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	if (!dev->use_alt_cmd)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
}

static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;
	unsigned int sg_len = 1;

	if (!dev->buf_len)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size) {
		size_t part1_len, part2_len;
		struct scatterlist *sg;
		unsigned fifo_mr;

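		/*
		 * Presumably the TX FIFO is fed with 4-byte accesses here, so
		 * the bulk of the buffer (part 1) is kept 4-byte aligned and
		 * the remaining 1 to 3 bytes (part 2) go into a second sg
		 * entry that is transferred with byte accesses.
		 */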
		sg_len = 0;

		part1_len = dev->buf_len & ~0x3;
		if (part1_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part1_len;
			sg_dma_address(sg) = dma_addr;
		}

		part2_len = dev->buf_len & 0x3;
		if (part2_len) {
			sg = &dma->sg[sg_len++];
			sg_dma_len(sg) = part2_len;
			sg_dma_address(sg) = dma_addr + part1_len;
		}

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * written into the TX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_TXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	} else {
		sg_dma_len(&dma->sg[0]) = dev->buf_len;
		sg_dma_address(&dma->sg[0]) = dma_addr;
	}

	txdesc = dmaengine_prep_slave_sg(chan_tx, dma->sg, sg_len,
					 DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	/*
	 * If we are in this case, it means there is garbage data in RHR, so
	 * delete them.
	 */
	if (!dev->buf_len) {
		at91_twi_read(dev, AT91_TWI_RHR);
		return;
	}

	/* 8bit read works with and without FIFO */
	*dev->buf = readb_relaxed(dev->base + AT91_TWI_RHR);
	--dev->buf_len;

	/* return if aborting, we only needed to read RHR to clear RXRDY */
	if (dev->recv_len_abort)
		return;

	/* handle I2C_SMBUS_BLOCK_DATA */
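	/*
	 * For SMBus block reads the first received byte carries the payload
	 * length, so the real transfer length is only known at this point
	 * and buf_len/msg->len are adjusted accordingly below.
	 */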
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* ensure length byte is a valid value */
		if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) {
			dev->msg->flags &= ~I2C_M_RECV_LEN;
			dev->buf_len += *dev->buf;
			dev->msg->len = dev->buf_len + 1;
			dev_dbg(dev->dev, "received block length %zu\n",
				dev->buf_len);
		} else {
			/* abort and send the stop by reading one more byte */
			dev->recv_len_abort = true;
			dev->buf_len = 1;
		}
	}

	/* send stop if second but last byte has been read */
	if (!dev->use_alt_cmd && dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len);

	++dev->buf;
}

static void at91_twi_read_data_dma_callback(void *data)
{
	struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
	unsigned ier = AT91_TWI_TXCOMP;

	dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg[0]),
			 dev->buf_len, DMA_FROM_DEVICE);

	if (!dev->use_alt_cmd) {
		/* The last two bytes have to be read without using dma */
		dev->buf += dev->buf_len - 2;
		dev->buf_len = 2;
		ier |= AT91_TWI_RXRDY;
	}
	at91_twi_write(dev, AT91_TWI_IER, ier);
}

static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;
	size_t buf_len;

	buf_len = (dev->use_alt_cmd) ? dev->buf_len : dev->buf_len - 2;
	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, buf_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	if (dev->fifo_size && IS_ALIGNED(buf_len, 4)) {
		unsigned fifo_mr;

		/*
		 * DMA controller is triggered when at least 4 data can be
		 * read from the RX FIFO
		 */
		fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);
		fifo_mr &= ~AT91_TWI_FMR_RXRDYM_MASK;
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_FOUR_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);
	}

	sg_dma_len(&dma->sg[0]) = buf_len;
	sg_dma_address(&dma->sg[0]) = dma_addr;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}

static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;

	/*
	 * In reception, the behavior of the twi device (before sama5d2) is
	 * weird. There is some magic about the RXRDY flag! When a data byte
	 * has almost been received, the reception of a new one is anticipated
	 * if there is no stop command to send. That is the reason why we ask
	 * for sending the stop command not on the last data but on the second
	 * last one.
	 *
	 * Unfortunately, we could still have the RXRDY flag set even if the
	 * transfer is done and we have read the last data. It might happen
	 * when the i2c slave device sends data too quickly after receiving the
	 * ack from the master. The data has been almost received before having
	 * the order to send stop. In this case, sending the stop command could
	 * cause a RXRDY interrupt with a TXCOMP one. It is better to manage
	 * the RXRDY interrupt first in order to not keep garbage data in the
	 * Receive Holding Register for the next transfer.
	 */
	if (irqstatus & AT91_TWI_RXRDY) {
		/*
		 * Read all available bytes at once by polling RXRDY usable w/
		 * and w/o FIFO. With FIFO enabled we could also read RXFL and
		 * avoid polling RXRDY.
		 */
		do {
			at91_twi_read_next_byte(dev);
		} while (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY);
	}

	/*
	 * When a NACK condition is detected, the I2C controller sets the NACK,
	 * TXCOMP and TXRDY bits all together in the Status Register (SR).
	 *
	 * 1 - Handling NACK errors with CPU write transfer.
	 *
	 * In such case, we should not write the next byte into the Transmit
	 * Holding Register (THR) otherwise the I2C controller would start a new
	 * transfer and the I2C slave is likely to reply by another NACK.
	 *
	 * 2 - Handling NACK errors with DMA write transfer.
	 *
	 * By setting the TXRDY bit in the SR, the I2C controller also triggers
	 * the DMA controller to write the next data into the THR. Then the
	 * result depends on the hardware version of the I2C controller.
	 *
	 * 2a - Without support of the Alternative Command mode.
	 *
	 * This is the worst case: the DMA controller is triggered to write the
	 * next data into the THR, hence starting a new transfer: the I2C slave
	 * is likely to reply by another NACK.
	 * Concurrently, this interrupt handler is likely to be called to manage
	 * the first NACK before the I2C controller detects the second NACK and
	 * sets once again the NACK bit into the SR.
	 * When handling the first NACK, this interrupt handler disables the I2C
	 * controller interruptions, especially the NACK interrupt.
	 * Hence, the NACK bit is pending into the SR. This is why we should
	 * read the SR to clear all pending interrupts at the beginning of
	 * at91_do_twi_transfer() before actually starting a new transfer.
	 *
	 * 2b - With support of the Alternative Command mode.
	 *
	 * When a NACK condition is detected, the I2C controller also locks the
	 * THR (and sets the LOCK bit in the SR): even though the DMA controller
	 * is triggered by the TXRDY bit to write the next data into the THR,
	 * this data actually won't go on the I2C bus hence a second NACK is not
	 * generated.
	 */
	if (irqstatus & (AT91_TWI_TXCOMP | AT91_TWI_NACK)) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	} else if (irqstatus & AT91_TWI_TXRDY) {
		at91_twi_write_next_byte(dev);
	}

	/* catch error flags */
	dev->transfer_status |= status;

	return IRQ_HANDLED;
}

static int at91_do_twi_transfer(struct at91_twi_dev *dev)
{
	int ret;
	unsigned long time_left;
	bool has_unre_flag = dev->pdata->has_unre_flag;
	bool has_alt_cmd = dev->pdata->has_alt_cmd;
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	/*
	 * WARNING: the TXCOMP bit in the Status Register is NOT a clear on
	 * read flag but shows the state of the transmission at the time the
	 * Status Register is read. According to the programmer datasheet,
	 * TXCOMP is set when both holding register and internal shifter are
	 * empty and STOP condition has been sent.
	 * Consequently, we should enable NACK interrupt rather than TXCOMP to
	 * detect transmission failure.
	 * Indeed let's take the case of an i2c write command using DMA.
	 * Whenever the slave doesn't acknowledge a byte, the LOCK, NACK and
	 * TXCOMP bits are set together into the Status Register.
	 * LOCK is a clear on write bit, which is set to prevent the DMA
	 * controller from sending new data on the i2c bus after a NACK
	 * condition has happened. Once locked, this i2c peripheral stops
	 * triggering the DMA controller for new data but it is more than
	 * likely that a new DMA transaction is already in progress, writing
	 * into the Transmit Holding Register. Since the peripheral is locked,
	 * these new data won't be sent to the i2c bus but they will remain
	 * into the Transmit Holding Register, so the TXCOMP bit is cleared.
	 * Then when the interrupt handler is called, the Status Register is
	 * read: the TXCOMP bit is clear but the NACK bit is still set. The
	 * driver manages the error properly, without waiting for timeout.
	 * This case can be reproduced easily when writing into an at24 eeprom.
	 *
	 * Besides, the TXCOMP bit is already set before the i2c transaction
	 * has been started. For read transactions, this bit is cleared when
	 * writing the START bit into the Control Register. So the
	 * corresponding interrupt can safely be enabled just after.
	 * However for write transactions managed by the CPU, we first write
	 * into THR, so TXCOMP is cleared. Then we can safely enable the TXCOMP
	 * interrupt. If the TXCOMP interrupt were enabled before writing into
	 * THR, the interrupt handler would be called immediately and the i2c
	 * command would be reported as completed.
	 * Also when a write transaction is managed by the DMA controller,
	 * enabling the TXCOMP interrupt in this function may lead to a race
	 * condition since we don't know whether the TXCOMP interrupt is enabled
	 * before or after the DMA has started to write into THR. So the TXCOMP
	 * interrupt is enabled later by at91_twi_write_data_dma_callback().
	 * Immediately after in that DMA callback, if the alternative command
	 * mode is not used, we still need to send the STOP condition manually
	 * writing the corresponding bit into the Control Register.
	 */

	dev_dbg(dev->dev, "transfer: %s %zu bytes.\n",
		(dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);

	reinit_completion(&dev->cmd_complete);
	dev->transfer_status = 0;

	/* Clear pending interrupts, such as NACK. */
	at91_twi_read(dev, AT91_TWI_SR);

	if (dev->fifo_size) {
		unsigned fifo_mr = at91_twi_read(dev, AT91_TWI_FMR);

		/* Reset FIFO mode register */
		fifo_mr &= ~(AT91_TWI_FMR_TXRDYM_MASK |
			     AT91_TWI_FMR_RXRDYM_MASK);
		fifo_mr |= AT91_TWI_FMR_TXRDYM(AT91_TWI_ONE_DATA);
		fifo_mr |= AT91_TWI_FMR_RXRDYM(AT91_TWI_ONE_DATA);
		at91_twi_write(dev, AT91_TWI_FMR, fifo_mr);

		/* Flush FIFOs */
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_RHRCLR);
	}

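	/*
	 * A zero-length message is sent as an SMBus Quick command (address
	 * phase only), which is typically what bus probing relies on.
	 */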
	if (dev->buf_len == 0) {
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
		at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
	} else if (dev->msg->flags & I2C_M_RD) {
		unsigned start_flags = AT91_TWI_START;

		/* if only one byte is to be read, immediately stop transfer */
		if (!dev->use_alt_cmd && dev->buf_len <= 1 &&
		    !(dev->msg->flags & I2C_M_RECV_LEN))
			start_flags |= AT91_TWI_STOP;
		at91_twi_write(dev, AT91_TWI_CR, start_flags);
		/*
		 * When using dma without alternative command mode, the last
		 * byte has to be read manually in order to not send the stop
		 * command too late and then to receive extra data.
		 * In practice, there are some issues if you use the dma to
		 * read n-1 bytes because of latency.
		 * Reading n-2 bytes with dma and the two last ones manually
		 * seems to be the best solution.
		 */
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_read_data_dma(dev);
		} else {
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP |
				       AT91_TWI_NACK |
				       AT91_TWI_RXRDY);
		}
	} else {
		if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
			at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_NACK);
			at91_twi_write_data_dma(dev);
		} else {
			at91_twi_write_next_byte(dev);
			at91_twi_write(dev, AT91_TWI_IER,
				       AT91_TWI_TXCOMP | AT91_TWI_NACK |
				       (dev->buf_len ? AT91_TWI_TXRDY : 0));
		}
	}

	time_left = wait_for_completion_timeout(&dev->cmd_complete,
						dev->adapter.timeout);
	if (time_left == 0) {
		dev->transfer_status |= at91_twi_read(dev, AT91_TWI_SR);
		dev_err(dev->dev, "controller timed out\n");
		at91_init_twi_bus(dev);
		ret = -ETIMEDOUT;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_NACK) {
		dev_dbg(dev->dev, "received nack\n");
		ret = -EREMOTEIO;
		goto error;
	}
	if (dev->transfer_status & AT91_TWI_OVRE) {
		dev_err(dev->dev, "overrun while reading\n");
		ret = -EIO;
		goto error;
	}
	if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
		dev_err(dev->dev, "underrun while writing\n");
		ret = -EIO;
		goto error;
	}
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_err(dev->dev, "tx locked\n");
		ret = -EIO;
		goto error;
	}
	if (dev->recv_len_abort) {
		dev_err(dev->dev, "invalid smbus block length recvd\n");
		ret = -EPROTO;
		goto error;
	}

	dev_dbg(dev->dev, "transfer complete\n");

	return 0;

error:
	/* first stop DMA transfer if still in progress */
	at91_twi_dma_cleanup(dev);
	/* then flush THR/FIFO and unlock TX if locked */
	if ((has_alt_cmd || dev->fifo_size) &&
	    (dev->transfer_status & AT91_TWI_LOCK)) {
		dev_dbg(dev->dev, "unlock tx\n");
		at91_twi_write(dev, AT91_TWI_CR,
			       AT91_TWI_THRCLR | AT91_TWI_LOCKCLR);
	}

	if (rinfo->get_sda && !(rinfo->get_sda(&dev->adapter))) {
		dev_dbg(dev->dev,
			"SDA is down; clear bus using gpio\n");
		i2c_recover_bus(&dev->adapter);
	}

	return ret;
}

static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);
	int ret;
	unsigned int_addr_flag = 0;
	struct i2c_msg *m_start = msg;
	bool is_read;

	dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);

	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	if (num == 2) {
		int internal_address = 0;
		int i;

		/* 1st msg is put into the internal address, start with 2nd */
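		/*
		 * Note: each internal-address byte accumulated below also
		 * bumps int_addr_flag by AT91_TWI_IADRSZ_1, i.e. the IADRSZ
		 * size field that is later OR'ed into the Master Mode
		 * Register.
		 */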
		m_start = &msg[1];

		for (i = 0; i < msg->len; ++i) {
			const unsigned addr = msg->buf[msg->len - 1 - i];

			internal_address |= addr << (8 * i);
			int_addr_flag += AT91_TWI_IADRSZ_1;
		}

		at91_twi_write(dev, AT91_TWI_IADR, internal_address);
	}

	dev->use_alt_cmd = false;
	is_read = (m_start->flags & I2C_M_RD);
	if (dev->pdata->has_alt_cmd) {
		if (m_start->len > 0 &&
		    m_start->len < AT91_I2C_MAX_ALT_CMD_DATA_SIZE) {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMEN);
			at91_twi_write(dev, AT91_TWI_ACR,
				       AT91_TWI_ACR_DATAL(m_start->len) |
				       ((is_read) ? AT91_TWI_ACR_DIR : 0));
			dev->use_alt_cmd = true;
		} else {
			at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_ACMDIS);
		}
	}

	at91_twi_write(dev, AT91_TWI_MMR,
		       (m_start->addr << 16) |
		       int_addr_flag |
		       ((!dev->use_alt_cmd && is_read) ? AT91_TWI_MREAD : 0));

	dev->buf_len = m_start->len;
	dev->buf = m_start->buf;
	dev->msg = m_start;
	dev->recv_len_abort = false;

	ret = at91_do_twi_transfer(dev);

	ret = (ret < 0) ? ret : num;
out:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

/*
 * The hardware can handle at most two messages concatenated by a
 * repeated start via its internal address feature.
 */
static const struct i2c_adapter_quirks at91_twi_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_WRITE_FIRST | I2C_AQ_COMB_SAME_ADDR,
	.max_comb_1st_msg_len = 3,
};

static u32 at91_twi_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
		| I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer	= at91_twi_xfer,
	.functionality	= at91_twi_func,
};

static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	enum dma_slave_buswidth addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	/*
	 * The actual width of the access will be chosen in
	 * dmaengine_prep_slave_sg():
	 * for each buffer in the scatter-gather list, if its size is aligned
	 * to addr_width then addr_width accesses will be performed to transfer
	 * the buffer. On the other hand, if the buffer size is not aligned to
	 * addr_width then the buffer is transferred using single byte accesses.
	 * Please refer to the Atmel eXtended DMA controller driver.
	 * When FIFOs are used, the TXRDYM threshold can always be set to
	 * trigger the XDMAC when at least 4 data can be written into the TX
	 * FIFO, even if single byte accesses are performed.
	 * However the RXRDYM threshold must be set to fit the access width,
	 * deduced from buffer length, so the XDMAC is triggered properly to
	 * read data from the RX FIFO.
	 */
	if (dev->fifo_size)
		addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = addr_width;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = addr_width;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma->chan_tx = dma_request_chan(dev->dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		dma->chan_tx = NULL;
		goto error;
	}

	dma->chan_rx = dma_request_chan(dev->dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		dma->chan_rx = NULL;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(dma->sg, 2);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;
	dev->use_dma = true;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));

	return ret;

error:
	if (ret != -EPROBE_DEFER)
		dev_info(dev->dev, "can't get DMA channel, continue without DMA support\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}

static void at91_prepare_twi_recovery(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);
}

static void at91_unprepare_twi_recovery(struct i2c_adapter *adap)
{
	struct at91_twi_dev *dev = i2c_get_adapdata(adap);

	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
}

static int at91_init_twi_recovery_info(struct platform_device *pdev,
				       struct at91_twi_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;

	dev->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (!dev->pinctrl || IS_ERR(dev->pinctrl)) {
		dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n");
		return PTR_ERR(dev->pinctrl);
	}

	dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl,
							 PINCTRL_STATE_DEFAULT);
	dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl,
						      "gpio");
	if (IS_ERR(dev->pinctrl_pins_default) ||
	    IS_ERR(dev->pinctrl_pins_gpio)) {
		dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n");
		return -EINVAL;
	}

	/*
	 * pins will be taken as GPIO, so we might as well inform pinctrl about
	 * this and move the state to GPIO
	 */
	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio);

	rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
	if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

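	/*
	 * SCL is requested as an open-drain output driven high so that the
	 * generic recovery helper can pulse the clock line while the slave
	 * can still stretch it.
	 */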
	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl",
					  GPIOD_OUT_HIGH_OPEN_DRAIN);
	if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	if (IS_ERR(rinfo->sda_gpiod) ||
	    IS_ERR(rinfo->scl_gpiod)) {
		dev_info(&pdev->dev, "recovery information incomplete\n");
		if (!IS_ERR(rinfo->sda_gpiod)) {
			gpiod_put(rinfo->sda_gpiod);
			rinfo->sda_gpiod = NULL;
		}
		if (!IS_ERR(rinfo->scl_gpiod)) {
			gpiod_put(rinfo->scl_gpiod);
			rinfo->scl_gpiod = NULL;
		}
		pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);
		return -EINVAL;
	}

	/* change the state of the pins back to their default state */
	pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default);

	dev_info(&pdev->dev, "using scl, sda for recovery\n");

	rinfo->prepare_recovery = at91_prepare_twi_recovery;
	rinfo->unprepare_recovery = at91_unprepare_twi_recovery;
	rinfo->recover_bus = i2c_generic_scl_recovery;
	dev->adapter.bus_recovery_info = rinfo;

	return 0;
}

int at91_twi_probe_master(struct platform_device *pdev,
			  u32 phy_addr, struct at91_twi_dev *dev)
{
	int rc;

	init_completion(&dev->cmd_complete);

	rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
			      dev_name(dev->dev), dev);
	if (rc) {
		dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
		return rc;
	}

	if (dev->dev->of_node) {
		rc = at91_twi_configure_dma(dev, phy_addr);
		if (rc == -EPROBE_DEFER)
			return rc;
	}

	if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
				  &dev->fifo_size)) {
		dev_info(dev->dev, "Using FIFO (%u data)\n", dev->fifo_size);
	}

	dev->enable_dig_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-digital-filter");

	dev->enable_ana_filt = of_property_read_bool(pdev->dev.of_node,
						     "i2c-analog-filter");
	at91_calc_twi_clock(dev);

	rc = at91_init_twi_recovery_info(pdev, dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	dev->adapter.algo = &at91_twi_algorithm;
	dev->adapter.quirks = &at91_twi_quirks;
;