// SPDX-License-Identifier: GPL-2.0+
/*
 * This is the i.MX low power I2C controller driver.
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define DRIVER_NAME "imx-lpi2c"

#define LPI2C_PARAM 0x04	/* i2c RX/TX FIFO size */
#define LPI2C_MCR 0x10		/* i2c control register */
#define LPI2C_MSR 0x14		/* i2c status register */
#define LPI2C_MIER 0x18		/* i2c interrupt enable */
#define LPI2C_MDER 0x1C		/* i2c DMA enable */
#define LPI2C_MCFGR0 0x20	/* i2c master configuration */
#define LPI2C_MCFGR1 0x24	/* i2c master configuration */
#define LPI2C_MCFGR2 0x28	/* i2c master configuration */
#define LPI2C_MCFGR3 0x2C	/* i2c master configuration */
#define LPI2C_MCCR0 0x48	/* i2c master clk configuration */
#define LPI2C_MCCR1 0x50	/* i2c master clk configuration */
#define LPI2C_MFCR 0x58		/* i2c master FIFO control */
#define LPI2C_MFSR 0x5C		/* i2c master FIFO status */
#define LPI2C_MTDR 0x60		/* i2c master TX data register */
#define LPI2C_MRDR 0x70		/* i2c master RX data register */

#define LPI2C_SCR 0x110		/* i2c target control register */
#define LPI2C_SSR 0x114		/* i2c target status register */
#define LPI2C_SIER 0x118	/* i2c target interrupt enable */
#define LPI2C_SDER 0x11C	/* i2c target DMA enable */
#define LPI2C_SCFGR0 0x120	/* i2c target configuration */
#define LPI2C_SCFGR1 0x124	/* i2c target configuration */
#define LPI2C_SCFGR2 0x128	/* i2c target configuration */
#define LPI2C_SAMR 0x140	/* i2c target address match */
#define LPI2C_SASR 0x150	/* i2c target address status */
#define LPI2C_STAR 0x154	/* i2c target transmit ACK */
#define LPI2C_STDR 0x160	/* i2c target transmit data */
#define LPI2C_SRDR 0x170	/* i2c target receive data */
#define LPI2C_SRDROR 0x178	/* i2c target receive data read only */

/* i2c command */
#define TRAN_DATA 0X00
#define RECV_DATA 0X01
#define GEN_STOP 0X02
#define RECV_DISCARD 0X03
#define GEN_START 0X04
#define START_NACK 0X05
#define START_HIGH 0X06
#define START_HIGH_NACK 0X07

#define MCR_MEN BIT(0)
#define MCR_RST BIT(1)
#define MCR_DOZEN BIT(2)
#define MCR_DBGEN BIT(3)
#define MCR_RTF BIT(8)
#define MCR_RRF BIT(9)
#define MSR_TDF BIT(0)
#define MSR_RDF BIT(1)
#define MSR_SDF BIT(9)
#define MSR_NDF BIT(10)
#define MSR_ALF BIT(11)
#define MSR_MBF BIT(24)
#define MSR_BBF BIT(25)
#define MIER_TDIE BIT(0)
#define MIER_RDIE BIT(1)
#define MIER_SDIE BIT(9)
#define MIER_NDIE BIT(10)
#define MCFGR1_AUTOSTOP BIT(8)
#define MCFGR1_IGNACK BIT(9)
#define MRDR_RXEMPTY BIT(14)
#define MDER_TDDE BIT(0)
#define MDER_RDDE BIT(1)

#define SCR_SEN BIT(0)
#define SCR_RST BIT(1)
#define SCR_FILTEN BIT(4)
#define SCR_RTF BIT(8)
#define SCR_RRF BIT(9)
#define SSR_TDF BIT(0)
#define SSR_RDF BIT(1)
#define SSR_AVF BIT(2)
#define SSR_TAF BIT(3)
#define SSR_RSF BIT(8)
#define SSR_SDF BIT(9)
#define SSR_BEF BIT(10)
#define SSR_FEF BIT(11)
#define SSR_SBF BIT(24)
#define SSR_BBF BIT(25)
#define SSR_CLEAR_BITS (SSR_RSF | SSR_SDF | SSR_BEF | SSR_FEF)
#define SIER_TDIE BIT(0)
#define SIER_RDIE BIT(1)
#define SIER_AVIE BIT(2)
#define SIER_TAIE BIT(3)
#define SIER_RSIE BIT(8)
#define SIER_SDIE BIT(9)
#define SIER_BEIE BIT(10)
#define SIER_FEIE BIT(11)
#define SIER_AM0F BIT(12)
#define SCFGR1_RXSTALL BIT(1)
#define SCFGR1_TXDSTALL BIT(2)
#define SCFGR2_FILTSDA_SHIFT 24
#define SCFGR2_FILTSCL_SHIFT 16
#define SCFGR2_CLKHOLD(x) (x)
#define SCFGR2_FILTSDA(x) ((x) << SCFGR2_FILTSDA_SHIFT)
#define SCFGR2_FILTSCL(x) ((x) << SCFGR2_FILTSCL_SHIFT)
#define SASR_READ_REQ 0x1
#define SLAVE_INT_FLAG (SIER_TDIE | SIER_RDIE | SIER_AVIE | \
			SIER_SDIE | SIER_BEIE)

#define I2C_CLK_RATIO 2
#define CHUNK_DATA 256

#define I2C_PM_TIMEOUT 10 /* ms */
#define I2C_DMA_THRESHOLD 8 /* bytes */

enum lpi2c_imx_mode {
	STANDARD,	/* 100+Kbps */
	FAST,		/* 400+Kbps */
	FAST_PLUS,	/* 1.0+Mbps */
	HS,		/* 3.4+Mbps */
	ULTRA_FAST,	/* 5.0+Mbps */
};

enum lpi2c_imx_pincfg {
	TWO_PIN_OD,
	TWO_PIN_OO,
	TWO_PIN_PP,
	FOUR_PIN_PP,
};

struct lpi2c_imx_dma {
	bool using_pio_mode;
	u8 rx_cmd_buf_len;
	u8 *dma_buf;
	u16 *rx_cmd_buf;
	unsigned int dma_len;
	unsigned int tx_burst_num;
	unsigned int rx_burst_num;
	unsigned long dma_msg_flag;
	resource_size_t phy_addr;
	dma_addr_t dma_tx_addr;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_data_dir;
	enum dma_transfer_direction dma_transfer_dir;
	struct dma_chan *chan_tx;
	struct dma_chan *chan_rx;
};

struct lpi2c_imx_struct {
	struct i2c_adapter adapter;
	int num_clks;
	struct clk_bulk_data *clks;
	void __iomem *base;
	__u8 *rx_buf;
	__u8 *tx_buf;
	struct completion complete;
	unsigned long rate_per;
	unsigned int msglen;
	unsigned int delivered;
	unsigned int block_data;
	unsigned int bitrate;
	unsigned int txfifosize;
	unsigned int rxfifosize;
	enum lpi2c_imx_mode mode;
	struct i2c_bus_recovery_info rinfo;
	bool can_use_dma;
	struct lpi2c_imx_dma *dma;
	struct i2c_client *target;
};

static void lpi2c_imx_intctrl(struct lpi2c_imx_struct *lpi2c_imx,
			      unsigned int enable)
{
	writel(enable, lpi2c_imx->base + LPI2C_MIER);
}

static int lpi2c_imx_bus_busy(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long orig_jiffies = jiffies;
	unsigned int temp;

	while (1) {
		temp = readl(lpi2c_imx->base + LPI2C_MSR);

		/* check for arbitration lost, clear if set */
		if (temp & MSR_ALF) {
			writel(temp, lpi2c_imx->base + LPI2C_MSR);
			return -EAGAIN;
		}

		if (temp & (MSR_BBF | MSR_MBF))
			break;

		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
			dev_dbg(&lpi2c_imx->adapter.dev, "bus not work\n");
			if (lpi2c_imx->adapter.bus_recovery_info)
				i2c_recover_bus(&lpi2c_imx->adapter);
			return -ETIMEDOUT;
		}
		schedule();
	}

	return 0;
}

static void lpi2c_imx_set_mode(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int bitrate = lpi2c_imx->bitrate;
	enum lpi2c_imx_mode mode;

	if (bitrate < I2C_MAX_FAST_MODE_FREQ)
		mode = STANDARD;
	else if (bitrate < I2C_MAX_FAST_MODE_PLUS_FREQ)
		mode = FAST;
	else if (bitrate < I2C_MAX_HIGH_SPEED_MODE_FREQ)
		mode = FAST_PLUS;
	else if (bitrate < I2C_MAX_ULTRA_FAST_MODE_FREQ)
		mode = HS;
	else
		mode = ULTRA_FAST;

	lpi2c_imx->mode = mode;
}

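/*
 * Illustrative mapping (not part of the original source): with the
 * thresholds above, a 100 kHz bus selects STANDARD, while a 400 kHz bus
 * selects FAST, since 400000 is not less than I2C_MAX_FAST_MODE_FREQ.
 */
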
static int lpi2c_imx_start(struct lpi2c_imx_struct *lpi2c_imx,
			   struct i2c_msg *msgs)
{
	unsigned int temp;

	temp = readl(lpi2c_imx->base + LPI2C_MCR);
	temp |= MCR_RRF | MCR_RTF;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);
	writel(0x7f00, lpi2c_imx->base + LPI2C_MSR);

	temp = i2c_8bit_addr_from_msg(msgs) | (GEN_START << 8);
	writel(temp, lpi2c_imx->base + LPI2C_MTDR);

	return lpi2c_imx_bus_busy(lpi2c_imx);
}

static void lpi2c_imx_stop(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long orig_jiffies = jiffies;
	unsigned int temp;

	writel(GEN_STOP << 8, lpi2c_imx->base + LPI2C_MTDR);

	do {
		temp = readl(lpi2c_imx->base + LPI2C_MSR);
		if (temp & MSR_SDF)
			break;

		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
			dev_dbg(&lpi2c_imx->adapter.dev, "stop timeout\n");
			if (lpi2c_imx->adapter.bus_recovery_info)
				i2c_recover_bus(&lpi2c_imx->adapter);
			break;
		}
		schedule();

	} while (1);
}

/* CLKLO = I2C_CLK_RATIO * CLKHI, SETHOLD = CLKHI, DATAVD = CLKHI/2 */
static int lpi2c_imx_config(struct lpi2c_imx_struct *lpi2c_imx)
{
	u8 prescale, filt, sethold, datavd;
	unsigned int clk_rate, clk_cycle, clkhi, clklo;
	enum lpi2c_imx_pincfg pincfg;
	unsigned int temp;

	lpi2c_imx_set_mode(lpi2c_imx);

	clk_rate = lpi2c_imx->rate_per;

	if (lpi2c_imx->mode == HS || lpi2c_imx->mode == ULTRA_FAST)
		filt = 0;
	else
		filt = 2;

	for (prescale = 0; prescale <= 7; prescale++) {
		clk_cycle = clk_rate / ((1 << prescale) * lpi2c_imx->bitrate)
			    - 3 - (filt >> 1);
		clkhi = DIV_ROUND_UP(clk_cycle, I2C_CLK_RATIO + 1);
		clklo = clk_cycle - clkhi;
		if (clklo < 64)
			break;
	}

	if (prescale > 7)
		return -EINVAL;

	/* set MCFGR1: PINCFG, PRESCALE, IGNACK */
	if (lpi2c_imx->mode == ULTRA_FAST)
		pincfg = TWO_PIN_OO;
	else
		pincfg = TWO_PIN_OD;
	temp = prescale | pincfg << 24;

	if (lpi2c_imx->mode == ULTRA_FAST)
		temp |= MCFGR1_IGNACK;

	writel(temp, lpi2c_imx->base + LPI2C_MCFGR1);

	/* set MCFGR2: FILTSDA, FILTSCL */
	temp = (filt << 16) | (filt << 24);
	writel(temp, lpi2c_imx->base + LPI2C_MCFGR2);

	/* set MCCR: DATAVD, SETHOLD, CLKHI, CLKLO */
	sethold = clkhi;
	datavd = clkhi >> 1;
	temp = datavd << 24 | sethold << 16 | clkhi << 8 | clklo;

	if (lpi2c_imx->mode == HS)
		writel(temp, lpi2c_imx->base + LPI2C_MCCR1);
	else
		writel(temp, lpi2c_imx->base + LPI2C_MCCR0);

	return 0;
}

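/*
 * Illustrative timing example (not part of the original source),
 * assuming a 24 MHz functional clock and a 400 kHz bus: with filt = 2
 * the loop above settles on prescale = 0, since
 * clk_cycle = 24000000 / 400000 - 3 - 1 = 56,
 * clkhi = DIV_ROUND_UP(56, 3) = 19 and clklo = 56 - 19 = 37, which is
 * already below 64. SETHOLD = 19 and DATAVD = 9 then go into MCCR0.
 */
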
static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int temp;
	int ret;

	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
	if (ret < 0)
		return ret;

	temp = MCR_RST;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);
	writel(0, lpi2c_imx->base + LPI2C_MCR);

	ret = lpi2c_imx_config(lpi2c_imx);
	if (ret)
		goto rpm_put;

	temp = readl(lpi2c_imx->base + LPI2C_MCR);
	temp |= MCR_MEN;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);

	return 0;

rpm_put:
	pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);

	return ret;
}

static int lpi2c_imx_master_disable(struct lpi2c_imx_struct *lpi2c_imx)
{
	u32 temp;

	temp = readl(lpi2c_imx->base + LPI2C_MCR);
	temp &= ~MCR_MEN;
	writel(temp, lpi2c_imx->base + LPI2C_MCR);

	pm_runtime_mark_last_busy(lpi2c_imx->adapter.dev.parent);
	pm_runtime_put_autosuspend(lpi2c_imx->adapter.dev.parent);

	return 0;
}

static int lpi2c_imx_pio_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long time_left;

	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, HZ);

	return time_left ? 0 : -ETIMEDOUT;
}

static int lpi2c_imx_txfifo_empty(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long orig_jiffies = jiffies;
	u32 txcnt;

	do {
		txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;

		if (readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) {
			dev_dbg(&lpi2c_imx->adapter.dev, "NDF detected\n");
			return -EIO;
		}

		if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) {
			dev_dbg(&lpi2c_imx->adapter.dev, "txfifo empty timeout\n");
			if (lpi2c_imx->adapter.bus_recovery_info)
				i2c_recover_bus(&lpi2c_imx->adapter);
			return -ETIMEDOUT;
		}
		schedule();

	} while (txcnt);

	return 0;
}

static void lpi2c_imx_set_tx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
{
	writel(lpi2c_imx->txfifosize >> 1, lpi2c_imx->base + LPI2C_MFCR);
}

static void lpi2c_imx_set_rx_watermark(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int temp, remaining;

	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;

	if (remaining > (lpi2c_imx->rxfifosize >> 1))
		temp = lpi2c_imx->rxfifosize >> 1;
	else
		temp = 0;

	writel(temp << 16, lpi2c_imx->base + LPI2C_MFCR);
}

static void lpi2c_imx_write_txfifo(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int data, txcnt;

	txcnt = readl(lpi2c_imx->base + LPI2C_MFSR) & 0xff;

	while (txcnt < lpi2c_imx->txfifosize) {
		if (lpi2c_imx->delivered == lpi2c_imx->msglen)
			break;

		data = lpi2c_imx->tx_buf[lpi2c_imx->delivered++];
		writel(data, lpi2c_imx->base + LPI2C_MTDR);
		txcnt++;
	}

	if (lpi2c_imx->delivered < lpi2c_imx->msglen)
		lpi2c_imx_intctrl(lpi2c_imx, MIER_TDIE | MIER_NDIE);
	else
		complete(&lpi2c_imx->complete);
}

static void lpi2c_imx_read_rxfifo(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int blocklen, remaining;
	unsigned int temp, data;

	do {
		data = readl(lpi2c_imx->base + LPI2C_MRDR);
		if (data & MRDR_RXEMPTY)
			break;

		lpi2c_imx->rx_buf[lpi2c_imx->delivered++] = data & 0xff;
	} while (1);

	/*
	 * First byte is the length of remaining packet in the SMBus block
	 * data read. Add it to msgs->len.
	 */
	if (lpi2c_imx->block_data) {
		blocklen = lpi2c_imx->rx_buf[0];
		lpi2c_imx->msglen += blocklen;
	}

	remaining = lpi2c_imx->msglen - lpi2c_imx->delivered;

	if (!remaining) {
		complete(&lpi2c_imx->complete);
		return;
	}

	/* not finished, still waiting for rx data */
	lpi2c_imx_set_rx_watermark(lpi2c_imx);

	/* multiple receive commands */
	if (lpi2c_imx->block_data) {
		lpi2c_imx->block_data = 0;
		temp = remaining;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	} else if (!(lpi2c_imx->delivered & 0xff)) {
		temp = (remaining > CHUNK_DATA ? CHUNK_DATA : remaining) - 1;
		temp |= (RECV_DATA << 8);
		writel(temp, lpi2c_imx->base + LPI2C_MTDR);
	}

	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE);
}

static void lpi2c_imx_write(struct lpi2c_imx_struct *lpi2c_imx,
			    struct i2c_msg *msgs)
{
	lpi2c_imx->tx_buf = msgs->buf;
	lpi2c_imx_set_tx_watermark(lpi2c_imx);
	lpi2c_imx_write_txfifo(lpi2c_imx);
}

static void lpi2c_imx_read(struct lpi2c_imx_struct *lpi2c_imx,
			   struct i2c_msg *msgs)
{
	unsigned int temp;

	lpi2c_imx->rx_buf = msgs->buf;
	lpi2c_imx->block_data = msgs->flags & I2C_M_RECV_LEN;

	lpi2c_imx_set_rx_watermark(lpi2c_imx);
	temp = msgs->len > CHUNK_DATA ? CHUNK_DATA - 1 : msgs->len - 1;
	temp |= (RECV_DATA << 8);
	writel(temp, lpi2c_imx->base + LPI2C_MTDR);

	lpi2c_imx_intctrl(lpi2c_imx, MIER_RDIE | MIER_NDIE);
}

static bool is_use_dma(struct lpi2c_imx_struct *lpi2c_imx, struct i2c_msg *msg)
{
	if (!lpi2c_imx->can_use_dma)
		return false;

	/*
	 * When the length of data is less than I2C_DMA_THRESHOLD,
	 * cpu mode is used directly to avoid low performance.
	 */
	return !(msg->len < I2C_DMA_THRESHOLD);
}

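/*
 * Illustrative note (not part of the original source): with
 * I2C_DMA_THRESHOLD set to 8 bytes, a 4-byte message is always handled
 * in PIO mode, while an 8-byte or longer message uses DMA, provided the
 * DMA channels were successfully requested at probe time.
 */
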
static int lpi2c_imx_pio_xfer(struct lpi2c_imx_struct *lpi2c_imx,
			      struct i2c_msg *msg)
{
	reinit_completion(&lpi2c_imx->complete);

	if (msg->flags & I2C_M_RD)
		lpi2c_imx_read(lpi2c_imx, msg);
	else
		lpi2c_imx_write(lpi2c_imx, msg);

	return lpi2c_imx_pio_msg_complete(lpi2c_imx);
}

static int lpi2c_imx_dma_timeout_calculate(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long time = 0;

	time = 8 * lpi2c_imx->dma->dma_len * 1000 / lpi2c_imx->bitrate;

	/* Add extra second for scheduler related activities */
	time += 1;

	/* Double calculated time */
	return msecs_to_jiffies(time * MSEC_PER_SEC);
}

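/*
 * Illustrative example (not part of the original source): for a
 * 256-byte transfer on a 100 kHz bus, 8 * 256 * 1000 / 100000 = 20,
 * plus the extra unit gives 21, which is then scaled by MSEC_PER_SEC,
 * i.e. roughly a 21 second completion timeout expressed in jiffies.
 */
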
static int lpi2c_imx_alloc_rx_cmd_buf(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	u16 rx_remain = dma->dma_len;
	int cmd_num;
	u16 temp;

	/*
	 * Calculate the number of rx command words via the DMA TX channel
	 * writing into command register based on the i2c msg len, and build
	 * the rx command words buffer.
	 */
	cmd_num = DIV_ROUND_UP(rx_remain, CHUNK_DATA);
	dma->rx_cmd_buf = kcalloc(cmd_num, sizeof(u16), GFP_KERNEL);
	dma->rx_cmd_buf_len = cmd_num * sizeof(u16);

	if (!dma->rx_cmd_buf) {
		dev_err(&lpi2c_imx->adapter.dev, "Alloc RX cmd buffer failed\n");
		return -ENOMEM;
	}

	for (int i = 0; i < cmd_num; i++) {
		temp = rx_remain > CHUNK_DATA ? CHUNK_DATA - 1 : rx_remain - 1;
		temp |= (RECV_DATA << 8);
		rx_remain -= CHUNK_DATA;
		dma->rx_cmd_buf[i] = temp;
	}

	return 0;
}

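/*
 * Illustrative example (not part of the original source): a 600-byte
 * read needs DIV_ROUND_UP(600, 256) = 3 command words, built above as
 * 0x01ff, 0x01ff and 0x0157 (RECV_DATA in bits 8-10, "bytes - 1" in
 * bits 0-7, i.e. 256 + 256 + 88 bytes).
 */
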
static int lpi2c_imx_dma_msg_complete(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned long time_left, time;

	time = lpi2c_imx_dma_timeout_calculate(lpi2c_imx);
	time_left = wait_for_completion_timeout(&lpi2c_imx->complete, time);
	if (time_left == 0) {
		dev_err(&lpi2c_imx->adapter.dev, "I/O Error in DMA Data Transfer\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static void lpi2c_dma_unmap(struct lpi2c_imx_dma *dma)
{
	struct dma_chan *chan = dma->dma_data_dir == DMA_FROM_DEVICE
				? dma->chan_rx : dma->chan_tx;

	dma_unmap_single(chan->device->dev, dma->dma_addr,
			 dma->dma_len, dma->dma_data_dir);

	dma->dma_data_dir = DMA_NONE;
}

static void lpi2c_cleanup_rx_cmd_dma(struct lpi2c_imx_dma *dma)
{
	dmaengine_terminate_sync(dma->chan_tx);
	dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
}

static void lpi2c_cleanup_dma(struct lpi2c_imx_dma *dma)
{
	if (dma->dma_data_dir == DMA_FROM_DEVICE)
		dmaengine_terminate_sync(dma->chan_rx);
	else if (dma->dma_data_dir == DMA_TO_DEVICE)
		dmaengine_terminate_sync(dma->chan_tx);

	lpi2c_dma_unmap(dma);
}

static void lpi2c_dma_callback(void *data)
{
	struct lpi2c_imx_struct *lpi2c_imx = (struct lpi2c_imx_struct *)data;

	complete(&lpi2c_imx->complete);
}

static int lpi2c_dma_rx_cmd_submit(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct dma_async_tx_descriptor *rx_cmd_desc;
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_chan *txchan = dma->chan_tx;
	dma_cookie_t cookie;

	dma->dma_tx_addr = dma_map_single(txchan->device->dev,
					  dma->rx_cmd_buf, dma->rx_cmd_buf_len,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(txchan->device->dev, dma->dma_tx_addr)) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
		return -EINVAL;
	}

	rx_cmd_desc = dmaengine_prep_slave_single(txchan, dma->dma_tx_addr,
						  dma->rx_cmd_buf_len, DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rx_cmd_desc) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
		goto desc_prepare_err_exit;
	}

	cookie = dmaengine_submit(rx_cmd_desc);
	if (dma_submit_error(cookie)) {
		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
		goto submit_err_exit;
	}

	dma_async_issue_pending(txchan);

	return 0;

desc_prepare_err_exit:
	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
	return -EINVAL;

submit_err_exit:
	dma_unmap_single(txchan->device->dev, dma->dma_tx_addr,
			 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
	dmaengine_desc_free(rx_cmd_desc);
	return -EINVAL;
}

static int lpi2c_dma_submit(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	if (dma->dma_msg_flag & I2C_M_RD) {
		chan = dma->chan_rx;
		dma->dma_data_dir = DMA_FROM_DEVICE;
		dma->dma_transfer_dir = DMA_DEV_TO_MEM;
	} else {
		chan = dma->chan_tx;
		dma->dma_data_dir = DMA_TO_DEVICE;
		dma->dma_transfer_dir = DMA_MEM_TO_DEV;
	}

	dma->dma_addr = dma_map_single(chan->device->dev,
				       dma->dma_buf, dma->dma_len, dma->dma_data_dir);
	if (dma_mapping_error(chan->device->dev, dma->dma_addr)) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA map failed, use pio\n");
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_single(chan, dma->dma_addr,
					   dma->dma_len, dma->dma_transfer_dir,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA prep slave sg failed, use pio\n");
		goto desc_prepare_err_exit;
	}

	reinit_completion(&lpi2c_imx->complete);
	desc->callback = lpi2c_dma_callback;
	desc->callback_param = lpi2c_imx;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dev_err(&lpi2c_imx->adapter.dev, "submitting DMA failed, use pio\n");
		goto submit_err_exit;
	}

	/* Can't switch to PIO mode once DMA has started the transfer */
	dma->using_pio_mode = false;

	dma_async_issue_pending(chan);

	return 0;

desc_prepare_err_exit:
	lpi2c_dma_unmap(dma);
	return -EINVAL;

submit_err_exit:
	lpi2c_dma_unmap(dma);
	dmaengine_desc_free(desc);
	return -EINVAL;
}

static int lpi2c_imx_find_max_burst_num(unsigned int fifosize, unsigned int len)
{
	unsigned int i;

	for (i = fifosize / 2; i > 0; i--)
		if (!(len % i))
			break;

	return i;
}

/*
 * For the highest DMA efficiency, the tx/rx burst number should be
 * calculated according to the FIFO depth.
 */
static void lpi2c_imx_dma_burst_num_calculate(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	unsigned int cmd_num;

	if (dma->dma_msg_flag & I2C_M_RD) {
		/*
		 * One RX cmd word can trigger DMA receive no more than 256 bytes.
		 * The number of RX cmd words should be calculated based on the data
		 * length.
		 */
		cmd_num = DIV_ROUND_UP(dma->dma_len, CHUNK_DATA);
		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
								 cmd_num);
		dma->rx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->rxfifosize,
								 dma->dma_len);
	} else {
		dma->tx_burst_num = lpi2c_imx_find_max_burst_num(lpi2c_imx->txfifosize,
								 dma->dma_len);
	}
}

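/*
 * Illustrative example (not part of the original source), assuming
 * 16-entry TX/RX FIFOs and a 96-byte read: cmd_num = 1, so the TX burst
 * settles at 1, while the RX burst becomes 8, the largest value up to
 * half the FIFO depth that divides 96 evenly.
 */
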
static int lpi2c_dma_config(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	struct dma_slave_config rx = {}, tx = {};
	int ret;

	lpi2c_imx_dma_burst_num_calculate(lpi2c_imx);

	if (dma->dma_msg_flag & I2C_M_RD) {
		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		tx.dst_maxburst = dma->tx_burst_num;
		tx.direction = DMA_MEM_TO_DEV;
		ret = dmaengine_slave_config(dma->chan_tx, &tx);
		if (ret < 0)
			return ret;

		rx.src_addr = dma->phy_addr + LPI2C_MRDR;
		rx.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		rx.src_maxburst = dma->rx_burst_num;
		rx.direction = DMA_DEV_TO_MEM;
		ret = dmaengine_slave_config(dma->chan_rx, &rx);
		if (ret < 0)
			return ret;
	} else {
		tx.dst_addr = dma->phy_addr + LPI2C_MTDR;
		tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		tx.dst_maxburst = dma->tx_burst_num;
		tx.direction = DMA_MEM_TO_DEV;
		ret = dmaengine_slave_config(dma->chan_tx, &tx);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static void lpi2c_dma_enable(struct lpi2c_imx_struct *lpi2c_imx)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	/*
	 * The TX interrupt will be triggered when the number of words in
	 * the transmit FIFO is equal to or less than the TX watermark.
	 * The RX interrupt will be triggered when the number of words in
	 * the receive FIFO is greater than the RX watermark.
	 * In order to trigger the DMA interrupt, the TX watermark should be
	 * set equal to the DMA TX burst number but the RX watermark should
	 * be set less than the DMA RX burst number.
	 */
	if (dma->dma_msg_flag & I2C_M_RD) {
		/* Set I2C TX/RX watermark */
		writel(dma->tx_burst_num | (dma->rx_burst_num - 1) << 16,
		       lpi2c_imx->base + LPI2C_MFCR);
		/* Enable I2C DMA TX/RX function */
		writel(MDER_TDDE | MDER_RDDE, lpi2c_imx->base + LPI2C_MDER);
	} else {
		/* Set I2C TX watermark */
		writel(dma->tx_burst_num, lpi2c_imx->base + LPI2C_MFCR);
		/* Enable I2C DMA TX function */
		writel(MDER_TDDE, lpi2c_imx->base + LPI2C_MDER);
	}

	/* Enable NACK detection */
	lpi2c_imx_intctrl(lpi2c_imx, MIER_NDIE);
}

/*
 * When lpi2c is in TX DMA mode we can use one DMA TX channel to write
 * data words into the TXFIFO, but in RX DMA mode it is different.
 *
 * The LPI2C MTDR register is a command data and transmit data register.
 * Bits 8-10 are the command data field and Bits 0-7 are the transmit
 * data field. When the LPI2C master needs to read data, the number of
 * bytes to read should be set in the command field and RECV_DATA should
 * be set into the command data field to receive (DATA[7:0] + 1) bytes.
 * The recv data command word is made of RECV_DATA in the command data
 * field and the number of bytes to read in the transmit data field. When
 * the length of data to be read exceeds 256 bytes, the recv data command
 * word needs to be written to the TXFIFO multiple times.
 *
 * So when in RX DMA mode, the TX channel must also be configured to
 * send RX command words, and the RX command words must be set up in
 * advance before transmitting.
 */
static int lpi2c_imx_dma_xfer(struct lpi2c_imx_struct *lpi2c_imx,
			      struct i2c_msg *msg)
{
	struct lpi2c_imx_dma *dma = lpi2c_imx->dma;
	int ret;

	/* When DMA mode fails before transferring, CPU mode can be used. */
	dma->using_pio_mode = true;

	dma->dma_len = msg->len;
	dma->dma_msg_flag = msg->flags;
	dma->dma_buf = i2c_get_dma_safe_msg_buf(msg, I2C_DMA_THRESHOLD);
	if (!dma->dma_buf)
		return -ENOMEM;

	ret = lpi2c_dma_config(lpi2c_imx);
	if (ret) {
		dev_err(&lpi2c_imx->adapter.dev, "Failed to configure DMA (%d)\n", ret);
		goto disable_dma;
	}

	lpi2c_dma_enable(lpi2c_imx);

	ret = lpi2c_dma_submit(lpi2c_imx);
	if (ret) {
		dev_err(&lpi2c_imx->adapter.dev, "DMA submission failed (%d)\n", ret);
		goto disable_dma;
	}

	if (dma->dma_msg_flag & I2C_M_RD) {
		ret = lpi2c_imx_alloc_rx_cmd_buf(lpi2c_imx);
		if (ret)
			goto disable_cleanup_data_dma;

		ret = lpi2c_dma_rx_cmd_submit(lpi2c_imx);
		if (ret)
			goto disable_cleanup_data_dma;
	}

	ret = lpi2c_imx_dma_msg_complete(lpi2c_imx);
	if (ret)
		goto disable_cleanup_all_dma;

	/* When encountering NACK in transfer, clean up all DMA transfers */
	if ((readl(lpi2c_imx->base + LPI2C_MSR) & MSR_NDF) && !ret) {
		ret = -EIO;
		goto disable_cleanup_all_dma;
	}

	if (dma->dma_msg_flag & I2C_M_RD)
		dma_unmap_single(dma->chan_tx->device->dev, dma->dma_tx_addr,
				 dma->rx_cmd_buf_len, DMA_TO_DEVICE);
	lpi2c_dma_unmap(dma);

	goto disable_dma;

disable_cleanup_all_dma:
	if (dma->dma_msg_flag & I2C_M_RD)
		lpi2c_cleanup_rx_cmd_dma(dma);
disable_cleanup_data_dma:
	lpi2c_cleanup_dma(dma);
disable_dma:
	/* Disable I2C DMA function */
	writel(0, lpi2c_imx->base + LPI2C_MDER);

	if (dma->dma_msg_flag & I2C_M_RD)
		kfree(dma->rx_cmd_buf);

	if (ret)
		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, false);
	else
		i2c_put_dma_safe_msg_buf(dma->dma_buf, msg, true);

	return ret;
}

static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
			  struct i2c_msg *msgs, int num)
{
	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(adapter);
	unsigned int temp;
	int i, result;

	result = lpi2c_imx_master_enable(lpi2c_imx);
	if (result)
		return result;

	for (i = 0; i < num; i++) {
		result = lpi2c_imx_start(lpi2c_imx, &msgs[i]);
		if (result)
			goto disable;

		/* quick smbus */
		if (num == 1 && msgs[0].len == 0)
			goto stop;

		lpi2c_imx->rx_buf = NULL;
		lpi2c_imx->tx_buf = NULL;
		lpi2c_imx->delivered = 0;
		lpi2c_imx->msglen = msgs[i].len;
		init_completion(&lpi2c_imx->complete);

		if (is_use_dma(lpi2c_imx, &msgs[i])) {
			result = lpi2c_imx_dma_xfer(lpi2c_imx, &msgs[i]);
			if (result && lpi2c_imx->dma->using_pio_mode)
				result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
		} else {
			result = lpi2c_imx_pio_xfer(lpi2c_imx, &msgs[i]);
		}

		if (result)
			goto stop;

		if (!(msgs[i].flags & I2C_M_RD)) {
			result = lpi2c_imx_txfifo_empty(lpi2c_imx);
			if (result)
				goto stop;
		}
	}

stop:
	lpi2c_imx_stop(lpi2c_imx);

	temp = readl(lpi2c_imx->base + LPI2C_MSR);
	if ((temp & MSR_NDF) && !result)
		result = -EIO;

disable:
	lpi2c_imx_master_disable(lpi2c_imx);

	dev_dbg(&lpi2c_imx->adapter.dev, "<%s> exit with: %s: %d\n", __func__,
		(result < 0) ? "error" : "success msg",
		(result < 0) ? result : num);

	return (result < 0) ? result : num;
}

static irqreturn_t lpi2c_imx_target_isr(struct lpi2c_imx_struct *lpi2c_imx,
					u32 ssr, u32 sier_filter)
{
	u8 value;
	u32 sasr;

	/* Arbitration lost */
	if (sier_filter & SSR_BEF) {
		writel(0, lpi2c_imx->base + LPI2C_SIER);
		return IRQ_HANDLED;
	}

	/* Address detected */
	if (sier_filter & SSR_AVF) {
		sasr = readl(lpi2c_imx->base + LPI2C_SASR);
		if (SASR_READ_REQ & sasr) {
			/* Read request */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_REQUESTED, &value);
			writel(value, lpi2c_imx->base + LPI2C_STDR);
			goto ret;
		} else {
			/* Write request */
			i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_REQUESTED, &value);
		}
	}

	if (sier_filter & SSR_SDF)
		/* STOP */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_STOP, &value);

	if (sier_filter & SSR_TDF) {
		/* Target send data */
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, lpi2c_imx->base + LPI2C_STDR);
	}

	if (sier_filter & SSR_RDF) {
		/* Target receive data */
		value = readl(lpi2c_imx->base + LPI2C_SRDR);
		i2c_slave_event(lpi2c_imx->target, I2C_SLAVE_WRITE_RECEIVED, &value);
	}

ret:
	/* Clear SSR */
	writel(ssr & SSR_CLEAR_BITS, lpi2c_imx->base + LPI2C_SSR);
	return IRQ_HANDLED;
}

static irqreturn_t lpi2c_imx_master_isr(struct lpi2c_imx_struct *lpi2c_imx)
{
	unsigned int enabled;
	unsigned int temp;

	enabled = readl(lpi2c_imx->base + LPI2C_MIER);

	lpi2c_imx_intctrl(lpi2c_imx, 0);
	temp = readl(lpi2c_imx->base + LPI2C_MSR);
	temp &= enabled;

	if (temp & MSR_NDF)
		complete(&lpi2c_imx->complete);
	else if (temp & MSR_RDF)
		lpi2c_imx_read_rxfifo(lpi2c_imx);
	else if (temp & MSR_TDF)
		lpi2c_imx_write_txfifo(lpi2c_imx);

	return IRQ_HANDLED;
}

static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_id;

	if (lpi2c_imx->target) {
		u32 scr = readl(lpi2c_imx->base + LPI2C_SCR);
		u32 ssr = readl(lpi2c_imx->base + LPI2C_SSR);
		u32 sier_filter = ssr & readl(lpi2c_imx->base + LPI2C_SIER);

		/*
		 * The target is enabled and an interrupt has been triggered.
		 * Enter the target's irq handler.
		 */
		if ((scr & SCR_SEN) && sier_filter)
			return lpi2c_imx_target_isr(lpi2c_imx, ssr, sier_filter);
	}

	/*
	 * Otherwise the interrupt has been triggered by the master.
	 * Enter the master's irq handler.
	 */
	return lpi2c_imx_master_isr(lpi2c_imx);
}

static void lpi2c_imx_target_init(struct lpi2c_imx_struct *lpi2c_imx)
{
	u32 temp;

	/* reset target module */
	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
	writel(0, lpi2c_imx->base + LPI2C_SCR);

	/* Set target address */
	writel((lpi2c_imx->target->addr << 1), lpi2c_imx->base + LPI2C_SAMR);

	writel(SCFGR1_RXSTALL | SCFGR1_TXDSTALL, lpi2c_imx->base + LPI2C_SCFGR1);

	/*
	 * set SCFGR2: FILTSDA, FILTSCL and CLKHOLD
	 *
	 * FILTSCL/FILTSDA can eliminate signal skew. They should generally be
	 * set to the same value and should be set >= 50ns.
	 *
	 * CLKHOLD is only used when clock stretching is enabled, but it will
	 * extend the clock stretching to ensure there is an additional delay
	 * between the target driving SDA and the target releasing the SCL pin.
	 *
	 * The CLKHOLD setting is crucial for the lpi2c target. When the master
	 * reads data from the target, if there is a delay caused by cpu idle,
	 * excessive load, or other delays between two bytes in one message
	 * transmission, it will cause a short interval between the driving SDA
	 * signal and the releasing SCL signal. The lpi2c master will mistakenly
	 * think it is a stop signal, resulting in an arbitration failure. This
	 * issue can be avoided by setting CLKHOLD.
	 *
	 * In order to ensure the lpi2c functions normally when the lpi2c speed
	 * is as low as 100kHz, CLKHOLD should be set to 3; this is also
	 * compatible with higher clock frequencies like 400kHz and 1MHz.
	 */
	temp = SCFGR2_FILTSDA(2) | SCFGR2_FILTSCL(2) | SCFGR2_CLKHOLD(3);
	writel(temp, lpi2c_imx->base + LPI2C_SCFGR2);

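	/*
	 * Illustrative note (not part of the original source): with the
	 * macros above, this writes (2 << 24) | (2 << 16) | 3, i.e.
	 * 0x02020003, into SCFGR2.
	 */
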
	/*
	 * Enable module:
	 * SCR_FILTEN can enable the digital filter and output delay counter
	 * for LPI2C target mode, so SCR_FILTEN needs to be asserted when
	 * enabling the SDA/SCL filters and CLKHOLD.
	 */
	writel(SCR_SEN | SCR_FILTEN, lpi2c_imx->base + LPI2C_SCR);

	/* Enable interrupt from i2c module */
	writel(SLAVE_INT_FLAG, lpi2c_imx->base + LPI2C_SIER);
}

static int lpi2c_imx_register_target(struct i2c_client *client)
{
	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
	int ret;

	if (lpi2c_imx->target)
		return -EBUSY;

	lpi2c_imx->target = client;

	ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
	if (ret < 0) {
		dev_err(&lpi2c_imx->adapter.dev, "failed to resume i2c controller");
		return ret;
	}

	lpi2c_imx_target_init(lpi2c_imx);

	return 0;
}

static int lpi2c_imx_unregister_target(struct i2c_client *client)
{
	struct lpi2c_imx_struct *lpi2c_imx = i2c_get_adapdata(client->adapter);
	int ret;

	if (!lpi2c_imx->target)
		return -EINVAL;

	/* Reset target address. */
	writel(0, lpi2c_imx->base + LPI2C_SAMR);

	writel(SCR_RST, lpi2c_imx->base + LPI2C_SCR);
	writel(0, lpi2c_imx->base + LPI2C_SCR);

	lpi2c_imx->target = NULL;

	ret = pm_runtime_put_sync(lpi2c_imx->adapter.dev.parent);
	if (ret < 0)
		dev_err(&lpi2c_imx->adapter.dev, "failed to suspend i2c controller");

	return ret;
}

static int lpi2c_imx_init_recovery_info(struct lpi2c_imx_struct *lpi2c_imx,
					struct platform_device *pdev)
{
	struct i2c_bus_recovery_info *bri = &lpi2c_imx->rinfo;

	bri->pinctrl = devm_pinctrl_get(&pdev->dev);
	if (IS_ERR(bri->pinctrl))
		return PTR_ERR(bri->pinctrl);

	lpi2c_imx->adapter.bus_recovery_info = bri;

	return 0;
}

static void dma_exit(struct device *dev, struct lpi2c_imx_dma *dma)
{
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);

	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);

	devm_kfree(dev, dma);
}

static int lpi2c_dma_init(struct device *dev, dma_addr_t phy_addr)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
	struct lpi2c_imx_dma *dma;
	int ret;

	dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->phy_addr = phy_addr;

	/* Prepare for TX DMA: */
	dma->chan_tx = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->chan_tx)) {
		ret = PTR_ERR(dma->chan_tx);
		if (ret != -ENODEV && ret != -EPROBE_DEFER)
			dev_err(dev, "can't request DMA tx channel (%d)\n", ret);
		dma->chan_tx = NULL;
		goto dma_exit;
	}

	/* Prepare for RX DMA: */
	dma->chan_rx = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->chan_rx)) {
		ret = PTR_ERR(dma->chan_rx);
		if (ret != -ENODEV && ret != -EPROBE_DEFER)
			dev_err(dev, "can't request DMA rx channel (%d)\n", ret);
		dma->chan_rx = NULL;
		goto dma_exit;
	}

	lpi2c_imx->can_use_dma = true;
	lpi2c_imx->dma = dma;
	return 0;

dma_exit:
	dma_exit(dev, dma);
	return ret;
}

static u32 lpi2c_imx_func(struct i2c_adapter *adapter)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
		I2C_FUNC_SMBUS_READ_BLOCK_DATA;
}

static const struct i2c_algorithm lpi2c_imx_algo = {
	.master_xfer = lpi2c_imx_xfer,
	.functionality = lpi2c_imx_func,
	.reg_target = lpi2c_imx_register_target,
	.unreg_target = lpi2c_imx_unregister_target,
};

static const struct of_device_id lpi2c_imx_of_match[] = {
	{ .compatible = "fsl,imx7ulp-lpi2c" },
	{ }
};
MODULE_DEVICE_TABLE(of, lpi2c_imx_of_match);

static int lpi2c_imx_probe(struct platform_device *pdev)
{
	struct lpi2c_imx_struct *lpi2c_imx;
	struct resource *res;
	dma_addr_t phy_addr;
	unsigned int temp;
	int irq, ret;

	lpi2c_imx = devm_kzalloc(&pdev->dev, sizeof(*lpi2c_imx), GFP_KERNEL);
	if (!lpi2c_imx)
		return -ENOMEM;

	lpi2c_imx->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(lpi2c_imx->base))
		return PTR_ERR(lpi2c_imx->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	lpi2c_imx->adapter.owner = THIS_MODULE;
	lpi2c_imx->adapter.algo = &lpi2c_imx_algo;
	lpi2c_imx->adapter.dev.parent = &pdev->dev;
	lpi2c_imx->adapter.dev.of_node = pdev->dev.of_node;
	strscpy(lpi2c_imx->adapter.name, pdev->name,
		sizeof(lpi2c_imx->adapter.name));
	phy_addr = (dma_addr_t)res->start;

	ret = devm_clk_bulk_get_all(&pdev->dev, &lpi2c_imx->clks);
	if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "can't get I2C peripheral clock\n");
	lpi2c_imx->num_clks = ret;

	ret = of_property_read_u32(pdev->dev.of_node,
				   "clock-frequency", &lpi2c_imx->bitrate);
	if (ret)
		lpi2c_imx->bitrate = I2C_MAX_STANDARD_MODE_FREQ;

	ret = devm_request_irq(&pdev->dev, irq, lpi2c_imx_isr, IRQF_NO_SUSPEND,
			       pdev->name, lpi2c_imx);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "can't claim irq %d\n", irq);

	i2c_set_adapdata(&lpi2c_imx->adapter, lpi2c_imx);
	platform_set_drvdata(pdev, lpi2c_imx);

	ret = clk_bulk_prepare_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
	if (ret)
		return ret;

	/*
	 * Lock the parent clock rate to avoid getting parent clock upon
	 * each transfer
	 */
	ret = devm_clk_rate_exclusive_get(&pdev->dev, lpi2c_imx->clks[0].clk);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "can't lock I2C peripheral clock rate\n");

	lpi2c_imx->rate_per = clk_get_rate(lpi2c_imx->clks[0].clk);
	if (!lpi2c_imx->rate_per)
		return dev_err_probe(&pdev->dev, -EINVAL,
				     "can't get I2C peripheral clock rate\n");

	pm_runtime_set_autosuspend_delay(&pdev->dev, I2C_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	temp = readl(lpi2c_imx->base + LPI2C_PARAM);
	lpi2c_imx->txfifosize = 1 << (temp & 0x0f);
	lpi2c_imx->rxfifosize = 1 << ((temp >> 8) & 0x0f);

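	/*
	 * Illustrative example (not part of the original source): a PARAM
	 * value of 0x0404 would decode as 1 << 4 for both fields, i.e.
	 * 16-word TX and RX FIFOs.
	 */
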
	/* Init optional bus recovery function */
	ret = lpi2c_imx_init_recovery_info(lpi2c_imx, pdev);
	/* Give it another chance if pinctrl used is not ready yet */
	if (ret == -EPROBE_DEFER)
		goto rpm_disable;

	/* Init DMA */
	ret = lpi2c_dma_init(&pdev->dev, phy_addr);
	if (ret) {
		if (ret == -EPROBE_DEFER)
			goto rpm_disable;
		dev_info(&pdev->dev, "use pio mode\n");
	}

	ret = i2c_add_adapter(&lpi2c_imx->adapter);
	if (ret)
		goto rpm_disable;

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	dev_info(&lpi2c_imx->adapter.dev, "LPI2C adapter registered\n");

	return 0;

rpm_disable:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return ret;
}

static void lpi2c_imx_remove(struct platform_device *pdev)
{
	struct lpi2c_imx_struct *lpi2c_imx = platform_get_drvdata(pdev);

	i2c_del_adapter(&lpi2c_imx->adapter);

	pm_runtime_disable(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
}

static int __maybe_unused lpi2c_runtime_suspend(struct device *dev)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);

	clk_bulk_disable(lpi2c_imx->num_clks, lpi2c_imx->clks);
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int __maybe_unused lpi2c_runtime_resume(struct device *dev)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
	int ret;

	pinctrl_pm_select_default_state(dev);
	ret = clk_bulk_enable(lpi2c_imx->num_clks, lpi2c_imx->clks);
	if (ret) {
		dev_err(dev, "failed to enable I2C clock, ret=%d\n", ret);
		return ret;
	}

	return 0;
}

static int __maybe_unused lpi2c_suspend_noirq(struct device *dev)
{
	return pm_runtime_force_suspend(dev);
}

static int __maybe_unused lpi2c_resume_noirq(struct device *dev)
{
	struct lpi2c_imx_struct *lpi2c_imx = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);
	if (ret)
		return ret;

	/*
	 * If the I2C module powers down during system suspend,
	 * the register values will be lost. Therefore, reinitialize
	 * the target when the system resumes.
	 */
	if (lpi2c_imx->target)
		lpi2c_imx_target_init(lpi2c_imx);

	return 0;
}

static int lpi2c_suspend(struct device *dev)
{
	/*
	 * Some I2C devices may need the I2C controller to remain active
	 * during resume_noirq() or suspend_noirq(). If the controller is
	 * autosuspended, there is no way to wake it up once runtime PM is
	 * disabled (in suspend_late()).
	 *
	 * During system resume, the I2C controller will be available only
	 * after runtime PM is re-enabled (in resume_early()). However, this
	 * may be too late for some devices.
	 *
	 * Wake up the controller in the suspend() callback while runtime PM
	 * is still enabled. The I2C controller will remain available until
	 * the suspend_noirq() callback (pm_runtime_force_suspend()) is
	 * called. During resume, the I2C controller can be restored by the
	 * resume_noirq() callback (pm_runtime_force_resume()).
	 *
	 * Finally, the resume() callback re-enables autosuspend, ensuring
	 * the I2C controller remains available until the system enters
	 * suspend_noirq() and from resume_noirq().
	 */
	return pm_runtime_resume_and_get(dev);
}

static int lpi2c_resume(struct device *dev)
{
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
}

static const struct dev_pm_ops lpi2c_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(lpi2c_suspend_noirq,
				      lpi2c_resume_noirq)
	SYSTEM_SLEEP_PM_OPS(lpi2c_suspend, lpi2c_resume)
	SET_RUNTIME_PM_OPS(lpi2c_runtime_suspend,
			   lpi2c_runtime_resume, NULL)
};

static struct platform_driver lpi2c_imx_driver = {
	.probe = lpi2c_imx_probe,
	.remove = lpi2c_imx_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = lpi2c_imx_of_match,
		.pm = &lpi2c_pm_ops,
	},
};

module_platform_driver(lpi2c_imx_driver);

MODULE_AUTHOR("Gao Pan <pandy.gao@nxp.com>");
MODULE_DESCRIPTION("I2C adapter driver for LPI2C bus");
MODULE_LICENSE("GPL");