// SPDX-License-Identifier: GPL-2.0-only
/*
 * i2c-xiic.c
 * Copyright (c) 2002-2007 Xilinx Inc.
 * Copyright (c) 2009-2010 Intel Corporation
 *
 * This code was implemented by Mocean Laboratories AB when porting linux
 * to the automotive development board Russellville. The copyright holder
 * as seen in the header is Intel corporation.
 * Mocean Laboratories forked off the GNU/Linux platform work into a
 * separate company called Pelagicore AB, which committed the code to the
 * kernel.
 */

/* Supports:
 * Xilinx IIC
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xiic-i2c"
enum xilinx_i2c_state {
        STATE_DONE,
        STATE_ERROR,
        STATE_START
};

enum xiic_endian {
        LITTLE,
        BIG
};
/**
 * struct xiic_i2c - Internal representation of the XIIC I2C bus
 * @dev: Pointer to device structure
 * @base: Memory base of the HW registers
 * @wait: Wait queue for callers
 * @adap: Kernel adapter representation
 * @tx_msg: Messages from above to be sent
 * @lock: Mutual exclusion
 * @tx_pos: Current pos in TX message
 * @nmsgs: Number of messages in tx_msg
 * @state: See STATE_
 * @rx_msg: Current RX message
 * @rx_pos: Position within current RX message
 * @endianness: big/little-endian byte order
 * @clk: Pointer to AXI4-lite input clock
 */
struct xiic_i2c {
        struct device *dev;
        void __iomem *base;
        wait_queue_head_t wait;
        struct i2c_adapter adap;
        struct i2c_msg *tx_msg;
        struct mutex lock;
        unsigned int tx_pos;
        unsigned int nmsgs;
        enum xilinx_i2c_state state;
        struct i2c_msg *rx_msg;
        int rx_pos;
        enum xiic_endian endianness;
        struct clk *clk;
};
#define XIIC_MSB_OFFSET 0
#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)

/*
 * Register offsets in bytes from RegisterBase. Three is added to the
 * base offset to access LSB (IBM style) of the word
 */
#define XIIC_CR_REG_OFFSET   (0x00 + XIIC_REG_OFFSET)   /* Control Register */
#define XIIC_SR_REG_OFFSET   (0x04 + XIIC_REG_OFFSET)   /* Status Register */
#define XIIC_DTR_REG_OFFSET  (0x08 + XIIC_REG_OFFSET)   /* Data Tx Register */
#define XIIC_DRR_REG_OFFSET  (0x0C + XIIC_REG_OFFSET)   /* Data Rx Register */
#define XIIC_ADR_REG_OFFSET  (0x10 + XIIC_REG_OFFSET)   /* Address Register */
#define XIIC_TFO_REG_OFFSET  (0x14 + XIIC_REG_OFFSET)   /* Tx FIFO Occupancy */
#define XIIC_RFO_REG_OFFSET  (0x18 + XIIC_REG_OFFSET)   /* Rx FIFO Occupancy */
#define XIIC_TBA_REG_OFFSET  (0x1C + XIIC_REG_OFFSET)   /* 10 Bit Address reg */
#define XIIC_RFD_REG_OFFSET  (0x20 + XIIC_REG_OFFSET)   /* Rx FIFO Depth reg */
#define XIIC_GPO_REG_OFFSET  (0x24 + XIIC_REG_OFFSET)   /* Output Register */
/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK        0x01  /* Device enable = 1 */
#define XIIC_CR_TX_FIFO_RESET_MASK        0x02  /* Transmit FIFO reset = 1 */
#define XIIC_CR_MSMS_MASK                 0x04  /* Master starts Txing = 1 */
#define XIIC_CR_DIR_IS_TX_MASK            0x08  /* Dir of tx. Txing = 1 */
#define XIIC_CR_NO_ACK_MASK               0x10  /* Tx Ack. NO ack = 1 */
#define XIIC_CR_REPEATED_START_MASK       0x20  /* Repeated start = 1 */
#define XIIC_CR_GENERAL_CALL_MASK         0x40  /* Gen Call enabled = 1 */

/* Status Register masks */
#define XIIC_SR_GEN_CALL_MASK             0x01  /* 1 = a mstr issued a GC */
#define XIIC_SR_ADDR_AS_SLAVE_MASK        0x02  /* 1 = when addr as slave */
#define XIIC_SR_BUS_BUSY_MASK             0x04  /* 1 = bus is busy */
#define XIIC_SR_MSTR_RDING_SLAVE_MASK     0x08  /* 1 = Dir: mstr <-- slave */
#define XIIC_SR_TX_FIFO_FULL_MASK         0x10  /* 1 = Tx FIFO full */
#define XIIC_SR_RX_FIFO_FULL_MASK         0x20  /* 1 = Rx FIFO full */
#define XIIC_SR_RX_FIFO_EMPTY_MASK        0x40  /* 1 = Rx FIFO empty */
#define XIIC_SR_TX_FIFO_EMPTY_MASK        0x80  /* 1 = Tx FIFO empty */

/* Interrupt Status Register masks    Interrupt occurs when... */
#define XIIC_INTR_ARB_LOST_MASK           0x01  /* 1 = arbitration lost */
#define XIIC_INTR_TX_ERROR_MASK           0x02  /* 1 = Tx error/msg complete */
#define XIIC_INTR_TX_EMPTY_MASK           0x04  /* 1 = Tx FIFO/reg empty */
#define XIIC_INTR_RX_FULL_MASK            0x08  /* 1 = Rx FIFO/reg = OCY level */
#define XIIC_INTR_BNB_MASK                0x10  /* 1 = Bus not busy */
#define XIIC_INTR_AAS_MASK                0x20  /* 1 = when addr as slave */
#define XIIC_INTR_NAAS_MASK               0x40  /* 1 = not addr as slave */
#define XIIC_INTR_TX_HALF_MASK            0x80  /* 1 = TX FIFO half empty */
/* The following constants specify the depth of the FIFOs */
#define IIC_RX_FIFO_DEPTH         16    /* Rx fifo capacity */
#define IIC_TX_FIFO_DEPTH         16    /* Tx fifo capacity */

/* The following constants specify groups of interrupts that are typically
 * enabled or disabled at the same time
 */
#define XIIC_TX_INTERRUPTS \
(XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)

#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)
/*
 * Tx Fifo upper bit masks.
 */
#define XIIC_TX_DYN_START_MASK            0x0100 /* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK             0x0200 /* 1 = Set dynamic stop */
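
/*
 * In dynamic controller mode these flags are OR:ed into the upper byte of a
 * 16-bit write to the Tx FIFO (DTR): the start flag makes the core emit a
 * (repeated) START before the byte in the lower half, the stop flag makes it
 * emit a STOP after it. xiic_start_send() and xiic_start_recv() below rely on
 * this to frame each i2c_msg.
 */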
/*
 * The following constants define the register offsets for the Interrupt
 * registers. There are some holes in the memory map for reserved addresses
 * to allow other registers to be added and still match the memory map of the
 * interrupt controller registers
 */
#define XIIC_DGIER_OFFSET    0x1C /* Device Global Interrupt Enable Register */
#define XIIC_IISR_OFFSET     0x20 /* Interrupt Status Register */
#define XIIC_IIER_OFFSET     0x28 /* Interrupt Enable Register */
#define XIIC_RESETR_OFFSET   0x40 /* Reset Register */

#define XIIC_RESET_MASK             0xAUL

#define XIIC_PM_TIMEOUT             1000 /* ms */

/*
 * The following constant is used for the device global interrupt enable
 * register, to enable all interrupts for the device, this is the only bit
 * in the register
 */
#define XIIC_GINTR_ENABLE_MASK      0x80000000UL
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)
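
/*
 * Both helpers above give the number of bytes still outstanding in the
 * current message: tx_pos/rx_pos are advanced as bytes are pushed to or
 * pulled from the FIFOs, so a result of zero means the message is done.
 */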
static void xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);

/*
 * For the register read and write functions, little-endian and big-endian
 * versions are necessary. Endianness is detected during the probe function.
 * Only the least significant byte [doublet] of the register is ever
 * accessed. This requires an offset of 3 [2] from the base address for
 * big-endian systems.
 */
static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
{
        if (i2c->endianness == LITTLE)
                iowrite8(value, i2c->base + reg);
        else
                iowrite8(value, i2c->base + reg + 3);
}

static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
{
        u8 ret;

        if (i2c->endianness == LITTLE)
                ret = ioread8(i2c->base + reg);
        else
                ret = ioread8(i2c->base + reg + 3);
        return ret;
}

static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
{
        if (i2c->endianness == LITTLE)
                iowrite16(value, i2c->base + reg);
        else
                iowrite16be(value, i2c->base + reg + 2);
}

static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
{
        if (i2c->endianness == LITTLE)
                iowrite32(value, i2c->base + reg);
        else
                iowrite32be(value, i2c->base + reg);
}

static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
        u32 ret;

        if (i2c->endianness == LITTLE)
                ret = ioread32(i2c->base + reg);
        else
                ret = ioread32be(i2c->base + reg);
        return ret;
}
static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}

static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);

        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}

static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
        u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);

        xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}

static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
{
        xiic_irq_clr(i2c, mask);
        xiic_irq_en(i2c, mask);
}

static void xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
        u8 sr;

        for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
                !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
                sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET))
                xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
}
static void xiic_reinit(struct xiic_i2c *i2c)
{
        xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

        /* Set receive Fifo depth to maximum (zero based). */
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);

        /* Reset Tx Fifo. */
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);

        /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);

        /* make sure RX fifo is empty */
        xiic_clear_rx_fifo(i2c);

        /* Enable interrupts */
        xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);

        xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);
}

static void xiic_deinit(struct xiic_i2c *i2c)
{
        u8 cr;

        xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

        /* Disable IIC Device. */
        cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
}
static void xiic_read_rx(struct xiic_i2c *i2c)
{
        u8 bytes_in_fifo;
        int i;

        bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;

        dev_dbg(i2c->adap.dev.parent,
                "%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
                __func__, bytes_in_fifo, xiic_rx_space(i2c),
                xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
                xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

        if (bytes_in_fifo > xiic_rx_space(i2c))
                bytes_in_fifo = xiic_rx_space(i2c);

        for (i = 0; i < bytes_in_fifo; i++)
                i2c->rx_msg->buf[i2c->rx_pos++] =
                        xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);

        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
                (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
                IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
}

static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
        /* return the actual space left in the FIFO */
        return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
}
static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
{
        u8 fifo_space = xiic_tx_fifo_space(i2c);
        int len = xiic_tx_space(i2c);

        len = (len > fifo_space) ? fifo_space : len;

        dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
                __func__, len, fifo_space);

        while (len--) {
                u16 data = i2c->tx_msg->buf[i2c->tx_pos++];

                if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
                        /* last message in transfer -> STOP */
                        data |= XIIC_TX_DYN_STOP_MASK;
                        dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
                }

                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
        }
}

static void xiic_wakeup(struct xiic_i2c *i2c, int code)
{
        i2c->tx_msg = NULL;
        i2c->rx_msg = NULL;
        i2c->nmsgs = 0;
        i2c->state = code;
        wake_up(&i2c->wait);
}
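
/*
 * xiic_process() is the threaded half of the interrupt handler: the hard
 * handler xiic_isr() below only checks whether any enabled interrupt is
 * pending and, if so, returns IRQ_WAKE_THREAD so that this function runs in
 * process context under i2c->lock (see devm_request_threaded_irq() in probe).
 */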
static irqreturn_t xiic_process(int irq, void *dev_id)
{
        struct xiic_i2c *i2c = dev_id;
        u32 pend, isr, ier;
        u32 clr = 0;

        /* Get the interrupt Status from the IPIF. There is no clearing of
         * interrupts in the IPIF. Interrupts must be cleared at the source.
         * To find which interrupts are pending; AND interrupts pending with
         * interrupts masked.
         */
        mutex_lock(&i2c->lock);
        isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
        ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        pend = isr & ier;

        dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
                __func__, ier, isr, pend);
        dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
                __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
                i2c->tx_msg, i2c->nmsgs);

        /* Service requesting interrupt */
        if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
            ((pend & XIIC_INTR_TX_ERROR_MASK) &&
            !(pend & XIIC_INTR_RX_FULL_MASK))) {
                /* bus arbitration lost, or...
                 * Transmit error _OR_ RX completed
                 * if this happens when RX_FULL is not set
                 * this is probably a TX error
                 */
                dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

                /* dynamic mode seems to suffer from problems if we just flush
                 * the fifos and the next message is a TX with len 0 (only addr)
                 * reset the IP instead of just flushing the fifos
                 */
                xiic_reinit(i2c);

                if (i2c->rx_msg)
                        xiic_wakeup(i2c, STATE_ERROR);
                if (i2c->tx_msg)
                        xiic_wakeup(i2c, STATE_ERROR);
        }
        if (pend & XIIC_INTR_RX_FULL_MASK) {
                /* Receive register/FIFO is full */

                clr |= XIIC_INTR_RX_FULL_MASK;
                if (!i2c->rx_msg) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s unexpected RX IRQ\n", __func__);
                        xiic_clear_rx_fifo(i2c);
                        goto out;
                }

                xiic_read_rx(i2c);
                if (xiic_rx_space(i2c) == 0) {
                        /* this is the last part of the message */
                        i2c->rx_msg = NULL;

                        /* also clear TX error if there (RX complete) */
                        clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

                        dev_dbg(i2c->adap.dev.parent,
                                "%s end of message, nmsgs: %d\n",
                                __func__, i2c->nmsgs);

                        /* send next message if this wasn't the last,
                         * otherwise the transfer will be finalised when
                         * receiving the bus not busy interrupt
                         */
                        if (i2c->nmsgs > 1) {
                                i2c->nmsgs--;
                                i2c->tx_msg++;
                                dev_dbg(i2c->adap.dev.parent,
                                        "%s will start next...\n", __func__);

                                __xiic_start_xfer(i2c);
                        }
                }
        }
        if (pend & XIIC_INTR_BNB_MASK) {
                /* IIC bus has transitioned to not busy */
                clr |= XIIC_INTR_BNB_MASK;

                /* The bus is not busy, disable BusNotBusy interrupt */
                xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

                if (!i2c->tx_msg)
                        goto out;

                if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
                    xiic_tx_space(i2c) == 0)
                        xiic_wakeup(i2c, STATE_DONE);
                else
                        xiic_wakeup(i2c, STATE_ERROR);
        }
        if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
                /* Transmit register/FIFO is empty or ½ empty */

                clr |= (pend &
                        (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

                if (!i2c->tx_msg) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s unexpected TX IRQ\n", __func__);
                        goto out;
                }

                xiic_fill_tx_fifo(i2c);

                /* current message sent and there is space in the fifo */
                if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s end of message sent, nmsgs: %d\n",
                                __func__, i2c->nmsgs);
                        if (i2c->nmsgs > 1) {
                                i2c->nmsgs--;
                                i2c->tx_msg++;
                                __xiic_start_xfer(i2c);
                        } else {
                                xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);

                                dev_dbg(i2c->adap.dev.parent,
                                        "%s Got TX IRQ but no more to do...\n",
                                        __func__);
                        }
                } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
                        /* current frame is sent and is last,
                         * make sure to disable tx half
                         */
                        xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
        }
out:
        dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

        xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
        mutex_unlock(&i2c->lock);
        return IRQ_HANDLED;
}
static int xiic_bus_busy(struct xiic_i2c *i2c)
{
        u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);

        return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}

static int xiic_busy(struct xiic_i2c *i2c)
{
        int tries = 3;
        int err;

        if (i2c->tx_msg)
                return -EBUSY;

        /* for instance if previous transfer was terminated due to TX error
         * it might be that the bus is on its way to become available
         * give it at most 3 ms to wake
         */
        err = xiic_bus_busy(i2c);
        while (err && tries--) {
                msleep(1);
                err = xiic_bus_busy(i2c);
        }

        return err;
}
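
/*
 * Receives use the core's dynamic mode: the slave address is written to the
 * Tx FIFO with the dynamic START flag, then the requested byte count is
 * written (with the dynamic STOP flag for the last message), and the core
 * clocks the bytes in on its own while the threaded handler drains the Rx
 * FIFO whenever the programmed watermark is reached.
 */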
static void xiic_start_recv(struct xiic_i2c *i2c)
{
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
        unsigned long flags;

        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);

        /* we want to get all but last byte, because the TX_ERROR IRQ is used
         * to indicate error ACK on the address, and negative ack on the last
         * received byte, so to not mix them receive all but last.
         * In the case where there is only one byte to receive
         * we can check if ERROR and RX full is set at the same time
         */
        rx_watermark = msg->len;
        if (rx_watermark > IIC_RX_FIFO_DEPTH)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

        local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                        i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK);

        xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
        local_irq_restore(flags);

        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

        /* the message is tx:ed */
        i2c->tx_pos = msg->len;
}
static void xiic_start_send(struct xiic_i2c *i2c)
{
        struct i2c_msg *msg = i2c->tx_msg;

        xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);

        dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
                __func__, msg, msg->len);
        dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
                __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
                xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

        if (!(msg->flags & I2C_M_NOSTART)) {
                /* write the address */
                u16 data = i2c_8bit_addr_from_msg(msg) |
                        XIIC_TX_DYN_START_MASK;

                if ((i2c->nmsgs == 1) && msg->len == 0)
                        /* no data and last message -> add STOP */
                        data |= XIIC_TX_DYN_STOP_MASK;

                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
        }

        xiic_fill_tx_fifo(i2c);

        /* Clear any pending Tx empty, Tx Error and then enable them. */
        xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
                XIIC_INTR_BNB_MASK);
}
static irqreturn_t xiic_isr(int irq, void *dev_id)
{
        struct xiic_i2c *i2c = dev_id;
        u32 pend, isr, ier;
        irqreturn_t ret = IRQ_NONE;
        /* Do not process a device's interrupts if the device has no
         * interrupts pending
         */

        dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);

        isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
        ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        pend = isr & ier;
        if (pend)
                ret = IRQ_WAKE_THREAD;

        return ret;
}
static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
        int first = 1;
        int fifo_space = xiic_tx_fifo_space(i2c);

        dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
                __func__, i2c->tx_msg, fifo_space);

        if (!i2c->tx_msg)
                return;

        i2c->rx_pos = 0;
        i2c->tx_pos = 0;
        i2c->state = STATE_START;
        while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
                if (!first) {
                        i2c->nmsgs--;
                        i2c->tx_msg++;
                        i2c->tx_pos = 0;
                } else
                        first = 0;

                if (i2c->tx_msg->flags & I2C_M_RD) {
                        /* we don't dare putting several reads in the FIFO */
                        xiic_start_recv(i2c);
                        return;
                } else {
                        xiic_start_send(i2c);
                        if (xiic_tx_space(i2c) != 0) {
                                /* the message could not be completely sent */
                                break;
                        }
                }

                fifo_space = xiic_tx_fifo_space(i2c);
        }

        /* there are more messages or the current one could not be completely
         * put into the FIFO, also enable the half empty interrupt
         */
        if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
                xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);
}
static void xiic_start_xfer(struct xiic_i2c *i2c)
{
        mutex_lock(&i2c->lock);
        xiic_reinit(i2c);
        __xiic_start_xfer(i2c);
        mutex_unlock(&i2c->lock);
}
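
/*
 * xiic_xfer() implements master_xfer: it hands the message array to the
 * interrupt machinery via tx_msg/nmsgs, kicks the transfer off and then
 * sleeps on i2c->wait until xiic_wakeup() reports STATE_DONE or STATE_ERROR,
 * or until one second (HZ jiffies) has passed, in which case it returns
 * -ETIMEDOUT.
 */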
static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
        struct xiic_i2c *i2c = i2c_get_adapdata(adap);
        int err;

        dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
                xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));

        err = pm_runtime_get_sync(i2c->dev);
        if (err < 0)
                return err;

        err = xiic_busy(i2c);
        if (err)
                goto out;

        i2c->tx_msg = msgs;
        i2c->nmsgs = num;

        xiic_start_xfer(i2c);

        if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
                               (i2c->state == STATE_DONE), HZ)) {
                err = (i2c->state == STATE_DONE) ? num : -EIO;
                goto out;
        } else {
                i2c->tx_msg = NULL;
                i2c->rx_msg = NULL;
                i2c->nmsgs = 0;
                err = -ETIMEDOUT;
                goto out;
        }
out:
        pm_runtime_mark_last_busy(i2c->dev);
        pm_runtime_put_autosuspend(i2c->dev);
        return err;
}
static u32 xiic_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm xiic_algorithm = {
        .master_xfer = xiic_xfer,
        .functionality = xiic_func,
};
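
/*
 * The dynamic mode read sequence encodes the transfer length in a single
 * byte written to the Tx FIFO (see xiic_start_recv()), so reads are limited
 * to 255 bytes per message.
 */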
static const struct i2c_adapter_quirks xiic_quirks = {
        .max_read_len = 255,
};

static const struct i2c_adapter xiic_adapter = {
        .owner = THIS_MODULE,
        .name = DRIVER_NAME,
        .class = I2C_CLASS_DEPRECATED,
        .algo = &xiic_algorithm,
        .quirks = &xiic_quirks,
};
static int xiic_i2c_probe(struct platform_device *pdev)
{
        struct xiic_i2c *i2c;
        struct xiic_i2c_platform_data *pdata;
        struct resource *res;
        int ret, irq;
        u8 i;
        u32 sr;

        i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        i2c->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(i2c->base))
                return PTR_ERR(i2c->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        pdata = dev_get_platdata(&pdev->dev);

        /* hook up driver to tree */
        platform_set_drvdata(pdev, i2c);
        i2c->adap = xiic_adapter;
        i2c_set_adapdata(&i2c->adap, i2c);
        i2c->adap.dev.parent = &pdev->dev;
        i2c->adap.dev.of_node = pdev->dev.of_node;

        mutex_init(&i2c->lock);
        init_waitqueue_head(&i2c->wait);

        i2c->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(i2c->clk)) {
                dev_err(&pdev->dev, "input clock not found.\n");
                return PTR_ERR(i2c->clk);
        }

        ret = clk_prepare_enable(i2c->clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable clock.\n");
                return ret;
        }

        i2c->dev = &pdev->dev;
        pm_runtime_enable(i2c->dev);
        pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
        pm_runtime_use_autosuspend(i2c->dev);
        pm_runtime_set_active(i2c->dev);
        ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
                                        xiic_process, IRQF_ONESHOT,
                                        pdev->name, i2c);

        if (ret < 0) {
                dev_err(&pdev->dev, "Cannot claim IRQ\n");
                goto err_clk_dis;
        }

        /*
         * Detect endianness
         * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
         * set, assume that the endianness was wrong and swap.
         */
        i2c->endianness = LITTLE;
        xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
        /* Reset is cleared in xiic_reinit */
        sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
        if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
                i2c->endianness = BIG;

        xiic_reinit(i2c);

        /* add i2c adapter to i2c tree */
        ret = i2c_add_adapter(&i2c->adap);
        if (ret) {
                xiic_deinit(i2c);
                goto err_clk_dis;
        }

        if (pdata) {
                /* add in known devices to the bus */
                for (i = 0; i < pdata->num_devices; i++)
                        i2c_new_device(&i2c->adap, pdata->devices + i);
        }

        return 0;

err_clk_dis:
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(i2c->clk);
        return ret;
}
static int xiic_i2c_remove(struct platform_device *pdev)
{
        struct xiic_i2c *i2c = platform_get_drvdata(pdev);
        int ret;

        /* remove adapter & data */
        i2c_del_adapter(&i2c->adap);

        ret = clk_prepare_enable(i2c->clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable clock.\n");
                return ret;
        }

        xiic_deinit(i2c);
        clk_disable_unprepare(i2c->clk);
        pm_runtime_disable(&pdev->dev);

        return 0;
}
#if defined(CONFIG_OF)
static const struct of_device_id xiic_of_match[] = {
        { .compatible = "xlnx,xps-iic-2.00.a", },
        {},
};
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif
static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
{
        struct xiic_i2c *i2c = dev_get_drvdata(dev);

        clk_disable(i2c->clk);

        return 0;
}

static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
{
        struct xiic_i2c *i2c = dev_get_drvdata(dev);
        int ret;

        ret = clk_enable(i2c->clk);
        if (ret) {
                dev_err(dev, "Cannot enable clock.\n");
                return ret;
        }

        return 0;
}

static const struct dev_pm_ops xiic_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
                           xiic_i2c_runtime_resume, NULL)
};
static struct platform_driver xiic_i2c_driver = {
        .probe = xiic_i2c_probe,
        .remove = xiic_i2c_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(xiic_of_match),
                .pm = &xiic_dev_pm_ops,
        },
};

module_platform_driver(xiic_i2c_driver);

MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:"DRIVER_NAME);