// SPDX-License-Identifier: GPL-2.0-only
/*
 * Aspeed 24XX/25XX I2C Controller.
 *
 * Copyright (C) 2012-2017 ASPEED Technology Inc.
 * Copyright 2017 IBM Corporation
 * Copyright 2017 Google, Inc.
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/slab.h>
#define ASPEED_I2C_FUN_CTRL_REG		0x00
#define ASPEED_I2C_AC_TIMING_REG1	0x04
#define ASPEED_I2C_AC_TIMING_REG2	0x08
#define ASPEED_I2C_INTR_CTRL_REG	0x0c
#define ASPEED_I2C_INTR_STS_REG		0x10
#define ASPEED_I2C_CMD_REG		0x14
#define ASPEED_I2C_DEV_ADDR_REG		0x18
#define ASPEED_I2C_BYTE_BUF_REG		0x20
/* Global Register Definition */
/* 0x00 : I2C Interrupt Status Register */
/* 0x08 : I2C Interrupt Target Assignment */
/* Device Register Definition */
/* 0x00 : I2CD Function Control Register */
#define ASPEED_I2CD_MULTI_MASTER_DIS		BIT(15)
#define ASPEED_I2CD_SDA_DRIVE_1T_EN		BIT(8)
#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN		BIT(7)
#define ASPEED_I2CD_M_HIGH_SPEED_EN		BIT(6)
#define ASPEED_I2CD_SLAVE_EN			BIT(1)
#define ASPEED_I2CD_MASTER_EN			BIT(0)
/* 0x04 : I2CD Clock and AC Timing Control Register #1 */
#define ASPEED_I2CD_TIME_TBUF_MASK		GENMASK(31, 28)
#define ASPEED_I2CD_TIME_THDSTA_MASK		GENMASK(27, 24)
#define ASPEED_I2CD_TIME_TACST_MASK		GENMASK(23, 20)
#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT		16
#define ASPEED_I2CD_TIME_SCL_HIGH_MASK		GENMASK(19, 16)
#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT		12
#define ASPEED_I2CD_TIME_SCL_LOW_MASK		GENMASK(15, 12)
#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK	GENMASK(3, 0)
#define ASPEED_I2CD_TIME_SCL_REG_MAX		GENMASK(3, 0)

/* 0x08 : I2CD Clock and AC Timing Control Register #2 */
#define ASPEED_NO_TIMEOUT_CTRL			0
/* 0x0c : I2CD Interrupt Control Register &
 * 0x10 : I2CD Interrupt Status Register
 *
 * These share bit definitions, so use the same values for the enable &
 * status bits.
 */
#define ASPEED_I2CD_INTR_RECV_MASK		0xf000ffff
#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT		BIT(14)
#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE	BIT(13)
#define ASPEED_I2CD_INTR_SLAVE_MATCH		BIT(7)
#define ASPEED_I2CD_INTR_SCL_TIMEOUT		BIT(6)
#define ASPEED_I2CD_INTR_ABNORMAL		BIT(5)
#define ASPEED_I2CD_INTR_NORMAL_STOP		BIT(4)
#define ASPEED_I2CD_INTR_ARBIT_LOSS		BIT(3)
#define ASPEED_I2CD_INTR_RX_DONE		BIT(2)
#define ASPEED_I2CD_INTR_TX_NAK			BIT(1)
#define ASPEED_I2CD_INTR_TX_ACK			BIT(0)
#define ASPEED_I2CD_INTR_MASTER_ERRORS \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS)
#define ASPEED_I2CD_INTR_ALL \
		(ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \
		 ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \
		 ASPEED_I2CD_INTR_SCL_TIMEOUT | \
		 ASPEED_I2CD_INTR_ABNORMAL | \
		 ASPEED_I2CD_INTR_NORMAL_STOP | \
		 ASPEED_I2CD_INTR_ARBIT_LOSS | \
		 ASPEED_I2CD_INTR_RX_DONE | \
		 ASPEED_I2CD_INTR_TX_NAK | \
		 ASPEED_I2CD_INTR_TX_ACK)
/* 0x14 : I2CD Command/Status Register */
#define ASPEED_I2CD_SCL_LINE_STS		BIT(18)
#define ASPEED_I2CD_SDA_LINE_STS		BIT(17)
#define ASPEED_I2CD_BUS_BUSY_STS		BIT(16)
#define ASPEED_I2CD_BUS_RECOVER_CMD		BIT(11)

#define ASPEED_I2CD_M_STOP_CMD			BIT(5)
#define ASPEED_I2CD_M_S_RX_CMD_LAST		BIT(4)
#define ASPEED_I2CD_M_RX_CMD			BIT(3)
#define ASPEED_I2CD_S_TX_CMD			BIT(2)
#define ASPEED_I2CD_M_TX_CMD			BIT(1)
#define ASPEED_I2CD_M_START_CMD			BIT(0)
#define ASPEED_I2CD_MASTER_CMDS_MASK \
		(ASPEED_I2CD_M_STOP_CMD | \
		 ASPEED_I2CD_M_S_RX_CMD_LAST | \
		 ASPEED_I2CD_M_RX_CMD | \
		 ASPEED_I2CD_M_TX_CMD | \
		 ASPEED_I2CD_M_START_CMD)

/* 0x18 : I2CD Slave Device Address Register */
#define ASPEED_I2CD_DEV_ADDR_MASK		GENMASK(6, 0)
enum aspeed_i2c_master_state {
	ASPEED_I2C_MASTER_INACTIVE,
	ASPEED_I2C_MASTER_PENDING,
	ASPEED_I2C_MASTER_START,
	ASPEED_I2C_MASTER_TX_FIRST,
	ASPEED_I2C_MASTER_TX,
	ASPEED_I2C_MASTER_RX_FIRST,
	ASPEED_I2C_MASTER_RX,
	ASPEED_I2C_MASTER_STOP,
};
enum aspeed_i2c_slave_state {
	ASPEED_I2C_SLAVE_INACTIVE,
	ASPEED_I2C_SLAVE_START,
	ASPEED_I2C_SLAVE_READ_REQUESTED,
	ASPEED_I2C_SLAVE_READ_PROCESSED,
	ASPEED_I2C_SLAVE_WRITE_REQUESTED,
	ASPEED_I2C_SLAVE_WRITE_RECEIVED,
	ASPEED_I2C_SLAVE_STOP,
};
struct aspeed_i2c_bus {
	struct i2c_adapter		adap;
	struct device			*dev;
	void __iomem			*base;
	struct reset_control		*rst;
	/* Synchronizes I/O mem access to base. */
	spinlock_t			lock;
	struct completion		cmd_complete;
	u32				(*get_clk_reg_val)(struct device *dev,
							   u32 divisor);
	unsigned long			parent_clk_frequency;
	u32				bus_frequency;
	/* Transaction state. */
	enum aspeed_i2c_master_state	master_state;
	struct i2c_msg			*msgs;
	size_t				buf_index;
	size_t				msgs_index;
	size_t				msgs_count;
	int				cmd_err;
	/* Protected only by i2c_lock_bus */
	int				master_xfer_result;
	/* Multi-master */
	bool				multi_master;
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	struct i2c_client		*slave;
	enum aspeed_i2c_slave_state	slave_state;
#endif /* CONFIG_I2C_SLAVE */
};
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus);
/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus)
{
	bus->master_state = ASPEED_I2C_MASTER_STOP;
	writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG);
}
static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus)
{
	unsigned long time_left, flags;
	int ret = 0;
	u32 command;

	spin_lock_irqsave(&bus->lock, flags);
	command = readl(bus->base + ASPEED_I2C_CMD_REG);

	if (command & ASPEED_I2CD_SDA_LINE_STS) {
		/* Bus is idle: no recovery needed. */
		if (command & ASPEED_I2CD_SCL_LINE_STS)
			goto out;
		dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		aspeed_i2c_do_stop(bus);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SCL_LINE_STS))
			goto reset_out;
	/* Bus error. */
	} else {
		dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n",
			command);

		reinit_completion(&bus->cmd_complete);
		/* Writes 1 to 8 SCL clock cycles until SDA is released. */
		writel(ASPEED_I2CD_BUS_RECOVER_CMD,
		       bus->base + ASPEED_I2C_CMD_REG);
		spin_unlock_irqrestore(&bus->lock, flags);

		time_left = wait_for_completion_timeout(
				&bus->cmd_complete, bus->adap.timeout);

		spin_lock_irqsave(&bus->lock, flags);
		if (time_left == 0)
			goto reset_out;
		else if (bus->cmd_err)
			goto reset_out;
		/* Recovery failed. */
		else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) &
			   ASPEED_I2CD_SDA_LINE_STS))
			goto reset_out;
	}

out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;

reset_out:
	spin_unlock_irqrestore(&bus->lock, flags);

	return aspeed_i2c_reset(bus);
}
#if IS_ENABLED(CONFIG_I2C_SLAVE)
static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 command, irq_handled = 0;
	struct i2c_client *slave = bus->slave;
	u8 value;
	int ret;

	if (!slave)
		return 0;

	/*
	 * Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
	 * transfers with low enough latency between the nak/stop phase of the current
	 * command and the start/address phase of the following command that the
	 * interrupts are coalesced by the time we process them.
	 */
	if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
		irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
	    bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
		irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
		bus->slave_state = ASPEED_I2C_SLAVE_STOP;
	}

	/* Propagate any stop conditions to the slave implementation. */
	if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
		i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
	}

	/*
	 * Now that we've dealt with any potentially coalesced stop conditions,
	 * address any start conditions.
	 */
	if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
		irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
		bus->slave_state = ASPEED_I2C_SLAVE_START;
	}

	/*
	 * If the slave has been stopped and not started then slave interrupt
	 * handling is complete.
	 */
	if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		return irq_handled;

	command = readl(bus->base + ASPEED_I2C_CMD_REG);
	dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
		irq_status, command);

	/* Slave was sent something. */
	if (irq_status & ASPEED_I2CD_INTR_RX_DONE) {
		value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		/* Handle address frame. */
		if (bus->slave_state == ASPEED_I2C_SLAVE_START) {
			if (value & 0x1)
				bus->slave_state =
						ASPEED_I2C_SLAVE_READ_REQUESTED;
			else
				bus->slave_state =
						ASPEED_I2C_SLAVE_WRITE_REQUESTED;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
	}

	switch (bus->slave_state) {
	case ASPEED_I2C_SLAVE_READ_REQUESTED:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
			dev_err(bus->dev, "Unexpected ACK on read request.\n");
		bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED;
		i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_READ_PROCESSED:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev,
				"Expected ACK after processed read.\n");
			break;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value);
		writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG);
		writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_REQUESTED:
		bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED;
		ret = i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
		/*
		 * Slave ACK's on this address phase already but as the backend driver
		 * returns an errno, the bus driver should nack the next incoming byte.
		 */
		if (ret < 0)
			writel(ASPEED_I2CD_M_S_RX_CMD_LAST,
			       bus->base + ASPEED_I2C_CMD_REG);
		break;
	case ASPEED_I2C_SLAVE_WRITE_RECEIVED:
		i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
		break;
	case ASPEED_I2C_SLAVE_STOP:
		/* Stop event handling is done early. Unreachable. */
		break;
	case ASPEED_I2C_SLAVE_START:
		/* Slave was just started. Waiting for the next event. */;
		break;
	default:
		dev_err(bus->dev, "unknown slave_state: %d\n",
			bus->slave_state);
		bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
		break;
	}

	return irq_handled;
}
#endif /* CONFIG_I2C_SLAVE */
/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
{
	u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD;
	struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
	u8 slave_addr = i2c_8bit_addr_from_msg(msg);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * If it's requested in the middle of a slave session, set the master
	 * state to 'pending' then H/W will continue handling this master
	 * command when the bus comes back to the idle state.
	 */
	if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
		bus->master_state = ASPEED_I2C_MASTER_PENDING;
		return;
	}
#endif /* CONFIG_I2C_SLAVE */

	bus->master_state = ASPEED_I2C_MASTER_START;
	bus->buf_index = 0;

	if (msg->flags & I2C_M_RD) {
		command |= ASPEED_I2CD_M_RX_CMD;
		/* Need to let the hardware know to NACK after RX. */
		if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN))
			command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
	}

	writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG);
	writel(command, bus->base + ASPEED_I2C_CMD_REG);
}
/* precondition: bus.lock has been acquired. */
static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus)
{
	if (bus->msgs_index + 1 < bus->msgs_count) {
		bus->msgs_index++;
		aspeed_i2c_do_start(bus);
	} else {
		aspeed_i2c_do_stop(bus);
	}
}
static int aspeed_i2c_is_irq_error(u32 irq_status)
{
	if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS)
		return -EAGAIN;
	if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT |
			  ASPEED_I2CD_INTR_SCL_TIMEOUT))
		return -EBUSY;
	if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL))
		return -EPROTO;

	return 0;
}
static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
{
	u32 irq_handled = 0, command = 0;
	struct i2c_msg *msg;
	u8 recv_byte;
	int ret;

	if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) {
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		irq_handled |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE;
		goto out_complete;
	}

	/*
	 * We encountered an interrupt that reports an error: the hardware
	 * should clear the command queue effectively taking us back to the
	 * INACTIVE state.
	 */
	ret = aspeed_i2c_is_irq_error(irq_status);
	if (ret) {
		dev_dbg(bus->dev, "received error interrupt: 0x%08x\n",
			irq_status);
		irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
		if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
			irq_handled = irq_status;
			bus->cmd_err = ret;
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
			goto out_complete;
		}
	}

	/* Master is not currently active, irq was for someone else. */
	if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
	    bus->master_state == ASPEED_I2C_MASTER_PENDING)
		goto out_no_complete;

	/* We are in an invalid state; reset bus to a known state. */
	if (!bus->msgs) {
		dev_err(bus->dev, "bus in unknown state. irq_status: 0x%x\n",
			irq_status);
		bus->cmd_err = -EIO;
		if (bus->master_state != ASPEED_I2C_MASTER_STOP &&
		    bus->master_state != ASPEED_I2C_MASTER_INACTIVE)
			aspeed_i2c_do_stop(bus);
		goto out_no_complete;
	}
	msg = &bus->msgs[bus->msgs_index];

	/*
	 * START is a special case because we still have to handle a subsequent
	 * TX or RX immediately after we handle it, so we handle it here and
	 * then update the state and handle the new state below.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_START) {
#if IS_ENABLED(CONFIG_I2C_SLAVE)
		/*
		 * If a peer master starts a xfer immediately after it queues a
		 * master command, clear the queued master command and change
		 * its state to 'pending'. To simplify handling of pending
		 * cases, it uses S/W solution instead of H/W command queue
		 * handling.
		 */
		if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
			writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
				~ASPEED_I2CD_MASTER_CMDS_MASK,
			       bus->base + ASPEED_I2C_CMD_REG);
			bus->master_state = ASPEED_I2C_MASTER_PENDING;
			dev_dbg(bus->dev,
				"master goes pending due to a slave start\n");
			goto out_no_complete;
		}
#endif /* CONFIG_I2C_SLAVE */
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_NAK))) {
				bus->cmd_err = -ENXIO;
				bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
				goto out_complete;
			}
			pr_devel("no slave present at %02x\n", msg->addr);
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			bus->cmd_err = -ENXIO;
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		if (msg->len == 0) { /* SMBUS_QUICK */
			aspeed_i2c_do_stop(bus);
			goto out_no_complete;
		}
		if (msg->flags & I2C_M_RD)
			bus->master_state = ASPEED_I2C_MASTER_RX_FIRST;
		else
			bus->master_state = ASPEED_I2C_MASTER_TX_FIRST;
	}

	switch (bus->master_state) {
	case ASPEED_I2C_MASTER_TX:
		if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) {
			dev_dbg(bus->dev, "slave NACKed TX\n");
			irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
			goto error_and_stop;
		} else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) {
			dev_err(bus->dev, "slave failed to ACK TX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_TX_ACK;
		fallthrough;
	case ASPEED_I2C_MASTER_TX_FIRST:
		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_TX;
			writel(msg->buf[bus->buf_index++],
			       bus->base + ASPEED_I2C_BYTE_BUF_REG);
			writel(ASPEED_I2CD_M_TX_CMD,
			       bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_RX_FIRST:
		/* RX may not have completed yet (only address cycle) */
		if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE))
			goto out_no_complete;
		fallthrough;
	case ASPEED_I2C_MASTER_RX:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) {
			dev_err(bus->dev, "master failed to RX\n");
			goto error_and_stop;
		}
		irq_handled |= ASPEED_I2CD_INTR_RX_DONE;

		recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8;
		msg->buf[bus->buf_index++] = recv_byte;

		if (msg->flags & I2C_M_RECV_LEN) {
			if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) {
				bus->cmd_err = -EPROTO;
				aspeed_i2c_do_stop(bus);
				goto out_no_complete;
			}
			msg->len = recv_byte +
					((msg->flags & I2C_CLIENT_PEC) ? 2 : 1);
			msg->flags &= ~I2C_M_RECV_LEN;
		}

		if (bus->buf_index < msg->len) {
			bus->master_state = ASPEED_I2C_MASTER_RX;
			command = ASPEED_I2CD_M_RX_CMD;
			if (bus->buf_index + 1 == msg->len)
				command |= ASPEED_I2CD_M_S_RX_CMD_LAST;
			writel(command, bus->base + ASPEED_I2C_CMD_REG);
		} else {
			aspeed_i2c_next_msg_or_stop(bus);
		}
		goto out_no_complete;
	case ASPEED_I2C_MASTER_STOP:
		if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) {
			dev_err(bus->dev,
				"master failed to STOP. irq_status:0x%x\n",
				irq_status);
			bus->cmd_err = -EIO;
			/* Do not STOP as we have already tried. */
		} else {
			irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
		}
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		goto out_complete;
	case ASPEED_I2C_MASTER_INACTIVE:
		dev_err(bus->dev,
			"master received interrupt 0x%08x, but is inactive\n",
			irq_status);
		bus->cmd_err = -EIO;
		/* Do not STOP as we should be inactive. */
		goto out_complete;
	default:
		WARN(1, "unknown master state\n");
		bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		bus->cmd_err = -EINVAL;
		goto out_complete;
	}

error_and_stop:
	bus->cmd_err = -EIO;
	aspeed_i2c_do_stop(bus);
	goto out_no_complete;
out_complete:
	bus->msgs = NULL;
	if (bus->cmd_err)
		bus->master_xfer_result = bus->cmd_err;
	else
		bus->master_xfer_result = bus->msgs_index + 1;
	complete(&bus->cmd_complete);
out_no_complete:
	return irq_handled;
}
static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
{
	struct aspeed_i2c_bus *bus = dev_id;
	u32 irq_received, irq_remaining, irq_handled;

	spin_lock(&bus->lock);
	irq_received = readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	/* Ack all interrupts except for Rx done */
	writel(irq_received & ~ASPEED_I2CD_INTR_RX_DONE,
	       bus->base + ASPEED_I2C_INTR_STS_REG);
	readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	irq_received &= ASPEED_I2CD_INTR_RECV_MASK;
	irq_remaining = irq_received;

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/*
	 * In most cases, interrupt bits will be set one by one, although
	 * multiple interrupt bits could be set at the same time. It's also
	 * possible that master interrupt bits could be set along with slave
	 * interrupt bits. Each case needs to be handled using corresponding
	 * handlers depending on the current state.
	 */
	if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE &&
	    bus->master_state != ASPEED_I2C_MASTER_PENDING) {
		irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_slave_irq(bus, irq_remaining);
	} else {
		irq_handled = aspeed_i2c_slave_irq(bus, irq_remaining);
		irq_remaining &= ~irq_handled;
		if (irq_remaining)
			irq_handled |= aspeed_i2c_master_irq(bus,
							     irq_remaining);
	}

	/*
	 * Start a pending master command at here if a slave operation is
	 * completed.
	 */
	if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
	    bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
		aspeed_i2c_do_start(bus);
#else
	irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
#endif /* CONFIG_I2C_SLAVE */

	irq_remaining &= ~irq_handled;
	if (irq_remaining)
		dev_err(bus->dev,
			"irq handled != irq. expected 0x%08x, but was 0x%08x\n",
			irq_received, irq_handled);

	/* Ack Rx done */
	if (irq_received & ASPEED_I2CD_INTR_RX_DONE) {
		writel(ASPEED_I2CD_INTR_RX_DONE,
		       bus->base + ASPEED_I2C_INTR_STS_REG);
		readl(bus->base + ASPEED_I2C_INTR_STS_REG);
	}
	spin_unlock(&bus->lock);
	return irq_remaining ? IRQ_NONE : IRQ_HANDLED;
}
static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
				  struct i2c_msg *msgs, int num)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap);
	unsigned long time_left, flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* If bus is busy in a single master environment, attempt recovery. */
	if (!bus->multi_master &&
	    (readl(bus->base + ASPEED_I2C_CMD_REG) &
	     ASPEED_I2CD_BUS_BUSY_STS)) {
		int ret;

		spin_unlock_irqrestore(&bus->lock, flags);
		ret = aspeed_i2c_recover_bus(bus);
		if (ret)
			return ret;
		spin_lock_irqsave(&bus->lock, flags);
	}

	bus->cmd_err = 0;
	bus->msgs = msgs;
	bus->msgs_index = 0;
	bus->msgs_count = num;

	reinit_completion(&bus->cmd_complete);
	aspeed_i2c_do_start(bus);
	spin_unlock_irqrestore(&bus->lock, flags);

	time_left = wait_for_completion_timeout(&bus->cmd_complete,
						bus->adap.timeout);

	if (time_left == 0) {
		/*
		 * In a multi-master setup, if a timeout occurs, attempt
		 * recovery. But if the bus is idle, we still need to reset the
		 * i2c controller to clear the remaining interrupts.
		 */
		if (bus->multi_master &&
		    (readl(bus->base + ASPEED_I2C_CMD_REG) &
		     ASPEED_I2CD_BUS_BUSY_STS))
			aspeed_i2c_recover_bus(bus);
		else
			aspeed_i2c_reset(bus);

		/*
		 * If timed out and the state is still pending, drop the pending
		 * master command.
		 */
		spin_lock_irqsave(&bus->lock, flags);
		if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
			bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
		spin_unlock_irqrestore(&bus->lock, flags);

		return -ETIMEDOUT;
	}

	return bus->master_xfer_result;
}
static u32 aspeed_i2c_functionality(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA;
}
#if IS_ENABLED(CONFIG_I2C_SLAVE)
/* precondition: bus.lock has been acquired. */
static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr)
{
	u32 addr_reg_val, func_ctrl_reg_val;

	/*
	 * Set slave addr. Reserved bits can all safely be written with zeros
	 * on all of ast2[456]00, so zero everything else to ensure we only
	 * enable a single slave address (ast2500 has two, ast2600 has three,
	 * the enable bits for which are also in this register) so that we
	 * don't end up with additional phantom devices responding on the bus.
	 */
	addr_reg_val = slave_addr & ASPEED_I2CD_DEV_ADDR_MASK;
	writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG);

	/* Turn on slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
static int aspeed_i2c_reg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	__aspeed_i2c_reg_slave(bus, client->addr);

	bus->slave = client;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
static int aspeed_i2c_unreg_slave(struct i2c_client *client)
{
	struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter);
	u32 func_ctrl_reg_val;
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);
	if (!bus->slave) {
		spin_unlock_irqrestore(&bus->lock, flags);
		return -EINVAL;
	}

	/* Turn off slave mode. */
	func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG);
	func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN;
	writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	bus->slave = NULL;
	spin_unlock_irqrestore(&bus->lock, flags);

	return 0;
}
#endif /* CONFIG_I2C_SLAVE */
static const struct i2c_algorithm aspeed_i2c_algo = {
	.master_xfer	= aspeed_i2c_master_xfer,
	.functionality	= aspeed_i2c_functionality,
#if IS_ENABLED(CONFIG_I2C_SLAVE)
	.reg_slave	= aspeed_i2c_reg_slave,
	.unreg_slave	= aspeed_i2c_unreg_slave,
#endif /* CONFIG_I2C_SLAVE */
};
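
/*
 * Example (not part of this driver): a minimal sketch of how a client driver
 * bound to a device behind this adapter could exercise the
 * I2C_FUNC_SMBUS_BLOCK_DATA capability advertised above. The command code
 * 0x00 is hypothetical. The SMBus core turns this call into an I2C_M_RECV_LEN
 * read, which aspeed_i2c_master_irq() completes by patching msg->len with the
 * byte count returned by the slave.
 */
#if 0	/* illustration only, never compiled */
static int example_read_block(struct i2c_client *client)
{
	u8 values[I2C_SMBUS_BLOCK_MAX];
	int len;

	len = i2c_smbus_read_block_data(client, 0x00, values);
	if (len < 0)
		return len;	/* propagate adapter/bus errors */

	return len;		/* number of data bytes received */
}
#endif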
static u32 aspeed_i2c_get_clk_reg_val(struct device *dev,
				      u32 clk_high_low_mask,
				      u32 divisor)
{
	u32 base_clk_divisor, clk_high_low_max, clk_high, clk_low, tmp;

	/*
	 * SCL_high and SCL_low represent a value 1 greater than what is stored
	 * since a zero divider is meaningless. Thus, the max value each can
	 * store is every bit set + 1. Since SCL_high and SCL_low are added
	 * together (see below), the max value of both is the max value of one
	 * of them times two.
	 */
	clk_high_low_max = (clk_high_low_mask + 1) * 2;

	/*
	 * The actual clock frequency of SCL is:
	 *	SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low))
	 *		 = APB_freq / divisor
	 * where base_freq is a programmable clock divider; its value is
	 *	base_freq = 1 << base_clk_divisor
	 * SCL_high is the number of base_freq clock cycles that SCL stays high
	 * and SCL_low is the number of base_freq clock cycles that SCL stays
	 * low for a period of SCL.
	 * The actual register has a minimum SCL_high and SCL_low minimum of 1;
	 * thus, they start counting at zero. So
	 *	SCL_high = clk_high + 1
	 *	SCL_low	 = clk_low + 1
	 * Thus,
	 *	SCL_freq = APB_freq /
	 *		((1 << base_clk_divisor) * (clk_high + 1 + clk_low + 1))
	 * The documentation recommends clk_high >= clk_high_max / 2 and
	 * clk_low >= clk_low_max / 2 - 1 when possible; this last constraint
	 * gives us the following solution (a worked example follows this
	 * function):
	 */
	base_clk_divisor = divisor > clk_high_low_max ?
			ilog2((divisor - 1) / clk_high_low_max) + 1 : 0;

	if (base_clk_divisor > ASPEED_I2CD_TIME_BASE_DIVISOR_MASK) {
		base_clk_divisor = ASPEED_I2CD_TIME_BASE_DIVISOR_MASK;
		clk_low = clk_high_low_mask;
		clk_high = clk_high_low_mask;
		dev_err(dev,
			"clamping clock divider: divider requested, %u, is greater than largest possible divider, %u.\n",
			divisor, (1 << base_clk_divisor) * clk_high_low_max);
	} else {
		tmp = (divisor + (1 << base_clk_divisor) - 1)
				>> base_clk_divisor;
		clk_low = tmp / 2;
		clk_high = tmp - clk_low;

		if (clk_high)
			clk_high--;

		if (clk_low)
			clk_low--;
	}

	return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT)
		& ASPEED_I2CD_TIME_SCL_HIGH_MASK)
			| ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT)
			   & ASPEED_I2CD_TIME_SCL_LOW_MASK)
			| (base_clk_divisor
			   & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK);
}
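
/*
 * Worked example for the solution above (illustrative figures, assuming a
 * 48 MHz APB clock and a 400 kHz bus-frequency, not taken from a specific
 * board): divisor = DIV_ROUND_UP(48000000, 400000) = 120. With the 4-bit
 * field width (clk_high_low_mask = 0xf, so clk_high_low_max = 32), 120 > 32
 * gives base_clk_divisor = ilog2(119 / 32) + 1 = 2, tmp = (120 + 3) >> 2 = 30,
 * clk_low = 15 and clk_high = 15, both decremented to 14 before being written.
 * SCL then runs at 48000000 / ((1 << 2) * (15 + 15)) = 400 kHz.
 */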
static u32 aspeed_i2c_24xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 3 bits wide, so each can hold a max
	 * value of 8 giving a clk_high_low_max of 16.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(2, 0), divisor);
}
static u32 aspeed_i2c_25xx_get_clk_reg_val(struct device *dev, u32 divisor)
{
	/*
	 * clk_high and clk_low are each 4 bits wide, so each can hold a max
	 * value of 16 giving a clk_high_low_max of 32.
	 */
	return aspeed_i2c_get_clk_reg_val(dev, GENMASK(3, 0), divisor);
}
/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus)
{
	u32 divisor, clk_reg_val;

	divisor = DIV_ROUND_UP(bus->parent_clk_frequency, bus->bus_frequency);
	clk_reg_val = readl(bus->base + ASPEED_I2C_AC_TIMING_REG1);
	clk_reg_val &= (ASPEED_I2CD_TIME_TBUF_MASK |
			ASPEED_I2CD_TIME_THDSTA_MASK |
			ASPEED_I2CD_TIME_TACST_MASK);
	clk_reg_val |= bus->get_clk_reg_val(bus->dev, divisor);
	writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1);
	writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2);

	return 0;
}
/* precondition: bus.lock has been acquired. */
static int aspeed_i2c_init(struct aspeed_i2c_bus *bus,
			   struct platform_device *pdev)
{
	u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN;
	int ret;

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);

	ret = aspeed_i2c_init_clk(bus);
	if (ret < 0)
		return ret;

	if (of_property_read_bool(pdev->dev.of_node, "multi-master"))
		bus->multi_master = true;
	else
		fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS;

	/* Enable Master Mode */
	writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg,
	       bus->base + ASPEED_I2C_FUN_CTRL_REG);

#if IS_ENABLED(CONFIG_I2C_SLAVE)
	/* If slave has already been registered, re-enable it. */
	if (bus->slave)
		__aspeed_i2c_reg_slave(bus, bus->slave->addr);
#endif /* CONFIG_I2C_SLAVE */

	/* Set interrupt generation of I2C controller */
	writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	return 0;
}
static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus)
{
	struct platform_device *pdev = to_platform_device(bus->dev);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable and ack all interrupts. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	ret = aspeed_i2c_init(bus, pdev);

	spin_unlock_irqrestore(&bus->lock, flags);

	return ret;
}
static const struct of_device_id aspeed_i2c_bus_of_table[] = {
	{
		.compatible = "aspeed,ast2400-i2c-bus",
		.data = aspeed_i2c_24xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2500-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{
		.compatible = "aspeed,ast2600-i2c-bus",
		.data = aspeed_i2c_25xx_get_clk_reg_val,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
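
/*
 * Illustrative devicetree node (a sketch, not taken from a particular board;
 * the register range, interrupt, clock and reset cells are hypothetical).
 * The driver itself only parses the optional "bus-frequency" and
 * "multi-master" properties; the rest is consumed by the platform/core code
 * in aspeed_i2c_probe_bus() below.
 *
 *	i2c-bus@40 {
 *		compatible = "aspeed,ast2500-i2c-bus";
 *		reg = <0x40 0x40>;
 *		interrupts = <0>;
 *		clocks = <&syscon ASPEED_CLK_APB>;
 *		resets = <&syscon ASPEED_RESET_I2C>;
 *		bus-frequency = <100000>;
 *		multi-master;
 *	};
 */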
static int aspeed_i2c_probe_bus(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct aspeed_i2c_bus *bus;
	struct clk *parent_clk;
	int irq, ret;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	bus->base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(bus->base))
		return PTR_ERR(bus->base);

	parent_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(parent_clk))
		return PTR_ERR(parent_clk);
	bus->parent_clk_frequency = clk_get_rate(parent_clk);
	/* We just need the clock rate, we don't actually use the clk object. */
	devm_clk_put(&pdev->dev, parent_clk);

	bus->rst = devm_reset_control_get_shared(&pdev->dev, NULL);
	if (IS_ERR(bus->rst)) {
		dev_err(&pdev->dev,
			"missing or invalid reset controller device tree entry\n");
		return PTR_ERR(bus->rst);
	}
	reset_control_deassert(bus->rst);

	ret = of_property_read_u32(pdev->dev.of_node,
				   "bus-frequency", &bus->bus_frequency);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Could not read bus-frequency property\n");
		bus->bus_frequency = I2C_MAX_STANDARD_MODE_FREQ;
	}

	match = of_match_node(aspeed_i2c_bus_of_table, pdev->dev.of_node);
	if (!match)
		bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
	else
		bus->get_clk_reg_val = (u32 (*)(struct device *, u32))
				match->data;

	/* Initialize the I2C adapter */
	spin_lock_init(&bus->lock);
	init_completion(&bus->cmd_complete);
	bus->adap.owner = THIS_MODULE;
	bus->adap.retries = 0;
	bus->adap.algo = &aspeed_i2c_algo;
	bus->adap.dev.parent = &pdev->dev;
	bus->adap.dev.of_node = pdev->dev.of_node;
	strscpy(bus->adap.name, pdev->name, sizeof(bus->adap.name));
	i2c_set_adapdata(&bus->adap, bus);

	bus->dev = &pdev->dev;

	/* Clean up any left over interrupt state. */
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);
	writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG);

	/*
	 * bus.lock does not need to be held because the interrupt handler has
	 * not been enabled yet.
	 */
	ret = aspeed_i2c_init(bus, pdev);
	if (ret < 0)
		return ret;

	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq,
			       0, dev_name(&pdev->dev), bus);
	if (ret < 0)
		return ret;

	ret = i2c_add_adapter(&bus->adap);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, bus);

	dev_info(bus->dev, "i2c bus %d registered, irq %d\n",
		 bus->adap.nr, irq);

	return 0;
}
static void aspeed_i2c_remove_bus(struct platform_device *pdev)
{
	struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&bus->lock, flags);

	/* Disable everything. */
	writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG);
	writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG);

	spin_unlock_irqrestore(&bus->lock, flags);

	reset_control_assert(bus->rst);

	i2c_del_adapter(&bus->adap);
}
static struct platform_driver aspeed_i2c_bus_driver = {
	.probe		= aspeed_i2c_probe_bus,
	.remove_new	= aspeed_i2c_remove_bus,
	.driver		= {
		.name		= "aspeed-i2c-bus",
		.of_match_table	= aspeed_i2c_bus_of_table,
	},
};
module_platform_driver(aspeed_i2c_bus_driver);
MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>");
MODULE_DESCRIPTION("Aspeed I2C Bus Driver");
MODULE_LICENSE("GPL v2");