// SPDX-License-Identifier: GPL-2.0-only
/*
 * SPI-Engine SPI controller driver
 * Copyright 2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/fpga/adi-axi-common.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
#include <trace/events/spi.h>
20 #define SPI_ENGINE_REG_RESET 0x40
22 #define SPI_ENGINE_REG_INT_ENABLE 0x80
23 #define SPI_ENGINE_REG_INT_PENDING 0x84
24 #define SPI_ENGINE_REG_INT_SOURCE 0x88
26 #define SPI_ENGINE_REG_SYNC_ID 0xc0
28 #define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
29 #define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
30 #define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
32 #define SPI_ENGINE_REG_CMD_FIFO 0xe0
33 #define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
34 #define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
35 #define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
37 #define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
38 #define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
39 #define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
40 #define SPI_ENGINE_INT_SYNC BIT(3)
42 #define SPI_ENGINE_CONFIG_CPHA BIT(0)
43 #define SPI_ENGINE_CONFIG_CPOL BIT(1)
44 #define SPI_ENGINE_CONFIG_3WIRE BIT(2)
45 #define SPI_ENGINE_CONFIG_SDO_IDLE_HIGH BIT(3)
47 #define SPI_ENGINE_INST_TRANSFER 0x0
48 #define SPI_ENGINE_INST_ASSERT 0x1
49 #define SPI_ENGINE_INST_WRITE 0x2
50 #define SPI_ENGINE_INST_MISC 0x3
51 #define SPI_ENGINE_INST_CS_INV 0x4
53 #define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
54 #define SPI_ENGINE_CMD_REG_CONFIG 0x1
55 #define SPI_ENGINE_CMD_REG_XFER_BITS 0x2
57 #define SPI_ENGINE_MISC_SYNC 0x0
58 #define SPI_ENGINE_MISC_SLEEP 0x1
60 #define SPI_ENGINE_TRANSFER_WRITE 0x1
61 #define SPI_ENGINE_TRANSFER_READ 0x2
63 /* Arbitrary sync ID for use by host->cur_msg */
64 #define AXI_SPI_ENGINE_CUR_MSG_SYNC_ID 0x1
66 #define SPI_ENGINE_CMD(inst, arg1, arg2) \
67 (((inst) << 12) | ((arg1) << 8) | (arg2))
69 #define SPI_ENGINE_CMD_TRANSFER(flags, n) \
70 SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
71 #define SPI_ENGINE_CMD_ASSERT(delay, cs) \
72 SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
73 #define SPI_ENGINE_CMD_WRITE(reg, val) \
74 SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
75 #define SPI_ENGINE_CMD_SLEEP(delay) \
76 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
77 #define SPI_ENGINE_CMD_SYNC(id) \
78 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
79 #define SPI_ENGINE_CMD_CS_INV(flags) \
80 SPI_ENGINE_CMD(SPI_ENGINE_INST_CS_INV, 0, (flags))
82 struct spi_engine_program
{
84 uint16_t instructions
[] __counted_by(length
);
88 * struct spi_engine_message_state - SPI engine per-message state
90 struct spi_engine_message_state
{
91 /** @cmd_length: Number of elements in cmd_buf array. */
93 /** @cmd_buf: Array of commands not yet written to CMD FIFO. */
94 const uint16_t *cmd_buf
;
95 /** @tx_xfer: Next xfer with tx_buf not yet fully written to TX FIFO. */
96 struct spi_transfer
*tx_xfer
;
97 /** @tx_length: Size of tx_buf in bytes. */
98 unsigned int tx_length
;
99 /** @tx_buf: Bytes not yet written to TX FIFO. */
100 const uint8_t *tx_buf
;
101 /** @rx_xfer: Next xfer with rx_buf not yet fully written to RX FIFO. */
102 struct spi_transfer
*rx_xfer
;
103 /** @rx_length: Size of tx_buf in bytes. */
104 unsigned int rx_length
;
105 /** @rx_buf: Bytes not yet written to the RX FIFO. */
116 struct spi_engine_message_state msg_state
;
117 struct completion msg_complete
;
118 unsigned int int_enable
;
119 /* shadows hardware CS inversion flag state */
123 static void spi_engine_program_add_cmd(struct spi_engine_program
*p
,
124 bool dry
, uint16_t cmd
)
129 p
->instructions
[p
->length
- 1] = cmd
;
132 static unsigned int spi_engine_get_config(struct spi_device
*spi
)
134 unsigned int config
= 0;
136 if (spi
->mode
& SPI_CPOL
)
137 config
|= SPI_ENGINE_CONFIG_CPOL
;
138 if (spi
->mode
& SPI_CPHA
)
139 config
|= SPI_ENGINE_CONFIG_CPHA
;
140 if (spi
->mode
& SPI_3WIRE
)
141 config
|= SPI_ENGINE_CONFIG_3WIRE
;
142 if (spi
->mode
& SPI_MOSI_IDLE_HIGH
)
143 config
|= SPI_ENGINE_CONFIG_SDO_IDLE_HIGH
;
144 if (spi
->mode
& SPI_MOSI_IDLE_LOW
)
145 config
&= ~SPI_ENGINE_CONFIG_SDO_IDLE_HIGH
;
150 static void spi_engine_gen_xfer(struct spi_engine_program
*p
, bool dry
,
151 struct spi_transfer
*xfer
)
155 if (xfer
->bits_per_word
<= 8)
157 else if (xfer
->bits_per_word
<= 16)
163 unsigned int n
= min(len
, 256U);
164 unsigned int flags
= 0;
167 flags
|= SPI_ENGINE_TRANSFER_WRITE
;
169 flags
|= SPI_ENGINE_TRANSFER_READ
;
171 spi_engine_program_add_cmd(p
, dry
,
172 SPI_ENGINE_CMD_TRANSFER(flags
, n
- 1));
177 static void spi_engine_gen_sleep(struct spi_engine_program
*p
, bool dry
,
178 int delay_ns
, int inst_ns
, u32 sclk_hz
)
183 * Negative delay indicates error, e.g. from spi_delay_to_ns(). And if
184 * delay is less that the instruction execution time, there is no need
185 * for an extra sleep instruction since the instruction execution time
186 * will already cover the required delay.
188 if (delay_ns
< 0 || delay_ns
<= inst_ns
)
191 t
= DIV_ROUND_UP_ULL((u64
)(delay_ns
- inst_ns
) * sclk_hz
, NSEC_PER_SEC
);
193 unsigned int n
= min(t
, 256U);
195 spi_engine_program_add_cmd(p
, dry
, SPI_ENGINE_CMD_SLEEP(n
- 1));
200 static void spi_engine_gen_cs(struct spi_engine_program
*p
, bool dry
,
201 struct spi_device
*spi
, bool assert)
203 unsigned int mask
= 0xff;
206 mask
^= BIT(spi_get_chipselect(spi
, 0));
208 spi_engine_program_add_cmd(p
, dry
, SPI_ENGINE_CMD_ASSERT(0, mask
));
212 * Performs precompile steps on the message.
214 * The SPI core does most of the message/transfer validation and filling in
215 * fields for us via __spi_validate(). This fixes up anything remaining not
218 * NB: This is separate from spi_engine_compile_message() because the latter
219 * is called twice and would otherwise result in double-evaluation.
221 static void spi_engine_precompile_message(struct spi_message
*msg
)
223 unsigned int clk_div
, max_hz
= msg
->spi
->controller
->max_speed_hz
;
224 struct spi_transfer
*xfer
;
226 list_for_each_entry(xfer
, &msg
->transfers
, transfer_list
) {
227 clk_div
= DIV_ROUND_UP(max_hz
, xfer
->speed_hz
);
228 xfer
->effective_speed_hz
= max_hz
/ min(clk_div
, 256U);
232 static void spi_engine_compile_message(struct spi_message
*msg
, bool dry
,
233 struct spi_engine_program
*p
)
235 struct spi_device
*spi
= msg
->spi
;
236 struct spi_controller
*host
= spi
->controller
;
237 struct spi_transfer
*xfer
;
238 int clk_div
, new_clk_div
, inst_ns
;
239 bool keep_cs
= false;
240 u8 bits_per_word
= 0;
243 * Take into account instruction execution time for more accurate sleep
244 * times, especially when the delay is small.
246 inst_ns
= DIV_ROUND_UP(NSEC_PER_SEC
, host
->max_speed_hz
);
250 spi_engine_program_add_cmd(p
, dry
,
251 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG
,
252 spi_engine_get_config(spi
)));
254 xfer
= list_first_entry(&msg
->transfers
, struct spi_transfer
, transfer_list
);
255 spi_engine_gen_cs(p
, dry
, spi
, !xfer
->cs_off
);
257 list_for_each_entry(xfer
, &msg
->transfers
, transfer_list
) {
258 new_clk_div
= host
->max_speed_hz
/ xfer
->effective_speed_hz
;
259 if (new_clk_div
!= clk_div
) {
260 clk_div
= new_clk_div
;
261 /* actual divider used is register value + 1 */
262 spi_engine_program_add_cmd(p
, dry
,
263 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV
,
267 if (bits_per_word
!= xfer
->bits_per_word
&& xfer
->len
) {
268 bits_per_word
= xfer
->bits_per_word
;
269 spi_engine_program_add_cmd(p
, dry
,
270 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_XFER_BITS
,
274 spi_engine_gen_xfer(p
, dry
, xfer
);
275 spi_engine_gen_sleep(p
, dry
, spi_delay_to_ns(&xfer
->delay
, xfer
),
276 inst_ns
, xfer
->effective_speed_hz
);
278 if (xfer
->cs_change
) {
279 if (list_is_last(&xfer
->transfer_list
, &msg
->transfers
)) {
283 spi_engine_gen_cs(p
, dry
, spi
, false);
285 spi_engine_gen_sleep(p
, dry
, spi_delay_to_ns(
286 &xfer
->cs_change_delay
, xfer
), inst_ns
,
287 xfer
->effective_speed_hz
);
289 if (!list_next_entry(xfer
, transfer_list
)->cs_off
)
290 spi_engine_gen_cs(p
, dry
, spi
, true);
292 } else if (!list_is_last(&xfer
->transfer_list
, &msg
->transfers
) &&
293 xfer
->cs_off
!= list_next_entry(xfer
, transfer_list
)->cs_off
) {
294 spi_engine_gen_cs(p
, dry
, spi
, xfer
->cs_off
);
299 spi_engine_gen_cs(p
, dry
, spi
, false);
302 * Restore clockdiv to default so that future gen_sleep commands don't
303 * have to be aware of the current register state.
306 spi_engine_program_add_cmd(p
, dry
,
307 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV
, 0));
310 static void spi_engine_xfer_next(struct spi_message
*msg
,
311 struct spi_transfer
**_xfer
)
313 struct spi_transfer
*xfer
= *_xfer
;
316 xfer
= list_first_entry(&msg
->transfers
,
317 struct spi_transfer
, transfer_list
);
318 } else if (list_is_last(&xfer
->transfer_list
, &msg
->transfers
)) {
321 xfer
= list_next_entry(xfer
, transfer_list
);
327 static void spi_engine_tx_next(struct spi_message
*msg
)
329 struct spi_engine_message_state
*st
= msg
->state
;
330 struct spi_transfer
*xfer
= st
->tx_xfer
;
333 spi_engine_xfer_next(msg
, &xfer
);
334 } while (xfer
&& !xfer
->tx_buf
);
338 st
->tx_length
= xfer
->len
;
339 st
->tx_buf
= xfer
->tx_buf
;
345 static void spi_engine_rx_next(struct spi_message
*msg
)
347 struct spi_engine_message_state
*st
= msg
->state
;
348 struct spi_transfer
*xfer
= st
->rx_xfer
;
351 spi_engine_xfer_next(msg
, &xfer
);
352 } while (xfer
&& !xfer
->rx_buf
);
356 st
->rx_length
= xfer
->len
;
357 st
->rx_buf
= xfer
->rx_buf
;
363 static bool spi_engine_write_cmd_fifo(struct spi_engine
*spi_engine
,
364 struct spi_message
*msg
)
366 void __iomem
*addr
= spi_engine
->base
+ SPI_ENGINE_REG_CMD_FIFO
;
367 struct spi_engine_message_state
*st
= msg
->state
;
368 unsigned int n
, m
, i
;
371 n
= readl_relaxed(spi_engine
->base
+ SPI_ENGINE_REG_CMD_FIFO_ROOM
);
372 while (n
&& st
->cmd_length
) {
373 m
= min(n
, st
->cmd_length
);
375 for (i
= 0; i
< m
; i
++)
376 writel_relaxed(buf
[i
], addr
);
382 return st
->cmd_length
!= 0;
385 static bool spi_engine_write_tx_fifo(struct spi_engine
*spi_engine
,
386 struct spi_message
*msg
)
388 void __iomem
*addr
= spi_engine
->base
+ SPI_ENGINE_REG_SDO_DATA_FIFO
;
389 struct spi_engine_message_state
*st
= msg
->state
;
390 unsigned int n
, m
, i
;
392 n
= readl_relaxed(spi_engine
->base
+ SPI_ENGINE_REG_SDO_FIFO_ROOM
);
393 while (n
&& st
->tx_length
) {
394 if (st
->tx_xfer
->bits_per_word
<= 8) {
395 const u8
*buf
= st
->tx_buf
;
397 m
= min(n
, st
->tx_length
);
398 for (i
= 0; i
< m
; i
++)
399 writel_relaxed(buf
[i
], addr
);
402 } else if (st
->tx_xfer
->bits_per_word
<= 16) {
403 const u16
*buf
= (const u16
*)st
->tx_buf
;
405 m
= min(n
, st
->tx_length
/ 2);
406 for (i
= 0; i
< m
; i
++)
407 writel_relaxed(buf
[i
], addr
);
409 st
->tx_length
-= m
* 2;
411 const u32
*buf
= (const u32
*)st
->tx_buf
;
413 m
= min(n
, st
->tx_length
/ 4);
414 for (i
= 0; i
< m
; i
++)
415 writel_relaxed(buf
[i
], addr
);
417 st
->tx_length
-= m
* 4;
420 if (st
->tx_length
== 0)
421 spi_engine_tx_next(msg
);
424 return st
->tx_length
!= 0;
427 static bool spi_engine_read_rx_fifo(struct spi_engine
*spi_engine
,
428 struct spi_message
*msg
)
430 void __iomem
*addr
= spi_engine
->base
+ SPI_ENGINE_REG_SDI_DATA_FIFO
;
431 struct spi_engine_message_state
*st
= msg
->state
;
432 unsigned int n
, m
, i
;
434 n
= readl_relaxed(spi_engine
->base
+ SPI_ENGINE_REG_SDI_FIFO_LEVEL
);
435 while (n
&& st
->rx_length
) {
436 if (st
->rx_xfer
->bits_per_word
<= 8) {
437 u8
*buf
= st
->rx_buf
;
439 m
= min(n
, st
->rx_length
);
440 for (i
= 0; i
< m
; i
++)
441 buf
[i
] = readl_relaxed(addr
);
444 } else if (st
->rx_xfer
->bits_per_word
<= 16) {
445 u16
*buf
= (u16
*)st
->rx_buf
;
447 m
= min(n
, st
->rx_length
/ 2);
448 for (i
= 0; i
< m
; i
++)
449 buf
[i
] = readl_relaxed(addr
);
451 st
->rx_length
-= m
* 2;
453 u32
*buf
= (u32
*)st
->rx_buf
;
455 m
= min(n
, st
->rx_length
/ 4);
456 for (i
= 0; i
< m
; i
++)
457 buf
[i
] = readl_relaxed(addr
);
459 st
->rx_length
-= m
* 4;
462 if (st
->rx_length
== 0)
463 spi_engine_rx_next(msg
);
466 return st
->rx_length
!= 0;
469 static irqreturn_t
spi_engine_irq(int irq
, void *devid
)
471 struct spi_controller
*host
= devid
;
472 struct spi_message
*msg
= host
->cur_msg
;
473 struct spi_engine
*spi_engine
= spi_controller_get_devdata(host
);
474 unsigned int disable_int
= 0;
475 unsigned int pending
;
476 int completed_id
= -1;
478 pending
= readl_relaxed(spi_engine
->base
+ SPI_ENGINE_REG_INT_PENDING
);
480 if (pending
& SPI_ENGINE_INT_SYNC
) {
481 writel_relaxed(SPI_ENGINE_INT_SYNC
,
482 spi_engine
->base
+ SPI_ENGINE_REG_INT_PENDING
);
483 completed_id
= readl_relaxed(
484 spi_engine
->base
+ SPI_ENGINE_REG_SYNC_ID
);
487 spin_lock(&spi_engine
->lock
);
489 if (pending
& SPI_ENGINE_INT_CMD_ALMOST_EMPTY
) {
490 if (!spi_engine_write_cmd_fifo(spi_engine
, msg
))
491 disable_int
|= SPI_ENGINE_INT_CMD_ALMOST_EMPTY
;
494 if (pending
& SPI_ENGINE_INT_SDO_ALMOST_EMPTY
) {
495 if (!spi_engine_write_tx_fifo(spi_engine
, msg
))
496 disable_int
|= SPI_ENGINE_INT_SDO_ALMOST_EMPTY
;
499 if (pending
& (SPI_ENGINE_INT_SDI_ALMOST_FULL
| SPI_ENGINE_INT_SYNC
)) {
500 if (!spi_engine_read_rx_fifo(spi_engine
, msg
))
501 disable_int
|= SPI_ENGINE_INT_SDI_ALMOST_FULL
;
504 if (pending
& SPI_ENGINE_INT_SYNC
&& msg
) {
505 if (completed_id
== AXI_SPI_ENGINE_CUR_MSG_SYNC_ID
) {
507 msg
->actual_length
= msg
->frame_length
;
508 complete(&spi_engine
->msg_complete
);
509 disable_int
|= SPI_ENGINE_INT_SYNC
;
514 spi_engine
->int_enable
&= ~disable_int
;
515 writel_relaxed(spi_engine
->int_enable
,
516 spi_engine
->base
+ SPI_ENGINE_REG_INT_ENABLE
);
519 spin_unlock(&spi_engine
->lock
);
524 static int spi_engine_optimize_message(struct spi_message
*msg
)
526 struct spi_engine_program p_dry
, *p
;
528 spi_engine_precompile_message(msg
);
531 spi_engine_compile_message(msg
, true, &p_dry
);
533 p
= kzalloc(struct_size(p
, instructions
, p_dry
.length
+ 1), GFP_KERNEL
);
537 spi_engine_compile_message(msg
, false, p
);
539 spi_engine_program_add_cmd(p
, false, SPI_ENGINE_CMD_SYNC(
540 AXI_SPI_ENGINE_CUR_MSG_SYNC_ID
));
547 static int spi_engine_unoptimize_message(struct spi_message
*msg
)
549 kfree(msg
->opt_state
);
554 static int spi_engine_setup(struct spi_device
*device
)
556 struct spi_controller
*host
= device
->controller
;
557 struct spi_engine
*spi_engine
= spi_controller_get_devdata(host
);
559 if (device
->mode
& SPI_CS_HIGH
)
560 spi_engine
->cs_inv
|= BIT(spi_get_chipselect(device
, 0));
562 spi_engine
->cs_inv
&= ~BIT(spi_get_chipselect(device
, 0));
564 writel_relaxed(SPI_ENGINE_CMD_CS_INV(spi_engine
->cs_inv
),
565 spi_engine
->base
+ SPI_ENGINE_REG_CMD_FIFO
);
568 * In addition to setting the flags, we have to do a CS assert command
569 * to make the new setting actually take effect.
571 writel_relaxed(SPI_ENGINE_CMD_ASSERT(0, 0xff),
572 spi_engine
->base
+ SPI_ENGINE_REG_CMD_FIFO
);
577 static int spi_engine_transfer_one_message(struct spi_controller
*host
,
578 struct spi_message
*msg
)
580 struct spi_engine
*spi_engine
= spi_controller_get_devdata(host
);
581 struct spi_engine_message_state
*st
= &spi_engine
->msg_state
;
582 struct spi_engine_program
*p
= msg
->opt_state
;
583 unsigned int int_enable
= 0;
586 /* reinitialize message state for this transfer */
587 memset(st
, 0, sizeof(*st
));
588 st
->cmd_buf
= p
->instructions
;
589 st
->cmd_length
= p
->length
;
592 reinit_completion(&spi_engine
->msg_complete
);
594 if (trace_spi_transfer_start_enabled()) {
595 struct spi_transfer
*xfer
;
597 list_for_each_entry(xfer
, &msg
->transfers
, transfer_list
)
598 trace_spi_transfer_start(msg
, xfer
);
601 spin_lock_irqsave(&spi_engine
->lock
, flags
);
603 if (spi_engine_write_cmd_fifo(spi_engine
, msg
))
604 int_enable
|= SPI_ENGINE_INT_CMD_ALMOST_EMPTY
;
606 spi_engine_tx_next(msg
);
607 if (spi_engine_write_tx_fifo(spi_engine
, msg
))
608 int_enable
|= SPI_ENGINE_INT_SDO_ALMOST_EMPTY
;
610 spi_engine_rx_next(msg
);
611 if (st
->rx_length
!= 0)
612 int_enable
|= SPI_ENGINE_INT_SDI_ALMOST_FULL
;
614 int_enable
|= SPI_ENGINE_INT_SYNC
;
616 writel_relaxed(int_enable
,
617 spi_engine
->base
+ SPI_ENGINE_REG_INT_ENABLE
);
618 spi_engine
->int_enable
= int_enable
;
619 spin_unlock_irqrestore(&spi_engine
->lock
, flags
);
621 if (!wait_for_completion_timeout(&spi_engine
->msg_complete
,
622 msecs_to_jiffies(5000))) {
624 "Timeout occurred while waiting for transfer to complete. Hardware is probably broken.\n");
625 msg
->status
= -ETIMEDOUT
;
628 if (trace_spi_transfer_stop_enabled()) {
629 struct spi_transfer
*xfer
;
631 list_for_each_entry(xfer
, &msg
->transfers
, transfer_list
)
632 trace_spi_transfer_stop(msg
, xfer
);
635 spi_finalize_current_message(host
);
640 static void spi_engine_release_hw(void *p
)
642 struct spi_engine
*spi_engine
= p
;
644 writel_relaxed(0xff, spi_engine
->base
+ SPI_ENGINE_REG_INT_PENDING
);
645 writel_relaxed(0x00, spi_engine
->base
+ SPI_ENGINE_REG_INT_ENABLE
);
646 writel_relaxed(0x01, spi_engine
->base
+ SPI_ENGINE_REG_RESET
);
649 static int spi_engine_probe(struct platform_device
*pdev
)
651 struct spi_engine
*spi_engine
;
652 struct spi_controller
*host
;
653 unsigned int version
;
657 irq
= platform_get_irq(pdev
, 0);
661 host
= devm_spi_alloc_host(&pdev
->dev
, sizeof(*spi_engine
));
665 spi_engine
= spi_controller_get_devdata(host
);
667 spin_lock_init(&spi_engine
->lock
);
668 init_completion(&spi_engine
->msg_complete
);
670 spi_engine
->clk
= devm_clk_get_enabled(&pdev
->dev
, "s_axi_aclk");
671 if (IS_ERR(spi_engine
->clk
))
672 return PTR_ERR(spi_engine
->clk
);
674 spi_engine
->ref_clk
= devm_clk_get_enabled(&pdev
->dev
, "spi_clk");
675 if (IS_ERR(spi_engine
->ref_clk
))
676 return PTR_ERR(spi_engine
->ref_clk
);
678 spi_engine
->base
= devm_platform_ioremap_resource(pdev
, 0);
679 if (IS_ERR(spi_engine
->base
))
680 return PTR_ERR(spi_engine
->base
);
682 version
= readl(spi_engine
->base
+ ADI_AXI_REG_VERSION
);
683 if (ADI_AXI_PCORE_VER_MAJOR(version
) != 1) {
684 dev_err(&pdev
->dev
, "Unsupported peripheral version %u.%u.%u\n",
685 ADI_AXI_PCORE_VER_MAJOR(version
),
686 ADI_AXI_PCORE_VER_MINOR(version
),
687 ADI_AXI_PCORE_VER_PATCH(version
));
691 writel_relaxed(0x00, spi_engine
->base
+ SPI_ENGINE_REG_RESET
);
692 writel_relaxed(0xff, spi_engine
->base
+ SPI_ENGINE_REG_INT_PENDING
);
693 writel_relaxed(0x00, spi_engine
->base
+ SPI_ENGINE_REG_INT_ENABLE
);
695 ret
= devm_add_action_or_reset(&pdev
->dev
, spi_engine_release_hw
,
700 ret
= devm_request_irq(&pdev
->dev
, irq
, spi_engine_irq
, 0, pdev
->name
,
705 host
->dev
.of_node
= pdev
->dev
.of_node
;
706 host
->mode_bits
= SPI_CPOL
| SPI_CPHA
| SPI_3WIRE
;
707 host
->bits_per_word_mask
= SPI_BPW_RANGE_MASK(1, 32);
708 host
->max_speed_hz
= clk_get_rate(spi_engine
->ref_clk
) / 2;
709 host
->transfer_one_message
= spi_engine_transfer_one_message
;
710 host
->optimize_message
= spi_engine_optimize_message
;
711 host
->unoptimize_message
= spi_engine_unoptimize_message
;
712 host
->num_chipselect
= 8;
714 /* Some features depend of the IP core version. */
715 if (ADI_AXI_PCORE_VER_MAJOR(version
) >= 1) {
716 if (ADI_AXI_PCORE_VER_MINOR(version
) >= 2) {
717 host
->mode_bits
|= SPI_CS_HIGH
;
718 host
->setup
= spi_engine_setup
;
720 if (ADI_AXI_PCORE_VER_MINOR(version
) >= 3)
721 host
->mode_bits
|= SPI_MOSI_IDLE_LOW
| SPI_MOSI_IDLE_HIGH
;
724 if (host
->max_speed_hz
== 0)
725 return dev_err_probe(&pdev
->dev
, -EINVAL
, "spi_clk rate is 0");
727 return devm_spi_register_controller(&pdev
->dev
, host
);
730 static const struct of_device_id spi_engine_match_table
[] = {
731 { .compatible
= "adi,axi-spi-engine-1.00.a" },
734 MODULE_DEVICE_TABLE(of
, spi_engine_match_table
);
736 static struct platform_driver spi_engine_driver
= {
737 .probe
= spi_engine_probe
,
739 .name
= "spi-engine",
740 .of_match_table
= spi_engine_match_table
,
743 module_platform_driver(spi_engine_driver
);
745 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
746 MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
747 MODULE_LICENSE("GPL");