// SPDX-License-Identifier: GPL-2.0
// spi-uniphier.c - Socionext UniPhier SPI controller driver
// Copyright 2012      Panasonic Corporation
// Copyright 2016-2018 Socionext Inc.

#include <linux/kernel.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

#include <asm/unaligned.h>

#define SSI_TIMEOUT_MS		2000
#define SSI_POLL_TIMEOUT_US	200
#define SSI_MAX_CLK_DIVIDER	254
#define SSI_MIN_CLK_DIVIDER	4

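/*
 * The SSI bit clock is the parent clock divided by the CKRAT field;
 * only even dividers from SSI_MIN_CLK_DIVIDER to SSI_MAX_CLK_DIVIDER
 * are usable, which is why uniphier_spi_set_baudrate() rounds the
 * computed divider up to the next even value.
 */
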
struct uniphier_spi_priv {
	void __iomem *base;
	dma_addr_t base_dma_addr;
	struct spi_master *master;
	struct completion xfer_done;

	struct clk *clk;

	int error;
	unsigned int tx_bytes;
	unsigned int rx_bytes;
	const u8 *tx_buf;
	u8 *rx_buf;
	atomic_t dma_busy;

	bool is_save_param;
	u8 bits_per_word;
	u16 mode;
	u32 speed_hz;
};

#define SSI_CTL			0x00
#define   SSI_CTL_EN		BIT(0)

#define SSI_CKS			0x04
#define   SSI_CKS_CKRAT_MASK	GENMASK(7, 0)
#define   SSI_CKS_CKPHS		BIT(14)
#define   SSI_CKS_CKINIT	BIT(13)
#define   SSI_CKS_CKDLY		BIT(12)

#define SSI_TXWDS		0x08
#define   SSI_TXWDS_WDLEN_MASK	GENMASK(13, 8)
#define   SSI_TXWDS_TDTF_MASK	GENMASK(7, 6)
#define   SSI_TXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_RXWDS		0x0c
#define   SSI_RXWDS_DTLEN_MASK	GENMASK(5, 0)

#define SSI_FPS			0x10
#define   SSI_FPS_FSPOL		BIT(15)
#define   SSI_FPS_FSTRT		BIT(14)

#define SSI_SR			0x14
#define   SSI_SR_BUSY		BIT(7)
#define   SSI_SR_RNE		BIT(0)

#define SSI_IE			0x18
#define   SSI_IE_TCIE		BIT(4)
#define   SSI_IE_RCIE		BIT(3)
#define   SSI_IE_TXRE		BIT(2)
#define   SSI_IE_RXRE		BIT(1)
#define   SSI_IE_RORIE		BIT(0)
#define   SSI_IE_ALL_MASK	GENMASK(4, 0)

#define SSI_IS			0x1c
#define   SSI_IS_RXRS		BIT(9)
#define   SSI_IS_RCID		BIT(3)
#define   SSI_IS_RORID		BIT(0)

#define SSI_IC			0x1c
#define   SSI_IC_TCIC		BIT(4)
#define   SSI_IC_RCIC		BIT(3)
#define   SSI_IC_RORIC		BIT(0)

#define SSI_FC			0x20
#define   SSI_FC_TXFFL		BIT(12)
#define   SSI_FC_TXFTH_MASK	GENMASK(11, 8)
#define   SSI_FC_RXFFL		BIT(4)
#define   SSI_FC_RXFTH_MASK	GENMASK(3, 0)

#define SSI_TXDR		0x24
#define SSI_RXDR		0x24

#define SSI_FIFO_DEPTH		8U
#define SSI_FIFO_BURST_NUM	1

#define SSI_DMA_RX_BUSY		BIT(1)
#define SSI_DMA_TX_BUSY		BIT(0)

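/*
 * Each direction has an 8-word FIFO (SSI_FIFO_DEPTH); SSI_FIFO_BURST_NUM
 * is the per-request burst size used when DMA drives the FIFOs, and the
 * SSI_DMA_*_BUSY bits in priv->dma_busy track in-flight DMA directions.
 */
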
static inline unsigned int bytes_per_word(unsigned int bits)
{
	return bits <= 8 ? 1 : (bits <= 16 ? 2 : 4);
}

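/*
 * SSI_IE is read-modify-write: these helpers touch only the requested
 * interrupt-enable bits and leave the rest of the register intact.
 */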
static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv,
					   u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val |= mask;
	writel(val, priv->base + SSI_IE);
}

static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv,
					    u32 mask)
{
	u32 val;

	val = readl(priv->base + SSI_IE);
	val &= ~mask;
	writel(val, priv->base + SSI_IE);
}

static void uniphier_spi_set_mode(struct spi_device *spi)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val1, val2;

	/*
	 * clock setting
	 * CKPHS    capture timing. 0:rising edge, 1:falling edge
	 * CKINIT   clock initial level. 0:low, 1:high
	 * CKDLY    clock delay. 0:no delay, 1:delay depending on FSTRT
	 *          (FSTRT=0: 1 clock, FSTRT=1: 0.5 clock)
	 *
	 * frame setting
	 * FSPOL    frame signal polarity. 0: low, 1: high
	 * FSTRT    start frame timing
	 *          0: rising edge of clock, 1: falling edge of clock
	 */
	switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
	case SPI_MODE_0:
		/* CKPHS=1, CKINIT=0, CKDLY=1, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKDLY;
		val2 = 0;
		break;
	case SPI_MODE_1:
		/* CKPHS=0, CKINIT=0, CKDLY=0, FSTRT=1 */
		val1 = 0;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_2:
		/* CKPHS=0, CKINIT=1, CKDLY=1, FSTRT=1 */
		val1 = SSI_CKS_CKINIT | SSI_CKS_CKDLY;
		val2 = SSI_FPS_FSTRT;
		break;
	case SPI_MODE_3:
		/* CKPHS=1, CKINIT=1, CKDLY=0, FSTRT=0 */
		val1 = SSI_CKS_CKPHS | SSI_CKS_CKINIT;
		val2 = 0;
		break;
	}

	if (!(spi->mode & SPI_CS_HIGH))
		val2 |= SSI_FPS_FSPOL;

	writel(val1, priv->base + SSI_CKS);
	writel(val2, priv->base + SSI_FPS);

	val1 = 0;
	if (spi->mode & SPI_LSB_FIRST)
		val1 |= FIELD_PREP(SSI_TXWDS_TDTF_MASK, 1);
	writel(val1, priv->base + SSI_TXWDS);
	writel(val1, priv->base + SSI_RXWDS);
}

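/*
 * TXWDS holds the FIFO word length (WDLEN) and data length (DTLEN);
 * RXWDS holds the RX data length. All are programmed to the transfer's
 * bits_per_word so one FIFO word carries exactly one SPI word.
 */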
static void uniphier_spi_set_transfer_size(struct spi_device *spi, int size)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_TXWDS);
	val &= ~(SSI_TXWDS_WDLEN_MASK | SSI_TXWDS_DTLEN_MASK);
	val |= FIELD_PREP(SSI_TXWDS_WDLEN_MASK, size);
	val |= FIELD_PREP(SSI_TXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_TXWDS);

	val = readl(priv->base + SSI_RXWDS);
	val &= ~SSI_RXWDS_DTLEN_MASK;
	val |= FIELD_PREP(SSI_RXWDS_DTLEN_MASK, size);
	writel(val, priv->base + SSI_RXWDS);
}

static void uniphier_spi_set_baudrate(struct spi_device *spi,
				      unsigned int speed)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val, ckdiv;

	/*
	 * the supported rates are even numbers from 4 to 254 (4,6,8...254);
	 * round up as we look for an equal or lower speed
	 */
	ckdiv = DIV_ROUND_UP(clk_get_rate(priv->clk), speed);
	ckdiv = round_up(ckdiv, 2);

	val = readl(priv->base + SSI_CKS);
	val &= ~SSI_CKS_CKRAT_MASK;
	val |= ckdiv & SSI_CKS_CKRAT_MASK;
	writel(val, priv->base + SSI_CKS);
}

static void uniphier_spi_setup_transfer(struct spi_device *spi,
					struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	priv->error = 0;
	priv->tx_buf = t->tx_buf;
	priv->rx_buf = t->rx_buf;
	priv->tx_bytes = priv->rx_bytes = t->len;

	if (!priv->is_save_param || priv->mode != spi->mode) {
		uniphier_spi_set_mode(spi);
		priv->mode = spi->mode;
		priv->is_save_param = false;
	}

	if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) {
		uniphier_spi_set_transfer_size(spi, t->bits_per_word);
		priv->bits_per_word = t->bits_per_word;
	}

	if (!priv->is_save_param || priv->speed_hz != t->speed_hz) {
		uniphier_spi_set_baudrate(spi, t->speed_hz);
		priv->speed_hz = t->speed_hz;
	}

	priv->is_save_param = true;

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);
}

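/*
 * FIFO data words are packed little-endian whatever the buffer alignment,
 * hence the get_unaligned_le*()/put_unaligned_le*() accessors below.
 */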
static void uniphier_spi_send(struct uniphier_spi_priv *priv)
{
	int wsize;
	u32 val = 0;

	wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes);
	priv->tx_bytes -= wsize;

	if (priv->tx_buf) {
		switch (wsize) {
		case 1:
			val = *priv->tx_buf;
			break;
		case 2:
			val = get_unaligned_le16(priv->tx_buf);
			break;
		case 4:
			val = get_unaligned_le32(priv->tx_buf);
			break;
		}

		priv->tx_buf += wsize;
	}

	writel(val, priv->base + SSI_TXDR);
}

static void uniphier_spi_recv(struct uniphier_spi_priv *priv)
{
	int rsize;
	u32 val;

	rsize = min(bytes_per_word(priv->bits_per_word), priv->rx_bytes);
	priv->rx_bytes -= rsize;

	val = readl(priv->base + SSI_RXDR);

	if (priv->rx_buf) {
		switch (rsize) {
		case 1:
			*priv->rx_buf = val;
			break;
		case 2:
			put_unaligned_le16(val, priv->rx_buf);
			break;
		case 4:
			put_unaligned_le32(val, priv->rx_buf);
			break;
		}

		priv->rx_buf += rsize;
	}
}

static void uniphier_spi_set_fifo_threshold(struct uniphier_spi_priv *priv,
					    unsigned int threshold)
{
	u32 val;

	val = readl(priv->base + SSI_FC);
	val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
	val |= FIELD_PREP(SSI_FC_TXFTH_MASK, SSI_FIFO_DEPTH - threshold);
	val |= FIELD_PREP(SSI_FC_RXFTH_MASK, threshold);
	writel(val, priv->base + SSI_FC);
}

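/*
 * Prime the TX FIFO with as many words as the RX side still expects,
 * capped at the FIFO depth and reduced by words already in flight
 * (rx_bytes - tx_bytes), so the RX FIFO can never overrun.
 */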
static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
	unsigned int fifo_threshold, fill_words;
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	fifo_threshold = DIV_ROUND_UP(priv->rx_bytes, bpw);
	fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);

	uniphier_spi_set_fifo_threshold(priv, fifo_threshold);

	fill_words = fifo_threshold -
		DIV_ROUND_UP(priv->rx_bytes - priv->tx_bytes, bpw);

	while (fill_words--)
		uniphier_spi_send(priv);
}

static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master);
	u32 val;

	val = readl(priv->base + SSI_FPS);

	if (enable)
		val |= SSI_FPS_FSPOL;
	else
		val &= ~SSI_FPS_FSPOL;

	writel(val, priv->base + SSI_FPS);
}

static bool uniphier_spi_can_dma(struct spi_master *master,
				 struct spi_device *spi,
				 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned int bpw = bytes_per_word(priv->bits_per_word);

	if ((!master->dma_tx && !master->dma_rx)
	    || (!master->dma_tx && t->tx_buf)
	    || (!master->dma_rx && t->rx_buf))
		return false;

	return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH;
}

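/*
 * Each DMA completion callback clears its direction bit in dma_busy and
 * masks the matching FIFO-request interrupt; whichever direction
 * finishes last calls spi_finalize_current_transfer().
 */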
static void uniphier_spi_dma_rxcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_RXRE);

	if (!(state & SSI_DMA_TX_BUSY))
		spi_finalize_current_transfer(master);
}

static void uniphier_spi_dma_txcb(void *data)
{
	struct spi_master *master = data;
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);

	uniphier_spi_irq_disable(priv, SSI_IE_TXRE);

	if (!(state & SSI_DMA_RX_BUSY))
		spi_finalize_current_transfer(master);
}

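/*
 * DMA path: the slave bus width mirrors bits_per_word so every burst
 * moves whole FIFO words, and the FIFO threshold is dropped to
 * SSI_FIFO_BURST_NUM so the request lines fire once per word.
 */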
static int uniphier_spi_transfer_one_dma(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
	int buswidth;

	atomic_set(&priv->dma_busy, 0);

	uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM);

	if (priv->bits_per_word <= 8)
		buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (priv->bits_per_word <= 16)
		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
	else
		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;

	if (priv->rx_buf) {
		struct dma_slave_config rxconf = {
			.direction = DMA_DEV_TO_MEM,
			.src_addr = priv->base_dma_addr + SSI_RXDR,
			.src_addr_width = buswidth,
			.src_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_rx, &rxconf);

		rxdesc = dmaengine_prep_slave_sg(
			master->dma_rx,
			t->rx_sg.sgl, t->rx_sg.nents,
			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!rxdesc)
			goto out_err_prep;

		rxdesc->callback = uniphier_spi_dma_rxcb;
		rxdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_RXRE);
		atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy);

		dmaengine_submit(rxdesc);
		dma_async_issue_pending(master->dma_rx);
	}

	if (priv->tx_buf) {
		struct dma_slave_config txconf = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = priv->base_dma_addr + SSI_TXDR,
			.dst_addr_width = buswidth,
			.dst_maxburst = SSI_FIFO_BURST_NUM,
		};

		dmaengine_slave_config(master->dma_tx, &txconf);

		txdesc = dmaengine_prep_slave_sg(
			master->dma_tx,
			t->tx_sg.sgl, t->tx_sg.nents,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!txdesc)
			goto out_err_prep;

		txdesc->callback = uniphier_spi_dma_txcb;
		txdesc->callback_param = master;

		uniphier_spi_irq_enable(priv, SSI_IE_TXRE);
		atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy);

		dmaengine_submit(txdesc);
		dma_async_issue_pending(master->dma_tx);
	}

	/* signal that we need to wait for completion */
	return (priv->tx_buf || priv->rx_buf);

out_err_prep:
	if (rxdesc)
		dmaengine_terminate_sync(master->dma_rx);

	return -EINVAL;
}

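/*
 * IRQ path: prime the TX FIFO, then sleep until the RX-complete or
 * RX-overrun interrupt signals xfer_done, bounded by SSI_TIMEOUT_MS.
 */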
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
					 struct spi_device *spi,
					 struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	struct device *dev = master->dev.parent;
	unsigned long time_left;

	reinit_completion(&priv->xfer_done);

	uniphier_spi_fill_tx_fifo(priv);

	uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	time_left = wait_for_completion_timeout(&priv->xfer_done,
					msecs_to_jiffies(SSI_TIMEOUT_MS));

	uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE);

	if (!time_left) {
		dev_err(dev, "transfer timeout.\n");
		return -ETIMEDOUT;
	}

	return priv->error;
}

static int uniphier_spi_transfer_one_poll(struct spi_master *master,
					  struct spi_device *spi,
					  struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	int loop = SSI_POLL_TIMEOUT_US * 10;

	while (priv->tx_bytes) {
		uniphier_spi_fill_tx_fifo(priv);

		while ((priv->rx_bytes - priv->tx_bytes) > 0) {
			while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
								&& loop--)
				ndelay(100);

			if (loop == -1)
				goto irq_transfer;

			uniphier_spi_recv(priv);
		}
	}

	return 0;

irq_transfer:
	return uniphier_spi_transfer_one_irq(master, spi, t);
}

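/*
 * Pick the cheapest strategy: DMA for anything deeper than the FIFO,
 * busy-polling for transfers that can finish within SSI_POLL_TIMEOUT_US,
 * and the IRQ path otherwise. The threshold is the byte count that fits
 * in SSI_POLL_TIMEOUT_US at speed_hz; e.g. at 10 MHz,
 * DIV_ROUND_UP(200 * 10000000, 1000000 * 8) = 250 bytes.
 */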
static int uniphier_spi_transfer_one(struct spi_master *master,
				     struct spi_device *spi,
				     struct spi_transfer *t)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	unsigned long threshold;
	bool use_dma;

	/* Terminate and return success for 0 byte length transfer */
	if (!t->len)
		return 0;

	uniphier_spi_setup_transfer(spi, t);

	use_dma = master->can_dma ? master->can_dma(master, spi, t) : false;
	if (use_dma)
		return uniphier_spi_transfer_one_dma(master, spi, t);

	/*
	 * If the transfer operation will take longer than
	 * SSI_POLL_TIMEOUT_US, it should use irq.
	 */
	threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
					USEC_PER_SEC * BITS_PER_BYTE);
	if (t->len > threshold)
		return uniphier_spi_transfer_one_irq(master, spi, t);

	return uniphier_spi_transfer_one_poll(master, spi, t);
}

static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(SSI_CTL_EN, priv->base + SSI_CTL);

	return 0;
}

static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

	writel(0, priv->base + SSI_CTL);

	return 0;
}

static void uniphier_spi_handle_err(struct spi_master *master,
				    struct spi_message *msg)
{
	struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
	u32 val;

	/* stop running spi transfer */
	writel(0, priv->base + SSI_CTL);

	/* reset FIFOs */
	val = SSI_FC_TXFFL | SSI_FC_RXFFL;
	writel(val, priv->base + SSI_FC);

	uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK);

	if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) {
		dmaengine_terminate_async(master->dma_tx);
		atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy);
	}

	if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) {
		dmaengine_terminate_async(master->dma_rx);
		atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy);
	}
}

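/*
 * Interrupt handler: acknowledge all sources up front, abort on RX
 * overrun, drain the RX FIFO on RX-complete, then either refill the TX
 * FIFO for the next chunk or signal xfer_done once all bytes are in.
 */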
static irqreturn_t uniphier_spi_handler(int irq, void *dev_id)
{
	struct uniphier_spi_priv *priv = dev_id;
	u32 val, stat;

	stat = readl(priv->base + SSI_IS);
	val = SSI_IC_TCIC | SSI_IC_RCIC | SSI_IC_RORIC;
	writel(val, priv->base + SSI_IC);

	/* rx fifo overrun */
	if (stat & SSI_IS_RORID) {
		priv->error = -EIO;
		goto done;
	}

	/* rx complete */
	if ((stat & SSI_IS_RCID) && (stat & SSI_IS_RXRS)) {
		while ((readl(priv->base + SSI_SR) & SSI_SR_RNE) &&
				(priv->rx_bytes - priv->tx_bytes) > 0)
			uniphier_spi_recv(priv);

		if ((readl(priv->base + SSI_SR) & SSI_SR_RNE) ||
				(priv->rx_bytes != priv->tx_bytes)) {
			priv->error = -EIO;
			goto done;
		} else if (priv->rx_bytes == 0)
			goto done;

		/* next tx transfer */
		uniphier_spi_fill_tx_fifo(priv);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;

done:
	complete(&priv->xfer_done);
	return IRQ_HANDLED;
}

static int uniphier_spi_probe(struct platform_device *pdev)
{
	struct uniphier_spi_priv *priv;
	struct spi_master *master;
	struct resource *res;
	struct dma_slave_caps caps;
	u32 dma_tx_burst = 0, dma_rx_burst = 0;
	unsigned long clk_rate;
	int irq;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!master)
		return -ENOMEM;

	platform_set_drvdata(pdev, master);

	priv = spi_master_get_devdata(master);
	priv->master = master;
	priv->is_save_param = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base)) {
		ret = PTR_ERR(priv->base);
		goto out_master_put;
	}
	priv->base_dma_addr = res->start;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(priv->clk);
		goto out_master_put;
	}

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		goto out_master_put;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto out_disable_clk;
	}

	ret = devm_request_irq(&pdev->dev, irq, uniphier_spi_handler,
			       0, "uniphier-spi", priv);
	if (ret) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto out_disable_clk;
	}

	init_completion(&priv->xfer_done);

	clk_rate = clk_get_rate(priv->clk);

	master->max_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MIN_CLK_DIVIDER);
	master->min_speed_hz = DIV_ROUND_UP(clk_rate, SSI_MAX_CLK_DIVIDER);
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
	master->dev.of_node = pdev->dev.of_node;
	master->bus_num = pdev->id;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);

	master->set_cs = uniphier_spi_set_cs;
	master->transfer_one = uniphier_spi_transfer_one;
	master->prepare_transfer_hardware
				= uniphier_spi_prepare_transfer_hardware;
	master->unprepare_transfer_hardware
				= uniphier_spi_unprepare_transfer_hardware;
	master->handle_err = uniphier_spi_handle_err;
	master->can_dma = uniphier_spi_can_dma;

	master->num_chipselect = 1;
	master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

	master->dma_tx = dma_request_chan(&pdev->dev, "tx");
	if (IS_ERR_OR_NULL(master->dma_tx)) {
		if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_tx = NULL;
		dma_tx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_tx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
				ret);
			goto out_disable_clk;
		}
		dma_tx_burst = caps.max_burst;
	}

	master->dma_rx = dma_request_chan(&pdev->dev, "rx");
	if (IS_ERR_OR_NULL(master->dma_rx)) {
		if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto out_disable_clk;
		}
		master->dma_rx = NULL;
		dma_rx_burst = INT_MAX;
	} else {
		ret = dma_get_slave_caps(master->dma_rx, &caps);
		if (ret) {
			dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
				ret);
			goto out_disable_clk;
		}
		dma_rx_burst = caps.max_burst;
	}

	master->max_dma_len = min(dma_tx_burst, dma_rx_burst);

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret)
		goto out_disable_clk;

	return 0;

out_disable_clk:
	clk_disable_unprepare(priv->clk);

out_master_put:
	spi_master_put(master);
	return ret;
}

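/*
 * Teardown mirrors probe: release the DMA channels first, then gate the
 * clock; devm unwinds the IRQ, the MMIO mapping, and the registration.
 */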
static int uniphier_spi_remove(struct platform_device *pdev)
{
	struct uniphier_spi_priv *priv = platform_get_drvdata(pdev);

	if (priv->master->dma_tx)
		dma_release_channel(priv->master->dma_tx);
	if (priv->master->dma_rx)
		dma_release_channel(priv->master->dma_rx);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id uniphier_spi_match[] = {
	{ .compatible = "socionext,uniphier-scssi" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_spi_match);

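/*
 * Illustrative device tree node (a sketch only: the unit address,
 * interrupt specifier, and clock/DMA phandles below are made-up
 * placeholders; the binding document is authoritative). The "tx"/"rx"
 * dma-names match the dma_request_chan() calls in probe:
 *
 *	spi@54006000 {
 *		compatible = "socionext,uniphier-scssi";
 *		reg = <0x54006000 0x100>;
 *		interrupts = <0 39 4>;
 *		clocks = <&peri_clk 11>;
 *		dmas = <&dmac 4>, <&dmac 5>;
 *		dma-names = "tx", "rx";
 *	};
 */
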
static struct platform_driver uniphier_spi_driver = {
	.probe = uniphier_spi_probe,
	.remove = uniphier_spi_remove,
	.driver = {
		.name = "uniphier-spi",
		.of_match_table = uniphier_spi_match,
	},
};
module_platform_driver(uniphier_spi_driver);

MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
MODULE_DESCRIPTION("Socionext UniPhier SPI controller driver");
MODULE_LICENSE("GPL v2");