// SPDX-License-Identifier: GPL-2.0-only
/*
 * PIC32 Quad SPI controller driver.
 *
 * Purna Chandra Mandal <purna.mandal@microchip.com>
 * Copyright (c) 2016, Microchip Technology Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

/* SQI registers */
#define PESQI_XIP_CONF1_REG	0x00
#define PESQI_XIP_CONF2_REG	0x04
#define PESQI_CONF_REG		0x08
#define PESQI_CTRL_REG		0x0C
#define PESQI_CLK_CTRL_REG	0x10
#define PESQI_CMD_THRES_REG	0x14
#define PESQI_INT_THRES_REG	0x18
#define PESQI_INT_ENABLE_REG	0x1C
#define PESQI_INT_STAT_REG	0x20
#define PESQI_TX_DATA_REG	0x24
#define PESQI_RX_DATA_REG	0x28
#define PESQI_STAT1_REG		0x2C
#define PESQI_STAT2_REG		0x30
#define PESQI_BD_CTRL_REG	0x34
#define PESQI_BD_CUR_ADDR_REG	0x38
#define PESQI_BD_BASE_ADDR_REG	0x40
#define PESQI_BD_STAT_REG	0x44
#define PESQI_BD_POLL_CTRL_REG	0x48
#define PESQI_BD_TX_DMA_STAT_REG	0x4C
#define PESQI_BD_RX_DMA_STAT_REG	0x50
#define PESQI_THRES_REG		0x54
#define PESQI_INT_SIGEN_REG	0x58

/* PESQI_CONF_REG fields */
#define PESQI_MODE		0x7
#define PESQI_MODE_BOOT		0
#define PESQI_MODE_PIO		1
#define PESQI_MODE_DMA		2
#define PESQI_MODE_XIP		3
#define PESQI_MODE_SHIFT	0
#define PESQI_CPHA		BIT(3)
#define PESQI_CPOL		BIT(4)
#define PESQI_LSBF		BIT(5)
#define PESQI_RXLATCH		BIT(7)
#define PESQI_SERMODE		BIT(8)
#define PESQI_WP_EN		BIT(9)
#define PESQI_HOLD_EN		BIT(10)
#define PESQI_BURST_EN		BIT(12)
#define PESQI_CS_CTRL_HW	BIT(15)
#define PESQI_SOFT_RESET	BIT(16)
#define PESQI_LANES_SHIFT	20
#define PESQI_SINGLE_LANE	0
#define PESQI_DUAL_LANE		1
#define PESQI_QUAD_LANE		2
#define PESQI_CSEN_SHIFT	24
#define PESQI_EN		BIT(23)

/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN		BIT(0)
#define PESQI_CLK_STABLE	BIT(1)
#define PESQI_CLKDIV_SHIFT	8
#define PESQI_CLKDIV		0xff

/* PESQI_INT_THR/CMD_THR_REG */
#define PESQI_TXTHR_MASK	0x1f
#define PESQI_TXTHR_SHIFT	8
#define PESQI_RXTHR_MASK	0x1f
#define PESQI_RXTHR_SHIFT	0

/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
#define PESQI_TXEMPTY		BIT(0)
#define PESQI_TXFULL		BIT(1)
#define PESQI_TXTHR		BIT(2)
#define PESQI_RXEMPTY		BIT(3)
#define PESQI_RXFULL		BIT(4)
#define PESQI_RXTHR		BIT(5)
#define PESQI_BDDONE		BIT(9)  /* BD processing complete */
#define PESQI_PKTCOMP		BIT(10) /* packet processing complete */
#define PESQI_DMAERR		BIT(11) /* error */

/* PESQI_BD_CTRL_REG */
#define PESQI_DMA_EN		BIT(0) /* enable DMA engine */
#define PESQI_POLL_EN		BIT(1) /* enable polling */
#define PESQI_BDP_START		BIT(2) /* start BD processor */

/* PESQI controller buffer descriptor */
struct buf_desc {
	u32 bd_ctrl;	/* control */
	u32 bd_status;	/* reserved */
	u32 bd_addr;	/* DMA buffer addr */
	u32 bd_nextp;	/* next item in chain */
};

/* bd_ctrl fields */
#define BD_BUFLEN		0x1ff
#define BD_CBD_INT_EN		BIT(16)	/* Current BD is processed */
#define BD_PKT_INT_EN		BIT(17) /* All BDs of PKT processed */
#define BD_LIFM			BIT(18) /* last data of pkt */
#define BD_LAST			BIT(19) /* end of list */
#define BD_DATA_RECV		BIT(20) /* receive data */
#define BD_DDR			BIT(21) /* DDR mode */
#define BD_DUAL			BIT(22)	/* Dual SPI */
#define BD_QUAD			BIT(23) /* Quad SPI */
#define BD_LSBF			BIT(25)	/* LSB First */
#define BD_STAT_CHECK		BIT(27) /* Status poll */
#define BD_DEVSEL_SHIFT		28	/* CS */
#define BD_CS_DEASSERT		BIT(30) /* de-assert CS after current BD */
#define BD_EN			BIT(31) /* BD owned by H/W */

/**
 * struct ring_desc - Representation of SQI ring descriptor
 * @list: list element to add to free or used list.
 * @bd: PESQI controller buffer descriptor
 * @bd_dma: DMA address of PESQI controller buffer descriptor
 * @xfer_len: transfer length
 */
struct ring_desc {
	struct list_head list;
	struct buf_desc *bd;
	dma_addr_t bd_dma;
	u32 xfer_len;
};

/* Global constants */
#define PESQI_BD_BUF_LEN_MAX	256
#define PESQI_BD_COUNT		256 /* max 64KB data per spi message */
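/*
 * Sizing check: PESQI_BD_COUNT (256) descriptors of at most
 * PESQI_BD_BUF_LEN_MAX (256) bytes each cover 256 * 256 = 65536 bytes,
 * hence the 64KB per-message limit noted above.
 */
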
struct pic32_sqi {
	void __iomem		*regs;
	struct clk		*sys_clk;
	struct clk		*base_clk; /* drives spi clock */
	struct spi_master	*master;
	int			irq;
	struct completion	xfer_done;
	struct ring_desc	*ring;
	void			*bd;
	dma_addr_t		bd_dma;
	struct list_head	bd_list_free; /* free */
	struct list_head	bd_list_used; /* allocated */
	struct spi_device	*cur_spi;
	u32			cur_speed;
	u8			cur_mode;
};

static inline void pic32_setbits(void __iomem *reg, u32 set)
{
	writel(readl(reg) | set, reg);
}

static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
	writel(readl(reg) & ~clr, reg);
}

static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
	u32 val, div;

	/* div = base_clk / (2 * spi_clk) */
	div = clk_get_rate(sqi->base_clk) / (2 * sck);
	div &= PESQI_CLKDIV;

	val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
	/* apply new divider */
	val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
	val |= div << PESQI_CLKDIV_SHIFT;
	writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

	/* wait for stability */
	return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
				  val & PESQI_CLK_STABLE, 1, 5000);
}
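
/*
 * Example (illustrative values, not taken from hardware): with base_clk
 * running at 200 MHz and a requested sck of 25 MHz, the formula above
 * gives div = 200000000 / (2 * 25000000) = 4, so CLKDIV is programmed
 * with 4 and the controller emits a 25 MHz serial clock.
 */
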
static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
	u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;

	writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
	/* INT_SIGEN works as interrupt-gate to INTR line */
	writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}

static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
	writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
	writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}

static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		enable = 0;
		goto irq_done;
	}

	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}

static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;

	if (list_empty(&sqi->bd_list_free))
		return NULL;

	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
	list_move_tail(&rdesc->list, &sqi->bd_list_used);

	return rdesc;
}

static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_move(&rdesc->list, &sqi->bd_list_free);
}

static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
				  struct spi_message *mesg,
				  struct spi_transfer *xfer)
{
	struct spi_device *spi = mesg->spi;
	struct scatterlist *sg, *sgl;
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int nents, i;
	u32 nbits;
	u32 bd_ctrl;

	/* Device selection */
	bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;

	/* half-duplex: select transfer buffer, direction and lane */
	if (xfer->rx_buf) {
		bd_ctrl |= BD_DATA_RECV;
		nbits = xfer->rx_nbits;
		sgl = xfer->rx_sg.sgl;
		nents = xfer->rx_sg.nents;
	} else {
		nbits = xfer->tx_nbits;
		sgl = xfer->tx_sg.sgl;
		nents = xfer->tx_sg.nents;
	}

	if (nbits & SPI_NBITS_QUAD)
		bd_ctrl |= BD_QUAD;
	else if (nbits & SPI_NBITS_DUAL)
		bd_ctrl |= BD_DUAL;

	/* LSB first */
	if (spi->mode & SPI_LSB_FIRST)
		bd_ctrl |= BD_LSBF;

	/* ownership to hardware */
	bd_ctrl |= BD_EN;

	for_each_sg(sgl, sg, nents, i) {
		/* get ring descriptor */
		rdesc = ring_desc_get(sqi);
		if (!rdesc)
			break;

		bd = rdesc->bd;

		/* BD CTRL: length */
		rdesc->xfer_len = sg_dma_len(sg);
		bd->bd_ctrl = bd_ctrl;
		bd->bd_ctrl |= rdesc->xfer_len;

		/* BD STAT */
		bd->bd_status = 0;

		/* BD BUFFER ADDRESS */
		bd->bd_addr = sg->dma_address;
	}

	return 0;
}
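
/*
 * Example bd_ctrl composition (illustrative, assumed values): a 256-byte
 * quad-lane receive on chip_select 1 leaves the loop above with
 * bd_ctrl = (1 << BD_DEVSEL_SHIFT) | BD_DATA_RECV | BD_QUAD | BD_EN | 256,
 * the low BD_BUFLEN bits carrying the per-descriptor byte count.
 */
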
static int pic32_sqi_prepare_hardware(struct spi_master *master)
{
	struct pic32_sqi *sqi = spi_master_get_devdata(master);

	/* enable spi interface */
	pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
	/* enable spi clk */
	pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);

	return 0;
}

static bool pic32_sqi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *x)
{
	/* Do DMA irrespective of transfer size */
	return true;
}

static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode change
	 * can be handled at best during spi chip-select switch.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* set base address BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* Update total byte transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descr */
		ring_desc_put(sqi, rdesc);
	}

	spi_finalize_current_message(spi->master);

	return ret;
}

static int pic32_sqi_unprepare_hardware(struct spi_master *master)
{
	struct pic32_sqi *sqi = spi_master_get_devdata(master);

	/* disable clk */
	pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
	/* disable spi */
	pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);

	return 0;
}

static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* allocate coherent DMAable memory for hardware buffer descriptors. */
	sqi->bd = dma_alloc_coherent(&sqi->master->dev,
				     sizeof(*bd) * PESQI_BD_COUNT,
				     &sqi->bd_dma, GFP_KERNEL);
	if (!sqi->bd) {
		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* allocate software ring descriptors */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		dma_free_coherent(&sqi->master->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* initialize ring-desc */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

	/* Prepare BD: chain to next BD(s) */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}
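
/*
 * Layout note: struct buf_desc is four u32s, i.e. 16 bytes, so the address
 * arithmetic above places descriptor i at sqi->bd_dma + i * 16; e.g.
 * descriptor 2 sits at sqi->bd_dma + 32, and that value is what gets
 * chained into bd[1].bd_nextp.
 */
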
static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
	dma_free_coherent(&sqi->master->dev,
			  sizeof(struct buf_desc) * PESQI_BD_COUNT,
			  sqi->bd, sqi->bd_dma);
	kfree(sqi->ring);
}

static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/* Soft-reset of PESQI controller triggers interrupt.
	 * We are not yet ready to handle them so disable CPU
	 * interrupt for the time being.
	 */
	local_irq_save(flags);

	/* assert soft-reset */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait until clear */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);

	/* disable all interrupts */
	pic32_sqi_disable_int(sqi);

	/* Now it is safe to enable back CPU interrupt */
	local_irq_restore(flags);

	/* tx and rx fifo interrupt threshold */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	/* default configuration */
	val = readl(sqi->regs + PESQI_CONF_REG);

	/* set mode: DMA */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* DATAEN - SQIID0-ID3 */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* burst/INCR4 enable */
	val |= PESQI_BURST_EN;

	/* CSEN - all CS */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* write poll count */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}
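
/*
 * Resulting configuration (illustrative, ignoring bits preserved by the
 * read-modify-write above): PESQI_CONF_REG ends up with DMA mode (0x2),
 * quad lanes (2 << 20), burst enable (1 << 12) and both chip-selects
 * (3 << 24), i.e. 0x03201002.
 */
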
static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_sqi *sqi;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
	if (!master)
		return -ENOMEM;

	sqi = spi_master_get_devdata(master);
	sqi->master = master;

	sqi->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_master;
	}

	/* irq */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		ret = sqi->irq;
		goto err_free_master;
	}

	/* clocks */
	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_master;
	}

	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->sys_clk);
	if (ret) {
		dev_err(&pdev->dev, "sys clk enable failed\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->base_clk);
	if (ret) {
		dev_err(&pdev->dev, "base clk enable failed\n");
		clk_disable_unprepare(sqi->sys_clk);
		goto err_free_master;
	}

	init_completion(&sqi->xfer_done);

	/* initialize hardware */
	pic32_sqi_hw_init(sqi);

	/* allocate buffers & descriptors */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_disable_clk;
	}

	/* install irq handlers */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* register master */
	master->num_chipselect	= 2;
	master->max_speed_hz	= clk_get_rate(sqi->base_clk);
	master->dma_alignment	= 32;
	master->max_dma_len	= PESQI_BD_BUF_LEN_MAX;
	master->dev.of_node	= pdev->dev.of_node;
	master->mode_bits	= SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
				  SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags		= SPI_MASTER_HALF_DUPLEX;
	master->can_dma		= pic32_sqi_can_dma;
	master->bits_per_word_mask	= SPI_BPW_RANGE_MASK(8, 32);
	master->transfer_one_message	= pic32_sqi_one_message;
	master->prepare_transfer_hardware	= pic32_sqi_prepare_hardware;
	master->unprepare_transfer_hardware	= pic32_sqi_unprepare_hardware;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_disable_clk:
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

err_free_master:
	spi_master_put(master);
	return ret;
}

static int pic32_sqi_remove(struct platform_device *pdev)
{
	struct pic32_sqi *sqi = platform_get_drvdata(pdev);

	/* release resources */
	free_irq(sqi->irq, sqi);
	ring_desc_ring_free(sqi);

	/* disable clk */
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

	return 0;
}

static const struct of_device_id pic32_sqi_of_ids[] = {
	{.compatible = "microchip,pic32mzda-sqi",},
	{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);

static struct platform_driver pic32_sqi_driver = {
	.driver = {
		.name = "sqi-pic32",
		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
	},
	.probe = pic32_sqi_probe,
	.remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);

MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");