/*
 * PIC32 Quad SPI controller driver.
 *
 * Purna Chandra Mandal <purna.mandal@microchip.com>
 * Copyright (c) 2016, Microchip Technology Inc.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
/* SQI registers */
#define PESQI_XIP_CONF1_REG	0x00
#define PESQI_XIP_CONF2_REG	0x04
#define PESQI_CONF_REG		0x08
#define PESQI_CTRL_REG		0x0C
#define PESQI_CLK_CTRL_REG	0x10
#define PESQI_CMD_THRES_REG	0x14
#define PESQI_INT_THRES_REG	0x18
#define PESQI_INT_ENABLE_REG	0x1C
#define PESQI_INT_STAT_REG	0x20
#define PESQI_TX_DATA_REG	0x24
#define PESQI_RX_DATA_REG	0x28
#define PESQI_STAT1_REG		0x2C
#define PESQI_STAT2_REG		0x30
#define PESQI_BD_CTRL_REG	0x34
#define PESQI_BD_CUR_ADDR_REG	0x38
#define PESQI_BD_BASE_ADDR_REG	0x40
#define PESQI_BD_STAT_REG	0x44
#define PESQI_BD_POLL_CTRL_REG	0x48
#define PESQI_BD_TX_DMA_STAT_REG	0x4C
#define PESQI_BD_RX_DMA_STAT_REG	0x50
#define PESQI_THRES_REG		0x54
#define PESQI_INT_SIGEN_REG	0x58
/* PESQI_CONF_REG fields */
#define PESQI_MODE		0x7
#define PESQI_MODE_BOOT		0
#define PESQI_MODE_PIO		1
#define PESQI_MODE_DMA		2
#define PESQI_MODE_XIP		3
#define PESQI_MODE_SHIFT	0
#define PESQI_CPHA		BIT(3)
#define PESQI_CPOL		BIT(4)
#define PESQI_LSBF		BIT(5)
#define PESQI_RXLATCH		BIT(7)
#define PESQI_SERMODE		BIT(8)
#define PESQI_WP_EN		BIT(9)
#define PESQI_HOLD_EN		BIT(10)
#define PESQI_BURST_EN		BIT(12)
#define PESQI_CS_CTRL_HW	BIT(15)
#define PESQI_SOFT_RESET	BIT(16)
#define PESQI_LANES_SHIFT	20
#define PESQI_SINGLE_LANE	0
#define PESQI_DUAL_LANE		1
#define PESQI_QUAD_LANE		2
#define PESQI_CSEN_SHIFT	24
#define PESQI_EN		BIT(23)
/* PESQI_CLK_CTRL_REG fields */
#define PESQI_CLK_EN		BIT(0)
#define PESQI_CLK_STABLE	BIT(1)
#define PESQI_CLKDIV_SHIFT	8
#define PESQI_CLKDIV		0xff
/* PESQI_INT_THR/CMD_THR_REG */
#define PESQI_TXTHR_MASK	0x1f
#define PESQI_TXTHR_SHIFT	8
#define PESQI_RXTHR_MASK	0x1f
#define PESQI_RXTHR_SHIFT	0
/* PESQI_INT_EN/INT_STAT/INT_SIG_EN_REG */
#define PESQI_TXEMPTY		BIT(0)
#define PESQI_TXFULL		BIT(1)
#define PESQI_TXTHR		BIT(2)
#define PESQI_RXEMPTY		BIT(3)
#define PESQI_RXFULL		BIT(4)
#define PESQI_RXTHR		BIT(5)
#define PESQI_BDDONE		BIT(9)  /* BD processing complete */
#define PESQI_PKTCOMP		BIT(10) /* packet processing complete */
#define PESQI_DMAERR		BIT(11) /* error */
/* PESQI_BD_CTRL_REG */
#define PESQI_DMA_EN		BIT(0) /* enable DMA engine */
#define PESQI_POLL_EN		BIT(1) /* enable polling */
#define PESQI_BDP_START		BIT(2) /* start BD processor */
/* PESQI controller buffer descriptor */
struct buf_desc {
	u32 bd_ctrl;	/* control */
	u32 bd_status;	/* reserved */
	u32 bd_addr;	/* DMA buffer addr */
	u32 bd_nextp;	/* next item in chain */
};
/* bd_ctrl */
#define BD_BUFLEN		0x1ff
#define BD_CBD_INT_EN		BIT(16)	/* Current BD is processed */
#define BD_PKT_INT_EN		BIT(17)	/* All BDs of PKT processed */
#define BD_LIFM			BIT(18)	/* last data of pkt */
#define BD_LAST			BIT(19)	/* end of list */
#define BD_DATA_RECV		BIT(20)	/* receive data */
#define BD_DDR			BIT(21)	/* DDR mode */
#define BD_DUAL			BIT(22)	/* Dual SPI */
#define BD_QUAD			BIT(23)	/* Quad SPI */
#define BD_LSBF			BIT(25)	/* LSB First */
#define BD_STAT_CHECK		BIT(27)	/* Status poll */
#define BD_DEVSEL_SHIFT		28	/* CS */
#define BD_CS_DEASSERT		BIT(30)	/* de-assert CS after current BD */
#define BD_EN			BIT(31)	/* BD owned by H/W */
/**
 * struct ring_desc - Representation of SQI ring descriptor
 * @list: list element to add to free or used list.
 * @bd: PESQI controller buffer descriptor
 * @bd_dma: DMA address of PESQI controller buffer descriptor
 * @xfer_len: transfer length
 */
struct ring_desc {
	struct list_head list;
	struct buf_desc *bd;
	dma_addr_t bd_dma;
	u32 xfer_len;
};
/* Global constants */
#define PESQI_BD_BUF_LEN_MAX	256
#define PESQI_BD_COUNT		256 /* max 64KB data per spi message */
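
/*
 * Per-controller driver state: register base, the two input clocks
 * (system clock and the clock that drives the SPI bit clock), the SPI
 * master, the IRQ number, the transfer-done completion, the coherent
 * buffer-descriptor memory with its DMA handle, the software ring
 * descriptors with their free/used lists, and the cached device, speed
 * and mode used to skip redundant reprogramming between messages.
 */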
struct pic32_sqi {
	void __iomem		*regs;
	struct clk		*sys_clk;
	struct clk		*base_clk; /* drives spi clock */
	struct spi_master	*master;
	int			irq;
	struct completion	xfer_done;
	struct ring_desc	*ring;
	void			*bd;
	dma_addr_t		bd_dma;
	struct list_head	bd_list_free; /* free */
	struct list_head	bd_list_used; /* allocated */
	struct spi_device	*cur_spi;
	u32			cur_speed;
	u8			cur_mode;
};
static inline void pic32_setbits(void __iomem *reg, u32 set)
{
	writel(readl(reg) | set, reg);
}
static inline void pic32_clrbits(void __iomem *reg, u32 clr)
{
	writel(readl(reg) & ~clr, reg);
}
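
/*
 * The serial clock is derived from base_clk as sck = base_clk / (2 * div),
 * so the divider programmed below is base_clk / (2 * sck). For example
 * (illustrative numbers only, not taken from the datasheet), a 200 MHz
 * base clock and a 25 MHz requested rate give div = 4.
 */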
static int pic32_sqi_set_clk_rate(struct pic32_sqi *sqi, u32 sck)
{
	u32 val, div;

	/* div = base_clk / (2 * spi_clk) */
	div = clk_get_rate(sqi->base_clk) / (2 * sck);
	div &= PESQI_CLKDIV;

	val = readl(sqi->regs + PESQI_CLK_CTRL_REG);
	/* apply new divider */
	val &= ~(PESQI_CLK_STABLE | (PESQI_CLKDIV << PESQI_CLKDIV_SHIFT));
	val |= div << PESQI_CLKDIV_SHIFT;
	writel(val, sqi->regs + PESQI_CLK_CTRL_REG);

	/* wait for stability */
	return readl_poll_timeout(sqi->regs + PESQI_CLK_CTRL_REG, val,
				  val & PESQI_CLK_STABLE, 1, 5000);
}
static inline void pic32_sqi_enable_int(struct pic32_sqi *sqi)
{
	u32 mask = PESQI_DMAERR | PESQI_BDDONE | PESQI_PKTCOMP;

	writel(mask, sqi->regs + PESQI_INT_ENABLE_REG);
	/* INT_SIGEN works as interrupt-gate to INTR line */
	writel(mask, sqi->regs + PESQI_INT_SIGEN_REG);
}
static inline void pic32_sqi_disable_int(struct pic32_sqi *sqi)
{
	writel(0, sqi->regs + PESQI_INT_ENABLE_REG);
	writel(0, sqi->regs + PESQI_INT_SIGEN_REG);
}
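
/*
 * Interrupt handler: the status bits are sticky, so each handled source
 * is masked out of INT_ENABLE rather than acknowledged. A DMA error
 * masks everything; PKTCOMP (all BDs of the packet processed) likewise
 * masks everything and wakes the waiter through the xfer_done completion.
 */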
static irqreturn_t pic32_sqi_isr(int irq, void *dev_id)
{
	struct pic32_sqi *sqi = dev_id;
	u32 enable, status;

	enable = readl(sqi->regs + PESQI_INT_ENABLE_REG);
	status = readl(sqi->regs + PESQI_INT_STAT_REG);

	/* check spurious interrupt */
	if (!status)
		return IRQ_NONE;

	if (status & PESQI_DMAERR) {
		enable = 0;
		goto irq_done;
	}

	if (status & PESQI_TXTHR)
		enable &= ~(PESQI_TXTHR | PESQI_TXFULL | PESQI_TXEMPTY);

	if (status & PESQI_RXTHR)
		enable &= ~(PESQI_RXTHR | PESQI_RXFULL | PESQI_RXEMPTY);

	if (status & PESQI_BDDONE)
		enable &= ~PESQI_BDDONE;

	/* packet processing completed */
	if (status & PESQI_PKTCOMP) {
		/* mask all interrupts */
		enable = 0;
		/* complete transaction */
		complete(&sqi->xfer_done);
	}

irq_done:
	/* interrupts are sticky, so mask when handled */
	writel(enable, sqi->regs + PESQI_INT_ENABLE_REG);

	return IRQ_HANDLED;
}
static struct ring_desc *ring_desc_get(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;

	if (list_empty(&sqi->bd_list_free))
		return NULL;

	rdesc = list_first_entry(&sqi->bd_list_free, struct ring_desc, list);
	list_del(&rdesc->list);
	list_add_tail(&rdesc->list, &sqi->bd_list_used);
	return rdesc;
}
static void ring_desc_put(struct pic32_sqi *sqi, struct ring_desc *rdesc)
{
	list_del(&rdesc->list);
	list_add(&rdesc->list, &sqi->bd_list_free);
}
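
/*
 * Map one spi_transfer onto hardware buffer descriptors: one BD per
 * scatterlist entry, with direction (BD_DATA_RECV), lane count
 * (BD_DUAL/BD_QUAD), bit order and chip-select encoded in bd_ctrl, and
 * BD_EN handing ownership of the descriptor to the hardware.
 */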
static int pic32_sqi_one_transfer(struct pic32_sqi *sqi,
				  struct spi_message *mesg,
				  struct spi_transfer *xfer)
{
	struct spi_device *spi = mesg->spi;
	struct scatterlist *sg, *sgl;
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int nents, i;
	u32 bd_ctrl;
	u32 nbits;

	/* Device selection */
	bd_ctrl = spi->chip_select << BD_DEVSEL_SHIFT;

	/* half-duplex: select transfer buffer, direction and lane */
	if (xfer->rx_buf) {
		bd_ctrl |= BD_DATA_RECV;
		nbits = xfer->rx_nbits;
		sgl = xfer->rx_sg.sgl;
		nents = xfer->rx_sg.nents;
	} else {
		nbits = xfer->tx_nbits;
		sgl = xfer->tx_sg.sgl;
		nents = xfer->tx_sg.nents;
	}

	if (nbits & SPI_NBITS_QUAD)
		bd_ctrl |= BD_QUAD;
	else if (nbits & SPI_NBITS_DUAL)
		bd_ctrl |= BD_DUAL;

	/* LSB first */
	if (spi->mode & SPI_LSB_FIRST)
		bd_ctrl |= BD_LSBF;

	/* ownership to hardware */
	bd_ctrl |= BD_EN;

	for_each_sg(sgl, sg, nents, i) {
		/* get ring descriptor */
		rdesc = ring_desc_get(sqi);
		if (!rdesc)
			break;

		bd = rdesc->bd;

		/* BD CTRL: length */
		rdesc->xfer_len = sg_dma_len(sg);
		bd->bd_ctrl = bd_ctrl;
		bd->bd_ctrl |= rdesc->xfer_len;

		/* BD STAT */
		bd->bd_status = 0;

		/* BD BUFFER ADDRESS */
		bd->bd_addr = sg->dma_address;
	}

	return 0;
}
static int pic32_sqi_prepare_hardware(struct spi_master *master)
{
	struct pic32_sqi *sqi = spi_master_get_devdata(master);

	/* enable spi interface */
	pic32_setbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);
	/* enable spi clk */
	pic32_setbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);

	return 0;
}
static bool pic32_sqi_can_dma(struct spi_master *master,
			      struct spi_device *spi,
			      struct spi_transfer *x)
{
	/* Do DMA irrespective of transfer size */
	return true;
}
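
/*
 * Message pump: reprogram clock and mode only when the target device
 * changes, build a BD chain covering every transfer in the message, mark
 * the final BD with BD_LAST/BD_LIFM/BD_CS_DEASSERT/BD_PKT_INT_EN, start
 * the BD processor and wait (up to 5 s) for the PKTCOMP interrupt, then
 * tear down and return the descriptors to the free list.
 */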
static int pic32_sqi_one_message(struct spi_master *master,
				 struct spi_message *msg)
{
	struct spi_device *spi = msg->spi;
	struct ring_desc *rdesc, *next;
	struct spi_transfer *xfer;
	struct pic32_sqi *sqi;
	int ret = 0, mode;
	unsigned long timeout;
	u32 val;

	sqi = spi_master_get_devdata(master);

	reinit_completion(&sqi->xfer_done);
	msg->actual_length = 0;

	/* We can't handle spi_transfer specific "speed_hz", "bits_per_word"
	 * and "delay_usecs". But spi_device specific speed and mode change
	 * can be handled at best during spi chip-select switch.
	 */
	if (sqi->cur_spi != spi) {
		/* set spi speed */
		if (sqi->cur_speed != spi->max_speed_hz) {
			sqi->cur_speed = spi->max_speed_hz;
			ret = pic32_sqi_set_clk_rate(sqi, spi->max_speed_hz);
			if (ret)
				dev_warn(&spi->dev, "set_clk, %d\n", ret);
		}

		/* set spi mode */
		mode = spi->mode & (SPI_MODE_3 | SPI_LSB_FIRST);
		if (sqi->cur_mode != mode) {
			val = readl(sqi->regs + PESQI_CONF_REG);
			val &= ~(PESQI_CPOL | PESQI_CPHA | PESQI_LSBF);
			if (mode & SPI_CPOL)
				val |= PESQI_CPOL;
			if (mode & SPI_LSB_FIRST)
				val |= PESQI_LSBF;
			val |= PESQI_CPHA;
			writel(val, sqi->regs + PESQI_CONF_REG);

			sqi->cur_mode = mode;
		}
		sqi->cur_spi = spi;
	}

	/* prepare hardware desc-list(BD) for transfer(s) */
	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = pic32_sqi_one_transfer(sqi, msg, xfer);
		if (ret) {
			dev_err(&spi->dev, "xfer %p err\n", xfer);
			goto xfer_out;
		}
	}

	/* BDs are prepared and chained. Now mark LAST_BD, CS_DEASSERT at last
	 * element of the list.
	 */
	rdesc = list_last_entry(&sqi->bd_list_used, struct ring_desc, list);
	rdesc->bd->bd_ctrl |= BD_LAST | BD_CS_DEASSERT |
			      BD_LIFM | BD_PKT_INT_EN;

	/* set base address BD list for DMA engine */
	rdesc = list_first_entry(&sqi->bd_list_used, struct ring_desc, list);
	writel(rdesc->bd_dma, sqi->regs + PESQI_BD_BASE_ADDR_REG);

	/* enable interrupt */
	pic32_sqi_enable_int(sqi);

	/* enable DMA engine */
	val = PESQI_DMA_EN | PESQI_POLL_EN | PESQI_BDP_START;
	writel(val, sqi->regs + PESQI_BD_CTRL_REG);

	/* wait for xfer completion */
	timeout = wait_for_completion_timeout(&sqi->xfer_done, 5 * HZ);
	if (timeout == 0) {
		dev_err(&sqi->master->dev, "wait timedout/interrupted\n");
		ret = -ETIMEDOUT;
		msg->status = ret;
	} else {
		/* success */
		msg->status = 0;
		ret = 0;
	}

	/* disable DMA */
	writel(0, sqi->regs + PESQI_BD_CTRL_REG);

	pic32_sqi_disable_int(sqi);

xfer_out:
	list_for_each_entry_safe_reverse(rdesc, next,
					 &sqi->bd_list_used, list) {
		/* Update total byte transferred */
		msg->actual_length += rdesc->xfer_len;
		/* release ring descr */
		ring_desc_put(sqi, rdesc);
	}
	spi_finalize_current_message(spi->master);

	return ret;
}
static int pic32_sqi_unprepare_hardware(struct spi_master *master)
{
	struct pic32_sqi *sqi = spi_master_get_devdata(master);

	/* disable clk */
	pic32_clrbits(sqi->regs + PESQI_CLK_CTRL_REG, PESQI_CLK_EN);
	/* disable spi */
	pic32_clrbits(sqi->regs + PESQI_CONF_REG, PESQI_EN);

	return 0;
}
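
/*
 * The BD ring is a single coherent allocation of PESQI_BD_COUNT hardware
 * descriptors chained through bd_nextp, each shadowed by a software
 * ring_desc. With 256 descriptors of up to 256 bytes each, one message
 * can carry at most 64 KB of data.
 */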
static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
{
	struct ring_desc *rdesc;
	struct buf_desc *bd;
	int i;

	/* allocate coherent DMAable memory for hardware buffer descriptors. */
	sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
				      sizeof(*bd) * PESQI_BD_COUNT,
				      &sqi->bd_dma, GFP_DMA32);
	if (!sqi->bd) {
		dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
		return -ENOMEM;
	}

	/* allocate software ring descriptors */
	sqi->ring = kcalloc(PESQI_BD_COUNT, sizeof(*rdesc), GFP_KERNEL);
	if (!sqi->ring) {
		dma_free_coherent(&sqi->master->dev,
				  sizeof(*bd) * PESQI_BD_COUNT,
				  sqi->bd, sqi->bd_dma);
		return -ENOMEM;
	}

	bd = (struct buf_desc *)sqi->bd;

	INIT_LIST_HEAD(&sqi->bd_list_free);
	INIT_LIST_HEAD(&sqi->bd_list_used);

	/* initialize ring-desc */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT; i++, rdesc++) {
		INIT_LIST_HEAD(&rdesc->list);
		rdesc->bd = &bd[i];
		rdesc->bd_dma = sqi->bd_dma + (void *)&bd[i] - (void *)bd;
		list_add_tail(&rdesc->list, &sqi->bd_list_free);
	}

	/* Prepare BD: chain to next BD(s) */
	for (i = 0, rdesc = sqi->ring; i < PESQI_BD_COUNT - 1; i++)
		bd[i].bd_nextp = rdesc[i + 1].bd_dma;
	bd[PESQI_BD_COUNT - 1].bd_nextp = 0;

	return 0;
}
static void ring_desc_ring_free(struct pic32_sqi *sqi)
{
	dma_free_coherent(&sqi->master->dev,
			  sizeof(struct buf_desc) * PESQI_BD_COUNT,
			  sqi->bd, sqi->bd_dma);
	kfree(sqi->ring);
}
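
/*
 * One-time controller setup: soft-reset with CPU interrupts disabled
 * (the reset itself raises interrupts), TX/RX FIFO thresholds of 1, DMA
 * mode, quad-lane data pins with burst transfers, and both chip-selects
 * enabled.
 */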
static void pic32_sqi_hw_init(struct pic32_sqi *sqi)
{
	unsigned long flags;
	u32 val;

	/* Soft-reset of PESQI controller triggers interrupt.
	 * We are not yet ready to handle them so disable CPU
	 * interrupt for the time being.
	 */
	local_irq_save(flags);

	/* assert soft-reset */
	writel(PESQI_SOFT_RESET, sqi->regs + PESQI_CONF_REG);

	/* wait until clear */
	readl_poll_timeout_atomic(sqi->regs + PESQI_CONF_REG, val,
				  !(val & PESQI_SOFT_RESET), 1, 5000);

	/* disable all interrupts */
	pic32_sqi_disable_int(sqi);

	/* Now it is safe to enable back CPU interrupt */
	local_irq_restore(flags);

	/* tx and rx fifo interrupt threshold */
	val = readl(sqi->regs + PESQI_CMD_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_CMD_THRES_REG);

	val = readl(sqi->regs + PESQI_INT_THRES_REG);
	val &= ~(PESQI_TXTHR_MASK << PESQI_TXTHR_SHIFT);
	val &= ~(PESQI_RXTHR_MASK << PESQI_RXTHR_SHIFT);
	val |= (1U << PESQI_TXTHR_SHIFT) | (1U << PESQI_RXTHR_SHIFT);
	writel(val, sqi->regs + PESQI_INT_THRES_REG);

	/* default configuration */
	val = readl(sqi->regs + PESQI_CONF_REG);

	/* set mode: DMA */
	val &= ~PESQI_MODE;
	val |= PESQI_MODE_DMA << PESQI_MODE_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* DATAEN - SQIID0-ID3 */
	val |= PESQI_QUAD_LANE << PESQI_LANES_SHIFT;

	/* burst/INCR4 enable */
	val |= PESQI_BURST_EN;

	/* CSEN - all CS */
	val |= 3U << PESQI_CSEN_SHIFT;
	writel(val, sqi->regs + PESQI_CONF_REG);

	/* write poll count */
	writel(0, sqi->regs + PESQI_BD_POLL_CTRL_REG);

	sqi->cur_speed = 0;
	sqi->cur_mode = -1;
}
static int pic32_sqi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	struct pic32_sqi *sqi;
	struct resource *reg;
	int ret;

	master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
	if (!master)
		return -ENOMEM;

	sqi = spi_master_get_devdata(master);
	sqi->master = master;

	reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
	if (IS_ERR(sqi->regs)) {
		ret = PTR_ERR(sqi->regs);
		goto err_free_master;
	}

	/* irq */
	sqi->irq = platform_get_irq(pdev, 0);
	if (sqi->irq < 0) {
		dev_err(&pdev->dev, "no irq found\n");
		ret = sqi->irq;
		goto err_free_master;
	}

	/* clocks */
	sqi->sys_clk = devm_clk_get(&pdev->dev, "reg_ck");
	if (IS_ERR(sqi->sys_clk)) {
		ret = PTR_ERR(sqi->sys_clk);
		dev_err(&pdev->dev, "no sys_clk ?\n");
		goto err_free_master;
	}

	sqi->base_clk = devm_clk_get(&pdev->dev, "spi_ck");
	if (IS_ERR(sqi->base_clk)) {
		ret = PTR_ERR(sqi->base_clk);
		dev_err(&pdev->dev, "no base clk ?\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->sys_clk);
	if (ret) {
		dev_err(&pdev->dev, "sys clk enable failed\n");
		goto err_free_master;
	}

	ret = clk_prepare_enable(sqi->base_clk);
	if (ret) {
		dev_err(&pdev->dev, "base clk enable failed\n");
		clk_disable_unprepare(sqi->sys_clk);
		goto err_free_master;
	}

	init_completion(&sqi->xfer_done);

	/* initialize hardware */
	pic32_sqi_hw_init(sqi);

	/* allocate buffers & descriptors */
	ret = ring_desc_ring_alloc(sqi);
	if (ret) {
		dev_err(&pdev->dev, "ring alloc failed\n");
		goto err_disable_clk;
	}

	/* install irq handlers */
	ret = request_irq(sqi->irq, pic32_sqi_isr, 0,
			  dev_name(&pdev->dev), sqi);
	if (ret < 0) {
		dev_err(&pdev->dev, "request_irq(%d), failed\n", sqi->irq);
		goto err_free_ring;
	}

	/* register master */
	master->num_chipselect = 2;
	master->max_speed_hz = clk_get_rate(sqi->base_clk);
	master->dma_alignment = 32;
	master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
	master->dev.of_node = of_node_get(pdev->dev.of_node);
	master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
			    SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
	master->flags = SPI_MASTER_HALF_DUPLEX;
	master->can_dma = pic32_sqi_can_dma;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
	master->transfer_one_message = pic32_sqi_one_message;
	master->prepare_transfer_hardware = pic32_sqi_prepare_hardware;
	master->unprepare_transfer_hardware = pic32_sqi_unprepare_hardware;

	ret = devm_spi_register_master(&pdev->dev, master);
	if (ret) {
		dev_err(&master->dev, "failed registering spi master\n");
		free_irq(sqi->irq, sqi);
		goto err_free_ring;
	}

	platform_set_drvdata(pdev, sqi);

	return 0;

err_free_ring:
	ring_desc_ring_free(sqi);

err_disable_clk:
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

err_free_master:
	spi_master_put(master);
	return ret;
}
static int pic32_sqi_remove(struct platform_device *pdev)
{
	struct pic32_sqi *sqi = platform_get_drvdata(pdev);

	/* release resources */
	free_irq(sqi->irq, sqi);
	ring_desc_ring_free(sqi);

	/* disable clk */
	clk_disable_unprepare(sqi->base_clk);
	clk_disable_unprepare(sqi->sys_clk);

	return 0;
}
static const struct of_device_id pic32_sqi_of_ids[] = {
	{.compatible = "microchip,pic32mzda-sqi",},
	{},
};
MODULE_DEVICE_TABLE(of, pic32_sqi_of_ids);
static struct platform_driver pic32_sqi_driver = {
	.driver = {
		.name = "sqi-pic32",
		.of_match_table = of_match_ptr(pic32_sqi_of_ids),
	},
	.probe = pic32_sqi_probe,
	.remove = pic32_sqi_remove,
};

module_platform_driver(pic32_sqi_driver);
MODULE_AUTHOR("Purna Chandra Mandal <purna.mandal@microchip.com>");
MODULE_DESCRIPTION("Microchip SPI driver for PIC32 SQI controller.");
MODULE_LICENSE("GPL v2");