/*
 * Freescale SPI controller driver cpm functions.
 *
 * Maintainer: Kumar Gala
 *
 * Copyright (C) 2006 Polycom, Inc.
 * Copyright 2010 Freescale Semiconductor, Inc.
 *
 * CPM SPI and QE buffer descriptors mode support:
 * Copyright (c) 2009 MontaVista Software, Inc.
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <asm/cpm.h>
#include <soc/fsl/qe/qe.h>
#include <linux/dma-mapping.h>
#include <linux/fsl_devices.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/spi/spi.h>
#include <linux/types.h>
#include <linux/platform_device.h>

#include "spi-fsl-cpm.h"
#include "spi-fsl-lib.h"
#include "spi-fsl-spi.h"
/* CPM1 and CPM2 are mutually exclusive. */
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
#else
#include <asm/cpm2.h>
#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
#endif

#define SPIE_TXB	0x00000200	/* Last char is written to tx fifo */
#define SPIE_RXB	0x00000100	/* Last char is written to rx buf */

/* SPCOM register values */
#define SPCOM_STR	(1 << 23)	/* Start transmit */

#define SPI_PRAM_SIZE	0x100
#define SPI_MRBLR	((unsigned int)PAGE_SIZE)
static void *fsl_dummy_rx;
static DEFINE_MUTEX(fsl_dummy_rx_lock);
static int fsl_dummy_rx_refcnt;
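/*
 * Reset the controller's transmit/receive state between messages: QE parts
 * use the QE_INIT_TX_RX command, CPM1 rewinds the buffer descriptor pointers
 * in parameter RAM, and CPM2 issues the CPM_CR_INIT_TRX command.
 */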
void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
{
	if (mspi->flags & SPI_QE) {
		qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, 0);
	} else {
		if (mspi->flags & SPI_CPM1) {
			out_be32(&mspi->pram->rstate, 0);
			out_be16(&mspi->pram->rbptr,
				 in_be16(&mspi->pram->rbase));
			out_be32(&mspi->pram->tstate, 0);
			out_be16(&mspi->pram->tbptr,
				 in_be16(&mspi->pram->tbase));
		} else {
			cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
		}
	}
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
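/*
 * Program one RX and one TX buffer descriptor for the next chunk of the
 * current transfer (at most SPI_MRBLR bytes) and start the controller by
 * writing SPCOM_STR to the command register.
 */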
static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
{
	struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
	struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
	unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
	unsigned int xfer_ofs;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	xfer_ofs = mspi->xfer_in_progress->len - mspi->count;

	if (mspi->rx_dma == mspi->dma_dummy_rx)
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
	else
		out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
	out_be16(&rx_bd->cbd_datlen, 0);
	out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);

	if (mspi->tx_dma == mspi->dma_dummy_tx)
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
	else
		out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
	out_be16(&tx_bd->cbd_datlen, xfer_len);
	out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
				 BD_SC_LAST);

	/* start transfer */
	mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
}
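/*
 * Map (or reuse pre-mapped) DMA buffers for one spi_transfer and start it.
 * A transfer with no tx_buf or no rx_buf falls back to the shared dummy
 * buffers so the controller always has valid DMA addresses to work with.
 */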
int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
		     struct spi_transfer *t, bool is_dma_mapped)
{
	struct device *dev = mspi->dev;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	if (is_dma_mapped) {
		mspi->map_tx_dma = 0;
		mspi->map_rx_dma = 0;
	} else {
		mspi->map_tx_dma = 1;
		mspi->map_rx_dma = 1;
	}

	if (!t->tx_buf) {
		mspi->tx_dma = mspi->dma_dummy_tx;
		mspi->map_tx_dma = 0;
	}

	if (!t->rx_buf) {
		mspi->rx_dma = mspi->dma_dummy_rx;
		mspi->map_rx_dma = 0;
	}

	if (mspi->map_tx_dma) {
		void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */

		mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, mspi->tx_dma)) {
			dev_err(dev, "unable to map tx dma\n");
			return -ENOMEM;
		}
	} else if (t->tx_buf) {
		mspi->tx_dma = t->tx_dma;
	}

	if (mspi->map_rx_dma) {
		mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, mspi->rx_dma)) {
			dev_err(dev, "unable to map rx dma\n");
			goto err_rx_dma;
		}
	} else if (t->rx_buf) {
		mspi->rx_dma = t->rx_dma;
	}

	/* enable rx ints */
	mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);

	mspi->xfer_in_progress = t;
	mspi->count = t->len;

	/* start CPM transfers */
	fsl_spi_cpm_bufs_start(mspi);

	return 0;

err_rx_dma:
	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct spi_transfer *t = mspi->xfer_in_progress;

	if (mspi->map_tx_dma)
		dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
	if (mspi->map_rx_dma)
		dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
	mspi->xfer_in_progress = NULL;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
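/*
 * Interrupt path: read how many bytes the RX buffer descriptor received,
 * acknowledge the events, then either restart the buffer descriptors for
 * the remaining bytes or signal completion of the transfer.
 */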
void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
{
	u16 len;
	struct fsl_spi_reg *reg_base = mspi->reg_base;

	dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
		in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);

	len = in_be16(&mspi->rx_bd->cbd_datlen);
	if (len > mspi->count) {
		WARN_ON(1);
		len = mspi->count;
	}

	/* Clear the events */
	mpc8xxx_spi_write_reg(&reg_base->event, events);

	mspi->count -= len;
	if (mspi->count)
		fsl_spi_cpm_bufs_start(mspi);
	else
		complete(&mspi->done);
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
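/*
 * The dummy RX buffer is shared by all controller instances and reference
 * counted under fsl_dummy_rx_lock; it backs receive DMA for transfers that
 * have no rx_buf.
 */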
static void *fsl_spi_alloc_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	if (!fsl_dummy_rx)
		fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
	if (fsl_dummy_rx)
		fsl_dummy_rx_refcnt++;

	mutex_unlock(&fsl_dummy_rx_lock);

	return fsl_dummy_rx;
}
static void fsl_spi_free_dummy_rx(void)
{
	mutex_lock(&fsl_dummy_rx_lock);

	switch (fsl_dummy_rx_refcnt) {
	case 0:
		WARN_ON(1);
		break;
	case 1:
		kfree(fsl_dummy_rx);
		fsl_dummy_rx = NULL;
		fallthrough;
	default:
		fsl_dummy_rx_refcnt--;
		break;
	}

	mutex_unlock(&fsl_dummy_rx_lock);
}
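/*
 * Locate or allocate the SPI parameter RAM: QE devices may have a fixed
 * muram offset in the "reg" property or get a dynamically allocated page
 * assigned with QE_ASSIGN_PAGE_TO_DEVICE; CPM2 allocates muram and writes
 * the offset into the SPI base register.
 */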
static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	void __iomem *spi_base;
	unsigned long pram_ofs = -ENOMEM;

	/* Can't use of_address_to_resource(), QE muram isn't at 0. */
	iprop = of_get_property(np, "reg", &size);

	/* QE with a fixed pram location? */
	if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
		return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);

	/* QE but with a dynamic pram location? */
	if (mspi->flags & SPI_QE) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
			     QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
		return pram_ofs;
	}

	spi_base = of_iomap(np, 1);
	if (spi_base == NULL)
		return -EINVAL;

	if (mspi->flags & SPI_CPM2) {
		pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
		out_be16(spi_base, pram_ofs);
	}

	iounmap(spi_base);
	return pram_ofs;
}
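/*
 * One-time CPM/QE setup for a controller: pick the QE sub-block, map the
 * parameter RAM, allocate the TX/RX buffer descriptors from muram, map the
 * dummy DMA buffers and initialize the parameter RAM fields.
 */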
int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;
	struct device_node *np = dev->of_node;
	const u32 *iprop;
	int size;
	unsigned long bds_ofs;

	if (!(mspi->flags & SPI_CPM_MODE))
		return 0;

	if (!fsl_spi_alloc_dummy_rx())
		return -ENOMEM;

	if (mspi->flags & SPI_QE) {
		iprop = of_get_property(np, "cell-index", &size);
		if (iprop && size == sizeof(*iprop))
			mspi->subblock = *iprop;

		switch (mspi->subblock) {
		default:
			dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
			fallthrough;
		case 0:
			mspi->subblock = QE_CR_SUBBLOCK_SPI1;
			break;
		case 1:
			mspi->subblock = QE_CR_SUBBLOCK_SPI2;
			break;
		}
	}

	if (mspi->flags & SPI_CPM1) {
		void __iomem *pram;
		struct resource *res;

		res = platform_get_resource(to_platform_device(dev),
					    IORESOURCE_MEM, 1);
		pram = devm_ioremap_resource(dev, res);
		if (IS_ERR(pram))
			mspi->pram = NULL;
		else
			mspi->pram = pram;
	} else {
		unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);

		if (IS_ERR_VALUE(pram_ofs))
			mspi->pram = NULL;
		else
			mspi->pram = cpm_muram_addr(pram_ofs);
	}
	if (mspi->pram == NULL) {
		dev_err(dev, "can't allocate spi parameter ram\n");
		goto err_pram;
	}

	bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
				  sizeof(*mspi->rx_bd), 8);
	if (IS_ERR_VALUE(bds_ofs)) {
		dev_err(dev, "can't allocate bds\n");
		goto err_bds;
	}

	mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
					    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
		dev_err(dev, "unable to map dummy tx buffer\n");
		goto err_dummy_tx;
	}

	mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
					    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
		dev_err(dev, "unable to map dummy rx buffer\n");
		goto err_dummy_rx;
	}

	mspi->tx_bd = cpm_muram_addr(bds_ofs);
	mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));

	/* Initialize parameter ram. */
	out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
	out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
	out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
	out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
	out_be16(&mspi->pram->mrblr, SPI_MRBLR);
	out_be32(&mspi->pram->rstate, 0);
	out_be32(&mspi->pram->rdp, 0);
	out_be16(&mspi->pram->rbptr, 0);
	out_be16(&mspi->pram->rbc, 0);
	out_be32(&mspi->pram->rxtmp, 0);
	out_be32(&mspi->pram->tstate, 0);
	out_be32(&mspi->pram->tdp, 0);
	out_be16(&mspi->pram->tbptr, 0);
	out_be16(&mspi->pram->tbc, 0);
	out_be32(&mspi->pram->txtmp, 0);

	return 0;

err_dummy_rx:
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
err_dummy_tx:
	cpm_muram_free(bds_ofs);
err_bds:
	if (!(mspi->flags & SPI_CPM1))
		cpm_muram_free(cpm_muram_offset(mspi->pram));
err_pram:
	fsl_spi_free_dummy_rx();
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
{
	struct device *dev = mspi->dev;

	if (!(mspi->flags & SPI_CPM_MODE))
		return;

	dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
	dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
	cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
	cpm_muram_free(cpm_muram_offset(mspi->pram));
	fsl_spi_free_dummy_rx();
}
EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
MODULE_LICENSE("GPL");
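/*
 * Rough usage sketch (illustrative, not part of this file): the spi-fsl-spi
 * platform driver is expected to call these helpers roughly as follows:
 *
 *	fsl_spi_cpm_init(mspi);             - at probe, when SPI_CPM_MODE is set
 *	fsl_spi_cpm_bufs(mspi, t, mapped);  - per spi_transfer, map and start
 *	fsl_spi_cpm_irq(mspi, events);      - from the interrupt handler
 *	fsl_spi_cpm_bufs_complete(mspi);    - after the transfer completes
 *	fsl_spi_cpm_free(mspi);             - at remove
 *
 * The exact call sites live in spi-fsl-spi.c; the ordering above is an
 * assumption based on the exported API, not a verbatim copy of that driver.
 */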