drivers/spi/spi-dw.c
/*
 * Designware SPI core controller driver (refer pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif

/* Slave spi_dev related */
struct chip_data {
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};

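/*
 * When CONFIG_DEBUG_FS is enabled, a read-only dump of the controller
 * registers is exposed under <debugfs>/dw_spi<bus_num>/registers, which is
 * useful when bringing up a new interface driver or debugging FIFO and
 * interrupt problems.
 */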
#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"%s registers:\n", dev_name(&dws->master->dev));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}

static const struct file_operations dw_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dw_spi_show_regs,
	.llseek		= default_llseek,
};

static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &dw_spi_regs_ops);
	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */

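/*
 * Chip select handling. Only the "activate" path below programs SER; the
 * controller takes care of toggling the physical chip select line during
 * the transfer. An optional board-specific cs_control() callback, provided
 * through struct dw_spi_chip, is invoked with the inverted sense of
 * spi_set_cs()'s argument (see the in-function comment).
 */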
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip = spi_get_ctldata(spi);

	/* Chip select logic is inverted from spi_set_cs() */
	if (chip && chip->cs_control)
		chip->cs_control(!enable);

	if (!enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);

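/*
 * FIFO accounting used by tx_max()/rx_max(): every entry pushed into the TX
 * FIFO eventually produces one entry in the RX FIFO, so TX must never run
 * more than fifo_len entries ahead of RX or received data would overflow
 * the RX FIFO. With illustrative numbers: on a 16-entry FIFO with TX already
 * 10 entries ahead of RX (rxtx_gap == 10), at most 6 more entries may be
 * queued, even if the TX FIFO itself has more free slots.
 */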
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but that
	 * doesn't cover the data still in flight between the tx/rx FIFOs
	 * and the shift register, so the gap is tracked in software here.
	 */
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}

/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}

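/*
 * Note that dw_writer()/dw_reader() always advance dws->tx and dws->rx by
 * n_bytes per FIFO entry, even for transfers without a tx_buf or rx_buf;
 * this keeps the pointer arithmetic in tx_max()/rx_max() valid for
 * half-duplex transfers, while the (end - len) checks below skip the actual
 * memory accesses for the missing buffer.
 */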
static void dw_writer(struct dw_spi *dws)
{
	u32 max = tx_max(dws);
	u16 txw = 0;

	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
}

static void dw_reader(struct dw_spi *dws)
{
	u32 max = rx_max(dws);
	u16 rxw;

	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/* Store the rx word only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
}

static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}

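/*
 * Interrupt-mode transfer flow: the handler drains the RX FIFO first and, if
 * the whole transfer has been received, masks TXEI and completes the transfer
 * via spi_finalize_current_transfer(). Otherwise, on a TX-empty event it
 * masks TXEI, refills the TX FIFO, and unmasks TXEI again so the next
 * FIFO-empty event continues the cycle. An overrun or underrun of either
 * FIFO aborts the transfer with -EIO.
 */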
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Always enable the TX irq; it will be disabled when RX is finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}

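/*
 * Top-level IRQ handler. The IRQ line may be shared, so a status of zero
 * means the interrupt was not ours and IRQ_NONE is returned. A non-zero
 * status with no message in flight is treated as spurious: TXEI is masked
 * and the interrupt is acknowledged without further action.
 */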
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}

/* Must be called inside dw_spi_transfer_one() */
static int poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	return 0;
}

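/*
 * Per-transfer entry point called by the SPI core. The sequence is: disable
 * the controller, update the clock divider and CTRL0 for this transfer,
 * pick DMA, interrupt or poll mode, re-enable the controller and start
 * shifting data. Returning 1 tells the core the transfer is still in
 * progress and will be completed later through
 * spi_finalize_current_transfer(); poll mode returns 0 once it is done.
 */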
static int dw_spi_transfer_one(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;

	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;

	spi_enable_chip(dws, 0);

	/* Handle per transfer options for bpw and speed */
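	/*
	 * The BAUDR divider must be even; DIV_ROUND_UP() plus the round-up to
	 * the next even value guarantees the effective SCLK never exceeds the
	 * requested transfer->speed_hz. Illustrative example: with
	 * max_freq = 100 MHz and speed_hz = 4 MHz, DIV_ROUND_UP() yields 25,
	 * which is rounded to 26, i.e. an actual clock of about 3.85 MHz.
	 */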
	if (transfer->speed_hz != dws->current_freq) {
		if (transfer->speed_hz != chip->speed_hz) {
			/* clk_div doesn't support odd numbers */
			chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
			chip->speed_hz = transfer->speed_hz;
		}
		dws->current_freq = transfer->speed_hz;
		spi_set_clk(dws, chip->clk_div);
	}
	if (transfer->bits_per_word == 8) {
		dws->n_bytes = 1;
		dws->dma_width = 1;
	} else if (transfer->bits_per_word == 16) {
		dws->n_bytes = 2;
		dws->dma_width = 2;
	} else {
		return -EINVAL;
	}
	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| (spi->mode << SPI_MODE_OFFSET)
		| (chip->tmode << SPI_TMOD_OFFSET);

	/*
	 * Adjust transfer mode if necessary. Requires platform dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	dw_writel(dws, DW_SPI_CTRL0, cr0);

	/* Check if current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else if (!chip->poll_mode) {
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);

		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, transfer);
		if (ret < 0)
			return ret;
	}

	if (chip->poll_mode)
		return poll_transfer(dws);

	return 1;
}

static void dw_spi_handle_err(struct spi_controller *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}

/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;
	int ret;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so...
	 * if chip_info exists, use it
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;
	}

	chip->tmode = SPI_TMOD_TR;

	if (gpio_is_valid(spi->cs_gpio)) {
		ret = gpio_direction_output(spi->cs_gpio,
				!(spi->mode & SPI_CS_HIGH));
		if (ret)
			return ret;
	}

	return 0;
}

static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}

/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be anywhere from 2 to 256.
	 */
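	/*
	 * Detection relies on TXFTLR only accepting thresholds in the range
	 * 0..(depth - 1): the first value that does not read back unchanged
	 * equals the FIFO depth. As an illustrative example, a 16-entry FIFO
	 * accepts thresholds 1..15 and rejects 16, so the loop below exits
	 * with fifo == 16. If even a threshold of 1 is rejected, detection
	 * has failed and fifo_len is left at 0.
	 */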
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);

		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}
}

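/*
 * Called by the interface drivers (e.g. spi-dw-mmio, spi-dw-pci) once the
 * bus-specific resources have been set up. The caller is expected to have
 * filled in the struct dw_spi fields consumed below: the register base and
 * physical address (regs/paddr), irq, max_freq, bus_num and num_cs, plus
 * optionally fifo_len, set_cs and dma_ops.
 */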
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	BUG_ON(dws == NULL);

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);

	spi_controller_set_devdata(master, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->flags = SPI_MASTER_GPIO_SS;

	if (dws->set_cs)
		master->set_cs = dws->set_cs;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
			dws->dma_inited = 0;
		} else {
			master->can_dma = dws->dma_ops->can_dma;
		}
	}

	ret = devm_spi_register_controller(dev, master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);

void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);

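/*
 * Suspend/resume helpers meant for the interface drivers' PM callbacks:
 * suspend quiesces the controller queue and then shuts the chip down;
 * resume re-initializes the hardware and restarts the queue.
 */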
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);

int dw_spi_resume_host(struct dw_spi *dws)
{
	int ret;

	spi_hw_init(&dws->master->dev, dws);
	ret = spi_controller_resume(dws->master);
	if (ret)
		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");