drivers/spi/spi-dw.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * DesignWare SPI core controller driver (refer to pxa2xx_spi.c)
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "spi-dw.h"

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#endif
/* Slave spi_dev related */
struct chip_data {
	u8 tmode;		/* TR/TO/RO/EEPROM */
	u8 type;		/* SPI/SSP/MicroWire */

	u8 poll_mode;		/* 1 means use poll mode */

	u16 clk_div;		/* baud rate divider */
	u32 speed_hz;		/* baud rate */
	void (*cs_control)(u32 command);
};
#ifdef CONFIG_DEBUG_FS
#define SPI_REGS_BUFSIZE	1024
static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
		size_t count, loff_t *ppos)
{
	struct dw_spi *dws = file->private_data;
	char *buf;
	u32 len = 0;
	ssize_t ret;

	buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
	if (!buf)
		return 0;

	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"%s registers:\n", dev_name(&dws->master->dev));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
	len += scnprintf(buf + len, SPI_REGS_BUFSIZE - len,
			"=================================\n");

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}
static const struct file_operations dw_spi_regs_ops = {
	.owner		= THIS_MODULE,
	.open		= simple_open,
	.read		= dw_spi_show_regs,
	.llseek		= default_llseek,
};
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
	char name[32];

	snprintf(name, 32, "dw_spi%d", dws->master->bus_num);
	dws->debugfs = debugfs_create_dir(name, NULL);
	if (!dws->debugfs)
		return -ENOMEM;

	debugfs_create_file("registers", S_IFREG | S_IRUGO,
		dws->debugfs, (void *)dws, &dw_spi_regs_ops);
	return 0;
}

static void dw_spi_debugfs_remove(struct dw_spi *dws)
{
	debugfs_remove_recursive(dws->debugfs);
}

#else
static inline int dw_spi_debugfs_init(struct dw_spi *dws)
{
	return 0;
}

static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
{
}
#endif /* CONFIG_DEBUG_FS */
void dw_spi_set_cs(struct spi_device *spi, bool enable)
{
	struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
	struct chip_data *chip = spi_get_ctldata(spi);

	/* Chip select logic is inverted from spi_set_cs() */
	if (chip && chip->cs_control)
		chip->cs_control(!enable);

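	/*
	 * Setting the slave's bit in SER enables it for the transfer; SER is
	 * only cleared explicitly when the CS-override fixup is active (see
	 * spi_hw_init()).
	 */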
	if (!enable)
		dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
	else if (dws->cs_override)
		dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);
/* Return the max entries we can fill into tx fifo */
static inline u32 tx_max(struct dw_spi *dws)
{
	u32 tx_left, tx_room, rxtx_gap;

	tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
	tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);

	/*
	 * Another concern is the tx/rx mismatch: we thought of using
	 * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but it
	 * doesn't cover the data that has left the tx/rx fifo and is still
	 * in the shift registers. So the limit is enforced from the
	 * software side instead.
	 */
	rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
			/ dws->n_bytes;

	return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
}
/* Return the max entries we should read out of rx fifo */
static inline u32 rx_max(struct dw_spi *dws)
{
	u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;

	return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
}
static void dw_writer(struct dw_spi *dws)
{
	u32 max;
	u16 txw = 0;

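	/* buf_lock serializes the buffer pointers against transfer_one() setup */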
	spin_lock(&dws->buf_lock);
	max = tx_max(dws);
	while (max--) {
		/* Set the tx word if the transfer's original "tx" is not null */
		if (dws->tx_end - dws->len) {
			if (dws->n_bytes == 1)
				txw = *(u8 *)(dws->tx);
			else
				txw = *(u16 *)(dws->tx);
		}
		dw_write_io_reg(dws, DW_SPI_DR, txw);
		dws->tx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}
static void dw_reader(struct dw_spi *dws)
{
	u32 max;
	u16 rxw;

	spin_lock(&dws->buf_lock);
	max = rx_max(dws);
	while (max--) {
		rxw = dw_read_io_reg(dws, DW_SPI_DR);
		/* Store the rx word only if the transfer's original "rx" is not null */
		if (dws->rx_end - dws->len) {
			if (dws->n_bytes == 1)
				*(u8 *)(dws->rx) = rxw;
			else
				*(u16 *)(dws->rx) = rxw;
		}
		dws->rx += dws->n_bytes;
	}
	spin_unlock(&dws->buf_lock);
}
static void int_error_stop(struct dw_spi *dws, const char *msg)
{
	spi_reset_chip(dws);

	dev_err(&dws->master->dev, "%s\n", msg);
	dws->master->cur_msg->status = -EIO;
	spi_finalize_current_transfer(dws->master);
}
static irqreturn_t interrupt_transfer(struct dw_spi *dws)
{
	u16 irq_status = dw_readl(dws, DW_SPI_ISR);

	/* Error handling */
	if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
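		/* A dummy read of ICR clears the pending overrun/underrun interrupts */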
		dw_readl(dws, DW_SPI_ICR);
		int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
		return IRQ_HANDLED;
	}

	dw_reader(dws);
	if (dws->rx_end == dws->rx) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		spi_finalize_current_transfer(dws->master);
		return IRQ_HANDLED;
	}
	if (irq_status & SPI_INT_TXEI) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		dw_writer(dws);
		/* Always re-enable the TX irq; it is masked again once RX has finished */
		spi_umask_intr(dws, SPI_INT_TXEI);
	}

	return IRQ_HANDLED;
}
static irqreturn_t dw_spi_irq(int irq, void *dev_id)
{
	struct spi_controller *master = dev_id;
	struct dw_spi *dws = spi_controller_get_devdata(master);
	u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;

	if (!irq_status)
		return IRQ_NONE;

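	/* No message in flight: mask the TX-empty interrupt and bail out */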
	if (!master->cur_msg) {
		spi_mask_intr(dws, SPI_INT_TXEI);
		return IRQ_HANDLED;
	}

	return dws->transfer_handler(dws);
}
/* Must be called from the transfer_one() path */
static int poll_transfer(struct dw_spi *dws)
{
	do {
		dw_writer(dws);
		dw_reader(dws);
		cpu_relax();
	} while (dws->rx_end > dws->rx);

	return 0;
}
static int dw_spi_transfer_one(struct spi_controller *master,
		struct spi_device *spi, struct spi_transfer *transfer)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);
	struct chip_data *chip = spi_get_ctldata(spi);
	unsigned long flags;
	u8 imask = 0;
	u16 txlevel = 0;
	u32 cr0;
	int ret;

	dws->dma_mapped = 0;
	spin_lock_irqsave(&dws->buf_lock, flags);
	dws->tx = (void *)transfer->tx_buf;
	dws->tx_end = dws->tx + transfer->len;
	dws->rx = transfer->rx_buf;
	dws->rx_end = dws->rx + transfer->len;
	dws->len = transfer->len;
	spin_unlock_irqrestore(&dws->buf_lock, flags);

	/* Ensure dws->rx and dws->rx_end are visible */
	smp_mb();

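	/* The controller must be disabled while CTRL0 and the clock divider are reprogrammed */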
	spi_enable_chip(dws, 0);

	/* Handle per transfer options for bpw and speed */
	if (transfer->speed_hz != dws->current_freq) {
		if (transfer->speed_hz != chip->speed_hz) {
			/* clk_div doesn't support odd numbers */
			chip->clk_div = (DIV_ROUND_UP(dws->max_freq, transfer->speed_hz) + 1) & 0xfffe;
			chip->speed_hz = transfer->speed_hz;
		}
		dws->current_freq = transfer->speed_hz;
		spi_set_clk(dws, chip->clk_div);
	}

	dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
	dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);

	/* Default SPI mode is SCPOL = 0, SCPH = 0 */
	cr0 = (transfer->bits_per_word - 1)
		| (chip->type << SPI_FRF_OFFSET)
		| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
		   (((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET) |
		   (((spi->mode & SPI_LOOP) ? 1 : 0) << SPI_SRL_OFFSET))
		| (chip->tmode << SPI_TMOD_OFFSET);

	/*
	 * Adjust the transfer mode if necessary. Requires a platform-dependent
	 * chipselect mechanism.
	 */
	if (chip->cs_control) {
		if (dws->rx && dws->tx)
			chip->tmode = SPI_TMOD_TR;
		else if (dws->rx)
			chip->tmode = SPI_TMOD_RO;
		else
			chip->tmode = SPI_TMOD_TO;

		cr0 &= ~SPI_TMOD_MASK;
		cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
	}

	dw_writel(dws, DW_SPI_CTRL0, cr0);

	/* Check if the current transfer is a DMA transaction */
	if (master->can_dma && master->can_dma(master, spi, transfer))
		dws->dma_mapped = master->cur_msg_mapped;

	/* For poll mode just disable all interrupts */
	spi_mask_intr(dws, 0xff);

	/*
	 * Interrupt mode:
	 * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
	 */
	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_setup(dws, transfer);
		if (ret < 0) {
			spi_enable_chip(dws, 1);
			return ret;
		}
	} else if (!chip->poll_mode) {
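		/*
		 * Trigger TXEI when the TX FIFO drains to half its depth, or
		 * to the transfer length if that is smaller.
		 */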
		txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
		dw_writel(dws, DW_SPI_TXFLTR, txlevel);

		/* Set the interrupt mask */
		imask |= SPI_INT_TXEI | SPI_INT_TXOI |
			 SPI_INT_RXUI | SPI_INT_RXOI;
		spi_umask_intr(dws, imask);

		dws->transfer_handler = interrupt_transfer;
	}

	spi_enable_chip(dws, 1);

	if (dws->dma_mapped) {
		ret = dws->dma_ops->dma_transfer(dws, transfer);
		if (ret < 0)
			return ret;
	}

	if (chip->poll_mode)
		return poll_transfer(dws);

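	/* A positive return tells the SPI core the transfer is still in flight */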
	return 1;
}
static void dw_spi_handle_err(struct spi_controller *master,
		struct spi_message *msg)
{
	struct dw_spi *dws = spi_controller_get_devdata(master);

	if (dws->dma_mapped)
		dws->dma_ops->dma_stop(dws);

	spi_reset_chip(dws);
}
/* This may be called twice for each spi dev */
static int dw_spi_setup(struct spi_device *spi)
{
	struct dw_spi_chip *chip_info = NULL;
	struct chip_data *chip;

	/* Only alloc on first setup */
	chip = spi_get_ctldata(spi);
	if (!chip) {
		chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
		if (!chip)
			return -ENOMEM;
		spi_set_ctldata(spi, chip);
	}

	/*
	 * Protocol drivers may change the chip settings, so if
	 * chip_info exists, use it.
	 */
	chip_info = spi->controller_data;

	/* chip_info doesn't always exist */
	if (chip_info) {
		if (chip_info->cs_control)
			chip->cs_control = chip_info->cs_control;

		chip->poll_mode = chip_info->poll_mode;
		chip->type = chip_info->type;
	}

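	/* Default to full-duplex transmit-and-receive mode */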
	chip->tmode = SPI_TMOD_TR;

	return 0;
}
static void dw_spi_cleanup(struct spi_device *spi)
{
	struct chip_data *chip = spi_get_ctldata(spi);

	kfree(chip);
	spi_set_ctldata(spi, NULL);
}
/* Restart the controller, disable all interrupts, clean rx fifo */
static void spi_hw_init(struct device *dev, struct dw_spi *dws)
{
	spi_reset_chip(dws);

	/*
	 * Try to detect the FIFO depth if not set by the interface driver;
	 * per the HW spec the depth can be anywhere from 2 to 256.
	 */
	if (!dws->fifo_len) {
		u32 fifo;

		for (fifo = 1; fifo < 256; fifo++) {
			dw_writel(dws, DW_SPI_TXFLTR, fifo);
			if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
				break;
		}
		dw_writel(dws, DW_SPI_TXFLTR, 0);

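		/* fifo == 1 means even the first write did not stick, i.e. detection failed */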
		dws->fifo_len = (fifo == 1) ? 0 : fifo;
		dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
	}

	/* Enable the HW fixup for explicit CS deselect on Amazon's Alpine chips */
	if (dws->cs_override)
		dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
{
	struct spi_controller *master;
	int ret;

	if (!dws)
		return -EINVAL;

	master = spi_alloc_master(dev, 0);
	if (!master)
		return -ENOMEM;

	dws->master = master;
	dws->type = SSI_MOTO_SPI;
	dws->dma_inited = 0;
	dws->dma_addr = (dma_addr_t)(dws->paddr + DW_SPI_DR);
	spin_lock_init(&dws->buf_lock);

	spi_controller_set_devdata(master, dws);

	ret = request_irq(dws->irq, dw_spi_irq, IRQF_SHARED, dev_name(dev),
			  master);
	if (ret < 0) {
		dev_err(dev, "can not get IRQ\n");
		goto err_free_master;
	}

	master->use_gpio_descriptors = true;
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
	master->bus_num = dws->bus_num;
	master->num_chipselect = dws->num_cs;
	master->setup = dw_spi_setup;
	master->cleanup = dw_spi_cleanup;
	master->set_cs = dw_spi_set_cs;
	master->transfer_one = dw_spi_transfer_one;
	master->handle_err = dw_spi_handle_err;
	master->max_speed_hz = dws->max_freq;
	master->dev.of_node = dev->of_node;
	master->dev.fwnode = dev->fwnode;
	master->flags = SPI_MASTER_GPIO_SS;
	master->auto_runtime_pm = true;

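	/* An interface driver may install its own chip-select handler */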
	if (dws->set_cs)
		master->set_cs = dws->set_cs;

	/* Basic HW init */
	spi_hw_init(dev, dws);

	if (dws->dma_ops && dws->dma_ops->dma_init) {
		ret = dws->dma_ops->dma_init(dws);
		if (ret) {
			dev_warn(dev, "DMA init failed\n");
			dws->dma_inited = 0;
		} else {
			master->can_dma = dws->dma_ops->can_dma;
		}
	}

	ret = devm_spi_register_controller(dev, master);
	if (ret) {
		dev_err(&master->dev, "problem registering spi master\n");
		goto err_dma_exit;
	}

	dw_spi_debugfs_init(dws);
	return 0;

err_dma_exit:
	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);
	spi_enable_chip(dws, 0);
	free_irq(dws->irq, master);
err_free_master:
	spi_controller_put(master);
	return ret;
}
EXPORT_SYMBOL_GPL(dw_spi_add_host);
void dw_spi_remove_host(struct dw_spi *dws)
{
	dw_spi_debugfs_remove(dws);

	if (dws->dma_ops && dws->dma_ops->dma_exit)
		dws->dma_ops->dma_exit(dws);

	spi_shutdown_chip(dws);

	free_irq(dws->irq, dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_remove_host);
int dw_spi_suspend_host(struct dw_spi *dws)
{
	int ret;

	ret = spi_controller_suspend(dws->master);
	if (ret)
		return ret;

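	/* The controller queue is stopped now, so the hardware can be shut down safely */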
	spi_shutdown_chip(dws);
	return 0;
}
EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
	spi_hw_init(&dws->master->dev, dws);
	return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);
MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
MODULE_LICENSE("GPL v2");