// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/sched/task_stack.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
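
/*
 * Usage sketch (illustrative only, not compiled): how a controller driver's
 * exec_op() hook might use the two DMA helpers above. The foo_* names and the
 * way the transfer is started are hypothetical; only the map/unmap calls and
 * the sg_table handling reflect this file's API.
 */
#if 0
static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct sg_table sgt;
	int ret;

	/* Map op->data.buf.{in,out} into a ready-to-use sg_table. */
	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
	if (ret)
		return ret;

	/* Hypothetical helper that queues the DMA and waits for completion. */
	ret = foo_start_dma_and_wait(ctlr, op, &sgt);

	/* Always undo the mapping, even on failure. */
	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);

	return ret;
}
#endif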
static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}
static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	bool op_is_dtr =
		op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr;

	if (op_is_dtr) {
		if (!spi_mem_controller_is_capable(ctlr, dtr))
			return false;

		if (op->data.swap16 && !spi_mem_controller_is_capable(ctlr, swap16))
			return false;

		if (op->cmd.nbytes != 2)
			return false;
	} else {
		if (op->cmd.nbytes != 1)
			return false;
	}

	if (op->data.ecc) {
		if (!spi_mem_controller_is_capable(ctlr, ecc))
			return false;
	}

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}
static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	/* Buffers must be DMA-able. */
	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_IN &&
			 object_is_on_stack(op->data.buf.in)))
		return -EINVAL;

	if (WARN_ON_ONCE(op->data.dir == SPI_MEM_DATA_OUT &&
			 object_is_on_stack(op->data.buf.out)))
		return -EINVAL;

	return 0;
}
static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}
/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
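
/*
 * Usage sketch (illustrative only, not compiled): probing whether a 1-1-4
 * fast-read operation is usable before committing to it. The 0x6b opcode,
 * 3-byte addressing, and the single dummy byte follow common SPI-NOR
 * conventions but are assumptions here, not something this file defines.
 */
#if 0
static bool foo_can_quad_read(struct spi_mem *mem, void *buf, size_t len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
					  SPI_MEM_OP_ADDR(3, 0, 1),
					  SPI_MEM_OP_DUMMY(1, 1),
					  SPI_MEM_OP_DATA_IN(len, buf, 4));

	return spi_mem_supports_op(mem, &op);
}
#endif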
static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}
static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}
static void spi_mem_add_op_stats(struct spi_statistics __percpu *pcpu_stats,
				 const struct spi_mem_op *op, int exec_op_ret)
{
	struct spi_statistics *stats;
	u64 len, l2len;

	get_cpu();
	stats = this_cpu_ptr(pcpu_stats);
	u64_stats_update_begin(&stats->syncp);

	/*
	 * We do not have the concept of messages or transfers. Let's consider
	 * that one operation is equivalent to one message and one transfer.
	 */
	u64_stats_inc(&stats->messages);
	u64_stats_inc(&stats->transfers);

	/* Use the sum of all lengths as bytes count and histogram value. */
	len = op->cmd.nbytes + op->addr.nbytes;
	len += op->dummy.nbytes + op->data.nbytes;
	u64_stats_add(&stats->bytes, len);
	l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
	u64_stats_inc(&stats->transfer_bytes_histo[l2len]);

	/* Only account for data bytes as transferred bytes. */
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
		u64_stats_add(&stats->bytes_tx, op->data.nbytes);
	if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
		u64_stats_add(&stats->bytes_rx, op->data.nbytes);

	/*
	 * A timeout is not an error, following the same behavior as
	 * spi_transfer_one_message().
	 */
	if (exec_op_ret == -ETIMEDOUT)
		u64_stats_inc(&stats->timedout);
	else if (exec_op_ret)
		u64_stats_inc(&stats->errors);

	u64_stats_update_end(&stats->syncp);
	put_cpu();
}
/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -EOPNOTSUPP;

	if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
			spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
			spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);

			return ret;
		}
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen += op->cmd.nbytes;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		xfers[xferpos].dummy_data = 1;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
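
/*
 * Usage sketch (illustrative only, not compiled): issuing a single operation
 * through spi_mem_exec_op(). 0x9f (Read JEDEC ID) is a common SPI-NOR opcode,
 * used here purely as an example; @id must be DMA-able (e.g. kmalloc'ed),
 * per the stack-buffer check in spi_mem_check_op() above.
 */
#if 0
static int foo_read_jedec_id(struct spi_mem *mem, u8 *id)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(3, id, 1));

	return spi_mem_exec_op(mem, &op);
}
#endif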
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
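
/*
 * Usage sketch (illustrative only, not compiled): the canonical pattern for
 * spi_mem_adjust_op_size() is to shrink-and-retry in a loop, advancing the
 * address and buffer by however many bytes the controller accepted. The 0x03
 * (Read) opcode, 3-byte addressing, and the foo_* name are assumptions.
 */
#if 0
static int foo_read_all(struct spi_mem *mem, u64 addr, void *buf, size_t len)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
					  SPI_MEM_OP_ADDR(3, addr, 1),
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(len, buf, 1));
	int ret;

	while (len) {
		op.addr.val = addr;
		op.data.buf.in = buf;
		op.data.nbytes = len;

		/* May reduce op.data.nbytes to fit controller limits. */
		ret = spi_mem_adjust_op_size(mem, &op);
		if (ret)
			return ret;

		ret = spi_mem_exec_op(mem, &op);
		if (ret)
			return ret;

		addr += op.data.nbytes;
		buf += op.data.nbytes;
		len -= op.data.nbytes;
	}

	return 0;
}
#endif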
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}
static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}
/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -EOPNOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
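
/*
 * Usage sketch (illustrative only, not compiled): creating a read-direction
 * dirmap descriptor covering a hypothetical 16M flash. The 0x0b (Fast Read)
 * opcode, the dummy byte, and the SZ_16M length are example values; addr.val
 * and data.nbytes in the template are filled in per access by the core.
 */
#if 0
static struct spi_mem_dirmap_desc *foo_create_read_dirmap(struct spi_mem *mem)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_DUMMY(1, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = SZ_16M,
	};

	return spi_mem_dirmap_create(mem, &info);
}
#endif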
/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}
/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}
/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
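
/*
 * Usage sketch (illustrative only, not compiled): since spi_mem_dirmap_read()
 * may return less than @len, callers are expected to loop until the whole
 * range has been read, as done here. The foo_* name is hypothetical.
 */
#if 0
static int foo_dirmap_read_all(struct spi_mem_dirmap_desc *desc,
			       u64 offs, void *buf, size_t len)
{
	while (len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);

		if (ret < 0)
			return ret;
		if (!ret)
			return -EIO;

		offs += ret;
		buf += ret;
		len -= ret;
	}

	return 0;
}
#endif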
/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}
static int spi_mem_read_status(struct spi_mem *mem,
			       const struct spi_mem_op *op,
			       u16 *status)
{
	const u8 *bytes = (u8 *)op->data.buf.in;
	int ret;

	ret = spi_mem_exec_op(mem, op);
	if (ret)
		return ret;

	if (op->data.nbytes > 1)
		*status = ((u16)bytes[0] << 8) | bytes[1];
	else
		*status = bytes[0];

	return 0;
}
/**
 * spi_mem_poll_status() - Poll memory device status
 * @mem: SPI memory device
 * @op: the memory operation to execute
 * @mask: status bitmask to check
 * @match: (status & mask) expected value
 * @initial_delay_us: delay in us before starting to poll
 * @polling_delay_us: time to sleep between reads in us
 * @timeout_ms: timeout in milliseconds
 *
 * This function polls a status register and returns when
 * (status & mask) == match or when the timeout has expired.
 *
 * Return: 0 in case of success, -ETIMEDOUT in case of error,
 * -EOPNOTSUPP if not supported.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms)
{
	struct spi_controller *ctlr = mem->spi->controller;
	int ret = -EOPNOTSUPP;
	int read_status_ret;
	u16 status;

	if (op->data.nbytes < 1 || op->data.nbytes > 2 ||
	    op->data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !spi_get_csgpiod(mem->spi, 0)) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->poll_status(mem, op, mask, match,
						 initial_delay_us, polling_delay_us,
						 timeout_ms);

		spi_mem_access_end(mem);
	}

	if (ret == -EOPNOTSUPP) {
		if (!spi_mem_supports_op(mem, op))
			return ret;

		if (initial_delay_us < 10)
			udelay(initial_delay_us);
		else
			usleep_range((initial_delay_us >> 2) + 1,
				     initial_delay_us);

		ret = read_poll_timeout(spi_mem_read_status, read_status_ret,
					(read_status_ret || ((status) & mask) == match),
					polling_delay_us, timeout_ms * 1000, false, mem,
					op, &status);
		if (read_status_ret)
			return read_status_ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_poll_status);
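
/*
 * Usage sketch (illustrative only, not compiled): waiting for a flash to
 * deassert its WIP (write in progress) bit. The 0x05 (Read Status Register)
 * opcode and the BIT(0) busy bit follow common SPI-NOR conventions but are
 * assumptions here; @status_buf must be a DMA-able one-byte buffer.
 */
#if 0
static int foo_wait_ready(struct spi_mem *mem, u8 *status_buf)
{
	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x05, 1),
					  SPI_MEM_OP_NO_ADDR,
					  SPI_MEM_OP_NO_DUMMY,
					  SPI_MEM_OP_DATA_IN(1, status_buf, 1));

	/* Wait until (status & BIT(0)) == 0, polling every 100us, 500ms max. */
	return spi_mem_poll_status(mem, &op, BIT(0), 0, 0, 100, 500);
}
#endif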
static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR_OR_ZERO(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}
static void spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		memdrv->remove(mem);
}
static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}
/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
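
/*
 * Usage sketch (illustrative only, not compiled): the skeleton of a SPI mem
 * driver built on the registration helper above, here via the
 * module_spi_mem_driver() convenience macro from <linux/spi/spi-mem.h>.
 * The foo_* names and the "foo-flash" string are hypothetical.
 */
#if 0
static int foo_probe(struct spi_mem *mem)
{
	/* Detect and register the memory device here. */
	return 0;
}

static struct spi_mem_driver foo_driver = {
	.spidrv = {
		.driver = {
			.name = "foo-flash",
		},
	},
	.probe = foo_probe,
};
module_spi_mem_driver(foo_driver);
#endif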
/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);