/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
enum pl330_srccachectrl {
	SCCTRL0,	/* Noncacheable and nonbufferable */
	SCCTRL1,	/* Bufferable only */
	SCCTRL2,	/* Cacheable, but do not allocate */
	SCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	SINVALID1,
	SINVALID2,
	SCCTRL6,	/* Cacheable write-through, allocate on reads only */
	SCCTRL7,	/* Cacheable write-back, allocate on reads only */
};

enum pl330_dstcachectrl {
	DCCTRL0,	/* Noncacheable and nonbufferable */
	DCCTRL1,	/* Bufferable only */
	DCCTRL2,	/* Cacheable, but do not allocate */
	DCCTRL3,	/* Cacheable and bufferable, but do not allocate */
	DINVALID1,	/* AWCACHE = 0x1000 */
	DINVALID2,
	DCCTRL6,	/* Cacheable write-through, allocate on writes only */
	DCCTRL7,	/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

enum pl330_reqtype {
	MEMTOMEM,
	MEMTODEV,
	DEVTOMEM,
	DEVTODEV,
};
/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2
#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff
#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH		0x54
#define CMD_DMAEND		0x00
#define CMD_DMAFLUSHP		0x35
#define CMD_DMAGO		0xa0
#define CMD_DMALD		0x04
#define CMD_DMALDP		0x25
#define CMD_DMALP		0x20
#define CMD_DMALPEND		0x28
#define CMD_DMAKILL		0x01
#define CMD_DMAMOV		0xbc
#define CMD_DMANOP		0x18
#define CMD_DMARMB		0x12
#define CMD_DMASEV		0x34
#define CMD_DMAST		0x08
#define CMD_DMASTP		0x29
#define CMD_DMASTZ		0x0c
#define CMD_DMAWFE		0x36
#define CMD_DMAWFP		0x30
#define CMD_DMAWMB		0x13
#define SZ_DMAADDH		3
#define SZ_DMAEND		1
#define SZ_DMAFLUSHP		2
#define SZ_DMALD		1
#define SZ_DMALDP		2
#define SZ_DMALP		2
#define SZ_DMALPEND		2
#define SZ_DMAKILL		1
#define SZ_DMAMOV		6
#define SZ_DMANOP		1
#define SZ_DMARMB		1
#define SZ_DMASEV		2
#define SZ_DMAST		1
#define SZ_DMASTP		2
#define SZ_DMASTZ		1
#define SZ_DMAWFE		2
#define SZ_DMAWFP		2
#define SZ_DMAWMB		1
#define SZ_DMAGO		6
#define BRST_LEN(ccr)		((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)		(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
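
/*
 * Worked example (illustrative, not from the original source): a CCR whose
 * source burst-size field holds 2 (4-byte beats) and whose burst-length
 * field holds 7 (8 beats) gives BRST_SIZE(ccr) == 4 and BRST_LEN(ccr) == 8,
 * so each burst moves 32 bytes; BYTE_TO_BURST(4096, ccr) == 128 and
 * BURST_TO_BYTE(128, ccr) recovers the 4096 bytes.
 */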
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
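
/*
 * Usage sketch (illustrative): _stop() below waits out the transient
 * FAULT_COMPLETING window with
 *	UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
 * the ORed mask is the set of states that ends the cpu_relax() spin.
 */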
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16
/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
	u32	periph_id;
#define DMAC_MODE_NS	(1 << 0)
	unsigned int	mode;
	unsigned int	data_bus_width:10; /* In number of bits */
	unsigned int	data_buf_dep:10;
	unsigned int	num_chan:4;
	unsigned int	num_peri:6;
	u32		peri_ns;
	unsigned int	num_events:6;
	u32		irq_ns;
};
/* Handle to the DMAC provided to the PL330 core */
struct pl330_info {
	/* Owning device */
	struct device *dev;
	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Client can freely use it. */
	void	*client_data;
	/* PL330 core data, Client must not touch it. */
	void	*pl330_data;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;
	/*
	 * If the DMAC has some reset mechanism, then the
	 * client may want to provide pointer to the method.
	 */
	void (*dmac_reset)(struct pl330_info *pi);
};
/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */
	unsigned dst_inc:1;
	unsigned src_inc:1;

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	bool nonsecure;
	bool privileged;
	bool insnaccess;
	unsigned brst_len:5;
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;
};
/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
struct pl330_xfer {
	u32 src_addr;
	u32 dst_addr;
	/* Size to xfer */
	u32 bytes;
	/*
	 * Pointer to next xfer in the list.
	 * The last xfer in the req must point to NULL.
	 */
	struct pl330_xfer *next;
};
/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request completed successfully. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};
/* A request defining Scatter-Gather List ending with NULL xfer. */
struct pl330_req {
	enum pl330_reqtype rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Unique token for this xfer, set by the client. */
	void *token;
	/* Callback to be called after xfer. */
	void (*xfer_cb)(void *token, enum pl330_op_err err);
	/* If NULL, req will be done at last set parameters. */
	struct pl330_reqcfg *cfg;
	/* Pointer to first xfer in the request. */
	struct pl330_xfer *x;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};
/*
 * To know the status of the channel and DMAC, the client
 * provides a pointer to this structure. The PL330 core
 * fills it with current information.
 */
struct pl330_chanstatus {
	/*
	 * If the DMAC engine halted due to some error,
	 * the client should remove-add DMAC.
	 */
	bool dmac_halted;
	/*
	 * If channel is halted due to some error,
	 * the client should ABORT/FLUSH and START the channel.
	 */
	bool faulting;
	/* Location of last load */
	u32 src_addr;
	/* Location of last store */
	u32 dst_addr;
	/*
	 * Pointer to the currently active req, NULL if channel is
	 * inactive, even though the requests may be present.
	 */
	struct pl330_req *top_req;
	/* Pointer to req waiting second in the queue if any. */
	struct pl330_req *wait_req;
};
enum pl330_chan_op {
	/* Start the channel */
	PL330_OP_START,
	/* Abort the active xfer */
	PL330_OP_ABORT,
	/* Stop xfer and flush queue */
	PL330_OP_FLUSH,
};
struct _xfer_spec {
	u32 ccr;
	struct pl330_req *r;
	struct pl330_xfer *x;
};

enum dmamov_dst {
	SAR = 0,
	CCR,
	DAR,
};

enum pl330_dst {
	SRC = 0,
	DST,
};

enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};

struct _pl330_req {
	u32 mc_bus;
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	struct pl330_req *r;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	u8 reset_chan;
};
/* A DMAC Thread */
struct pl330_thread {
	u8 id;
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */
	unsigned lstenq;
	/* Index of the last submitted request or -1 if the DMA is stopped */
	int req_running;
};
enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
/* A DMAC */
struct pl330_dmac {
	spinlock_t		lock;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info	*pinfo;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
};
enum desc_status {
	/* In the DMAC pool */
	FREE,
	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */
	PREP,
	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */
	BUSY,
	/*
	 * Sitting on the channel work_list but xfer done
	 * by PL330 core
	 */
	DONE,
};
struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 * to the channel.
	 */
	struct dma_pl330_dmac *dmac;

	/* To protect channel manipulation */
	spinlock_t lock;

	/* Token of a hardware channel thread of PL330 DMAC
	 * NULL if the channel is available to be acquired.
	 */
	void *pl330_chid;

	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of burst */
	dma_addr_t fifo_addr;

	/* for cyclic capability */
	bool cyclic;
};
struct dma_pl330_dmac {
	struct pl330_info pif;

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
};
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;
	struct pl330_req req;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;
};
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)
{
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);
}
static inline bool _queue_empty(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
		? true : false;
}
static inline bool _queue_full(struct pl330_thread *thrd)
{
	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
		? false : true;
}
static inline bool is_manager(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
		return true;
	else
		return false;
}
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
}
static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}
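
/*
 * Illustrative encoding (not from the original source; assumes the
 * pl330_dst enumerator DST == 1): _emit_ADDH(0, buf, DST, 0x100) leaves
 * buf[] = { 0x56, 0x00, 0x01 }, i.e. CMD_DMAADDH | (DST << 1) followed by
 * the 16-bit immediate in little-endian order, SZ_DMAADDH == 3 bytes.
 */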
static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}
static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}
static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMALD;

	buf[0] = CMD_DMALD;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMALD;
}
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMALDP;

	buf[0] = CMD_DMALDP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMALDP;
}
static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)
{
	if (dry_run)
		return SZ_DMALP;

	buf[0] = CMD_DMALP;

	if (loop)
		buf[0] |= (1 << 1);

	cnt--; /* DMAC increments by 1 internally */
	buf[1] = cnt;

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	return SZ_DMALP;
}
struct _arg_LPEND {
	enum pl330_cond cond;
	bool forever;
	unsigned loop;
	u8 bjump;
};
static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	if (dry_run)
		return SZ_DMALPEND;

	buf[0] = CMD_DMALPEND;

	if (loop)
		buf[0] |= (1 << 2);

	if (!forever)
		buf[0] |= (1 << 4);

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	buf[1] = bjump;

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
			loop ? '1' : '0',
			bjump);

	return SZ_DMALPEND;
}
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAKILL;

	buf[0] = CMD_DMAKILL;

	return SZ_DMAKILL;
}
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMANOP;

	buf[0] = CMD_DMANOP;

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");

	return SZ_DMANOP;
}
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMARMB;

	buf[0] = CMD_DMARMB;

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

	return SZ_DMARMB;
}
static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)
{
	if (dry_run)
		return SZ_DMASEV;

	buf[0] = CMD_DMASEV;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

	return SZ_DMASEV;
}
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)
{
	if (dry_run)
		return SZ_DMAST;

	buf[0] = CMD_DMAST;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

	return SZ_DMAST;
}
static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMASTP;

	buf[0] = CMD_DMASTP;

	if (cond == BURST)
		buf[0] |= (1 << 1);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

	return SZ_DMASTP;
}
static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMASTZ;

	buf[0] = CMD_DMASTZ;

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");

	return SZ_DMASTZ;
}
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)
{
	if (dry_run)
		return SZ_DMAWFE;

	buf[0] = CMD_DMAWFE;

	ev &= 0x1f;
	ev <<= 3;
	buf[1] = ev;

	if (invalidate)
		buf[1] |= (1 << 1);

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");

	return SZ_DMAWFE;
}
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)
{
	if (dry_run)
		return SZ_DMAWFP;

	buf[0] = CMD_DMAWFP;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

	return SZ_DMAWFP;
}
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAWMB;

	buf[0] = CMD_DMAWMB;

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");

	return SZ_DMAWMB;
}
struct _arg_GO {
	u8 chan;
	u32 addr;
	unsigned ns;
};

static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);

	buf[1] = chan & 0x7;

	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
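
/*
 * Illustrative layout (not from the original source): DMAGO for channel 2,
 * secure (ns == 0), with microcode at bus address 0x40001000 encodes as
 *	buf[] = { 0xa0, 0x02, 0x00, 0x10, 0x00, 0x40 }
 * i.e. opcode, channel number, then the 32-bit program address little-endian.
 */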
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

	do {
		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
			break;

		cpu_relax();
	} while (--loops);

	if (!loops)
		return true;

	return false;
}
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
static void mark_free(struct pl330_thread *thrd, int idx)
{
	struct _pl330_req *req = &thrd->req[idx];

	_emit_END(0, req->mc_cpu);
	req->mc_len = 0;

	thrd->req_running = -1;
}
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (!IS_FREE(&thrd->req[idx]))
		req = &thrd->req[idx];
	else {
		idx = thrd->lstenq;
		if (!IS_FREE(&thrd->req[idx]))
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:
		_stop(thrd);

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->r->cfg->pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
	}

	return off;
}
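
/*
 * Illustrative microcode stream (not from the original source): one cyc of
 * the mem-to-dev loop above emits, for peripheral request line p:
 *	DMAWFPS p	; wait for a single-transfer request from p
 *	DMALD		; load a burst from memory
 *	DMASTPS p	; store it to the peripheral
 *	DMAFLUSHP p	; resynchronize the request state of p
 * _setup_loops() later wraps runs of such cycles in DMALP/DMALPEND loops.
 */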
static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;

	switch (pxs->r->rqtype) {
	case MEMTODEV:
		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
		break;
	case DEVTOMEM:
		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
		break;
	case MEMTOMEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;
	default:
		off += 0x40000000; /* Scare off the Client */
		break;
	}

	return off;
}
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
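
/*
 * Worked example (illustrative): for *bursts == 1000 the middle branch
 * above picks lcnt1 = 256, lcnt0 = 1000 / 256 = 3 and cyc = 1, so one call
 * covers lcnt0 * lcnt1 * cyc = 768 bursts and reports that back through
 * *bursts; _setup_loops() then calls _loop() again for the remaining 232.
 */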
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	u32 ccr = pxs->ccr;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int off = 0;

	while (bursts) {
		c = bursts;
		off += _loop(dry_run, &buf[off], &c, pxs);
		bursts -= c;
	}

	return off;
}
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = pxs->x;
	int off = 0;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	/* Setup Loop(s) */
	off += _setup_loops(dry_run, &buf[off], pxs);

	return off;
}
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	u32 ccr = 0;

	if (rqc->src_inc)
		ccr |= CC_SRCINC;

	if (rqc->dst_inc)
		ccr |= CC_DSTINC;

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);

	return ccr;
}
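
/*
 * Worked example (illustrative): a non-secure request with src_inc = 1,
 * dst_inc = 0, brst_size = 2 (4-byte beats), brst_len = 1 and default
 * cache-control/swap settings packs to
 *	CC_SRCINC | CC_SRCNS | CC_DSTNS
 *	| (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT)
 * == 0x00810205; the burst-length fields stay 0 since a length of 1
 * encodes as 0.
 */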
static inline bool _is_valid(u32 ccr)
{
	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
		return false;
	else
		return true;
}
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(void *ch_id, struct pl330_req *r)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;
	void __iomem *regs;
	unsigned idx;
	u32 ccr;
	int ret = 0;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)
		return -EINVAL;

	pl330 = thrd->dmac;
	pi = pl330->pinfo;
	regs = pi->base;

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);
		return -EAGAIN;
	}

	/* If request for non-existing peripheral */
	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);
		return -EINVAL;
	}

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {
		ret = -EAGAIN;
		goto xfer_exit;
	}

	/* Use last settings, if not provided */
	if (r->cfg) {
		/* Prefer Secure Channel */
		if (!_manager_ns(thrd))
			r->cfg->nonsecure = 0;
		else
			r->cfg->nonsecure = 1;

		ccr = _prepare_ccr(r->cfg);
	} else {
		ccr = readl(regs + CC(thrd->id));
	}

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {
		ret = -EINVAL;
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);
		goto xfer_exit;
	}

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	xs.ccr = ccr;
	xs.r = r;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);
	if (ret < 0)
		goto xfer_exit;

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Trying increasing mcbufsz\n",
				__func__, __LINE__);
		ret = -ENOMEM;
		goto xfer_exit;
	}

	/* Hook the request */
	thrd->lstenq = idx;
	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	ret = 0;

xfer_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	return ret;
}
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			mark_free(thrd, 0);
			mark_free(thrd, 1);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(const struct pl330_info *pi)
{
	struct pl330_req *rqdone, *tmp;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */
				continue;

			/* Detach the req */
			rqdone = thrd->req[active].r;
			thrd->req[active].r = NULL;

			mark_free(thrd, active);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
		list_del(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;
	active = thrd->req_running;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		mark_free(thrd, 0);
		mark_free(thrd, 1);
		break;

	case PL330_OP_ABORT:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (active == -1)
			break;

		thrd->req[active].r = NULL;
		mark_free(thrd, active);

		/* Start the next */
	case PL330_OP_START:
		if ((active == -1) && !_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;
	int ev;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
			return ev;
		}

	return -1;
}
static bool _chan_ns(const struct pl330_info *pi, int i)
{
	return pi->pcfg.irq_ns & (1 << i);
}
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pi, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				mark_free(thrd, 0);
				thrd->req[1].r = NULL;
				mark_free(thrd, 1);
				break;
			}
		}
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}
static void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);
}
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	mark_free(thrd, 0);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pi->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pi->mcbufsz / 2;
	thrd->req[1].r = NULL;
	mark_free(thrd, 1);
}
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)
		return -ENOMEM;

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		thrd->id = i;
		thrd->dmac = pl330;
		_reset_thread(thrd);
		thrd->free = true;
	}

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	thrd->id = chans;
	thrd->dmac = pl330;
	thrd->free = false;
	pl330->manager = thrd;

	return 0;
}
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	int ret;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
		return ret;
	}

	return 0;
}
static int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	/* Check if we can handle this DMAC */
	if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id);
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;
	int i;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);
	}

	/* Free memory */
	kfree(pl330->channels);

	return 0;
}
static void dmac_free_resources(struct pl330_dmac *pl330)
{
	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
}
static void pl330_del(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)
		return;

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	kfree(pl330);
	pi->pl330_data = NULL;
}
/* forward declaration */
static struct amba_driver pl330_driver;
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
	if (!ch)
		return NULL;

	return container_of(ch, struct dma_pl330_chan, chan);
}

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}
static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;
	int ret;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)
			continue;

		ret = pl330_submit_req(pch->pl330_chid,
						&desc->req);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
			break;
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n",
					__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);
		}
	}
}
static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			if (!pch->cyclic)
				dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */
	fill_queue(pch);

	/* Make sure the PL330 Channel thread is active */
	pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START);

	while (!list_empty(&pch->completed_list)) {
		dma_async_tx_callback callback;
		void *callback_param;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		callback = desc->txd.callback;
		callback_param = desc->txd.callback_param;

		if (pch->cyclic) {
			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
		} else {
			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);
		}

		dma_descriptor_unmap(&desc->txd);

		if (callback) {
			spin_unlock_irqrestore(&pch->lock, flags);
			callback(callback_param);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);
}
static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
{
	struct dma_pl330_desc *desc = token;
	struct dma_pl330_chan *pch = desc->pchan;
	unsigned long flags;

	/* If desc aborted */
	if (!pch)
		return;

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);
}
bool pl330_filter(struct dma_chan *chan, void *param)
{
	u8 *peri_id;

	if (chan->device->dev->driver != &pl330_driver.drv)
		return false;

	peri_id = chan->private;
	return *peri_id == (unsigned long)param;
}
EXPORT_SYMBOL(pl330_filter);
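
/*
 * Client-side usage sketch (illustrative, not part of this driver): a
 * non-DT slave driver would hand pl330_filter to the dmaengine core to
 * pick a channel wired to its request line:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl330_filter, (void *)peri_id);
 *
 * where peri_id is the board-defined peripheral request-line number.
 */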
static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct dma_pl330_dmac *pdmac = ofdma->of_dma_data;
	unsigned int chan_id;

	if (!pdmac)
		return NULL;

	if (count != 1)
		return NULL;

	chan_id = dma_spec->args[0];
	if (chan_id >= pdmac->num_peripherals)
		return NULL;

	return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan);
}
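
/*
 * Device-tree usage sketch (illustrative, hypothetical node names): with
 * this one-cell translation a slave node selects channels by peripheral
 * request number:
 *
 *	uart0: serial@12c00000 {
 *		dmas = <&pdma0 2>, <&pdma0 3>;
 *		dma-names = "rx", "tx";
 *	};
 *
 * dma_request_slave_channel(dev, "tx") then resolves via this callback.
 */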
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->pl330_chid = pl330_request_channel(&pdmac->pif);
	if (!pch->pl330_chid) {
		spin_unlock_irqrestore(&pch->lock, flags);
		return -ENOMEM;
	}

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pch->lock, flags);

	return 1;
}
static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct dma_pl330_dmac *pdmac = pch->dmac;
	struct dma_slave_config *slave_config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		spin_lock_irqsave(&pch->lock, flags);

		/* FLUSH the PL330 Channel thread */
		pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH);

		/* Mark all desc done */
		list_for_each_entry(desc, &pch->submitted_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->work_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_for_each_entry(desc, &pch->completed_list, node) {
			desc->status = FREE;
			dma_cookie_complete(&desc->txd);
		}

		list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->work_list, &pdmac->desc_pool);
		list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool);
		spin_unlock_irqrestore(&pch->lock, flags);
		break;
	case DMA_SLAVE_CONFIG:
		slave_config = (struct dma_slave_config *)arg;

		if (slave_config->direction == DMA_MEM_TO_DEV) {
			if (slave_config->dst_addr)
				pch->fifo_addr = slave_config->dst_addr;
			if (slave_config->dst_addr_width)
				pch->burst_sz = __ffs(slave_config->dst_addr_width);
			if (slave_config->dst_maxburst)
				pch->burst_len = slave_config->dst_maxburst;
		} else if (slave_config->direction == DMA_DEV_TO_MEM) {
			if (slave_config->src_addr)
				pch->fifo_addr = slave_config->src_addr;
			if (slave_config->src_addr_width)
				pch->burst_sz = __ffs(slave_config->src_addr_width);
			if (slave_config->src_maxburst)
				pch->burst_len = slave_config->src_maxburst;
		}
		break;
	default:
		dev_err(pch->dmac->pif.dev, "Not supported command.\n");
		return -ENXIO;
	}

	return 0;
}
static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	tasklet_kill(&pch->task);

	spin_lock_irqsave(&pch->lock, flags);

	pl330_release_channel(pch->pl330_chid);
	pch->pl330_chid = NULL;

	if (pch->cyclic)
		list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pch->lock, flags);
}
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
}
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);
		if (pch->cyclic) {
			desc->txd.callback = last->txd.callback;
			desc->txd.callback_param = last->txd.callback_param;
		}

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	return cookie;
}
*desc
)
2492 desc
->req
.x
= &desc
->px
;
2493 desc
->req
.token
= desc
;
2494 desc
->rqcfg
.swap
= SWAP_NO
;
2495 desc
->rqcfg
.scctl
= SCCTRL0
;
2496 desc
->rqcfg
.dcctl
= DCCTRL0
;
2497 desc
->req
.cfg
= &desc
->rqcfg
;
2498 desc
->req
.xfer_cb
= dma_pl330_rqcb
;
2499 desc
->txd
.tx_submit
= pl330_tx_submit
;
2501 INIT_LIST_HEAD(&desc
->node
);
2504 /* Returns the number of descriptors added to the DMAC pool */
2505 static int add_desc(struct dma_pl330_dmac
*pdmac
, gfp_t flg
, int count
)
2507 struct dma_pl330_desc
*desc
;
2508 unsigned long flags
;
2514 desc
= kcalloc(count
, sizeof(*desc
), flg
);
2518 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2520 for (i
= 0; i
< count
; i
++) {
2521 _init_desc(&desc
[i
]);
2522 list_add_tail(&desc
[i
].node
, &pdmac
->desc_pool
);
2525 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2530 static struct dma_pl330_desc
*
2531 pluck_desc(struct dma_pl330_dmac
*pdmac
)
2533 struct dma_pl330_desc
*desc
= NULL
;
2534 unsigned long flags
;
2539 spin_lock_irqsave(&pdmac
->pool_lock
, flags
);
2541 if (!list_empty(&pdmac
->desc_pool
)) {
2542 desc
= list_entry(pdmac
->desc_pool
.next
,
2543 struct dma_pl330_desc
, node
);
2545 list_del_init(&desc
->node
);
2547 desc
->status
= PREP
;
2548 desc
->txd
.callback
= NULL
;
2551 spin_unlock_irqrestore(&pdmac
->pool_lock
, flags
);
2556 static struct dma_pl330_desc
*pl330_get_desc(struct dma_pl330_chan
*pch
)
2558 struct dma_pl330_dmac
*pdmac
= pch
->dmac
;
2559 u8
*peri_id
= pch
->chan
.private;
2560 struct dma_pl330_desc
*desc
;
2562 /* Pluck one desc from the pool of DMAC */
2563 desc
= pluck_desc(pdmac
);
2565 /* If the DMAC pool is empty, alloc new */
2567 if (!add_desc(pdmac
, GFP_ATOMIC
, 1))
2571 desc
= pluck_desc(pdmac
);
2573 dev_err(pch
->dmac
->pif
.dev
,
2574 "%s:%d ALERT!\n", __func__
, __LINE__
);
2579 /* Initialize the descriptor */
2581 desc
->txd
.cookie
= 0;
2582 async_tx_ack(&desc
->txd
);
2584 desc
->req
.peri
= peri_id
? pch
->chan
.chan_id
: 0;
2585 desc
->rqcfg
.pcfg
= &pch
->dmac
->pif
.pcfg
;
2587 dma_async_tx_descriptor_init(&desc
->txd
, &pch
->chan
);
static inline void fill_px(struct pl330_xfer *px,
		dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->next = NULL;
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}
static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
		dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
		return NULL;
	}

	/*
	 * Ideally we should lookout for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicating this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

	return desc;
}
2629 static inline int get_burst_len(struct dma_pl330_desc
*desc
, size_t len
)
2631 struct dma_pl330_chan
*pch
= desc
->pchan
;
2632 struct pl330_info
*pi
= &pch
->dmac
->pif
;
2635 burst_len
= pi
->pcfg
.data_bus_width
/ 8;
2636 burst_len
*= pi
->pcfg
.data_buf_dep
;
2637 burst_len
>>= desc
->rqcfg
.brst_size
;
2639 /* src/dst_burst_len can't be more than 16 */
2643 while (burst_len
> 1) {
2644 if (!(len
% (burst_len
<< desc
->rqcfg
.brst_size
)))
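
/*
 * Worked example (illustrative): with a 64-bit data bus and a 16-line data
 * buffer, burst_len starts at (64 / 8) * 16 = 128 beats; for brst_size = 2
 * that becomes 128 >> 2 = 32, clamped to 16. A len of 4096 already divides
 * by 16 << 2 == 64, so get_burst_len() returns 16.
 */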
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_dmac *pdmac = pch->dmac;
	unsigned int i;
	dma_addr_t dst;
	dma_addr_t src;

	if (len % period_len != 0)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);
		return NULL;
	}

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			if (!first)
				return NULL;

			spin_lock_irqsave(&pdmac->pool_lock, flags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pdmac->desc_pool);
			}

			list_move_tail(&first->node, &pdmac->desc_pool);

			spin_unlock_irqrestore(&pdmac->pool_lock, flags);

			return NULL;
		}

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			src = dma_addr;
			dst = pch->fifo_addr;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			src = pch->fifo_addr;
			dst = dma_addr;
			break;
		default:
			break;
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
		fill_px(&desc->px, dst, src, period_len);

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;
	}

	if (!desc)
		return NULL;

	pch->cyclic = true;
	desc->txd.flags = flags;

	return &desc->txd;
}
*
2738 pl330_prep_dma_memcpy(struct dma_chan
*chan
, dma_addr_t dst
,
2739 dma_addr_t src
, size_t len
, unsigned long flags
)
2741 struct dma_pl330_desc
*desc
;
2742 struct dma_pl330_chan
*pch
= to_pchan(chan
);
2743 struct pl330_info
*pi
;
2746 if (unlikely(!pch
|| !len
))
2749 pi
= &pch
->dmac
->pif
;
2751 desc
= __pl330_prep_dma_memcpy(pch
, dst
, src
, len
);
2755 desc
->rqcfg
.src_inc
= 1;
2756 desc
->rqcfg
.dst_inc
= 1;
2757 desc
->req
.rqtype
= MEMTOMEM
;
2759 /* Select max possible burst size */
2760 burst
= pi
->pcfg
.data_bus_width
/ 8;
2768 desc
->rqcfg
.brst_size
= 0;
2769 while (burst
!= (1 << desc
->rqcfg
.brst_size
))
2770 desc
->rqcfg
.brst_size
++;
2772 desc
->rqcfg
.brst_len
= get_burst_len(desc
, len
);
2774 desc
->txd
.flags
= flags
;
static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac,
		struct dma_pl330_desc *first)
{
	unsigned long flags;
	struct dma_pl330_desc *desc;

	if (!first)
		return;

	spin_lock_irqsave(&pdmac->pool_lock, flags);

	while (!list_empty(&first->node)) {
		desc = list_entry(first->node.next,
				struct dma_pl330_desc, node);
		list_move_tail(&desc->node, &pdmac->desc_pool);
	}

	list_move_tail(&first->node, &pdmac->desc_pool);

	spin_unlock_irqrestore(&pdmac->pool_lock, flags);
}
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;
	dma_addr_t addr;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	addr = pch->fifo_addr;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct dma_pl330_dmac *pdmac = pch->dmac;

			dev_err(pch->dmac->pif.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pdmac, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			desc->req.rqtype = MEMTODEV;
			fill_px(&desc->px,
				addr, sg_dma_address(sg), sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			desc->req.rqtype = DEVTOMEM;
			fill_px(&desc->px,
				sg_dma_address(sg), addr, sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = 1;
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
	struct dma_slave_caps *caps)
{
	caps->src_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->dstn_addr_widths = PL330_DMA_BUSWIDTHS;
	caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	caps->cmd_pause = false;
	caps->cmd_terminate = true;
	caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	return 0;
}
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct dma_pl330_platdata *pdat;
	struct dma_pl330_dmac *pdmac;
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;

	pdat = dev_get_platdata(&adev->dev);

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL);
	if (!pdmac) {
		dev_err(&adev->dev, "unable to allocate mem\n");
		return -ENOMEM;
	}

	pi = &pdmac->pif;
	pi->dev = &adev->dev;
	pi->pl330_data = NULL;
	pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0;

	res = &adev->res;
	pi->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pi->base))
		return PTR_ERR(pi->base);

	amba_set_drvdata(adev, pdmac);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pi);
			if (ret)
				return ret;
		} else {
			break;
		}
	}

	pi->pcfg.periph_id = adev->periphid;
	ret = pl330_add(pi);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pdmac->desc_pool);
	spin_lock_init(&pdmac->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");

	pd = &pdmac->ddma;
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	if (pdat)
		num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan);
	else
		num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan);

	pdmac->num_peripherals = num_chan;

	pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
	if (!pdmac->peripherals) {
		ret = -ENOMEM;
		dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n");
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pdmac->peripherals[i];
		if (!adev->dev.of_node)
			pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
		else
			pch->chan.private = adev->dev.of_node;

		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->pl330_chid = NULL;
		pch->chan.device = pd;
		pch->dmac = pdmac;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}

	pd->dev = &adev->dev;
	if (pdat) {
		pd->cap_mask = pdat->cap_mask;
	} else {
		dma_cap_set(DMA_MEMCPY, pd->cap_mask);
		if (pi->pcfg.num_peri) {
			dma_cap_set(DMA_SLAVE, pd->cap_mask);
			dma_cap_set(DMA_CYCLIC, pd->cap_mask);
			dma_cap_set(DMA_PRIVATE, pd->cap_mask);
		}
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_control = pl330_control;
	pd->device_issue_pending = pl330_issue_pending;
	pd->device_slave_caps = pl330_dma_device_slave_caps;

	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
					 of_dma_pl330_xlate, pdmac);
		if (ret) {
			dev_err(&adev->dev,
			"unable to register DMA to the generic DT DMA helpers\n");
		}
	}
	adev->dev.dma_parms = &pdmac->dma_parms;

	/*
	 * This is the limit for transfers with a buswidth of 1, larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%d\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pi->pcfg.data_buf_dep,
		pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
		pi->pcfg.num_peri, pi->pcfg.num_events);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}
probe_err2:
	pl330_del(pi);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	struct pl330_info *pi;

	if (!pdmac)
		return 0;

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	dma_async_device_unregister(&pdmac->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
		pl330_free_chan_resources(&pch->chan);
	}

	pi = &pdmac->pif;

	pl330_del(pi);

	return 0;
}
static struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);
static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");