/* linux/arch/arm/common/pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <asm/hardware/pl330.h>
/* Register and Bit field Definitions */

#define DS_ST_STOP	0x0
#define DS_ST_EXEC	0x1
#define DS_ST_CMISS	0x2
#define DS_ST_UPDTPC	0x3
#define DS_ST_ATBRR	0x5
#define DS_ST_QBUSY	0x6
#define DS_ST_KILL	0x8
#define DS_ST_CMPLT	0x9
#define DS_ST_FLTCMP	0xe
#define DS_ST_FAULT	0xf

#define INTSTATUS	0x28

#define FTC(n)		(_FTC + (n)*0x4)

#define CS(n)		(_CS + (n)*0x8)
#define CS_CNS		(1 << 21)

#define CPC(n)		(_CPC + (n)*0x8)

#define SA(n)		(_SA + (n)*0x20)

#define DA(n)		(_DA + (n)*0x20)

#define CC(n)		(_CC + (n)*0x20)

#define CC_SRCINC	(1 << 0)
#define CC_DSTINC	(1 << 14)
#define CC_SRCPRI	(1 << 8)
#define CC_DSTPRI	(1 << 22)
#define CC_SRCNS	(1 << 9)
#define CC_DSTNS	(1 << 23)
#define CC_SRCIA	(1 << 10)
#define CC_DSTIA	(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT	28

#define LC0(n)		(_LC0 + (n)*0x20)

#define LC1(n)		(_LC1 + (n)*0x20)

#define DBGSTATUS	0xd00
#define DBG_BUSY	(1 << 0)

#define DBGINST0	0xd08
#define DBGINST1	0xd0c

#define PERIPH_ID	0xfe0
#define PCELL_ID	0xff0

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define DESIGNER	0x41

#define INTEG_CFG	0x0
#define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12))

#define PCELL_ID_VAL	0xb105f00d
#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
				| PL330_STATE_WFE | PL330_STATE_FAULTING)
#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13

#define SZ_DMAFLUSHP	2

#define SZ_DMALPEND	2
#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
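/*
 * Illustration (not from the original source; values assumed): for a CCR
 * encoding a 4-byte burst size (brst_size field = 2) and an 8-beat burst
 * length (brst_len field = 7), BRST_SIZE() yields 4 and BRST_LEN() yields 8,
 * so BYTE_TO_BURST(4096, ccr) == 128 burst cycles and
 * BURST_TO_BYTE(128, ccr) == 4096 bytes again.
 */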
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
#define MARK_FREE(req)	do { \
				_emit_END(0, (req)->mc_cpu); \
			} while (0)

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
	struct pl330_xfer *x;

	/* Number of bytes taken to setup MC for the req */

	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;

	/* ToBeDone for tasklet */
struct pl330_thread {
	/* If the channel is not yet acquired by any client */

	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last submitted request */
enum pl330_dmac_state {
	/* Holds list of reqs with due callbacks */
	struct list_head req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info *pinfo;
	/* Maximum possible events/irqs */

	/* BUS address of MicroCode buffer */

	/* CPU address of MicroCode buffer */

	/* List of all Channel threads */
	struct pl330_thread *channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread *manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct tasks;
	struct _pl330_tbd dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state state;
static inline void _callback(struct pl330_req *r, enum pl330_op_err err)

		r->xfer_cb(r->token, err);
static inline bool _queue_empty(struct pl330_thread *thrd)

	return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1]))
static inline bool _queue_full(struct pl330_thread *thrd)

	return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1]))
static inline bool is_manager(struct pl330_thread *thrd)

	struct pl330_dmac *pl330 = thrd->dmac;

	/* MANAGER is indexed at the end */
	if (thrd->id == pl330->pinfo->pcfg.num_chan)
/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)

	struct pl330_dmac *pl330 = thrd->dmac;

	return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false;
static inline u32 get_id(struct pl330_info *pi, u32 off)

	void __iomem *regs = pi->base;

	id |= (readb(regs + off + 0x0) << 0);
	id |= (readb(regs + off + 0x4) << 8);
	id |= (readb(regs + off + 0x8) << 16);
	id |= (readb(regs + off + 0xc) << 24);
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)

	buf[0] = CMD_DMAADDH;

	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);
static inline u32 _emit_END(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");
static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)

	buf[0] = CMD_DMAFLUSHP;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);
static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);
static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)

	cnt--;	/* DMAC increments by 1 internally */

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);
	enum pl330_cond cond;
static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)

	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	buf[0] = CMD_DMALPEND;

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
		forever ? "FE" : "END",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),
static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])

	buf[0] = CMD_DMAKILL;
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)

	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);
static inline u32 _emit_NOP(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n");
static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");
static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);
static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)

		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));
static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);
static inline u32 _emit_STZ(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n");
static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev,
		unsigned invalidate)

	PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n",
		ev >> 3, invalidate ? ", I" : "");
static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);

		buf[0] |= (0 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);
static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)

	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	*((u32 *)&buf[2]) = addr;
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)

/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)

	void __iomem *regs = thrd->dmac->pinfo->base;
	unsigned long loops = msecs_to_loops(5);

		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)

	void __iomem *regs = thrd->dmac->pinfo->base;

	val = (insn[0] << 16) | (insn[1] << 24);

		val |= (thrd->id << 8); /* Channel Number */

	writel(val, regs + DBGINST0);

	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");

	writel(0, regs + DBGCMD);
static inline u32 _state(struct pl330_thread *thrd)

	void __iomem *regs = thrd->dmac->pinfo->base;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;

		val = readl(regs + CS(thrd->id)) & 0xf;

		return PL330_STATE_STOPPED;

		return PL330_STATE_EXECUTING;

		return PL330_STATE_CACHEMISS;

		return PL330_STATE_UPDTPC;

		return PL330_STATE_WFE;

		return PL330_STATE_FAULTING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_ATBARRIER;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_QUEUEBUSY;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_WFP;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_KILLING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_COMPLETING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;

			return PL330_STATE_FAULT_COMPLETING;

		return PL330_STATE_INVALID;
/* If the request 'req' of thread 'thrd' is currently active */
static inline bool _req_active(struct pl330_thread *thrd,
		struct _pl330_req *req)

	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id));

	return (pc >= buf && pc <= buf + req->mc_len) ? true : false;
/* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */
static inline unsigned _thrd_active(struct pl330_thread *thrd)

	if (_req_active(thrd, &thrd->req[0]))
		return 1; /* First req active */

	if (_req_active(thrd, &thrd->req[1]))
		return 2; /* Second req active */
static void _stop(struct pl330_thread *thrd)

	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)

	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;

	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)

	if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
		req = &thrd->req[1 - thrd->lstenq];
	else if (!IS_FREE(&thrd->req[thrd->lstenq]))
		req = &thrd->req[thrd->lstenq];

	/* Return if no request */

		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",

	go.addr = req->mc_bus;

	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);
static bool _start(struct pl330_thread *thrd)

	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)

		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_RMB(dry_run, &buf[off]);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_WMB(dry_run, &buf[off]);
static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)

		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_ST(dry_run, &buf[off], ALWAYS);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)

		off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_LD(dry_run, &buf[off], ALWAYS);
		off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri);
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri);
static int _bursts(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)

	switch (pxs->r->rqtype) {

		off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);

		off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);

		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);

		off += 0x40000000; /* Scare off the Client */
/* Returns bytes consumed and updates bursts */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)

	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {

		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {

		lcnt0 = *bursts / lcnt1;

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;

	szlpend = _emit_LPEND(1, buf, &lpend);

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;
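	/*
	 * Worked example (figures assumed, not from the source): a MEMTOMEM
	 * cycle emits DMALD+DMARMB+DMAST+DMAWMB, i.e. szbrst = 4 bytes; with
	 * szlp = 2 and szlpend = 2 this gives cycmax = (255 - 4) / 4 = 62
	 * unrolled burst cycles inside one loop body.
	 */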
	off += _emit_LP(dry_run, &buf[off], 0, lcnt0);

	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;

	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	lpend.cond = ALWAYS;
	lpend.forever = false;

	lpend.bjump = off - ljmp0;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	*bursts = lcnt1 * cyc;
static inline int _setup_loops(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)

	struct pl330_xfer *x = pxs->x;

	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);

		off += _loop(dry_run, &buf[off], &c, pxs);
static inline int _setup_xfer(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs)

	struct pl330_xfer *x = pxs->x;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	off += _setup_loops(dry_run, &buf[off], pxs);
/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)

	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))

		off += _setup_xfer(dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);

	off += _emit_END(dry_run, &buf[off]);
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)

	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;

		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);
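/*
 * Example (illustrative only; field values assumed): a request configured
 * for incrementing source and destination, 4-byte beats (brst_size = 2) and
 * 16-beat bursts (brst_len = 16) would be encoded roughly as
 *
 *	ccr = CC_SRCINC | CC_DSTINC
 *		| (15 << CC_SRCBRSTLEN_SHFT) | (15 << CC_DSTBRSTLEN_SHFT)
 *		| (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT);
 */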
static inline bool _is_valid(u32 ccr)

	enum pl330_dstcachectrl dcctl;
	enum pl330_srccachectrl scctl;

	dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK;
	scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK;

	if (dcctl == DINVALID1 || dcctl == DINVALID2
			|| scctl == SINVALID1 || scctl == SINVALID2)
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
int pl330_submit_req(void *ch_id, struct pl330_req *r)

	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;
	struct _xfer_spec xs;
	unsigned long flags;

	/* No Req or Unacquired Channel or DMAC */
	if (!r || !thrd || thrd->free)

	if (pl330->state == DYING
			|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->pinfo->dev, "%s:%d\n",
			__func__, __LINE__);

	/* If request for non-existing peripheral */
	if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) {
		dev_info(thrd->dmac->pinfo->dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, r->peri);

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		r->cfg->nonsecure = 0;

		r->cfg->nonsecure = 1;

	/* Use last settings, if not provided */

		ccr = _prepare_ccr(r->cfg);

		ccr = readl(regs + CC(thrd->id));

	/* If this req doesn't have valid xfer settings */
	if (!_is_valid(ccr)) {

		dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n",
			__func__, __LINE__, ccr);

	idx = IS_FREE(&thrd->req[0]) ? 0 : 1;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(1, thrd, idx, &xs);

	if (ret > pi->mcbufsz / 2) {
		dev_info(thrd->dmac->pinfo->dev,
			"%s:%d Try increasing mcbufsz\n",
				__func__, __LINE__);

	/* Hook the request */

	thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs);
	thrd->req[idx].r = r;

	spin_unlock_irqrestore(&pl330->lock, flags);

EXPORT_SYMBOL(pl330_submit_req);
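/*
 * Client-side sketch (not part of this file; the completion callback, DMA
 * addresses and the request/xfer field layout are assumptions based on the
 * pl330 client interface declared in <asm/hardware/pl330.h>):
 *
 *	static void done_cb(void *token, enum pl330_op_err err) { ... }
 *
 *	struct pl330_xfer xfer = {
 *		.src_addr = src_dma,
 *		.dst_addr = dst_dma,
 *		.bytes = len,
 *	};
 *	struct pl330_req req = {
 *		.rqtype = MEMTOMEM,
 *		.cfg = &rqcfg,
 *		.x = &xfer,
 *		.xfer_cb = done_cb,
 *		.token = ctx,
 *	};
 *
 *	ret = pl330_submit_req(ch_id, &req);
 *	if (!ret)
 *		ret = pl330_chan_ctrl(ch_id, PL330_OP_START);
 */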
static void pl330_dotask(unsigned long data)

	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;

				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			MARK_FREE(&thrd->req[0]);
			MARK_FREE(&thrd->req[1]);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);

	spin_unlock_irqrestore(&pl330->lock, flags);
/* Returns 1 if state was updated, 0 otherwise */
int pl330_update(const struct pl330_info *pi)

	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;

	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)

	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;

		pl330->dmac_tbd.reset_mngr = true;

		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;

	while (i < pi->pcfg.num_chan) {
		if (val & (1 << i)) {

				"Reset Channel-%d\t CS-%x FTC-%x\n",
					i, readl(regs + CS(i)),
					readl(regs + FTC(i)));
			_stop(&pl330->channels[i]);

	/* Check which event happened, i.e., thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = _thrd_active(thrd);
			if (!active) /* Aborted */

			rqdone = &thrd->req[active];

			/* Get going again ASAP */

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone->r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);

	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {

		tasklet_schedule(&pl330->tasks);

EXPORT_SYMBOL(pl330_update);
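/*
 * Typical wiring (sketch; the platform glue is assumed): the SoC's DMAC
 * interrupt handler simply forwards its pl330_info to pl330_update() and
 * reports whether anything was serviced:
 *
 *	static irqreturn_t pl330_irq_handler(int irq, void *data)
 *	{
 *		if (pl330_update(data))
 *			return IRQ_HANDLED;
 *		else
 *			return IRQ_NONE;
 *	}
 */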
int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)

	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)

	spin_lock_irqsave(&pl330->lock, flags);

	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */

		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		MARK_FREE(&thrd->req[0]);
		MARK_FREE(&thrd->req[1]);

	case PL330_OP_ABORT:
		active = _thrd_active(thrd);

		/* Make sure the channel is stopped */

		/* ABORT is only for the active req */

		thrd->req[active].r = NULL;
		MARK_FREE(&thrd->req[active]);

		/* Start the next */
	case PL330_OP_START:
		if (!_thrd_active(thrd) && !_start(thrd))

	spin_unlock_irqrestore(&pl330->lock, flags);

EXPORT_SYMBOL(pl330_chan_ctrl);
int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus)

	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	struct pl330_info *pi;

	if (!pstatus || !thrd || thrd->free)

	/* The client should remove the DMAC and add again */
	if (pl330->state == DYING)
		pstatus->dmac_halted = true;

		pstatus->dmac_halted = false;

	val = readl(regs + FSC);
	if (val & (1 << thrd->id))
		pstatus->faulting = true;

		pstatus->faulting = false;

	active = _thrd_active(thrd);

		/* Indicate that the thread is not running */
		pstatus->top_req = NULL;
		pstatus->wait_req = NULL;

		pstatus->top_req = thrd->req[active].r;
		pstatus->wait_req = !IS_FREE(&thrd->req[1 - active])
					? thrd->req[1 - active].r : NULL;

	pstatus->src_addr = readl(regs + SA(thrd->id));
	pstatus->dst_addr = readl(regs + DA(thrd->id));

EXPORT_SYMBOL(pl330_chan_status);
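/*
 * Example poll (illustrative only): a client may inspect the progress of
 * the active transfer without disturbing it:
 *
 *	struct pl330_chanstatus st;
 *
 *	if (!pl330_chan_status(ch_id, &st) && st.top_req)
 *		pr_debug("busy: src %x dst %x\n", st.src_addr, st.dst_addr);
 */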
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)

	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	for (ev = 0; ev < pi->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
void *pl330_request_channel(const struct pl330_info *pi)

	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!pi || !pi->pl330_data)

	pl330 = pi->pl330_data;

	if (pl330->state == DYING)

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];

			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {

				thrd->req[0].r = NULL;
				MARK_FREE(&thrd->req[0]);
				thrd->req[1].r = NULL;
				MARK_FREE(&thrd->req[1]);

	spin_unlock_irqrestore(&pl330->lock, flags);

EXPORT_SYMBOL(pl330_request_channel);
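/*
 * Acquire/release pairing expected of a client (sketch; 'pi' is the
 * pl330_info previously registered with pl330_add()):
 *
 *	void *ch_id = pl330_request_channel(pi);
 *
 *	if (!ch_id)
 *		return -EBUSY;
 *	... submit requests, start, wait for callbacks ...
 *	pl330_release_channel(ch_id);
 */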
/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)

	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pi->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
void pl330_release_channel(void *ch_id)

	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)

	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);

	spin_unlock_irqrestore(&pl330->lock, flags);

EXPORT_SYMBOL(pl330_release_channel);
/* Initialize the structure for PL330 configuration that can be used
 * by the client driver to make the best use of the DMAC
 */
static void read_dmac_config(struct pl330_info *pi)

	void __iomem *regs = pi->base;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;

	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;

		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);

		pi->pcfg.num_peri = 0;

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;

		pi->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;

	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
static inline void _reset_thread(struct pl330_thread *thrd)

	struct pl330_dmac *pl330 = thrd->dmac;
	struct pl330_info *pi = pl330->pinfo;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pi->mcbufsz);
	thrd->req[0].r = NULL;
	MARK_FREE(&thrd->req[0]);

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu

	thrd->req[1].mc_bus = thrd->req[0].mc_bus

	thrd->req[1].r = NULL;
	MARK_FREE(&thrd->req[1]);
static int dmac_alloc_threads(struct pl330_dmac *pl330)

	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kzalloc((1 + chans) * sizeof(*thrd),

	if (!pl330->channels)

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];

		_reset_thread(thrd);

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];

	pl330->manager = thrd;
static int dmac_alloc_resources(struct pl330_dmac *pl330)

	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_coherent(pi->dev,
				chans * pi->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL);
	if (!pl330->mcode_cpu) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

	ret = dmac_alloc_threads(pl330);

		dev_err(pi->dev, "%s:%d Can't create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pi->dev,
				chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
int pl330_add(struct pl330_info *pi)

	struct pl330_dmac *pl330;

	if (!pi || !pi->dev)

	/* If already added */

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);

		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

	/* Assign the info structure and private data */

	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */

		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);

		dev_err(pi->dev, "Unable to create channels for DMAC\n");

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

EXPORT_SYMBOL(pl330_add);
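/*
 * Registration sketch (assumes platform code fills pi->dev, pi->base and,
 * optionally, pi->mcbufsz before calling; the irq handler is the one
 * sketched after pl330_update() above):
 *
 *	pi->mcbufsz = 0;	(0 selects the default MCODE_BUFF_PER_REQ * 2)
 *	ret = pl330_add(pi);
 *	if (!ret)
 *		ret = request_irq(irq, pl330_irq_handler, 0, "pl330", pi);
 */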
static int dmac_free_threads(struct pl330_dmac *pl330)

	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;
	struct pl330_thread *thrd;

	/* Release Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel((void *)thrd);

	kfree(pl330->channels);
static void dmac_free_resources(struct pl330_dmac *pl330)

	struct pl330_info *pi = pl330->pinfo;
	int chans = pi->pcfg.num_chan;

	dmac_free_threads(pl330);

	dma_free_coherent(pi->dev, chans * pi->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
void pl330_del(struct pl330_info *pi)

	struct pl330_dmac *pl330;

	if (!pi || !pi->pl330_data)

	pl330 = pi->pl330_data;

	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_resources(pl330);

	pi->pl330_data = NULL;

EXPORT_SYMBOL(pl330_del);