/* linux/arch/arm/plat-samsung/s3c-pl330.c
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>

#include <asm/hardware/pl330.h>

#include <plat/s3c-pl330-pdata.h>
/**
 * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC.
 * @busy_chan: Number of channels currently busy.
 * @peri: List of IDs of peripherals this DMAC can work with.
 * @node: To attach to the global list of DMACs.
 * @pi: PL330 configuration info for the DMAC.
 * @kmcache: Pool to quickly allocate xfers for all channels in the dmac.
 * @clk: Pointer to the DMAC operation clock.
 */
struct s3c_pl330_dmac {
	unsigned		busy_chan;
	enum dma_ch		*peri;
	struct list_head	node;
	struct pl330_info	*pi;
	struct kmem_cache	*kmcache;
	struct clk		*clk;
};
/**
 * struct s3c_pl330_xfer - A request submitted by S3C DMA clients.
 * @token: Xfer ID provided by the client.
 * @node: To attach to the list of xfers on a channel.
 * @px: Xfer for PL330 core.
 * @chan: Owner channel of this xfer.
 */
struct s3c_pl330_xfer {
	void			*token;
	struct list_head	node;
	struct pl330_xfer	px;
	struct s3c_pl330_chan	*chan;
};
/**
 * struct s3c_pl330_chan - Logical channel to communicate with
 *	a physical peripheral.
 * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC.
 *	NULL if the channel is available to be acquired.
 * @id: ID of the peripheral that this channel can communicate with.
 * @options: Options specified by the client.
 * @sdaddr: Address provided via s3c2410_dma_devconfig.
 * @node: To attach to the global list of channels.
 * @lrq: Pointer to the last submitted pl330_req to PL330 core.
 * @xfer_list: To manage list of xfers enqueued.
 * @req: Two requests to communicate with the PL330 engine.
 * @callback_fn: Callback function to the client.
 * @rqcfg: Channel configuration for the xfers.
 * @xfer_head: Pointer to the xfer to be next executed.
 * @dmac: Pointer to the DMAC that manages this channel, NULL if the
 *	channel is available to be acquired.
 * @client: Client of this channel. NULL if the
 *	channel is available to be acquired.
 */
struct s3c_pl330_chan {
	void				*pl330_chan_id;
	enum dma_ch			id;
	unsigned int			options;
	unsigned long			sdaddr;
	struct list_head		node;
	struct pl330_req		*lrq;
	struct list_head		xfer_list;
	struct pl330_req		req[2];
	s3c2410_dma_cbfn_t		callback_fn;
	struct pl330_reqcfg		rqcfg;
	struct s3c_pl330_xfer		*xfer_head;
	struct s3c_pl330_dmac		*dmac;
	struct s3c2410_dma_client	*client;
};
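
/*
 * The two reqs per channel implement ping-pong submission to the PL330
 * core: while one req executes, the next xfer is loaded onto the other.
 * Throughout this file the req to (re)use is picked as
 *
 *	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
 *
 * i.e. whichever req was not the most recently submitted one ('lrq').
 */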
/* All DMACs in the platform */
static LIST_HEAD(dmac_list);

/* All channels to peripherals in the platform */
static LIST_HEAD(chan_list);

/*
 * Since we add resources (DMACs and channels) to the global pool,
 * we need to guard access to the resources using a global lock.
 */
static DEFINE_SPINLOCK(res_lock);
/* Returns the channel with ID 'id' in the chan_list */
static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch;

	list_for_each_entry(ch, &chan_list, node)
		if (ch->id == id)
			return ch;

	return NULL;
}
/* Allocate a new channel with ID 'id' and add to chan_list */
static void chan_add(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);

	/* Return if the channel already exists */
	if (ch)
		return;

	ch = kmalloc(sizeof(*ch), GFP_KERNEL);
	/* Return silently to work with other channels */
	if (!ch)
		return;

	ch->id = id;
	ch->dmac = NULL;

	list_add_tail(&ch->node, &chan_list);
}
/* If the channel is not yet acquired by any client */
static bool chan_free(struct s3c_pl330_chan *ch)
{
	if (!ch)
		return false;

	/* Channel points to some DMAC only when it's acquired */
	return ch->dmac ? false : true;
}
/*
 * Returns 0 if the peripheral i/f is invalid or not present on the dmac.
 * Index + 1, otherwise.
 */
static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id)
{
	enum dma_ch *id = dmac->peri;
	int i;

	/* Discount invalid markers */
	if (ch_id == DMACH_MAX)
		return 0;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch_id)
			return i + 1;

	return 0;
}
/* If all channel threads of the DMAC are busy */
static inline bool dmac_busy(struct s3c_pl330_dmac *dmac)
{
	struct pl330_info *pi = dmac->pi;

	return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true;
}
/*
 * Returns the number of free channels that
 * can be handled by this dmac only.
 */
static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac)
{
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	struct s3c_pl330_chan *ch;
	unsigned found, count = 0;
	enum dma_ch p;
	int i;

	for (i = 0; i < PL330_MAX_PERI; i++) {
		p = id[i];
		ch = id_to_chan(p);

		if (p == DMACH_MAX || !chan_free(ch))
			continue;

		found = 0;
		list_for_each_entry(d, &dmac_list, node) {
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				found = 1;
				break;
			}
		}

		if (!found)
			count++;
	}

	return count;
}
/*
 * Measure of suitability of 'dmac' handling 'ch'
 *
 * 0 indicates 'dmac' can not handle 'ch' either
 * because it is not supported by the hardware or
 * because all dmac channels are currently busy.
 *
 * >0 value indicates 'dmac' has the capability.
 * The bigger the value the more suitable the dmac.
 */
#define MAX_SUIT	UINT_MAX
#define MIN_SUIT	0

static unsigned suitability(struct s3c_pl330_dmac *dmac,
		struct s3c_pl330_chan *ch)
{
	struct pl330_info *pi = dmac->pi;
	enum dma_ch *id = dmac->peri;
	struct s3c_pl330_dmac *d;
	unsigned s;
	int i;

	s = MIN_SUIT;
	/* If all the DMAC channel threads are busy */
	if (dmac_busy(dmac))
		return s;

	for (i = 0; i < PL330_MAX_PERI; i++)
		if (id[i] == ch->id)
			break;

	/* If the 'dmac' can't talk to 'ch' */
	if (i == PL330_MAX_PERI)
		return s;

	s = MAX_SUIT;
	list_for_each_entry(d, &dmac_list, node) {
		/*
		 * If some other dmac can talk to this
		 * peri and has some channel free.
		 */
		if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) {
			s = 0;
			break;
		}
	}

	if (s)
		return s;

	s = 100;

	/* Good if free chans are more, bad otherwise */
	s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac);

	return s;
}
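
/*
 * A worked example of the scoring above, assuming the reconstructed
 * constants (illustrative numbers, not from a real configuration): if no
 * other non-busy DMAC reaches the peripheral, the score short-circuits to
 * MAX_SUIT; otherwise a DMAC with 8 channel threads, 3 of them busy, and
 * 2 free channels usable only by itself scores s = 100 + (8 - 3) - 2 = 103,
 * so DMACs with more uncommitted capacity win the tie.
 */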
/* More than one DMAC may have the capability to transfer data with the
 * peripheral. This function assigns the most suitable DMAC to manage the
 * channel and hence communicate with the peripheral.
 */
static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *d, *dmac = NULL;
	unsigned sn, sl = MIN_SUIT;

	list_for_each_entry(d, &dmac_list, node) {
		sn = suitability(d, ch);

		if (sn == MAX_SUIT)
			return d;
		else if (sn > sl) {
			sl = sn;
			dmac = d;
		}
	}

	return dmac;
}
/* Acquire the channel for peripheral 'id' */
static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct s3c_pl330_dmac *dmac;

	/* If the channel doesn't exist or is already acquired */
	if (!ch || !chan_free(ch)) {
		ch = NULL;
		goto acq_exit;
	}

	dmac = map_chan_to_dmac(ch);
	/* If couldn't map */
	if (!dmac) {
		ch = NULL;
		goto acq_exit;
	}

	dmac->busy_chan++;
	ch->dmac = dmac;

acq_exit:
	return ch;
}
/* Delete xfer from the queue */
static inline void del_from_queue(struct s3c_pl330_xfer *xfer)
{
	struct s3c_pl330_xfer *t;
	struct s3c_pl330_chan *ch;
	int found;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Make sure xfer is in the queue */
	found = 0;
	list_for_each_entry(t, &ch->xfer_list, node)
		if (t == xfer) {
			found = 1;
			break;
		}

	if (!found)
		return;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		t = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		t = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	/* If there was only one node left */
	if (t == xfer)
		ch->xfer_head = NULL;
	else if (ch->xfer_head == xfer)
		ch->xfer_head = t;

	list_del(&xfer->node);
}
/* Provides a pointer to the next xfer in the queue.
 * If the CIRCULAR option is set, the list is left intact,
 * otherwise the xfer is removed from the list.
 * Forced delete 'pluck' can be set to override the CIRCULAR option.
 */
static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch,
		int pluck)
{
	struct s3c_pl330_xfer *xfer = ch->xfer_head;

	if (!xfer)
		return NULL;

	/* If xfer is last entry in the queue */
	if (xfer->node.next == &ch->xfer_list)
		ch->xfer_head = list_entry(ch->xfer_list.next,
				struct s3c_pl330_xfer, node);
	else
		ch->xfer_head = list_entry(xfer->node.next,
				struct s3c_pl330_xfer, node);

	if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR))
		del_from_queue(xfer);

	return xfer;
}
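
/*
 * For example (a sketch of the queue discipline above, not code from this
 * driver): with xfers A, B and C enqueued on a CIRCULAR channel, repeated
 * get_from_queue(ch, 0) calls hand out A, B, C, A, ... and leave the nodes
 * linked so the buffers recycle; without CIRCULAR (or with pluck set) the
 * same calls hand out A, B, C, NULL, unlinking each xfer as it goes.
 */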
static inline void add_to_queue(struct s3c_pl330_chan *ch,
		struct s3c_pl330_xfer *xfer, int front)
{
	struct pl330_xfer *xt;

	/* If queue empty */
	if (ch->xfer_head == NULL)
		ch->xfer_head = xfer;

	xt = &ch->xfer_head->px;
	/* If the head already submitted (CIRCULAR head) */
	if (ch->options & S3C2410_DMAF_CIRCULAR &&
		(xt == ch->req[0].x || xt == ch->req[1].x))
		ch->xfer_head = xfer;

	/* If this is a resubmission, it should go at the head */
	if (front) {
		ch->xfer_head = xfer;
		list_add(&xfer->node, &ch->xfer_list);
	} else {
		list_add_tail(&xfer->node, &ch->xfer_list);
	}
}
static inline void _finish_off(struct s3c_pl330_xfer *xfer,
		enum s3c2410_dma_buffresult res, int ffree)
{
	struct s3c_pl330_chan *ch;

	if (!xfer)
		return;

	ch = xfer->chan;

	/* Do callback */
	if (ch->callback_fn)
		ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res);

	/* Force free or if the buffer is not needed anymore */
	if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR))
		kmem_cache_free(ch->dmac->kmcache, xfer);
}
static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch,
		struct pl330_req *r)
{
	struct s3c_pl330_xfer *xfer;
	int ret = 0;

	/* If already submitted */
	if (r->x)
		return 0;

	xfer = get_from_queue(ch, 0);
	if (xfer) {
		r->x = &xfer->px;

		/* Use max bandwidth for M<->M xfers */
		if (r->rqtype == MEMTOMEM) {
			struct pl330_info *pi = xfer->chan->dmac->pi;
			int burst = 1 << ch->rqcfg.brst_size;
			u32 bytes = r->x->bytes;
			int bl;

			bl = pi->pcfg.data_bus_width / 8;
			bl *= pi->pcfg.data_buf_dep;
			bl /= burst;

			/* src/dst_burst_len can't be more than 16 */
			if (bl > 16)
				bl = 16;

			while (bl > 1) {
				if (!(bytes % (bl * burst)))
					break;
				bl--;
			}

			ch->rqcfg.brst_len = bl;
		} else {
			ch->rqcfg.brst_len = 1;
		}

		ret = pl330_submit_req(ch->pl330_chan_id, r);

		/* If submission was successful */
		if (!ret) {
			ch->lrq = r; /* latest submitted req */
			return 0;
		}

		r->x = NULL;

		/* If both of the PL330 ping-pong buffers are filled */
		if (ret == -EAGAIN) {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			/* Queue back again */
			add_to_queue(ch, xfer, 1);
			ret = 0;
		} else {
			dev_err(ch->dmac->pi->dev, "%s:%d!\n",
				__func__, __LINE__);
			_finish_off(xfer, S3C2410_RES_ERR, 0);
		}
	}

	return ret;
}
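
/*
 * A worked example of the M<->M bandwidth heuristic above, assuming the
 * reconstructed arithmetic and an illustrative configuration: with a
 * 64-bit data bus (8 bytes), a 16-line data buffer and brst_size = 2
 * (4-byte beats), bl starts at 8 * 16 / 4 = 32 and is capped at 16; a
 * 4096-byte xfer keeps bl = 16 since 4096 % (16 * 4) == 0, while an
 * unaligned length would step bl down until the xfer divides evenly.
 */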
static void s3c_pl330_rq(struct s3c_pl330_chan *ch,
	struct pl330_req *r, enum pl330_op_err err)
{
	unsigned long flags;
	struct s3c_pl330_xfer *xfer;
	struct pl330_xfer *xl = r->x;
	enum s3c2410_dma_buffresult res;

	spin_lock_irqsave(&res_lock, flags);

	r->x = NULL;

	s3c_pl330_submit(ch, r);

	spin_unlock_irqrestore(&res_lock, flags);

	/* Map result to S3C DMA API */
	if (err == PL330_ERR_NONE)
		res = S3C2410_RES_OK;
	else if (err == PL330_ERR_ABORT)
		res = S3C2410_RES_ABORT;
	else
		res = S3C2410_RES_ERR;

	/* If the last request had some xfer */
	if (xl) {
		xfer = container_of(xl, struct s3c_pl330_xfer, px);
		_finish_off(xfer, res, 0);
	} else {
		dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n",
			__func__, __LINE__);
	}
}
static void s3c_pl330_rq0(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
					struct s3c_pl330_chan, req[0]);
	s3c_pl330_rq(ch, r, err);
}
static void s3c_pl330_rq1(void *token, enum pl330_op_err err)
{
	struct pl330_req *r = token;
	struct s3c_pl330_chan *ch = container_of(r,
					struct s3c_pl330_chan, req[1]);
	s3c_pl330_rq(ch, r, err);
}
/* Release an acquired channel */
static void chan_release(struct s3c_pl330_chan *ch)
{
	struct s3c_pl330_dmac *dmac;

	if (chan_free(ch))
		return;

	dmac = ch->dmac;
	ch->dmac = NULL;
	dmac->busy_chan--;
}
int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op)
{
	struct s3c_pl330_xfer *xfer;
	enum pl330_chan_op pl330op;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int idx, ret;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto ctrl_exit;
	}

	switch (op) {
	case S3C2410_DMAOP_START:
		/* Make sure both reqs are enqueued */
		idx = (ch->lrq == &ch->req[0]) ? 1 : 0;
		s3c_pl330_submit(ch, &ch->req[idx]);
		s3c_pl330_submit(ch, &ch->req[1 - idx]);
		pl330op = PL330_OP_START;
		break;

	case S3C2410_DMAOP_STOP:
		pl330op = PL330_OP_ABORT;
		break;

	case S3C2410_DMAOP_FLUSH:
		pl330op = PL330_OP_FLUSH;
		break;

	case S3C2410_DMAOP_PAUSE:
	case S3C2410_DMAOP_RESUME:
	case S3C2410_DMAOP_TIMEOUT:
	case S3C2410_DMAOP_STARTED:
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;

	default:
		spin_unlock_irqrestore(&res_lock, flags);
		return -EINVAL;
	}

	ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op);

	if (pl330op == PL330_OP_START) {
		spin_unlock_irqrestore(&res_lock, flags);
		return ret;
	}

	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	/* Abort the current xfer */
	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		/* Drop xfer during FLUSH */
		if (pl330op == PL330_OP_FLUSH)
			del_from_queue(xfer);

		ch->req[idx].x = NULL;

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT,
				pl330op == PL330_OP_FLUSH ? 1 : 0);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Flush the whole queue */
	if (pl330op == PL330_OP_FLUSH) {

		if (ch->req[1 - idx].x) {
			xfer = container_of(ch->req[1 - idx].x,
					struct s3c_pl330_xfer, px);

			del_from_queue(xfer);

			ch->req[1 - idx].x = NULL;

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);
		}

		/* Finish off the remaining in the queue */
		xfer = ch->xfer_head;
		while (xfer) {

			del_from_queue(xfer);

			spin_unlock_irqrestore(&res_lock, flags);
			_finish_off(xfer, S3C2410_RES_ABORT, 1);
			spin_lock_irqsave(&res_lock, flags);

			xfer = ch->xfer_head;
		}
	}

ctrl_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
int s3c2410_dma_enqueue(enum dma_ch id, void *token,
			dma_addr_t addr, int size)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int idx, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	/* Error if invalid or free channel */
	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	/* Error if size is unaligned */
	if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) {
		ret = -EINVAL;
		goto enq_exit;
	}

	xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC);
	if (!xfer) {
		ret = -ENOMEM;
		goto enq_exit;
	}

	xfer->token = token;
	xfer->chan = ch;
	xfer->px.bytes = size;
	xfer->px.next = NULL; /* Single request */

	/* For the S3C DMA API, direction is always fixed for all xfers */
	if (ch->req[0].rqtype == MEMTODEV) {
		xfer->px.src_addr = addr;
		xfer->px.dst_addr = ch->sdaddr;
	} else {
		xfer->px.src_addr = ch->sdaddr;
		xfer->px.dst_addr = addr;
	}

	add_to_queue(ch, xfer, 0);

	/* Try submitting on either request */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (!ch->req[idx].x)
		s3c_pl330_submit(ch, &ch->req[idx]);
	else
		s3c_pl330_submit(ch, &ch->req[1 - idx]);

	spin_unlock_irqrestore(&res_lock, flags);

	if (ch->options & S3C2410_DMAF_AUTOSTART)
		s3c2410_dma_ctrl(id, S3C2410_DMAOP_START);

	return 0;

enq_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_enqueue);
int s3c2410_dma_request(enum dma_ch id,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c_pl330_dmac *dmac;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = chan_acquire(id);
	if (!ch) {
		ret = -EBUSY;
		goto req_exit;
	}

	dmac = ch->dmac;

	ch->pl330_chan_id = pl330_request_channel(dmac->pi);
	if (!ch->pl330_chan_id) {
		chan_release(ch);
		ret = -EBUSY;
		goto req_exit;
	}

	ch->client = client;
	ch->options = 0; /* Clear any option */
	ch->callback_fn = NULL; /* Clear any callback */
	ch->lrq = NULL;

	ch->rqcfg.brst_size = 2; /* Default word size */
	ch->rqcfg.swap = SWAP_NO;
	ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */
	ch->rqcfg.privileged = 0;
	ch->rqcfg.insnaccess = 0;

	/* Set invalid direction */
	ch->req[0].rqtype = DEVTODEV;
	ch->req[1].rqtype = ch->req[0].rqtype;

	ch->req[0].cfg = &ch->rqcfg;
	ch->req[1].cfg = ch->req[0].cfg;

	ch->req[0].peri = iface_of_dmac(dmac, id) - 1; /* Original index */
	ch->req[1].peri = ch->req[0].peri;

	ch->req[0].token = &ch->req[0];
	ch->req[0].xfer_cb = s3c_pl330_rq0;
	ch->req[1].token = &ch->req[1];
	ch->req[1].xfer_cb = s3c_pl330_rq1;

	ch->req[0].x = NULL;
	ch->req[1].x = NULL;

	/* Reset xfer list */
	INIT_LIST_HEAD(&ch->xfer_list);
	ch->xfer_head = NULL;

req_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_request);
int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client)
{
	struct s3c_pl330_chan *ch;
	struct s3c_pl330_xfer *xfer;
	unsigned long flags;
	int ret = 0;
	unsigned idx;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		goto free_exit;

	/* Refuse if someone else wanted to free the channel */
	if (ch->client != client) {
		ret = -EBUSY;
		goto free_exit;
	}

	/* Stop any active xfer, flush the queue and do callbacks */
	pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH);

	/* Abort the submitted requests */
	idx = (ch->lrq == &ch->req[0]) ? 1 : 0;

	if (ch->req[idx].x) {
		xfer = container_of(ch->req[idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	if (ch->req[1 - idx].x) {
		xfer = container_of(ch->req[1 - idx].x,
				struct s3c_pl330_xfer, px);

		ch->req[1 - idx].x = NULL;
		del_from_queue(xfer);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	}

	/* Pluck and abort the queued requests in order */
	do {
		xfer = get_from_queue(ch, 1);

		spin_unlock_irqrestore(&res_lock, flags);
		_finish_off(xfer, S3C2410_RES_ABORT, 1);
		spin_lock_irqsave(&res_lock, flags);
	} while (xfer);

	ch->client = NULL;

	pl330_release_channel(ch->pl330_chan_id);

	ch->pl330_chan_id = NULL;

	chan_release(ch);

free_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_free);
int s3c2410_dma_config(enum dma_ch id, int xferunit)
{
	struct s3c_pl330_chan *ch;
	struct pl330_info *pi;
	unsigned long flags;
	int i, dbwidth, ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	pi = ch->dmac->pi;
	dbwidth = pi->pcfg.data_bus_width / 8;

	/* Max size of xfer can be pcfg.data_bus_width */
	if (xferunit > dbwidth) {
		ret = -EINVAL;
		goto cfg_exit;
	}

	i = 0;
	while ((1 << i) < xferunit)
		i++;

	/* If valid value */
	if (xferunit == (1 << i))
		ch->rqcfg.brst_size = i;
	else
		ret = -EINVAL;

cfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_config);
/* Options that are supported by this driver */
#define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART)

int s3c2410_dma_setflags(enum dma_ch id, unsigned int options)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS))
		ret = -EINVAL;
	else
		ch->options = options;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_setflags);
int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch))
		ret = -EINVAL;
	else
		ch->callback_fn = rtn;

	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source,
			unsigned long address)
{
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&res_lock, flags);

	ch = id_to_chan(id);

	if (!ch || chan_free(ch)) {
		ret = -EINVAL;
		goto devcfg_exit;
	}

	switch (source) {
	case S3C2410_DMASRC_HW: /* P->M */
		ch->req[0].rqtype = DEVTOMEM;
		ch->req[1].rqtype = DEVTOMEM;
		ch->rqcfg.src_inc = 0;
		ch->rqcfg.dst_inc = 1;
		break;
	case S3C2410_DMASRC_MEM: /* M->P */
		ch->req[0].rqtype = MEMTODEV;
		ch->req[1].rqtype = MEMTODEV;
		ch->rqcfg.src_inc = 1;
		ch->rqcfg.dst_inc = 0;
		break;
	default:
		ret = -EINVAL;
		goto devcfg_exit;
	}

	ch->sdaddr = address;

devcfg_exit:
	spin_unlock_irqrestore(&res_lock, flags);

	return ret;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
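
/*
 * A minimal sketch of how a client driver might drive the API above
 * (hypothetical names: DMACH_UART0_TX, uart_tx_client, fifo_addr, buf_dma,
 * token and done_cb are illustrative, not defined in this file):
 *
 *	static struct s3c2410_dma_client uart_tx_client = {
 *		.name = "uart-tx",
 *	};
 *
 *	s3c2410_dma_request(DMACH_UART0_TX, &uart_tx_client, NULL);
 *	s3c2410_dma_devconfig(DMACH_UART0_TX, S3C2410_DMASRC_MEM, fifo_addr);
 *	s3c2410_dma_config(DMACH_UART0_TX, 4);
 *	s3c2410_dma_set_buffdone_fn(DMACH_UART0_TX, done_cb);
 *	s3c2410_dma_setflags(DMACH_UART0_TX, S3C2410_DMAF_AUTOSTART);
 *
 *	s3c2410_dma_enqueue(DMACH_UART0_TX, token, buf_dma, size);
 *
 * With AUTOSTART set, enqueue kicks the channel itself; otherwise the
 * client follows up with s3c2410_dma_ctrl(DMACH_UART0_TX,
 * S3C2410_DMAOP_START), and s3c2410_dma_free() releases the channel.
 */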
int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c_pl330_chan *ch = id_to_chan(id);
	struct pl330_chanstatus status;
	int ret;

	if (!ch || chan_free(ch))
		return -EINVAL;

	ret = pl330_chan_status(ch->pl330_chan_id, &status);
	if (ret < 0)
		return ret;

	*src = status.src_addr;
	*dst = status.dst_addr;

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	if (pl330_update(data))
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}
static int pl330_probe(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *s3c_pl330_dmac;
	struct s3c_pl330_platdata *pl330pd;
	struct pl330_info *pl330_info;
	struct resource *res;
	int i, ret, irq;

	pl330pd = pdev->dev.platform_data;

	/* Can't do without the list of _32_ peripherals */
	if (!pl330pd || !pl330pd->peri) {
		dev_err(&pdev->dev, "platform data missing!\n");
		return -ENODEV;
	}

	pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL);
	if (!pl330_info)
		return -ENOMEM;

	pl330_info->pl330_data = NULL;
	pl330_info->dev = &pdev->dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENODEV;
		goto probe_err1;
	}

	request_mem_region(res->start, resource_size(res), pdev->name);

	pl330_info->base = ioremap(res->start, resource_size(res));
	if (!pl330_info->base) {
		ret = -ENXIO;
		goto probe_err2;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto probe_err3;
	}

	ret = request_irq(irq, pl330_irq_handler, 0,
			dev_name(&pdev->dev), pl330_info);
	if (ret)
		goto probe_err4;

	/* Allocate a new DMAC */
	s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL);
	if (!s3c_pl330_dmac) {
		ret = -ENOMEM;
		goto probe_err5;
	}

	/* Get operation clock and enable it */
	s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma");
	if (IS_ERR(s3c_pl330_dmac->clk)) {
		dev_err(&pdev->dev, "Cannot get operation clock.\n");
		ret = -EINVAL;
		goto probe_err6;
	}
	clk_enable(s3c_pl330_dmac->clk);

	ret = pl330_add(pl330_info);
	if (ret)
		goto probe_err7;

	/* Hook the info */
	s3c_pl330_dmac->pi = pl330_info;

	/* No busy channels */
	s3c_pl330_dmac->busy_chan = 0;

	s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev),
				sizeof(struct s3c_pl330_xfer), 0, 0, NULL);

	if (!s3c_pl330_dmac->kmcache) {
		ret = -ENOMEM;
		goto probe_err8;
	}

	/* Get the list of peripherals */
	s3c_pl330_dmac->peri = pl330pd->peri;

	/* Attach to the list of DMACs */
	list_add_tail(&s3c_pl330_dmac->node, &dmac_list);

	/* Create a channel for each peripheral in the DMAC
	 * that is, if it doesn't already exist
	 */
	for (i = 0; i < PL330_MAX_PERI; i++)
		if (s3c_pl330_dmac->peri[i] != DMACH_MAX)
			chan_add(s3c_pl330_dmac->peri[i]);

	printk(KERN_INFO
		"Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name);
	printk(KERN_INFO
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pl330_info->pcfg.data_buf_dep,
		pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan,
		pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events);

	return 0;

probe_err8:
	pl330_del(pl330_info);
probe_err7:
	clk_disable(s3c_pl330_dmac->clk);
	clk_put(s3c_pl330_dmac->clk);
probe_err6:
	kfree(s3c_pl330_dmac);
probe_err5:
	free_irq(irq, pl330_info);
probe_err4:
probe_err3:
	iounmap(pl330_info->base);
probe_err2:
	release_mem_region(res->start, resource_size(res));
probe_err1:
	kfree(pl330_info);

	return ret;
}
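
/*
 * A minimal sketch of the platform glue this probe expects (all names
 * below are hypothetical; real boards define their own device and map):
 * the platform data carries the DMAC's peripheral interface map, with
 * DMACH_MAX marking unused entries.
 *
 *	static struct s3c_pl330_platdata example_pdma_pdata = {
 *		.peri = {
 *			[0] = DMACH_UART0_RX,
 *			[1] = DMACH_UART0_TX,
 *			[2] = DMACH_MAX,
 *			[3] = DMACH_MAX,
 *			...
 *		},
 *	};
 *
 *	example_pdma_device.dev.platform_data = &example_pdma_pdata;
 *	platform_device_register(&example_pdma_device);
 */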
static int pl330_remove(struct platform_device *pdev)
{
	struct s3c_pl330_dmac *dmac, *d;
	struct s3c_pl330_chan *ch;
	unsigned long flags;
	int del, found;

	if (!pdev->dev.platform_data)
		return -EINVAL;

	spin_lock_irqsave(&res_lock, flags);

	found = 0;
	list_for_each_entry(d, &dmac_list, node)
		if (d->pi->dev == &pdev->dev) {
			found = 1;
			break;
		}

	if (!found) {
		spin_unlock_irqrestore(&res_lock, flags);
		return 0;
	}

	dmac = d;

	/* Remove all channels that are managed only by this DMAC */
	list_for_each_entry(ch, &chan_list, node) {

		/* Only channels that are handled by this DMAC */
		if (iface_of_dmac(dmac, ch->id))
			del = 1;
		else
			continue;

		/* Don't remove if some other DMAC has it too */
		list_for_each_entry(d, &dmac_list, node)
			if (d != dmac && iface_of_dmac(d, ch->id)) {
				del = 0;
				break;
			}

		if (del) {
			spin_unlock_irqrestore(&res_lock, flags);
			s3c2410_dma_free(ch->id, ch->client);
			spin_lock_irqsave(&res_lock, flags);
			list_del(&ch->node);
			kfree(ch);
		}
	}

	/* Disable operation clock */
	clk_disable(dmac->clk);
	clk_put(dmac->clk);

	/* Remove the DMAC */
	list_del(&dmac->node);
	kfree(dmac);

	spin_unlock_irqrestore(&res_lock, flags);

	return 0;
}
static struct platform_driver pl330_driver = {
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "s3c-pl330",
	},
	.probe		= pl330_probe,
	.remove		= pl330_remove,
};

static int __init pl330_init(void)
{
	return platform_driver_register(&pl330_driver);
}
module_init(pl330_init);

static void __exit pl330_exit(void)
{
	platform_driver_unregister(&pl330_driver);
}
module_exit(pl330_exit);
MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
MODULE_DESCRIPTION("Driver for PL330 DMA Controller");
MODULE_LICENSE("GPL");