/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"
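
/* CPDMA global register offsets, relative to the dmaregs base in cpdma_params */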
#define CPDMA_TXIDVER           0x00
#define CPDMA_TXCONTROL         0x04
#define CPDMA_TXTEARDOWN        0x08
#define CPDMA_RXIDVER           0x10
#define CPDMA_RXCONTROL         0x14
#define CPDMA_SOFTRESET         0x1c
#define CPDMA_RXTEARDOWN        0x18
#define CPDMA_TXINTSTATRAW      0x80
#define CPDMA_TXINTSTATMASKED   0x84
#define CPDMA_TXINTMASKSET      0x88
#define CPDMA_TXINTMASKCLEAR    0x8c
#define CPDMA_MACINVECTOR       0x90
#define CPDMA_MACEOIVECTOR      0x94
#define CPDMA_RXINTSTATRAW      0xa0
#define CPDMA_RXINTSTATMASKED   0xa4
#define CPDMA_RXINTMASKSET      0xa8
#define CPDMA_RXINTMASKCLEAR    0xac
#define CPDMA_DMAINTSTATRAW     0xb0
#define CPDMA_DMAINTSTATMASKED  0xb4
#define CPDMA_DMAINTMASKSET     0xb8
#define CPDMA_DMAINTMASKCLEAR   0xbc
#define CPDMA_DMAINT_HOSTERR    BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL        0x20
#define CPDMA_DMASTATUS         0x24
#define CPDMA_RXBUFFOFS         0x28
#define CPDMA_EM_CONTROL        0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP          BIT(31)
#define CPDMA_DESC_EOP          BIT(30)
#define CPDMA_DESC_OWNER        BIT(29)
#define CPDMA_DESC_EOQ          BIT(28)
#define CPDMA_DESC_TD_COMPLETE  BIT(27)
#define CPDMA_DESC_PASS_CRC     BIT(26)
#define CPDMA_DESC_TO_PORT_EN   BIT(20)
#define CPDMA_TO_PORT_SHIFT     16
#define CPDMA_DESC_PORT_MASK    (BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN      4

#define CPDMA_TEARDOWN_VALUE    0xfffffffc
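
/*
 * CPPI descriptor layout as assumed by the desc_read()/desc_write() accessors
 * below: the hw_* words are shared with the DMA engine, the sw_* words are
 * host-only bookkeeping read back at completion time.  The field list is
 * inferred from the accesses elsewhere in this file.
 */
struct cpdma_desc {
        /* hardware fields */
        u32                     hw_next;
        u32                     hw_buffer;
        u32                     hw_len;
        u32                     hw_mode;
        /* software fields */
        void                    *sw_token;
        u32                     sw_buffer;
        u32                     sw_len;
};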

struct cpdma_desc_pool {
        dma_addr_t              phys;
        dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
        int                     num_desc;
        struct device           *dev;
        struct gen_pool         *gen_pool;
};

struct cpdma_ctlr {
        enum cpdma_state        state;
        struct cpdma_params     params;
        struct device           *dev;
        struct cpdma_desc_pool  *pool;
        spinlock_t              lock;
        struct cpdma_chan       *channels[2 * CPDMA_MAX_CHANNELS];
        int                     chan_num;
};

struct cpdma_chan {
        struct cpdma_desc __iomem       *head, *tail;
        void __iomem                    *hdp, *cp, *rxfree;
        enum cpdma_state                state;
        struct cpdma_ctlr               *ctlr;
        int                             chan_num;
        spinlock_t                      lock;
        int                             count;
        u32                             desc_num;
        u32                             mask;
        cpdma_handler_fn                handler;
        enum dma_data_direction         dir;
        struct cpdma_chan_stats         stats;
        /* offsets into dmaregs */
        int                             int_set, int_clear, td;
};

#define tx_chan_num(chan)       (chan)
#define rx_chan_num(chan)       ((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)        ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)        (!is_rx_chan(chan))
#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)       __chan_linear((chan)->chan_num)

/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs         params.dmaregs
#define num_chan        params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)         __raw_readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)            __raw_readl((chan)->fld)
#define desc_read(desc, fld)            __raw_readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)     __raw_writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)        __raw_writel(v, (chan)->fld)
#define desc_write(desc, fld, v)        __raw_writel((u32)(v), &(desc)->fld)
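
/*
 * cpdma_desc_to_port() sets the TO_PORT_EN bit and the destination port
 * number in a tx descriptor's mode word when a directed (port-specific)
 * transmit is requested; rx channels are left untouched.
 */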
#define cpdma_desc_to_port(chan, mode, directed)                        \
        do {                                                            \
                if (!is_rx_chan(chan) && ((directed == 1) ||            \
                                          (directed == 2)))             \
                        mode |= (CPDMA_DESC_TO_PORT_EN |                \
                                 (directed << CPDMA_TO_PORT_SHIFT));    \
        } while (0)
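
/* Release a descriptor pool; warns if any descriptors are still outstanding. */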
static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
{
        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %d != avail %d",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));

        if (pool->cpumap)
                dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
                                  pool->phys);
        else
                iounmap(pool->iomap);
}

/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details
 */
static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                       int size, int align)
{
        struct cpdma_desc_pool *pool;
        int ret;

        pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;

        pool->dev       = dev;
        pool->mem_size  = size;
        pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
        pool->num_desc  = size / pool->desc_size;

        pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
                                              "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                dev_err(dev, "pool create failed %ld\n",
                        PTR_ERR(pool->gen_pool));
                goto gen_pool_create_fail;
        }

        if (phys) {
                pool->phys  = phys;
                pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return pool;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(pool);
gen_pool_create_fail:
        return NULL;
}
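
/*
 * Translate between the CPU-side (ioremapped) view of a descriptor and the
 * DMA address the hardware expects in hw_next and the HDP/CP registers.
 */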
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        if (!desc)
                return 0;

        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
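
/* Allocate a controller instance and its descriptor pool from cpdma_params. */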
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        spin_lock_init(&ctlr->lock);

        ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
                                            ctlr->params.desc_mem_phys,
                                            ctlr->params.desc_hw_addr,
                                            ctlr->params.desc_mem_size,
                                            ctlr->params.desc_align);
        if (!ctlr->pool)
                return NULL;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
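
/*
 * Bring the controller out of idle: optionally soft-reset it, zero every
 * head-descriptor-pointer and completion-pointer register, enable rx/tx DMA
 * and restart any channels that already exist.
 */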
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                __raw_writel(0, ctlr->params.txhdp + 4 * i);
                __raw_writel(0, ctlr->params.rxhdp + 4 * i);
                __raw_writel(0, ctlr->params.txcp + 4 * i);
                __raw_writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_start(ctlr->channels[i]);
        }
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
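
/* Tear down all channels, mask interrupts and disable rx/tx DMA. */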
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);

int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr->pool);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);

int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i, reg;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR;
        dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
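
/* Acknowledge interrupts (EOI) and report masked rx/tx channel interrupt state. */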
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);

/**
 * cpdma_chan_split_pool - Splits ctrl pool between all channels.
 * Has to be called under ctlr lock
 */
static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        struct cpdma_desc_pool *pool = ctlr->pool;
        struct cpdma_chan *chan;
        int ch_desc_num;
        int i;

        if (!ctlr->chan_num)
                return;

        /* calculate average size of pool slice */
        ch_desc_num = pool->num_desc / ctlr->chan_num;

        /* split ctlr pool */
        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (chan)
                        chan->desc_num = ch_desc_num;
        }
}
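
/*
 * Create one rx or tx channel: pick the per-channel register offsets from
 * cpdma_params, initialise bookkeeping and rebalance the descriptor pool
 * across all channels.
 */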
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return ERR_PTR(-EINVAL);

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr      = ctlr;
        chan->state     = CPDMA_STATE_IDLE;
        chan->chan_num  = chan_num;
        chan->handler   = handler;
        chan->desc_num  = ctlr->pool->num_desc / 2;

        if (is_rx_chan(chan)) {
                chan->hdp       = ctlr->params.rxhdp + offset;
                chan->cp        = ctlr->params.rxcp + offset;
                chan->rxfree    = ctlr->params.rxfree + offset;
                chan->int_set   = CPDMA_RXINTMASKSET;
                chan->int_clear = CPDMA_RXINTMASKCLEAR;
                chan->td        = CPDMA_RXTEARDOWN;
                chan->dir       = DMA_FROM_DEVICE;
        } else {
                chan->hdp       = ctlr->params.txhdp + offset;
                chan->cp        = ctlr->params.txcp + offset;
                chan->int_set   = CPDMA_TXINTMASKSET;
                chan->int_clear = CPDMA_TXINTMASKCLEAR;
                chan->td        = CPDMA_TXTEARDOWN;
                chan->dir       = DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;
        ctlr->chan_num++;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);

        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);

int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);

int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        ctlr->chan_num--;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);

int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
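
/*
 * Queue a descriptor on a channel.  An idle queue just gets its head/tail and
 * HDP written; otherwise the descriptor is chained after the current tail,
 * and if the hardware has already hit end-of-queue on that tail (EOQ set,
 * OWNER clear) the HDP is rewritten to recover from the misqueue.
 */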
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *prev = chan->tail;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        u32                             mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}
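
/*
 * Map a buffer for DMA, build a single SOP|EOP descriptor for it and queue
 * it; short packets are padded up to min_packet_size before mapping.
 */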
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        dma_addr_t                      buffer;
        unsigned long                   flags;
        u32                             mode;
        int                             ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        desc_write(desc, hw_next,   0);
        desc_write(desc, hw_buffer, buffer);
        desc_write(desc, hw_len,    len);
        desc_write(desc, hw_mode,   mode | len);
        desc_write(desc, sw_token,  token);
        desc_write(desc, sw_buffer, buffer);
        desc_write(desc, sw_len,    len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);

bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        bool                    free_tx_desc;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                        gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);

static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        dma_addr_t              buff_dma;
        int                     origlen;
        void                    *token;

        token    = (void *)desc_read(desc, sw_token);
        buff_dma = desc_read(desc, sw_buffer);
        origlen  = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)(token, outlen, status);
}
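
/*
 * Retire one completed descriptor: check the OWNER bit, advance the queue
 * head, acknowledge completion via the CP register, restart the queue if EOQ
 * was reached, then unmap and hand the buffer back through the handler.
 */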
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr               *ctlr = chan->ctlr;
        struct cpdma_desc __iomem       *desc;
        int                             status, outlen;
        int                             cb_status = 0;
        struct cpdma_desc_pool          *pool = ctlr->pool;
        dma_addr_t                      desc_dma;
        unsigned long                   flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status  = __raw_readl(&desc->hw_mode);
        outlen  = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status  = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                            CPDMA_DESC_PORT_MASK);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if (status & CPDMA_DESC_EOQ) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}

int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);

int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
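
/*
 * Stop a channel: mask its interrupt, request hardware teardown, wait for
 * the teardown-complete marker, drain completed packets and return any
 * still-queued descriptors to the pool with -ENOSYS status.
 */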
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr       *ctlr = chan->ctlr;
        struct cpdma_desc_pool  *pool = ctlr->pool;
        unsigned long           flags;
        int                     ret;
        unsigned                timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);
                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);

int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
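
/* Bit-field descriptions for the DMACONTROL/DMASTATUS/RXBUFFOFS controls. */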
struct cpdma_control_info {
        u32             reg;
        u32             shift, mask;
        int             access;
#define ACCESS_RO       BIT(0)
#define ACCESS_WO       BIT(1)
#define ACCESS_RW       (ACCESS_RO | ACCESS_WO)
};

static struct cpdma_control_info controls[] = {
        [CPDMA_CMD_IDLE]          = {CPDMA_DMACONTROL, 3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]     = {CPDMA_DMACONTROL, 0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]         = {CPDMA_DMASTATUS,  31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,  20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,  16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,  12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,  8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,  0,  0xffff, ACCESS_RW},
};

int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_RO) != ACCESS_RO)
                goto unlock_ret;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        struct cpdma_control_info *info = &controls[control];
        int ret;
        u32 val;

        spin_lock_irqsave(&ctlr->lock, flags);

        ret = -ENOTSUPP;
        if (!ctlr->params.has_ext_regs)
                goto unlock_ret;

        ret = -EINVAL;
        if (ctlr->state != CPDMA_STATE_ACTIVE)
                goto unlock_ret;

        ret = -ENOENT;
        if (control < 0 || control >= ARRAY_SIZE(controls))
                goto unlock_ret;

        ret = -EPERM;
        if ((info->access & ACCESS_WO) != ACCESS_WO)
                goto unlock_ret;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);

        ret = 0;

unlock_ret:
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);

MODULE_LICENSE("GPL");