/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)

/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384

struct cpdma_desc {
        /* hardware fields */
        u32	hw_next;
        u32	hw_buffer;
        u32	hw_len;
        u32	hw_mode;
        /* software fields */
        u32	sw_token;
        u32	sw_buffer;
        u32	sw_len;
};
struct cpdma_desc_pool {
        phys_addr_t		phys;
        dma_addr_t		hw_addr;
        void __iomem		*iomap;		/* ioremap map */
        void			*cpumap;	/* dma_alloc map */
        int			desc_size, mem_size;
        int			num_desc;
        struct gen_pool		*gen_pool;
};

enum cpdma_state {
        CPDMA_STATE_IDLE,
        CPDMA_STATE_ACTIVE,
        CPDMA_STATE_TEARDOWN,
};

struct cpdma_ctlr {
        enum cpdma_state	state;
        struct cpdma_params	params;
        struct device		*dev;
        struct cpdma_desc_pool	*pool;
        spinlock_t		lock;
        struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
        int			num_rx_desc; /* RX descriptors number */
        int			num_tx_desc; /* TX descriptors number */
};
struct cpdma_chan {
        struct cpdma_desc __iomem	*head, *tail;
        void __iomem			*hdp, *cp, *rxfree;
        enum cpdma_state		state;
        struct cpdma_ctlr		*ctlr;
        int				chan_num;
        spinlock_t			lock;
        int				count;
        u32				desc_num;
        u32				mask;
        cpdma_handler_fn		handler;
        enum dma_data_direction		dir;
        struct cpdma_chan_stats		stats;
        /* offsets into dmaregs */
        int				int_set, int_clear, td;
        int				weight;
        u32				rate_factor;
        u32				rate;
};
struct cpdma_control_info {
        u32		reg;
        u32		shift, mask;
        int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
static struct cpdma_control_info controls[] = {
        [CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
        [CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
        [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
        [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
        [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
        [CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
        [CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
        [CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
        [CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
        [CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
        [CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)
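/*
 * Illustration (not part of the driver): TX and RX channels share one
 * channels[] array of 2 * CPDMA_MAX_CHANNELS slots.  TX channels occupy the
 * first half, RX channels the second half, and chan_linear() recovers the
 * hardware channel index.  Assuming, purely for the example, that
 * CPDMA_MAX_CHANNELS is 8:
 *
 *	tx_chan_num(2)    == 2    (slot 2:  hw channel 2, TX)
 *	rx_chan_num(2)    == 10   (slot 10: hw channel 2, RX)
 *	__chan_linear(10) == 2    (10 & (8 - 1))
 */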
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
        struct cpdma_desc_pool *pool = ctlr->pool;

        if (!pool)
                return;

        WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
             "cpdma_desc_pool size %zd != avail %zd",
             gen_pool_size(pool->gen_pool),
             gen_pool_avail(pool->gen_pool));
        if (pool->cpumap)
                dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
                                  pool->hw_addr);
}
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
        struct cpdma_params *cpdma_params = &ctlr->params;
        struct cpdma_desc_pool *pool;
        int ret = -ENOMEM;

        pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
        if (!pool)
                goto gen_pool_create_fail;
        ctlr->pool = pool;

        pool->mem_size	= cpdma_params->desc_mem_size;
        pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
                                cpdma_params->desc_align);
        pool->num_desc	= pool->mem_size / pool->desc_size;

        if (cpdma_params->descs_pool_size) {
                /* recalculate the memory size required for the cpdma
                 * descriptor pool, based on the number of descriptors
                 * specified by the user; if that size exceeds the CPPI
                 * internal RAM size (desc_mem_size), switch to DDR
                 */
                pool->num_desc = cpdma_params->descs_pool_size;
                pool->mem_size = pool->desc_size * pool->num_desc;
                if (pool->mem_size > cpdma_params->desc_mem_size)
                        cpdma_params->desc_mem_phys = 0;
        }

        pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
                                              -1, "cpdma");
        if (IS_ERR(pool->gen_pool)) {
                ret = PTR_ERR(pool->gen_pool);
                dev_err(ctlr->dev, "pool create failed %d\n", ret);
                goto gen_pool_create_fail;
        }

        if (cpdma_params->desc_mem_phys) {
                pool->phys  = cpdma_params->desc_mem_phys;
                pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
                                           pool->mem_size);
                pool->hw_addr = cpdma_params->desc_hw_addr;
        } else {
                pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
                                                  &pool->hw_addr, GFP_KERNEL);
                pool->iomap = (void __iomem __force *)pool->cpumap;
                pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }

        if (!pool->iomap)
                goto gen_pool_create_fail;

        ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
                                pool->phys, pool->mem_size, -1);
        if (ret < 0) {
                dev_err(ctlr->dev, "pool add failed %d\n", ret);
                goto gen_pool_add_virt_fail;
        }

        return 0;

gen_pool_add_virt_fail:
        cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
        ctlr->pool = NULL;
        return ret;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
                                   struct cpdma_desc __iomem *desc)
{
        return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}

static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
        return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}

static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
        return (struct cpdma_desc __iomem *)
                gen_pool_alloc(pool->gen_pool, pool->desc_size);
}

static void cpdma_desc_free(struct cpdma_desc_pool *pool,
                            struct cpdma_desc __iomem *desc, int num_desc)
{
        gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        struct cpdma_control_info *info = &controls[control];
        u32 val;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_WO) != ACCESS_WO)
                return -EPERM;

        val  = dma_reg_read(ctlr, info->reg);
        val &= ~(info->mask << info->shift);
        val |= (value & info->mask) << info->shift;
        dma_reg_write(ctlr, info->reg, val);

        return 0;
}
static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        struct cpdma_control_info *info = &controls[control];
        int ret;

        if (!ctlr->params.has_ext_regs)
                return -ENOTSUPP;

        if (ctlr->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        if (control < 0 || control >= ARRAY_SIZE(controls))
                return -ENOENT;

        if ((info->access & ACCESS_RO) != ACCESS_RO)
                return -EPERM;

        ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
        return ret;
}
/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        u32 rate_reg;
        u32 rmask;
        int ret;

        if (!chan->rate)
                return 0;

        rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
        dma_reg_write(ctlr, rate_reg, chan->rate_factor);

        rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
        rmask |= chan->mask;

        ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        return ret;
}
static int cpdma_chan_on(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr = chan->ctlr;
        struct cpdma_desc_pool *pool = ctlr->pool;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EBUSY;
        }
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }
        dma_reg_write(ctlr, chan->int_set, chan->mask);
        chan->state = CPDMA_STATE_ACTIVE;
        if (chan->head) {
                chan_write(chan, hdp, desc_phys(pool, chan->head));
                if (chan->rxfree)
                        chan_write(chan, rxfree, chan->count);
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns 0 on success, -EINVAL if the requested configuration is not allowed.
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
                               u32 *rmask, int *prio_mode)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        struct cpdma_chan *chan;
        u32 old_rate = ch->rate;
        u32 new_rmask = 0;
        int rlim = 0;
        int i;

        for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan == ch)
                        chan->rate = rate;

                if (chan->rate) {
                        rlim = 1;
                        new_rmask |= chan->mask;
                        continue;
                }

                if (rlim)
                        goto err;
        }

        *rmask = new_rmask;
        *prio_mode = rlim;
        return 0;

err:
        ch->rate = old_rate;
        dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
                chan->chan_num);
        return -EINVAL;
}
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
                                  struct cpdma_chan *ch)
{
        u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
        u32 best_send_cnt = 0, best_idle_cnt = 0;
        u32 new_rate, best_rate = 0, rate_reg;
        u64 send_cnt, idle_cnt;
        u32 min_send_cnt, freq;
        u64 divident, divisor;

        if (!ch->rate) {
                ch->rate_factor = 0;
                goto set_factor;
        }

        freq = ctlr->params.bus_freq_mhz * 1000 * 32;
        if (!freq) {
                dev_err(ctlr->dev, "The bus frequency is not set\n");
                return -EINVAL;
        }

        min_send_cnt = freq - ch->rate;
        send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
        while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
                divident = ch->rate * send_cnt;
                divisor = min_send_cnt;
                idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

                divident = freq * idle_cnt;
                divisor = idle_cnt + send_cnt;
                new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

                delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
                if (delta < best_delta) {
                        best_delta = delta;
                        best_send_cnt = send_cnt;
                        best_idle_cnt = idle_cnt;
                        best_rate = new_rate;

                        if (!delta)
                                break;
                }

                if (prev_delta >= delta) {
                        prev_delta = delta;
                        send_cnt++;
                        continue;
                }

                idle_cnt++;
                divident = freq * idle_cnt;
                send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
                send_cnt -= idle_cnt;
                prev_delta = UINT_MAX;
        }

        ch->rate = best_rate;
        ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
        rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
        dma_reg_write(ctlr, rate_reg, ch->rate_factor);
        return 0;
}
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
        struct cpdma_ctlr *ctlr;

        ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
        if (!ctlr)
                return NULL;

        ctlr->state = CPDMA_STATE_IDLE;
        ctlr->params = *params;
        ctlr->dev = params->dev;
        spin_lock_init(&ctlr->lock);

        if (cpdma_desc_pool_create(ctlr))
                return NULL;
        /* split pool equally between RX/TX by default */
        ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
        ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

        if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
                ctlr->num_chan = CPDMA_MAX_CHANNELS;
        return ctlr;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
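/*
 * Usage sketch (illustrative only, not part of this driver): a MAC driver is
 * expected to fill a struct cpdma_params and hand it to cpdma_ctlr_create().
 * The offsets and values below are hypothetical placeholders; real users
 * (cpsw, davinci_emac) take them from their own register maps.
 *
 *	struct cpdma_params dma_params = {
 *		.dev		 = &pdev->dev,
 *		.dmaregs	 = ss_regs + DMA_REG_OFS,	// hypothetical
 *		.txhdp		 = ss_regs + STATERAM_OFS,	// hypothetical
 *		.rxhdp		 = ss_regs + STATERAM_OFS + 0x20,
 *		.txcp		 = ss_regs + STATERAM_OFS + 0x40,
 *		.rxcp		 = ss_regs + STATERAM_OFS + 0x60,
 *		.num_chan	 = 8,
 *		.has_soft_reset	 = true,
 *		.min_packet_size = 64,
 *		.desc_mem_size	 = SZ_8K,
 *		.desc_align	 = 16,
 *		.has_ext_regs	 = true,
 *		.bus_freq_mhz	 = 250,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 *
 *	if (!dma)
 *		return -ENOMEM;
 */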
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
        struct cpdma_chan *chan;
        unsigned long flags;
        int i, prio_mode = 0;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_IDLE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EBUSY;
        }

        if (ctlr->params.has_soft_reset) {
                unsigned timeout = 10 * 100;

                dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
                while (timeout) {
                        if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
                                break;
                        udelay(10);
                        timeout--;
                }
                WARN_ON(!timeout);
        }

        for (i = 0; i < ctlr->num_chan; i++) {
                writel(0, ctlr->params.txhdp + 4 * i);
                writel(0, ctlr->params.rxhdp + 4 * i);
                writel(0, ctlr->params.txcp + 4 * i);
                writel(0, ctlr->params.rxcp + 4 * i);
        }

        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

        ctlr->state = CPDMA_STATE_ACTIVE;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (chan) {
                        cpdma_chan_set_chan_shaper(chan);
                        cpdma_chan_on(chan);

                        /* turn off prio mode if all tx channels are rate limited */
                        if (is_tx_chan(chan) && !chan->rate)
                                prio_mode = 1;
                }
        }

        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
        _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        ctlr->state = CPDMA_STATE_TEARDOWN;
        spin_unlock_irqrestore(&ctlr->lock, flags);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_stop(ctlr->channels[i]);
        }

        spin_lock_irqsave(&ctlr->lock, flags);
        dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
        dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

        dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
        dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

        ctlr->state = CPDMA_STATE_IDLE;

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
        int ret = 0, i;

        if (!ctlr)
                return -EINVAL;

        if (ctlr->state != CPDMA_STATE_IDLE)
                cpdma_ctlr_stop(ctlr);

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
                cpdma_chan_destroy(ctlr->channels[i]);

        cpdma_desc_pool_destroy(ctlr);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return -EINVAL;
        }

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                if (ctlr->channels[i])
                        cpdma_chan_int_ctrl(ctlr->channels[i], enable);
        }

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_int_ctrl);
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
        dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);

u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);

u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
        return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
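/*
 * Usage sketch (illustrative, not part of this driver): a typical TX IRQ
 * handler reads the masked per-channel interrupt status, acknowledges the
 * interrupt line with an EOI write and defers the real work to NAPI.
 * CPDMA_EOI_TX is assumed to come from davinci_cpdma.h; the priv/napi names
 * are made up.
 *
 *	static irqreturn_t my_tx_interrupt(int irq, void *dev_id)
 *	{
 *		struct my_priv *priv = dev_id;
 *
 *		if (cpdma_ctrl_txchs_state(priv->dma)) {
 *			cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
 *			napi_schedule(&priv->napi_tx);
 *		}
 *		return IRQ_HANDLED;
 *	}
 */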
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
                                 int rx, int desc_num,
                                 int per_ch_desc)
{
        struct cpdma_chan *chan, *most_chan = NULL;
        int desc_cnt = desc_num;
        int most_dnum = 0;
        int min, max, i;

        if (!desc_num)
                return;

        if (rx) {
                min = rx_chan_num(0);
                max = rx_chan_num(CPDMA_MAX_CHANNELS);
        } else {
                min = tx_chan_num(0);
                max = tx_chan_num(CPDMA_MAX_CHANNELS);
        }

        for (i = min; i < max; i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (chan->weight)
                        chan->desc_num = (chan->weight * desc_num) / 100;
                else
                        chan->desc_num = per_ch_desc;

                desc_cnt -= chan->desc_num;

                if (most_dnum < chan->desc_num) {
                        most_dnum = chan->desc_num;
                        most_chan = chan;
                }
        }
        /* give the remaining descriptors to the biggest channel */
        if (most_chan)
                most_chan->desc_num += desc_cnt;
}
/**
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock
 */
int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        int free_rx_num = 0, free_tx_num = 0;
        int rx_weight = 0, tx_weight = 0;
        int tx_desc_num, rx_desc_num;
        struct cpdma_chan *chan;
        int i;

        for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
                chan = ctlr->channels[i];
                if (!chan)
                        continue;

                if (is_rx_chan(chan)) {
                        if (!chan->weight)
                                free_rx_num++;
                        rx_weight += chan->weight;
                } else {
                        if (!chan->weight)
                                free_tx_num++;
                        tx_weight += chan->weight;
                }
        }

        if (rx_weight > 100 || tx_weight > 100)
                return -EINVAL;

        tx_desc_num = ctlr->num_tx_desc;
        rx_desc_num = ctlr->num_rx_desc;

        if (free_tx_num) {
                tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
                tx_per_ch_desc /= free_tx_num;
        }

        if (free_rx_num) {
                rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
                rx_per_ch_desc /= free_rx_num;
        }

        cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
        cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_split_pool);
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights, i.e. 100% for RX
 * and 100% for Tx. The weight is used to split cpdma resources
 * in the correct proportion required by the channels, including the
 * number of descriptors. The channel rate is not enough to know the
 * weight of a channel, as the maximum rate of an interface is needed.
 * If weight = 0, the channel uses the rest of the descriptors left by
 * weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
        struct cpdma_ctlr *ctlr = ch->ctlr;
        unsigned long flags, ch_flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);
        if (ch->weight == weight) {
                spin_unlock_irqrestore(&ch->lock, ch_flags);
                spin_unlock_irqrestore(&ctlr->lock, flags);
                return 0;
        }
        ch->weight = weight;
        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* re-split pool using new channel weight */
        ret = cpdma_chan_split_pool(ctlr);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_weight);
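/*
 * Example (illustrative): with ctlr->num_rx_desc == 128, giving one RX
 * channel a weight of 50 reserves (50 * 128) / 100 = 64 descriptors for it,
 * while the remaining zero-weight RX channels split the other 64 equally
 * (see cpdma_chan_set_descs() above).  The channel pointer below is made up.
 *
 *	cpdma_chan_set_weight(priv->rxv[0].ch, 50);
 */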
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
        unsigned int divident, divisor;

        divident = ctlr->params.bus_freq_mhz * 32 * 1000;
        divisor = 1 + CPDMA_MAX_RLIM_CNT;

        return DIV_ROUND_UP(divident, divisor);
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
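/*
 * Worked example (hypothetical numbers): with params.bus_freq_mhz == 250,
 * the divident is 250 * 32 * 1000 = 8,000,000 and the divisor is
 * 1 + CPDMA_MAX_RLIM_CNT = 16385, so the minimum configurable rate is
 * DIV_ROUND_UP(8000000, 16385) = 489 Kb/s.
 */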
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth-limited channels have to be set up in order, beginning
 * from the lowest one.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s; if 0, the shaper is turned off
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
        unsigned long flags, ch_flags;
        struct cpdma_ctlr *ctlr;
        int ret, prio_mode;
        u32 rmask;

        if (!ch || !is_tx_chan(ch))
                return -EINVAL;

        if (ch->rate == rate)
                return rate;

        ctlr = ch->ctlr;
        spin_lock_irqsave(&ctlr->lock, flags);
        spin_lock_irqsave(&ch->lock, ch_flags);

        ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
        if (ret)
                goto err;

        ret = cpdma_chan_set_factors(ctlr, ch);
        if (ret)
                goto err;

        spin_unlock_irqrestore(&ch->lock, ch_flags);

        /* on shapers */
        _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
        _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;

err:
        spin_unlock_irqrestore(&ch->lock, ch_flags);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_set_rate);
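/*
 * Usage sketch (illustrative, the channel pointer is made up): the rate is
 * given in Kb/s and a rate of 0 switches the shaper off again.
 *
 *	cpdma_chan_set_rate(tx_ch, 100000);	// shape to ~100 Mb/s
 *	...
 *	cpdma_chan_set_rate(tx_ch, 0);		// disable shaping
 */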
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
        unsigned long flags;
        u32 rate;

        spin_lock_irqsave(&ch->lock, flags);
        rate = ch->rate;
        spin_unlock_irqrestore(&ch->lock, flags);

        return rate;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rate);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
                                     cpdma_handler_fn handler, int rx_type)
{
        int offset = chan_num * 4;
        struct cpdma_chan *chan;
        unsigned long flags;

        chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

        if (__chan_linear(chan_num) >= ctlr->num_chan)
                return ERR_PTR(-EINVAL);

        chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
        if (!chan)
                return ERR_PTR(-ENOMEM);

        spin_lock_irqsave(&ctlr->lock, flags);
        if (ctlr->channels[chan_num]) {
                spin_unlock_irqrestore(&ctlr->lock, flags);
                devm_kfree(ctlr->dev, chan);
                return ERR_PTR(-EBUSY);
        }

        chan->ctlr	= ctlr;
        chan->state	= CPDMA_STATE_IDLE;
        chan->chan_num	= chan_num;
        chan->handler	= handler;

        if (is_rx_chan(chan)) {
                chan->hdp	= ctlr->params.rxhdp + offset;
                chan->cp	= ctlr->params.rxcp + offset;
                chan->rxfree	= ctlr->params.rxfree + offset;
                chan->int_set	= CPDMA_RXINTMASKSET;
                chan->int_clear	= CPDMA_RXINTMASKCLEAR;
                chan->td	= CPDMA_RXTEARDOWN;
                chan->dir	= DMA_FROM_DEVICE;
        } else {
                chan->hdp	= ctlr->params.txhdp + offset;
                chan->cp	= ctlr->params.txcp + offset;
                chan->int_set	= CPDMA_TXINTMASKSET;
                chan->int_clear	= CPDMA_TXINTMASKCLEAR;
                chan->td	= CPDMA_TXTEARDOWN;
                chan->dir	= DMA_TO_DEVICE;
        }
        chan->mask = BIT(chan_linear(chan));

        spin_lock_init(&chan->lock);

        ctlr->channels[chan_num] = chan;

        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);

        return chan;
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
        unsigned long flags;
        int desc_num;

        spin_lock_irqsave(&chan->lock, flags);
        desc_num = chan->desc_num;
        spin_unlock_irqrestore(&chan->lock, flags);

        return desc_num;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
        struct cpdma_ctlr *ctlr;
        unsigned long flags;

        if (!chan)
                return -EINVAL;
        ctlr = chan->ctlr;

        spin_lock_irqsave(&ctlr->lock, flags);
        if (chan->state != CPDMA_STATE_IDLE)
                cpdma_chan_stop(chan);
        ctlr->channels[chan->chan_num] = NULL;
        devm_kfree(ctlr->dev, chan);
        cpdma_chan_split_pool(ctlr);

        spin_unlock_irqrestore(&ctlr->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
                         struct cpdma_chan_stats *stats)
{
        unsigned long flags;

        if (!chan)
                return 0;
        spin_lock_irqsave(&chan->lock, flags);
        memcpy(stats, &chan->stats, sizeof(*stats));
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
static void __cpdma_chan_submit(struct cpdma_chan *chan,
                                struct cpdma_desc __iomem *desc)
{
        struct cpdma_ctlr		*ctlr = chan->ctlr;
        struct cpdma_desc __iomem	*prev = chan->tail;
        struct cpdma_desc_pool		*pool = ctlr->pool;
        dma_addr_t			desc_dma;
        u32				mode;

        desc_dma = desc_phys(pool, desc);

        /* simple case - idle channel */
        if (!chan->head) {
                chan->stats.head_enqueue++;
                chan->head = desc;
                chan->tail = desc;
                if (chan->state == CPDMA_STATE_ACTIVE)
                        chan_write(chan, hdp, desc_dma);
                return;
        }

        /* first chain the descriptor at the tail of the list */
        desc_write(prev, hw_next, desc_dma);
        chan->tail = desc;
        chan->stats.tail_enqueue++;

        /* next check if EOQ has been triggered already */
        mode = desc_read(prev, hw_mode);
        if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
            (chan->state == CPDMA_STATE_ACTIVE)) {
                desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
                chan_write(chan, hdp, desc_dma);
                chan->stats.misqueued++;
        }
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
                      int len, int directed)
{
        struct cpdma_ctlr		*ctlr = chan->ctlr;
        struct cpdma_desc __iomem	*desc;
        dma_addr_t			buffer;
        unsigned long			flags;
        u32				mode;
        int				ret = 0;

        spin_lock_irqsave(&chan->lock, flags);

        if (chan->state == CPDMA_STATE_TEARDOWN) {
                ret = -EINVAL;
                goto unlock_ret;
        }

        if (chan->count >= chan->desc_num) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        desc = cpdma_desc_alloc(ctlr->pool);
        if (!desc) {
                chan->stats.desc_alloc_fail++;
                ret = -ENOMEM;
                goto unlock_ret;
        }

        if (len < ctlr->params.min_packet_size) {
                len = ctlr->params.min_packet_size;
                chan->stats.runt_transmit_buff++;
        }

        buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
        ret = dma_mapping_error(ctlr->dev, buffer);
        if (ret) {
                cpdma_desc_free(ctlr->pool, desc, 1);
                ret = -EINVAL;
                goto unlock_ret;
        }

        mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
        cpdma_desc_to_port(chan, mode, directed);

        /* Relaxed IO accessors can be used here as there is a read barrier
         * at the end of the write sequence.
         */
        writel_relaxed(0, &desc->hw_next);
        writel_relaxed(buffer, &desc->hw_buffer);
        writel_relaxed(len, &desc->hw_len);
        writel_relaxed(mode | len, &desc->hw_mode);
        writel_relaxed((uintptr_t)token, &desc->sw_token);
        writel_relaxed(buffer, &desc->sw_buffer);
        writel_relaxed(len, &desc->sw_len);
        desc_read(desc, sw_len);

        __cpdma_chan_submit(chan, desc);

        if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
                chan_write(chan, rxfree, 1);

        chan->count++;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_chan_submit);
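/*
 * Usage sketch (illustrative, not part of this driver): a MAC driver
 * typically submits the data buffer of an skb on a TX channel and uses the
 * skb itself as the completion token, which is handed back to the channel's
 * cpdma_handler_fn once the descriptor completes.  The names below
 * (priv, txch, drop) are made up.
 *
 *	ret = cpdma_chan_submit(priv->txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		goto drop;	// no descriptor available or channel torn down
 */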
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
        struct cpdma_ctlr	*ctlr = chan->ctlr;
        struct cpdma_desc_pool	*pool = ctlr->pool;
        bool			free_tx_desc;
        unsigned long		flags;

        spin_lock_irqsave(&chan->lock, flags);
        free_tx_desc = (chan->count < chan->desc_num) &&
                        gen_pool_avail(pool->gen_pool);
        spin_unlock_irqrestore(&chan->lock, flags);
        return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
static void __cpdma_chan_free(struct cpdma_chan *chan,
                              struct cpdma_desc __iomem *desc,
                              int outlen, int status)
{
        struct cpdma_ctlr		*ctlr = chan->ctlr;
        struct cpdma_desc_pool		*pool = ctlr->pool;
        dma_addr_t			buff_dma;
        int				origlen;
        uintptr_t			token;

        token    = desc_read(desc, sw_token);
        buff_dma = desc_read(desc, sw_buffer);
        origlen  = desc_read(desc, sw_len);

        dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
        cpdma_desc_free(pool, desc, 1);
        (*chan->handler)((void *)token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
        struct cpdma_ctlr		*ctlr = chan->ctlr;
        struct cpdma_desc __iomem	*desc;
        int				status, outlen;
        int				cb_status = 0;
        struct cpdma_desc_pool		*pool = ctlr->pool;
        dma_addr_t			desc_dma;
        unsigned long			flags;

        spin_lock_irqsave(&chan->lock, flags);

        desc = chan->head;
        if (!desc) {
                chan->stats.empty_dequeue++;
                status = -ENOENT;
                goto unlock_ret;
        }
        desc_dma = desc_phys(pool, desc);

        status = desc_read(desc, hw_mode);
        outlen = status & 0x7ff;
        if (status & CPDMA_DESC_OWNER) {
                chan->stats.busy_dequeue++;
                status = -EBUSY;
                goto unlock_ret;
        }

        if (status & CPDMA_DESC_PASS_CRC)
                outlen -= CPDMA_DESC_CRC_LEN;

        status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
                           CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

        chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
        chan_write(chan, cp, desc_dma);
        chan->count--;
        chan->stats.good_dequeue++;

        if ((status & CPDMA_DESC_EOQ) && chan->head) {
                chan->stats.requeue++;
                chan_write(chan, hdp, desc_phys(pool, chan->head));
        }

        spin_unlock_irqrestore(&chan->lock, flags);
        if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
                cb_status = -ENOSYS;
        else
                cb_status = status;

        __cpdma_chan_free(chan, desc, outlen, cb_status);
        return status;

unlock_ret:
        spin_unlock_irqrestore(&chan->lock, flags);
        return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
        int used = 0, ret = 0;

        if (chan->state != CPDMA_STATE_ACTIVE)
                return -EINVAL;

        while (used < quota) {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
                used++;
        }
        return used;
}
EXPORT_SYMBOL_GPL(cpdma_chan_process);
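/*
 * Usage sketch (illustrative): cpdma_chan_process() is meant to be called
 * from a NAPI poll handler with the remaining budget; it returns how many
 * descriptors were completed, each one reported through the channel's
 * handler callback.  CPDMA_EOI_RX is assumed to come from davinci_cpdma.h;
 * the priv/napi names are made up.
 *
 *	static int my_rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi_rx);
 *		int done = cpdma_chan_process(priv->rxch, budget);
 *
 *		if (done < budget) {
 *			napi_complete_done(napi, done);
 *			cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
 *		}
 *		return done;
 *	}
 */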
int cpdma_chan_start(struct cpdma_chan *chan)
{
        struct cpdma_ctlr	*ctlr = chan->ctlr;
        unsigned long		flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = cpdma_chan_set_chan_shaper(chan);
        spin_unlock_irqrestore(&ctlr->lock, flags);
        if (ret)
                return ret;

        ret = cpdma_chan_on(chan);
        if (ret)
                return ret;

        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_start);
int cpdma_chan_stop(struct cpdma_chan *chan)
{
        struct cpdma_ctlr	*ctlr = chan->ctlr;
        struct cpdma_desc_pool	*pool = ctlr->pool;
        unsigned long		flags;
        int			ret;
        unsigned		timeout;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state == CPDMA_STATE_TEARDOWN) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        chan->state = CPDMA_STATE_TEARDOWN;
        dma_reg_write(ctlr, chan->int_clear, chan->mask);

        /* trigger teardown */
        dma_reg_write(ctlr, chan->td, chan_linear(chan));

        /* wait for teardown complete */
        timeout = 100 * 100; /* 100 ms */
        while (timeout) {
                u32 cp = chan_read(chan, cp);

                if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
                        break;
                udelay(10);
                timeout--;
        }
        WARN_ON(!timeout);
        chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

        /* handle completed packets */
        spin_unlock_irqrestore(&chan->lock, flags);
        do {
                ret = __cpdma_chan_process(chan);
                if (ret < 0)
                        break;
        } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
        spin_lock_irqsave(&chan->lock, flags);

        /* remaining packets haven't been tx/rx'ed, clean them up */
        while (chan->head) {
                struct cpdma_desc __iomem *desc = chan->head;
                dma_addr_t next_dma;

                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
                chan->count--;
                chan->stats.teardown_dequeue++;

                /* issue callback without locks held */
                spin_unlock_irqrestore(&chan->lock, flags);
                __cpdma_chan_free(chan, desc, 0, -ENOSYS);
                spin_lock_irqsave(&chan->lock, flags);
        }

        chan->state = CPDMA_STATE_IDLE;
        spin_unlock_irqrestore(&chan->lock, flags);
        return 0;
}
EXPORT_SYMBOL_GPL(cpdma_chan_stop);
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->state != CPDMA_STATE_ACTIVE) {
                spin_unlock_irqrestore(&chan->lock, flags);
                return -EINVAL;
        }

        dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
                      chan->mask);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_get(ctlr, control);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}

int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&ctlr->lock, flags);
        ret = _cpdma_control_set(ctlr, control, value);
        spin_unlock_irqrestore(&ctlr->lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(cpdma_control_set);
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_rx_descs);

int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
        return ctlr->num_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_get_num_tx_descs);

void cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
        ctlr->num_rx_desc = num_rx_desc;
        ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_set_num_rx_descs);

MODULE_LICENSE("GPL");