// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments CPDMA Driver
 *
 * Copyright (C) 2010 Texas Instruments
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include "davinci_cpdma.h"
#define CPDMA_TXIDVER		0x00
#define CPDMA_TXCONTROL		0x04
#define CPDMA_TXTEARDOWN	0x08
#define CPDMA_RXIDVER		0x10
#define CPDMA_RXCONTROL		0x14
#define CPDMA_SOFTRESET		0x1c
#define CPDMA_RXTEARDOWN	0x18
#define CPDMA_TX_PRI0_RATE	0x30
#define CPDMA_TXINTSTATRAW	0x80
#define CPDMA_TXINTSTATMASKED	0x84
#define CPDMA_TXINTMASKSET	0x88
#define CPDMA_TXINTMASKCLEAR	0x8c
#define CPDMA_MACINVECTOR	0x90
#define CPDMA_MACEOIVECTOR	0x94
#define CPDMA_RXINTSTATRAW	0xa0
#define CPDMA_RXINTSTATMASKED	0xa4
#define CPDMA_RXINTMASKSET	0xa8
#define CPDMA_RXINTMASKCLEAR	0xac
#define CPDMA_DMAINTSTATRAW	0xb0
#define CPDMA_DMAINTSTATMASKED	0xb4
#define CPDMA_DMAINTMASKSET	0xb8
#define CPDMA_DMAINTMASKCLEAR	0xbc
#define CPDMA_DMAINT_HOSTERR	BIT(1)
/* the following exist only if has_ext_regs is set */
#define CPDMA_DMACONTROL	0x20
#define CPDMA_DMASTATUS		0x24
#define CPDMA_RXBUFFOFS		0x28
#define CPDMA_EM_CONTROL	0x2c
/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)
#define CPDMA_DESC_TD_COMPLETE	BIT(27)
#define CPDMA_DESC_PASS_CRC	BIT(26)
#define CPDMA_DESC_TO_PORT_EN	BIT(20)
#define CPDMA_TO_PORT_SHIFT	16
#define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
#define CPDMA_DESC_CRC_LEN	4

#define CPDMA_TEARDOWN_VALUE	0xfffffffc

#define CPDMA_MAX_RLIM_CNT	16384
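
/* Reconstructed sketch (not verbatim from the original source): the state
 * values and the descriptor layout implied by the state checks and the
 * desc_read()/desc_write() accesses later in this file.
 */
enum cpdma_state {
	CPDMA_STATE_IDLE,
	CPDMA_STATE_ACTIVE,
	CPDMA_STATE_TEARDOWN,
};

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	u32			sw_token;
	u32			sw_buffer;
	u32			sw_len;
};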
struct cpdma_desc_pool {
	phys_addr_t		phys;
	dma_addr_t		hw_addr;
	void __iomem		*iomap;		/* ioremap map */
	void			*cpumap;	/* dma_alloc map */
	int			desc_size, mem_size;
	int			num_desc;
	struct gen_pool		*gen_pool;
};
struct cpdma_ctlr {
	enum cpdma_state	state;
	struct cpdma_params	params;
	struct device		*dev;
	struct cpdma_desc_pool	*pool;
	spinlock_t		lock;
	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
	int			num_rx_desc; /* RX descriptors number */
	int			num_tx_desc; /* TX descriptors number */
};
struct cpdma_chan {
	struct cpdma_desc __iomem	*head, *tail;
	void __iomem			*hdp, *cp, *rxfree;
	enum cpdma_state		state;
	struct cpdma_ctlr		*ctlr;
	int				chan_num;
	spinlock_t			lock;
	int				count;
	u32				desc_num;
	u32				mask;
	cpdma_handler_fn		handler;
	enum dma_data_direction		dir;
	struct cpdma_chan_stats		stats;
	/* offsets into dmaregs */
	int				int_set, int_clear, td;
	int				weight;
	u32				rate_factor;
	u32				rate;
};
struct cpdma_control_info {
	u32		reg;
	u32		shift, mask;
	int		access;
#define ACCESS_RO	BIT(0)
#define ACCESS_WO	BIT(1)
#define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
};
struct submit_info {
	struct cpdma_chan *chan;
	int directed;
	void *token;
	void *data_virt;
	dma_addr_t data_dma;
	int len;
};
static struct cpdma_control_info controls[] = {
	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
};
#define tx_chan_num(chan)	(chan)
#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
#define is_tx_chan(chan)	(!is_rx_chan(chan))
#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
#define chan_linear(chan)	__chan_linear((chan)->chan_num)
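
/* Illustrative sketch (not from the original source): with the usual
 * CPDMA_MAX_CHANNELS value of 8, the helpers above map a hardware channel
 * index to a slot in ctlr->channels[]:
 *
 *	tx_chan_num(3)                     -> 3
 *	rx_chan_num(3)                     -> 11
 *	is_rx_chan(chan with chan_num 11)  -> true
 *	chan_linear(chan with chan_num 11) -> 3
 *
 * TX channels occupy the first half of the array and RX channels the second
 * half, while chan_linear() recovers the hardware index.
 */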
/* The following make access to common cpdma_ctlr params more readable */
#define dmaregs		params.dmaregs
#define num_chan	params.num_chan

/* various accessors */
#define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
#define chan_read(chan, fld)		readl((chan)->fld)
#define desc_read(desc, fld)		readl(&(desc)->fld)
#define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
#define chan_write(chan, fld, v)	writel(v, (chan)->fld)
#define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)
#define cpdma_desc_to_port(chan, mode, directed)			\
	do {								\
		if (!is_rx_chan(chan) && ((directed == 1) ||		\
					  (directed == 2)))		\
			mode |= (CPDMA_DESC_TO_PORT_EN |		\
				 (directed << CPDMA_TO_PORT_SHIFT));	\
	} while (0)

#define CPDMA_DMA_EXT_MAP		BIT(16)
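
/* Illustrative sketch (not from the original source): for a TX descriptor
 * directed at port 2, cpdma_desc_to_port() ORs in
 *
 *	CPDMA_DESC_TO_PORT_EN | (2 << CPDMA_TO_PORT_SHIFT) == BIT(20) | BIT(17)
 *
 * so the frame is forwarded only to that port instead of going through the
 * switch's normal lookup.
 */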
static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
{
	struct cpdma_desc_pool *pool = ctlr->pool;

	if (!pool)
		return;

	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
	     "cpdma_desc_pool size %zd != avail %zd",
	     gen_pool_size(pool->gen_pool),
	     gen_pool_avail(pool->gen_pool));

	if (pool->cpumap)
		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
				  pool->phys);
}
/*
 * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
 * emac) have dedicated on-chip memory for these descriptors.  Some other
 * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
 * abstract out these details.
 */
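
/* Illustrative sketch (not from the original source): a wrapping driver such
 * as cpsw or davinci_emac typically fills a struct cpdma_params before
 * calling cpdma_ctlr_create().  The values and the *_OFS offsets below are
 * hypothetical:
 *
 *	struct cpdma_params dma_params = {
 *		.dev		 = dev,
 *		.dmaregs	 = regs + DMA_REG_OFS,
 *		.txhdp		 = regs + TXHDP_OFS,
 *		.rxhdp		 = regs + RXHDP_OFS,
 *		.txcp		 = regs + TXCP_OFS,
 *		.rxcp		 = regs + RXCP_OFS,
 *		.rxfree		 = regs + RXFREE_OFS,
 *		.num_chan	 = 8,
 *		.has_soft_reset	 = true,
 *		.min_packet_size = 64,
 *		.desc_mem_size	 = SZ_8K,
 *		.desc_align	 = 16,
 *		.bus_freq_mhz	 = 250,
 *	};
 *	struct cpdma_ctlr *dma = cpdma_ctlr_create(&dma_params);
 */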
static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
	struct cpdma_params *cpdma_params = &ctlr->params;
	struct cpdma_desc_pool *pool;
	int ret = -ENOMEM;

	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto gen_pool_create_fail;
	ctlr->pool = pool;

	pool->mem_size	= cpdma_params->desc_mem_size;
	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
				cpdma_params->desc_align);
	pool->num_desc	= pool->mem_size / pool->desc_size;

	if (cpdma_params->descs_pool_size) {
		/* recalculate the memory size required for the cpdma
		 * descriptor pool based on the number of descriptors
		 * specified by the user; if memory size > CPPI internal
		 * RAM size (desc_mem_size) then switch to using DDR
		 */
		pool->num_desc = cpdma_params->descs_pool_size;
		pool->mem_size = pool->desc_size * pool->num_desc;
		if (pool->mem_size > cpdma_params->desc_mem_size)
			cpdma_params->desc_mem_phys = 0;
	}

	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
					      -1, "cpdma");
	if (IS_ERR(pool->gen_pool)) {
		ret = PTR_ERR(pool->gen_pool);
		dev_err(ctlr->dev, "pool create failed %d\n", ret);
		goto gen_pool_create_fail;
	}

	if (cpdma_params->desc_mem_phys) {
		pool->phys  = cpdma_params->desc_mem_phys;
		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
					   pool->mem_size);
		pool->hw_addr = cpdma_params->desc_hw_addr;
	} else {
		pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
						  &pool->hw_addr, GFP_KERNEL);
		pool->iomap = (void __iomem __force *)pool->cpumap;
		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
	}

	if (!pool->iomap)
		goto gen_pool_create_fail;

	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
				pool->phys, pool->mem_size, -1);
	if (ret < 0) {
		dev_err(ctlr->dev, "pool add failed %d\n", ret);
		goto gen_pool_add_virt_fail;
	}

	return 0;

gen_pool_add_virt_fail:
	cpdma_desc_pool_destroy(ctlr);
gen_pool_create_fail:
	ctlr->pool = NULL;
	return ret;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
				   struct cpdma_desc __iomem *desc)
{
	if (!desc)
		return 0;

	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
}
static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
{
	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
}
static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
	return (struct cpdma_desc __iomem *)
		gen_pool_alloc(pool->gen_pool, pool->desc_size);
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
			    struct cpdma_desc __iomem *desc, int num_desc)
{
	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
}
static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	struct cpdma_control_info *info = &controls[control];
	u32 val;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_WO) != ACCESS_WO)
		return -EPERM;

	val  = dma_reg_read(ctlr, info->reg);
	val &= ~(info->mask << info->shift);
	val |= (value & info->mask) << info->shift;
	dma_reg_write(ctlr, info->reg, val);

	return 0;
}
static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	struct cpdma_control_info *info = &controls[control];
	int ret;

	if (!ctlr->params.has_ext_regs)
		return -ENOTSUPP;

	if (ctlr->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	if (control < 0 || control >= ARRAY_SIZE(controls))
		return -ENOENT;

	if ((info->access & ACCESS_RO) != ACCESS_RO)
		return -EPERM;

	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;

	return ret;
}
/* cpdma_chan_set_chan_shaper - set shaper for a channel
 * Has to be called under ctlr lock
 */
static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	u32 rate_reg;
	u32 rmask;
	int ret;

	if (!chan->rate)
		return 0;

	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
	dma_reg_write(ctlr, rate_reg, chan->rate_factor);

	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
	rmask |= chan->mask;

	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	return ret;
}
static int cpdma_chan_on(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EBUSY;
	}
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}
	dma_reg_write(ctlr, chan->int_set, chan->mask);
	chan->state = CPDMA_STATE_ACTIVE;
	if (chan->head) {
		chan_write(chan, hdp, desc_phys(pool, chan->head));
		if (chan->rxfree)
			chan_write(chan, rxfree, chan->count);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
 * rmask - mask of rate limited channels
 * Returns min rate in Kb/s
 */
static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
			       u32 *rmask, int *prio_mode)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	struct cpdma_chan *chan;
	u32 old_rate = ch->rate;
	u32 new_rmask = 0;
	int rlim = 0;
	int i;

	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan == ch)
			chan->rate = rate;

		if (chan->rate) {
			rlim = 1;
			new_rmask |= chan->mask;
			continue;
		}

		if (rlim)
			goto err;
	}

	*rmask = new_rmask;
	*prio_mode = rlim;
	return 0;

err:
	ch->rate = old_rate;
	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
		chan->chan_num);
	return -EINVAL;
}
static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
				  struct cpdma_chan *ch)
{
	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
	u32 best_send_cnt = 0, best_idle_cnt = 0;
	u32 new_rate, best_rate = 0, rate_reg;
	u64 send_cnt, idle_cnt;
	u32 min_send_cnt, freq;
	u64 divident, divisor;

	if (!ch->rate) {
		ch->rate_factor = 0;
		goto set_factor;
	}

	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
	if (!freq) {
		dev_err(ctlr->dev, "The bus frequency is not set\n");
		return -EINVAL;
	}

	min_send_cnt = freq - ch->rate;
	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
		divident = ch->rate * send_cnt;
		divisor = min_send_cnt;
		idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		divident = freq * idle_cnt;
		divisor = idle_cnt + send_cnt;
		new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);

		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
		if (delta < best_delta) {
			best_delta = delta;
			best_send_cnt = send_cnt;
			best_idle_cnt = idle_cnt;
			best_rate = new_rate;

			if (!delta)
				break;
		}

		if (prev_delta >= delta) {
			prev_delta = delta;
			send_cnt++;
			continue;
		}

		idle_cnt++;
		divident = freq * idle_cnt;
		send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
		send_cnt -= idle_cnt;
		prev_delta = UINT_MAX;
	}

	ch->rate = best_rate;
	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);

set_factor:
	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
	dma_reg_write(ctlr, rate_reg, ch->rate_factor);

	return 0;
}
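
/* Illustrative sketch (not from the original source): the shaper factor packs
 * the two counters as (best_idle_cnt << 16) | best_send_cnt.  For example,
 * with bus_freq_mhz = 250 (freq = 250 * 1000 * 32 = 8,000,000) and a target
 * rate of 2,000,000 Kb/s:
 *
 *	min_send_cnt = 8,000,000 - 2,000,000              = 6,000,000
 *	send_cnt     = DIV_ROUND_UP(6,000,000, 2,000,000) = 3
 *	idle_cnt     = round(2,000,000 * 3 / 6,000,000)   = 1
 *	new_rate     = round(8,000,000 * 1 / (1 + 3))     = 2,000,000
 *
 * delta is 0 on the first iteration, so rate_factor = 3 | (1 << 16) = 0x10003.
 */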
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
	struct cpdma_ctlr *ctlr;

	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
	if (!ctlr)
		return NULL;

	ctlr->state = CPDMA_STATE_IDLE;
	ctlr->params = *params;
	ctlr->dev = params->dev;
	spin_lock_init(&ctlr->lock);

	if (cpdma_desc_pool_create(ctlr))
		return NULL;
	/* split pool equally between RX/TX by default */
	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;

	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
		ctlr->num_chan = CPDMA_MAX_CHANNELS;
	return ctlr;
}
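
/* Illustrative sketch (not from the original source): typical controller
 * bring-up from the wrapping Ethernet driver (error handling omitted):
 *
 *	dma  = cpdma_ctlr_create(&dma_params);
 *	txch = cpdma_chan_create(dma, 0, tx_handler, 0);
 *	rxch = cpdma_chan_create(dma, 0, rx_handler, 1);
 *	cpdma_ctlr_start(dma);
 *	cpdma_ctlr_int_ctrl(dma, true);
 *
 * tx_handler/rx_handler are hypothetical cpdma_handler_fn callbacks supplied
 * by that driver.
 */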
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
{
	struct cpdma_chan *chan;
	unsigned long flags;
	int i, prio_mode;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_IDLE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EBUSY;
	}

	if (ctlr->params.has_soft_reset) {
		unsigned timeout = 10 * 100;

		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
		while (timeout) {
			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
				break;
			udelay(10);
			timeout--;
		}
		WARN_ON(!timeout);
	}

	for (i = 0; i < ctlr->num_chan; i++) {
		writel(0, ctlr->params.txhdp + 4 * i);
		writel(0, ctlr->params.rxhdp + 4 * i);
		writel(0, ctlr->params.txcp + 4 * i);
		writel(0, ctlr->params.rxcp + 4 * i);
	}

	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);

	ctlr->state = CPDMA_STATE_ACTIVE;

	prio_mode = 0;
	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (chan) {
			cpdma_chan_set_chan_shaper(chan);
			cpdma_chan_on(chan);

			/* off prio mode if all tx channels are rate limited */
			if (is_tx_chan(chan) && !chan->rate)
				prio_mode = 1;
		}
	}

	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_stop(ctlr->channels[i]);
	}

	spin_lock_irqsave(&ctlr->lock, flags);
	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);

	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);

	ctlr->state = CPDMA_STATE_IDLE;

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
	int ret = 0, i;

	if (!ctlr)
		return -EINVAL;

	if (ctlr->state != CPDMA_STATE_IDLE)
		cpdma_ctlr_stop(ctlr);

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
		cpdma_chan_destroy(ctlr->channels[i]);

	cpdma_desc_pool_destroy(ctlr);
	return ret;
}
int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		if (ctlr->channels[i])
			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
{
	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
}
u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
}
u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
{
	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
}
static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
				 int rx, int desc_num,
				 int per_ch_desc)
{
	struct cpdma_chan *chan, *most_chan = NULL;
	int desc_cnt = desc_num;
	int most_dnum = 0;
	int min, max, i;

	if (!desc_num)
		return;

	if (rx) {
		min = rx_chan_num(0);
		max = rx_chan_num(CPDMA_MAX_CHANNELS);
	} else {
		min = tx_chan_num(0);
		max = tx_chan_num(CPDMA_MAX_CHANNELS);
	}

	for (i = min; i < max; i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (chan->weight)
			chan->desc_num = (chan->weight * desc_num) / 100;
		else
			chan->desc_num = per_ch_desc;

		desc_cnt -= chan->desc_num;

		if (most_dnum < chan->desc_num) {
			most_dnum = chan->desc_num;
			most_chan = chan;
		}
	}
	/* use remains */
	if (most_chan)
		most_chan->desc_num += desc_cnt;
}
/*
 * cpdma_chan_split_pool - Splits ctlr pool between all channels.
 * Has to be called under ctlr lock
 */
static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
{
	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
	int free_rx_num = 0, free_tx_num = 0;
	int rx_weight = 0, tx_weight = 0;
	int tx_desc_num, rx_desc_num;
	struct cpdma_chan *chan;
	int i;

	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
		chan = ctlr->channels[i];
		if (!chan)
			continue;

		if (is_rx_chan(chan)) {
			if (!chan->weight)
				free_rx_num++;
			rx_weight += chan->weight;
		} else {
			if (!chan->weight)
				free_tx_num++;
			tx_weight += chan->weight;
		}
	}

	if (rx_weight > 100 || tx_weight > 100)
		return -EINVAL;

	tx_desc_num = ctlr->num_tx_desc;
	rx_desc_num = ctlr->num_rx_desc;

	if (free_tx_num) {
		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
		tx_per_ch_desc /= free_tx_num;
	}
	if (free_rx_num) {
		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
		rx_per_ch_desc /= free_rx_num;
	}

	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);

	return 0;
}
/* cpdma_chan_set_weight - set weight of a channel in percentage.
 * Tx and Rx channels have separate weights: 100% for RX and 100% for TX.
 * The weight is used to split cpdma resources, including the number of
 * descriptors, in the proportion required by the channels. The channel rate
 * alone is not enough to derive a channel's weight, as the maximum rate of
 * the interface would also be needed. If weight == 0, the channel uses the
 * rest of the descriptors left by the weighted channels.
 */
int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
{
	struct cpdma_ctlr *ctlr = ch->ctlr;
	unsigned long flags, ch_flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);
	if (ch->weight == weight) {
		spin_unlock_irqrestore(&ch->lock, ch_flags);
		spin_unlock_irqrestore(&ctlr->lock, flags);
		return 0;
	}
	ch->weight = weight;
	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* re-split pool using new channel weight */
	ret = cpdma_chan_split_pool(ctlr);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
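
/* Illustrative sketch (not from the original source): with num_tx_desc = 128
 * and three TX channels weighted 50, 25 and 0, the split gives the weighted
 * channels 64 and 32 descriptors, and the unweighted channel receives the
 * remaining 32 (its per-channel share of what the weighted channels leave
 * free).
 */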
/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
 * Should be called before cpdma_chan_set_rate.
 * Returns min rate in Kb/s
 */
u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
{
	unsigned int divident, divisor;

	divident = ctlr->params.bus_freq_mhz * 32 * 1000;
	divisor = 1 + CPDMA_MAX_RLIM_CNT;

	return DIV_ROUND_UP(divident, divisor);
}
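
/* Illustrative sketch (not from the original source): if bus_freq_mhz were
 * 250, the minimum configurable rate would be
 *
 *	DIV_ROUND_UP(250 * 32 * 1000, 1 + 16384)
 *	= DIV_ROUND_UP(8,000,000, 16,385) = 489 Kb/s
 */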
/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
 * The bandwidth limited channels have to be in order beginning from lowest.
 * ch - transmit channel the bandwidth is configured for
 * rate - bandwidth in Kb/s, if 0 - then off shaper
 */
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
	unsigned long flags, ch_flags;
	struct cpdma_ctlr *ctlr;
	int ret, prio_mode;
	u32 rmask;

	if (!ch || !is_tx_chan(ch))
		return -EINVAL;

	if (ch->rate == rate)
		return rate;

	ctlr = ch->ctlr;
	spin_lock_irqsave(&ctlr->lock, flags);
	spin_lock_irqsave(&ch->lock, ch_flags);

	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
	if (ret)
		goto err;

	ret = cpdma_chan_set_factors(ctlr, ch);
	if (ret)
		goto err;

	spin_unlock_irqrestore(&ch->lock, ch_flags);

	/* on shapers */
	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;

err:
	spin_unlock_irqrestore(&ch->lock, ch_flags);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	return ret;
}
u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
{
	unsigned long flags;
	u32 rate;

	spin_lock_irqsave(&ch->lock, flags);
	rate = ch->rate;
	spin_unlock_irqrestore(&ch->lock, flags);

	return rate;
}
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
				     cpdma_handler_fn handler, int rx_type)
{
	int offset = chan_num * 4;
	struct cpdma_chan *chan;
	unsigned long flags;

	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);

	if (__chan_linear(chan_num) >= ctlr->num_chan)
		return ERR_PTR(-EINVAL);

	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	spin_lock_irqsave(&ctlr->lock, flags);
	if (ctlr->channels[chan_num]) {
		spin_unlock_irqrestore(&ctlr->lock, flags);
		devm_kfree(ctlr->dev, chan);
		return ERR_PTR(-EBUSY);
	}

	chan->ctlr	= ctlr;
	chan->state	= CPDMA_STATE_IDLE;
	chan->chan_num	= chan_num;
	chan->handler	= handler;
	chan->rate	= 0;
	chan->weight	= 0;

	if (is_rx_chan(chan)) {
		chan->hdp	= ctlr->params.rxhdp + offset;
		chan->cp	= ctlr->params.rxcp + offset;
		chan->rxfree	= ctlr->params.rxfree + offset;
		chan->int_set	= CPDMA_RXINTMASKSET;
		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
		chan->td	= CPDMA_RXTEARDOWN;
		chan->dir	= DMA_FROM_DEVICE;
	} else {
		chan->hdp	= ctlr->params.txhdp + offset;
		chan->cp	= ctlr->params.txcp + offset;
		chan->int_set	= CPDMA_TXINTMASKSET;
		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
		chan->td	= CPDMA_TXTEARDOWN;
		chan->dir	= DMA_TO_DEVICE;
	}
	chan->mask = BIT(chan_linear(chan));

	spin_lock_init(&chan->lock);

	ctlr->channels[chan_num] = chan;

	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return chan;
}
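
/* Illustrative sketch (not from the original source): the handler registered
 * here receives the token passed to cpdma_chan_submit() once the hardware is
 * done with the buffer, e.g.:
 *
 *	static void tx_handler(void *token, int len, int status)
 *	{
 *		struct sk_buff *skb = token;
 *
 *		dev_kfree_skb_any(skb);
 *	}
 *
 * tx_handler is hypothetical; the real callbacks live in the cpsw/emac
 * drivers.
 */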
int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
{
	unsigned long flags;
	int desc_num;

	spin_lock_irqsave(&chan->lock, flags);
	desc_num = chan->desc_num;
	spin_unlock_irqrestore(&chan->lock, flags);

	return desc_num;
}
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr;
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	ctlr = chan->ctlr;

	spin_lock_irqsave(&ctlr->lock, flags);
	if (chan->state != CPDMA_STATE_IDLE)
		cpdma_chan_stop(chan);
	ctlr->channels[chan->chan_num] = NULL;
	devm_kfree(ctlr->dev, chan);
	cpdma_chan_split_pool(ctlr);

	spin_unlock_irqrestore(&ctlr->lock, flags);
	return 0;
}
int cpdma_chan_get_stats(struct cpdma_chan *chan,
			 struct cpdma_chan_stats *stats)
{
	unsigned long flags;

	if (!chan)
		return -EINVAL;
	spin_lock_irqsave(&chan->lock, flags);
	memcpy(stats, &chan->stats, sizeof(*stats));
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
static void __cpdma_chan_submit(struct cpdma_chan *chan,
				struct cpdma_desc __iomem *desc)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *prev = chan->tail;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	u32 mode;

	desc_dma = desc_phys(pool, desc);

	/* simple case - idle channel */
	if (!chan->head) {
		chan->stats.head_enqueue++;
		chan->head = desc;
		chan->tail = desc;
		if (chan->state == CPDMA_STATE_ACTIVE)
			chan_write(chan, hdp, desc_dma);
		return;
	}

	/* first chain the descriptor at the tail of the list */
	desc_write(prev, hw_next, desc_dma);
	chan->tail = desc;
	chan->stats.tail_enqueue++;

	/* next check if EOQ has been triggered already */
	mode = desc_read(prev, hw_mode);
	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
	    (chan->state == CPDMA_STATE_ACTIVE)) {
		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
		chan_write(chan, hdp, desc_dma);
		chan->stats.misqueued++;
	}
}
*si
)
1018 struct cpdma_chan
*chan
= si
->chan
;
1019 struct cpdma_ctlr
*ctlr
= chan
->ctlr
;
1021 struct cpdma_desc __iomem
*desc
;
1026 if (chan
->count
>= chan
->desc_num
) {
1027 chan
->stats
.desc_alloc_fail
++;
1031 desc
= cpdma_desc_alloc(ctlr
->pool
);
1033 chan
->stats
.desc_alloc_fail
++;
1037 if (len
< ctlr
->params
.min_packet_size
) {
1038 len
= ctlr
->params
.min_packet_size
;
1039 chan
->stats
.runt_transmit_buff
++;
1042 mode
= CPDMA_DESC_OWNER
| CPDMA_DESC_SOP
| CPDMA_DESC_EOP
;
1043 cpdma_desc_to_port(chan
, mode
, si
->directed
);
1046 buffer
= si
->data_dma
;
1047 dma_sync_single_for_device(ctlr
->dev
, buffer
, len
, chan
->dir
);
1049 buffer
= dma_map_single(ctlr
->dev
, si
->data_virt
, len
, chan
->dir
);
1050 ret
= dma_mapping_error(ctlr
->dev
, buffer
);
1052 cpdma_desc_free(ctlr
->pool
, desc
, 1);
1057 /* Relaxed IO accessors can be used here as there is read barrier
1058 * at the end of write sequence.
1060 writel_relaxed(0, &desc
->hw_next
);
1061 writel_relaxed(buffer
, &desc
->hw_buffer
);
1062 writel_relaxed(len
, &desc
->hw_len
);
1063 writel_relaxed(mode
| len
, &desc
->hw_mode
);
1064 writel_relaxed((uintptr_t)si
->token
, &desc
->sw_token
);
1065 writel_relaxed(buffer
, &desc
->sw_buffer
);
1066 writel_relaxed(si
->data_dma
? len
| CPDMA_DMA_EXT_MAP
: len
,
1068 desc_read(desc
, sw_len
);
1070 __cpdma_chan_submit(chan
, desc
);
1072 if (chan
->state
== CPDMA_STATE_ACTIVE
&& chan
->rxfree
)
1073 chan_write(chan
, rxfree
, 1);
int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
			   int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
				  dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
		      int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = data;
	si.data_dma = 0;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
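
/* Illustrative sketch (not from the original source): a TX path in the
 * wrapping driver might queue an skb roughly like this (error handling
 * trimmed):
 *
 *	ret = cpdma_chan_submit(txch, skb, skb->data, skb->len, 0);
 *	if (ret)
 *		// stop the queue or drop the packet
 *
 * The skb pointer is handed back as the token in the completion handler.
 */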
int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
			     dma_addr_t data, int len, int directed)
{
	struct submit_info si;
	unsigned long flags;
	int ret;

	si.chan = chan;
	si.token = token;
	si.data_virt = NULL;
	si.data_dma = data;
	si.len = len;
	si.directed = directed;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	ret = cpdma_chan_submit_si(&si);
	spin_unlock_irqrestore(&chan->lock, flags);
	return ret;
}
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	bool free_tx_desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	free_tx_desc = (chan->count < chan->desc_num) &&
		       gen_pool_avail(pool->gen_pool);
	spin_unlock_irqrestore(&chan->lock, flags);
	return free_tx_desc;
}
static void __cpdma_chan_free(struct cpdma_chan *chan,
			      struct cpdma_desc __iomem *desc,
			      int outlen, int status)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t buff_dma;
	int origlen;
	uintptr_t token;

	token = desc_read(desc, sw_token);
	origlen = desc_read(desc, sw_len);

	buff_dma = desc_read(desc, sw_buffer);
	if (origlen & CPDMA_DMA_EXT_MAP) {
		origlen &= ~CPDMA_DMA_EXT_MAP;
		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
					chan->dir);
	} else {
		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
	}

	cpdma_desc_free(pool, desc, 1);
	(*chan->handler)((void *)token, outlen, status);
}
static int __cpdma_chan_process(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc __iomem *desc;
	int status, outlen;
	int cb_status = 0;
	struct cpdma_desc_pool *pool = ctlr->pool;
	dma_addr_t desc_dma;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = chan->head;
	if (!desc) {
		chan->stats.empty_dequeue++;
		status = -ENOENT;
		goto unlock_ret;
	}
	desc_dma = desc_phys(pool, desc);

	status = desc_read(desc, hw_mode);
	outlen = status & 0x7ff;
	if (status & CPDMA_DESC_OWNER) {
		chan->stats.busy_dequeue++;
		status = -EBUSY;
		goto unlock_ret;
	}

	if (status & CPDMA_DESC_PASS_CRC)
		outlen -= CPDMA_DESC_CRC_LEN;

	status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
			   CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);

	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
	chan_write(chan, cp, desc_dma);
	chan->count--;
	chan->stats.good_dequeue++;

	if ((status & CPDMA_DESC_EOQ) && chan->head) {
		chan->stats.requeue++;
		chan_write(chan, hdp, desc_phys(pool, chan->head));
	}

	spin_unlock_irqrestore(&chan->lock, flags);
	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
		cb_status = -ENOSYS;
	else
		cb_status = status;

	__cpdma_chan_free(chan, desc, outlen, cb_status);
	return status;

unlock_ret:
	spin_unlock_irqrestore(&chan->lock, flags);
	return status;
}
int cpdma_chan_process(struct cpdma_chan *chan, int quota)
{
	int used = 0, ret = 0;

	if (chan->state != CPDMA_STATE_ACTIVE)
		return -EINVAL;

	while (used < quota) {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
		used++;
	}
	return used;
}
int cpdma_chan_start(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = cpdma_chan_set_chan_shaper(chan);
	spin_unlock_irqrestore(&ctlr->lock, flags);
	if (ret)
		return ret;

	ret = cpdma_chan_on(chan);
	if (ret)
		return ret;

	return 0;
}
int cpdma_chan_stop(struct cpdma_chan *chan)
{
	struct cpdma_ctlr *ctlr = chan->ctlr;
	struct cpdma_desc_pool *pool = ctlr->pool;
	unsigned long flags;
	int ret;
	unsigned timeout;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state == CPDMA_STATE_TEARDOWN) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	chan->state = CPDMA_STATE_TEARDOWN;
	dma_reg_write(ctlr, chan->int_clear, chan->mask);

	/* trigger teardown */
	dma_reg_write(ctlr, chan->td, chan_linear(chan));

	/* wait for teardown complete */
	timeout = 100 * 100; /* 100 ms */
	while (timeout) {
		u32 cp = chan_read(chan, cp);

		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
			break;
		udelay(10);
		timeout--;
	}
	WARN_ON(!timeout);
	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);

	/* handle completed packets */
	spin_unlock_irqrestore(&chan->lock, flags);
	do {
		ret = __cpdma_chan_process(chan);
		if (ret < 0)
			break;
	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
	spin_lock_irqsave(&chan->lock, flags);

	/* remaining packets haven't been tx/rx'ed, clean them up */
	while (chan->head) {
		struct cpdma_desc __iomem *desc = chan->head;
		dma_addr_t next_dma;

		next_dma = desc_read(desc, hw_next);
		chan->head = desc_from_phys(pool, next_dma);
		chan->count--;
		chan->stats.teardown_dequeue++;

		/* issue callback without locks held */
		spin_unlock_irqrestore(&chan->lock, flags);
		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
		spin_lock_irqsave(&chan->lock, flags);
	}

	chan->state = CPDMA_STATE_IDLE;
	spin_unlock_irqrestore(&chan->lock, flags);
	return 0;
}
int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (chan->state != CPDMA_STATE_ACTIVE) {
		spin_unlock_irqrestore(&chan->lock, flags);
		return -EINVAL;
	}

	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
		      chan->mask);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_get(ctlr, control);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctlr->lock, flags);
	ret = _cpdma_control_set(ctlr, control, value);
	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}
int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_rx_desc;
}
int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
{
	return ctlr->num_tx_desc;
}
int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
{
	unsigned long flags;
	int temp, ret;

	spin_lock_irqsave(&ctlr->lock, flags);

	temp = ctlr->num_rx_desc;
	ctlr->num_rx_desc = num_rx_desc;
	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	ret = cpdma_chan_split_pool(ctlr);
	if (ret) {
		ctlr->num_rx_desc = temp;
		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
	}

	spin_unlock_irqrestore(&ctlr->lock, flags);

	return ret;
}