/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"

#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"

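/* All register access below goes through these two MMIO helpers, so every
 * read and write is relative to the ioremapped reg_base of the BM unit.
 */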
static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}

static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}

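/* The per-pool enable bit lives in the pool's base address register; after
 * setting it, the enable path also clears the BM interrupt cause register
 * to drop any stale events.
 */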
static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}

static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}

static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

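/* The pool's external BPPE memory is reached through the Mbus crossbar;
 * target_id and attr select the DRAM window decoding that region (they are
 * obtained from mvebu_mbus_get_dram_win_info() at pool creation).
 */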
static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}

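/* hwbm buffer constructor, called for each buffer added to a pool: it
 * stores the buffer's virtual address in the buffer's first word, DMA-maps
 * the buffer and hands the resulting physical address to the BM hardware.
 */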
int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update buf_cookie field of RX descriptor properly,
	 * BM hardware expects buf virtual address to be placed in the
	 * first four bytes of mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);

/* Create pool */
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}

/* Notify the driver that BM pool is being used as specific type and return the
 * pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		mutex_init(&hwbm_pool->buf_lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err < 0) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);

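/* Illustrative sketch, not part of this driver: a NETA port driver claims
 * its pools roughly as below. The variable names are assumptions; the real
 * call sites live in mvneta.c.
 *
 *	pool_long = mvneta_bm_pool_use(bm_priv, long_pool_id, MVNETA_BM_LONG,
 *				       port_id, MVNETA_RX_PKT_SIZE(mtu));
 *	if (!pool_long)
 *		return -ENOMEM;
 */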
/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work-around to the problems when destroying the pool,
		 * when it occurs that a read access to BPPI returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);

/* Cleanup pool */
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);

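/* Pool geometry comes from optional per-pool DT properties: "poolN,capacity"
 * (number of buffer pointers, clamped and aligned below) and "poolN,pkt-size"
 * (0 leaves the packet size to be chosen later via mvneta_bm_pool_use()).
 */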
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size, ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}

static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask BM all interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}

static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}

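/* The BM keeps its buffer pointer internal memory (BPPI) in on-chip SRAM,
 * exposed to the driver as a genalloc pool referenced by the "internal-mem"
 * phandle in the device tree.
 */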
static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}

static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}

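/* mvneta_bm_get() takes a reference on the BM platform device through
 * of_find_device_by_node(); callers must balance it with mvneta_bm_put()
 * once they are done with the buffer manager.
 */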
struct mvneta_bm *mvneta_bm_get(struct device_node *node)
{
	struct platform_device *pdev = of_find_device_by_node(node);

	return pdev ? platform_get_drvdata(pdev) : NULL;
}
EXPORT_SYMBOL_GPL(mvneta_bm_get);

void mvneta_bm_put(struct mvneta_bm *priv)
{
	platform_device_put(priv->pdev);
}
EXPORT_SYMBOL_GPL(mvneta_bm_put);

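/* Probe order: map registers, enable the clock, carve the BPPI out of SRAM,
 * then program controller defaults and pool geometry. Each failure path
 * unwinds only what was set up before it.
 */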
static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	dn->data = priv;
	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}

static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);

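/* Illustrative device tree node, adapted from the binding for
 * "marvell,armada-380-neta-bm"; the address, clock phandle and per-pool
 * values below are placeholders:
 *
 *	bm: bm@c8000 {
 *		compatible = "marvell,armada-380-neta-bm";
 *		reg = <0xc8000 0xac>;
 *		clocks = <&gateclk 13>;
 *		internal-mem = <&bm_bppi>;
 *		pool2,capacity = <4096>;
 *		pool1,pkt-size = <512>;
 *	};
 */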
static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);

MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");