/*
 * Driver for Marvell NETA network controller Buffer Manager.
 *
 * Copyright (C) 2015 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#define MVNETA_BM_DRIVER_NAME "mvneta_bm"
#define MVNETA_BM_DRIVER_VERSION "1.0"
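
/* Simple accessors for the Buffer Manager registers mapped at
 * priv->reg_base.
 */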
static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data)
{
	writel(data, priv->reg_base + offset);
}
static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset)
{
	return readl(priv->reg_base + offset);
}
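
/* Enabling a pool sets the enable bit in its base address register and
 * clears any pending BM interrupt cause; disabling clears the bit again.
 */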
static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val |= MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);
}
static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id));
	val &= ~MVNETA_BM_POOL_ENABLE_MASK;
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val);
}
static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val |= mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}
static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);
	val &= ~mask;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}
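
/* Program the Mbus crossbar target ID and attributes used for this pool's
 * buffer pointer transfers to/from DRAM.
 */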
static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
				      u8 target_id, u8 attr)
{
	u32 val;

	val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id));
	val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id);
	val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id);
	val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id);
	val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr);

	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
}
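
/* hwbm ->construct callback: store the buffer's virtual address in its
 * first word, DMA-map the buffer and hand its physical address to the pool.
 */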
int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct mvneta_bm_pool *bm_pool =
		(struct mvneta_bm_pool *)hwbm_pool->priv;
	struct mvneta_bm *priv = bm_pool->priv;
	dma_addr_t phys_addr;

	/* In order to update the buf_cookie field of the RX descriptor
	 * properly, the BM hardware expects the buffer's virtual address
	 * to be placed in the first four bytes of the mapped buffer.
	 */
	*(u32 *)buf = (u32)buf;
	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
		return -ENOMEM;

	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);

	return 0;
}
EXPORT_SYMBOL_GPL(mvneta_bm_construct);
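
/* Create pool: allocate the pool's buffer pointer ring in DRAM, point the
 * hardware at it and enable the pool.
 */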
static int mvneta_bm_pool_create(struct mvneta_bm *priv,
				 struct mvneta_bm_pool *bm_pool)
{
	struct platform_device *pdev = priv->pdev;
	u8 target_id, attr;
	int size_bytes, err;

	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
						&bm_pool->phys_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id,
					   &attr);
	if (err < 0) {
		dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
				  bm_pool->phys_addr);
		return err;
	}

	/* Set pool address */
	mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id),
			bm_pool->phys_addr);

	mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr);
	mvneta_bm_pool_enable(priv, bm_pool->id);

	return 0;
}
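
/* Example (sketch only, not part of this driver): a NETA port driver is
 * expected to claim a pool roughly as follows, falling back to software
 * buffer management when NULL is returned:
 *
 *	pool = mvneta_bm_pool_use(bm_priv, pool_id, MVNETA_BM_LONG,
 *				  port_id, pkt_size);
 *	if (!pool)
 *		... fall back to SW buffer management ...
 */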
/* Notify the driver that the BM pool is being used as a specific type and
 * return the pool pointer on success
 */
struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
					  enum mvneta_bm_type type, u8 port_id,
					  int pkt_size)
{
	struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id];
	int num, err;

	if (new_pool->type == MVNETA_BM_LONG &&
	    new_pool->port_map != 1 << port_id) {
		dev_err(&priv->pdev->dev,
			"long pool cannot be shared by the ports\n");
		return NULL;
	}

	if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) {
		dev_err(&priv->pdev->dev,
			"mixing pools' types between the ports is forbidden\n");
		return NULL;
	}

	if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT)
		new_pool->pkt_size = pkt_size;

	/* Allocate buffers in case BM pool hasn't been used yet */
	if (new_pool->type == MVNETA_BM_FREE) {
		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;

		new_pool->priv = priv;
		new_pool->type = type;
		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
		hwbm_pool->frag_size =
			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		hwbm_pool->construct = mvneta_bm_construct;
		hwbm_pool->priv = new_pool;
		spin_lock_init(&hwbm_pool->lock);

		/* Create new pool */
		err = mvneta_bm_pool_create(priv, new_pool);
		if (err) {
			dev_err(&priv->pdev->dev, "fail to create pool %d\n",
				new_pool->id);
			return NULL;
		}

		/* Allocate buffers for this pool */
		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
		if (num != hwbm_pool->size) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, hwbm_pool->size);
			return NULL;
		}
	}

	return new_pool;
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_use);
/* Free all buffers from the pool */
void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
			 u8 port_map)
{
	int i;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
		dma_addr_t buf_phys_addr;
		u32 *vaddr;

		/* Get buffer physical address (indirect access) */
		buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool);

		/* Work around a problem seen when destroying the pool:
		 * a read access to the BPPI sometimes returns 0.
		 */
		if (buf_phys_addr == 0)
			continue;

		vaddr = phys_to_virt(buf_phys_addr);
		if (!vaddr)
			break;

		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
	}

	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->hwbm_pool.buf_num -= i;
}
EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
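
/* Destroy pool: drain and unmap all buffers, release the buffer pointer
 * ring and disable the pool in hardware.
 */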
void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
			    struct mvneta_bm_pool *bm_pool, u8 port_map)
{
	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;

	bm_pool->port_map &= ~port_map;
	if (bm_pool->port_map)
		return;

	bm_pool->type = MVNETA_BM_FREE;

	mvneta_bm_bufs_free(priv, bm_pool, port_map);
	if (hwbm_pool->buf_num)
		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);

	if (bm_pool->virt_addr) {
		dma_free_coherent(&priv->pdev->dev,
				  sizeof(u32) * hwbm_pool->size,
				  bm_pool->virt_addr, bm_pool->phys_addr);
		bm_pool->virt_addr = NULL;
	}

	mvneta_bm_pool_disable(priv, bm_pool->id);
}
EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy);
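
/* Initialize all pools: start the BM unit, reset every pool's read/write
 * pointers and take capacity and packet size overrides from the device tree.
 */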
static void mvneta_bm_pools_init(struct mvneta_bm *priv)
{
	struct device_node *dn = priv->pdev->dev.of_node;
	struct mvneta_bm_pool *bm_pool;
	char prop[15];
	u32 size;
	int i;

	/* Activate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK);

	/* Create all pools with maximum size */
	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		bm_pool->type = MVNETA_BM_FREE;

		/* Reset read pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0);

		/* Reset write pointer */
		mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0);

		/* Configure pool size according to DT or use default value */
		sprintf(prop, "pool%d,capacity", i);
		if (of_property_read_u32(dn, prop, &size)) {
			size = MVNETA_BM_POOL_CAP_DEF;
		} else if (size > MVNETA_BM_POOL_CAP_MAX) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MAX);
			size = MVNETA_BM_POOL_CAP_MAX;
		} else if (size < MVNETA_BM_POOL_CAP_MIN) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, set to %d\n",
				 i, size, MVNETA_BM_POOL_CAP_MIN);
			size = MVNETA_BM_POOL_CAP_MIN;
		} else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) {
			dev_warn(&priv->pdev->dev,
				 "Illegal pool %d capacity %d, round to %d\n",
				 i, size,
				 ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN));
			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
		}
		bm_pool->hwbm_pool.size = size;

		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
				bm_pool->hwbm_pool.size);

		/* Obtain custom pkt_size from DT */
		sprintf(prop, "pool%d,pkt-size", i);
		if (of_property_read_u32(dn, prop, &bm_pool->pkt_size))
			bm_pool->pkt_size = 0;
	}
}
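
/* Default configuration: mask and clear the BM interrupts and reduce the
 * maximum in-burst size to 16 buffer pointers.
 */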
static void mvneta_bm_default_set(struct mvneta_bm *priv)
{
	u32 val;

	/* Mask BM all interrupts */
	mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0);

	/* Clear BM cause register */
	mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0);

	/* Set BM configuration register */
	val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG);

	/* Reduce MaxInBurstSize from 32 BPs to 16 BPs */
	val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK;
	val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP;
	mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val);
}
static int mvneta_bm_init(struct mvneta_bm *priv)
{
	mvneta_bm_default_set(priv);

	/* Allocate and initialize BM pools structures */
	priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM,
				      sizeof(struct mvneta_bm_pool),
				      GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	mvneta_bm_pools_init(priv);

	return 0;
}
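
/* The buffer pointers' internal memory (BPPI) lives in an SRAM exposed as a
 * genalloc pool, referenced by the "internal-mem" phandle in the device tree.
 */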
static int mvneta_bm_get_sram(struct device_node *dn,
			      struct mvneta_bm *priv)
{
	priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0);
	if (!priv->bppi_pool)
		return -ENOMEM;

	priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool,
						  MVNETA_BM_BPPI_SIZE,
						  &priv->bppi_phys_addr);
	if (!priv->bppi_virt_addr)
		return -ENOMEM;

	return 0;
}
static void mvneta_bm_put_sram(struct mvneta_bm *priv)
{
	gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr,
		      MVNETA_BM_BPPI_SIZE);
}
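
/* Probe: map the BM registers, enable the clock, reserve the BPPI SRAM and
 * initialize the Buffer Manager unit.
 */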
static int mvneta_bm_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct mvneta_bm *priv;
	struct resource *res;
	int err;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);
	err = clk_prepare_enable(priv->clk);
	if (err < 0)
		return err;

	err = mvneta_bm_get_sram(dn, priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to allocate internal memory\n");
		goto err_clk;
	}

	priv->pdev = pdev;

	/* Initialize buffer manager internals */
	err = mvneta_bm_init(priv);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to initialize controller\n");
		goto err_sram;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n");

	return 0;

err_sram:
	mvneta_bm_put_sram(priv);
err_clk:
	clk_disable_unprepare(priv->clk);
	return err;
}
static int mvneta_bm_remove(struct platform_device *pdev)
{
	struct mvneta_bm *priv = platform_get_drvdata(pdev);
	u8 all_ports_map = 0xff;
	int i;

	for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) {
		struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i];

		mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map);
	}

	mvneta_bm_put_sram(priv);

	/* Deactivate BM unit */
	mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK);

	clk_disable_unprepare(priv->clk);

	return 0;
}
static const struct of_device_id mvneta_bm_match[] = {
	{ .compatible = "marvell,armada-380-neta-bm" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_bm_match);
static struct platform_driver mvneta_bm_driver = {
	.probe = mvneta_bm_probe,
	.remove = mvneta_bm_remove,
	.driver = {
		.name = MVNETA_BM_DRIVER_NAME,
		.of_match_table = mvneta_bm_match,
	},
};

module_platform_driver(mvneta_bm_driver);
MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
MODULE_LICENSE("GPL v2");