/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
#define MLXSW_SP_SB_POOL_COUNT	4
#define MLXSW_SP_SB_TC_COUNT	8
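
/* Pool and PG/TC buffer counts are per direction (ingress and egress);
 * the shadow state below is therefore indexed [dir][index].
 */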
struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};
struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
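
/* For example, assuming a 96-byte cell (the CELL_SIZE resource reported
 * by Spectrum), mlxsw_sp_bytes_cells() maps 1500 bytes to
 * DIV_ROUND_UP(1500, 96) = 16 cells, and converting back yields
 * 16 * 96 = 1536 bytes; rounding up ensures byte-based quotas are never
 * under-provisioned in cells.
 */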
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
}
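
/* The *_write helpers below push configuration to the device through the
 * SBPR/SBCM/SBPM registers and, only on success, mirror the values into
 * the shadow structs above, so the devlink getters can answer from memory
 * instead of querying hardware.
 */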
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pr->mode = mode;
	pr->size = size;
	return 0;
}

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8
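
/* Port headroom: buffer 0 is sized for two maximum-length Ethernet frames
 * and buffer 9 for two maximum-MTU frames; buffer 8 is unused and skipped
 * below, and the remaining buffers are left with a zero size.
 */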
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
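
/* All eight 802.1p priorities are initially mapped to port buffer 0,
 * i.e. the default headroom setup is not PFC-aware; the mapping can be
 * rewritten later, e.g. when DCB/PFC is configured.
 */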
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb->ports);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE	(200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE	13232000
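
/* Static split of the shared buffer: ~12.44 MB for ingress plus a
 * dedicated 200 KB management pool, and ~13.232 MB for egress; the
 * remaining pools in each direction start out with a zero size.
 */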
#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)

static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);

		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}
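
/* Per-PG (ingress) and per-TC (egress) quotas. When the bound pool uses
 * dynamic thresholds, max_buff is not a cell count but an alpha index in
 * the MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN..MAX range; see the threshold
 * conversion helpers further down.
 */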
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)

#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(10000, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   min_buff, cm->max_buff, cm->pool);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}
static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)

static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	int i;
	int err;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm;

		pm = &pms[i];
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}
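
/* Multicast quotas, programmed per switch priority through the SBMM
 * register; every priority gets the same 20000-byte minimum quota
 * against pool 0.
 */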
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 sb_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_TC_COUNT,
				  MLXSW_SP_SB_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
	u16 pool_index;

	pool_index = pool;
	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
		pool_index += MLXSW_SP_SB_POOL_COUNT;
	return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}
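
/* devlink uses a flat pool index that encodes both direction and pool:
 * indices 0..3 are the ingress pools and 4..7 the egress pools, so e.g.
 * pool_index 5 resolves to egress pool 1.
 */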
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
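
/* In dynamic mode the hardware max_buff field holds an alpha index in
 * the MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN..MAX range, while devlink expresses
 * dynamic thresholds shifted up by two; the helpers below apply this
 * offset in both directions, e.g. a devlink threshold of 3 is written to
 * hardware as 1.
 */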
static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}

int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}

int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
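
/* A single SBSR transaction returns MLXSW_SP_SB_TC_COUNT ingress and
 * MLXSW_SP_SB_TC_COUNT egress occupancy records per unmasked port, so
 * this is the largest number of ports that fits into one query.
 */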
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;
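
	/* Ports are walked in batches of at most MASKED_COUNT_MAX. For each
	 * batch, one SBSR query collects the per-PG/TC occupancy of all its
	 * ports while per-pool SBPM queries are bulked alongside; the
	 * transaction callback then unpacks the records into the shadow
	 * state.
	 */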
next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}