/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
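/* Shared buffer (SB) configuration for the Spectrum ASIC: static setup of
 * pools, per-port and per-{port, TC} quotas, plus the devlink shared buffer
 * callbacks (pool/threshold get and set, occupancy snapshots).
 */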
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
};

struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};
#define MLXSW_SP_SB_POOL_COUNT	4
#define MLXSW_SP_SB_TC_COUNT	8

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
	struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
};
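/* The hardware accounts for buffer space in fixed-size cells; the cell size
 * is read from the CELL_SIZE resource in mlxsw_sp_buffers_init(). The two
 * helpers below convert between bytes (used by devlink and the byte-based
 * tables in this file) and cells (used when packing the SB registers).
 */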
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->prs[dir][pool];
}

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->ports[local_port].cms[dir][pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pool,
						 enum mlxsw_reg_sbxx_dir dir)
{
	return &mlxsw_sp->sb->ports[local_port].pms[dir][pool];
}
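/* The write helpers below keep a shadow copy of each successfully written
 * entry, so that later devlink "get" callbacks can be answered from host
 * memory instead of querying the device again.
 */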
static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
				enum mlxsw_reg_sbxx_dir dir,
				enum mlxsw_reg_sbpr_mode mode, u32 size)
{
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff, u8 pool)
{
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
			    min_buff, max_buff, pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;
	if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
		struct mlxsw_sp_sb_cm *cm;

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool = pool;
	}
	return 0;
}
static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pool, enum mlxsw_reg_sbxx_dir dir,
				u32 min_buff, u32 max_buff)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}
static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}
static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}
static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u8 pool, enum mlxsw_reg_sbxx_dir dir,
				    struct list_head *bulk_list)
{
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
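/* Occupancy reads are asynchronous: each query is queued on 'bulk_list' and
 * its callback runs once mlxsw_reg_trans_bulk_wait() completes the whole
 * batch (see mlxsw_sp_sb_occ_snapshot() below).
 */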
static const u16 mlxsw_sp_pbs[] = {
	[0] = 2 * ETH_FRAME_LEN,
	[9] = 2 * MLXSW_PORT_MAX_MTU,
};

#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
#define MLXSW_SP_PB_UNUSED 8
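/* Port headroom layout: buffer 0 is sized for two maximum-size Ethernet
 * frames and receives traffic of all priorities, since
 * mlxsw_sp_port_pb_prio_init() below maps every priority to it. Index 8 is
 * unused, and buffer 9 (two maximum MTUs) is presumably reserved for
 * control traffic.
 */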
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp_pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}
static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}
static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;
	return 0;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->sb->ports);
}
#define MLXSW_SP_SB_PR_INGRESS_SIZE	12440000
#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE	(200 * 1000)
#define MLXSW_SP_SB_PR_EGRESS_SIZE	13232000

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}
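/* Each direction has four pools. Pool 0 holds the bulk of the buffer
 * (~12.4 MB ingress, ~13.2 MB egress); the remaining pools are created
 * empty, except ingress pool 3 (200 KB) which, judging by its _MNG_ name,
 * serves management traffic. All pools use dynamic (alpha-based) thresholds.
 */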
static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
		       MLXSW_SP_SB_PR_INGRESS_MNG_SIZE),
};

#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)

static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_PR_EGRESS_SIZE),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
};

#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_pr *prs,
				  size_t prs_len)
{
	int i;
	int err;

	for (i = 0; i < prs_len; i++) {
		u32 size = mlxsw_sp_bytes_cells(mlxsw_sp, prs[i].size);

		err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir, prs[i].mode, size);
		if (err)
			return err;
	}
	return 0;
}
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_prs_ingress,
				     MLXSW_SP_SB_PRS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_prs_egress,
				      MLXSW_SP_SB_PRS_EGRESS_LEN);
}
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}
static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM(10000, 8, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
	MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(20000, 1, 3),
};

#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)

static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(1500, 9, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(0, 0, 0),
	MLXSW_SP_SB_CM(1, 0xff, 0),
};

#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(10000, 0, 0),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
	ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
					   min_buff, cm->max_buff, cm->pool);
		if (err)
			return err;
	}
	return 0;
}
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp_sb_cms_ingress,
				     MLXSW_SP_SB_CMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_sb_cms_egress,
				      MLXSW_SP_SB_CMS_EGRESS_LEN);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp_cpu_port_sb_cms,
				      MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};

#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)

static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				       enum mlxsw_reg_sbxx_dir dir,
				       const struct mlxsw_sp_sb_pm *pms,
				       size_t pms_len)
{
	int i;
	int err;

	for (i = 0; i < pms_len; i++) {
		const struct mlxsw_sp_sb_pm *pm;

		pm = &pms[i];
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
					   pm->min_buff, pm->max_buff);
		if (err)
			return err;
	}
	return 0;
}
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					  mlxsw_sp_port->local_port,
					  MLXSW_REG_SBXX_DIR_INGRESS,
					  mlxsw_sp_sb_pms_ingress,
					  MLXSW_SP_SB_PMS_INGRESS_LEN);
	if (err)
		return err;
	return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
					   mlxsw_sp_port->local_port,
					   MLXSW_REG_SBXX_DIR_EGRESS,
					   mlxsw_sp_sb_pms_egress,
					   MLXSW_SP_SB_PMS_EGRESS_LEN);
}
struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u8 pool;
};

#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool = _pool,				\
	}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
	MLXSW_SP_SB_MM(20000, 0xff, 0),
};

#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
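/* Multicast quota records written via the SBMM register, apparently one per
 * switch priority: each gives multicast traffic a 20000-byte minimum quota
 * in pool 0. Since the pools use dynamic thresholds, the 0xff 'max_buff' is
 * a threshold value rather than a cell count.
 */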
static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < MLXSW_SP_SB_MMS_LEN; i++) {
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp_sb_mms[i];
		/* All pools are initialized using dynamic thresholds,
		 * therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    mc->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 sb_size;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EIO;
	sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE);

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0, sb_size,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_POOL_COUNT,
				  MLXSW_SP_SB_TC_COUNT,
				  MLXSW_SP_SB_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}
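/* devlink exposes a flat pool index space: indices 0..3 are the ingress
 * pools and 4..7 the egress pools. The helpers below translate between that
 * flat index and the (pool, direction) pair used internally.
 */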
static u8 pool_get(u16 pool_index)
{
	return pool_index % MLXSW_SP_SB_POOL_COUNT;
}

static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
{
	u16 pool_index;

	pool_index = (u16) pool;
	if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
		pool_index += MLXSW_SP_SB_POOL_COUNT;
	return pool_index;
}

static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
{
	return pool_index < MLXSW_SP_SB_POOL_COUNT ?
	       MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
}
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	return 0;
}
int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	enum mlxsw_reg_sbpr_mode mode;

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE))
		return -EINVAL;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
}
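/* For pools in dynamic mode, devlink thresholds are alpha indices rather
 * than byte counts. The offset below maps the devlink-visible range onto
 * the hardware range (devlink 3..16 <-> hardware 1..14, per the comment on
 * the define); static thresholds are converted between bytes and cells
 * instead.
 */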
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
				     enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}
static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
				    enum mlxsw_reg_sbxx_dir dir, u32 threshold,
				    u32 *p_max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
			return -EINVAL;
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}
int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
						 pm->max_buff);
	return 0;
}
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	u32 max_buff;
	int err;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
				    0, max_buff);
}
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
						 cm->max_buff);
	*p_pool_index = pool_index_get(cm->pool, dir);
	return 0;
}
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u8 pool = pool_get(pool_index);
	u32 max_buff;
	int err;

	if (dir != dir_get(pool_index))
		return -EINVAL;

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
				       threshold, &max_buff);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
				    0, max_buff, pool);
}
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))

struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};
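/* A single SBSR query returns at most MLXSW_REG_SBSR_REC_MAX_COUNT records
 * and each port contributes 2 * MLXSW_SP_SB_TC_COUNT of them (ingress +
 * egress TCs), so ports are polled in batches of MASKED_COUNT_MAX. The
 * batch context is packed into the 'unsigned long' callback cookie.
 */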
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			break;
	}
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port = 0;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

next_batch:
	local_port++;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	}
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_INGRESS,
						       &bulk_list);
			if (err)
				goto out;
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       MLXSW_REG_SBXX_DIR_EGRESS,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			break;
	}
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core))
		goto next_batch;

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pool = pool_get(pool_index);
	enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}