/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */

#define dev_fmt(fmt) "QPLIB: " fmt
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
#include "qplib_tlv.h"
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 } };
static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
{
	u16 pcie_ctl2 = 0;

	if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
		return false;

	pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
}
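/*
 * Note: every slow-path verb in this file follows the same RCFW pattern:
 * build a cmdq_* request with bnxt_qplib_rcfw_cmd_prep(), bundle it with
 * its creq_* completion (and, where needed, a DMA-coherent side buffer)
 * via bnxt_qplib_fill_cmdqmsg(), post it with
 * bnxt_qplib_rcfw_send_message(), and parse the completion on success.
 */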
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
{
	struct creq_query_version_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_query_version req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_VERSION,
				 sizeof(req));

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return;

	fw_ver[0] = resp.fw_maj;
	fw_ver[1] = resp.fw_minor;
	fw_ver[2] = resp.fw_bld;
	fw_ver[3] = resp.fw_rsvd;
}
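/*
 * Commands whose response does not fit in the CREQ completion (such as
 * QUERY_FUNC below) return their payload through a side buffer: the
 * driver allocates a DMA-coherent buffer rounded up to
 * BNXT_QPLIB_CMDQE_UNITS, advertises its size to firmware in
 * req.resp_size (expressed in CMDQE units), and reads the *_resp_sb
 * structure from it once the command completes.
 */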
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr)
{
	struct creq_query_func_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct creq_query_func_resp_sb *sb;
	struct bnxt_qplib_rcfw_sbuf sbuf;
	struct bnxt_qplib_chip_ctx *cctx;
	struct cmdq_query_func req = {};
	u8 *tqm_alloc;
	int i, rc;
	u32 temp;

	cctx = rcfw->res->cctx;
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_FUNC,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	/* max_qp value reported by FW doesn't include the QP1 */
	attr->max_qp += 1;
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
	/*
	 * 128 WQEs need to be reserved for the HW (8916). Prevent
	 * reporting the max number.
	 */
	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
	attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
			    min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
		attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mw = le32_to_cpu(sb->max_mw);

	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = 64 * 1024;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);

	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;

	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
	if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
		attr->l2_db_size = (sb->l2_db_space_size + 1) *
				   (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
	/*
	 * Read the max gid supported by HW.
	 * For each entry in HW GID in HW table, we consume 2
	 * GID entries in the kernel GID table. So max_gid reported
	 * to stack can be up to twice the value reported by the HW, up to 256 gids.
	 */
	attr->max_sgid = le32_to_cpu(sb->max_gid);
	attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
	attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
	attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);

	bnxt_qplib_query_version(rcfw, attr->fw_ver);

	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
		attr->max_dpi = le32_to_cpu(sb->max_dpi);

	attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx)
{
	struct creq_set_func_resources_resp resp = {};
	struct cmdq_set_func_resources req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
				 sizeof(req));

	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
	req.number_of_srq = cpu_to_le32(ctx->srqc_count);
	req.number_of_cq = cpu_to_le32(ctx->cq_count);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		dev_err(&res->pdev->dev, "Failed to set function resources\n");

	return rc;
}
/* SGID */
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded SGID table max (%d)\n",
			index, sgid_tbl->max);
		return -EINVAL;
	}
	memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
	return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev, "SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
		    vlan_id == sgid_tbl->tbl[index].vlan_id)
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return 0;
	}
	/* Remove GID from the SGID table */
	if (update) {
		struct creq_delete_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_delete_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_DELETE_GID,
					 sizeof(req));
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->tbl[index].vlan_id = 0xFFFF;
	sgid_tbl->vlan[index] = 0;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		"SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	return 0;
}
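/*
 * The ADD_GID/MODIFY_GID commands below take the 16-byte GID as four
 * 32-bit big-endian words in reverse word order, which is why req.gid[0]
 * is built from ((u32 *)gid->data)[3] and so on. The source MAC is
 * likewise passed as three big-endian 16-bit words.
 */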
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, const u8 *smac,
			u16 vlan_id, bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
		    sgid_tbl->tbl[i].vlan_id == vlan_id) {
			dev_dbg(&res->pdev->dev,
				"SGID entry already exists in entry %d!\n", i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct creq_add_gid_resp resp = {};
		struct bnxt_qplib_cmdqmsg msg = {};
		struct cmdq_add_gid req = {};
		int rc;

		bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
					 CMDQ_BASE_OPCODE_ADD_GID,
					 sizeof(req));

		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
		/*
		 * driver should ensure that all RoCE traffic is always VLAN
		 * tagged if RoCE traffic is running on non-zero VLAN ID or
		 * RoCE traffic is running on non-zero Priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16
				(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |= cpu_to_le16
					(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					 CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
					sizeof(resp), 0);
		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
	sgid_tbl->active++;
	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = 1;

	dev_dbg(&res->pdev->dev,
		"SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	return 0;
}
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   const u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_gid req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_MODIFY_GID,
				 sizeof(req));

	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}
/* AHs */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_create_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_create_ah req = {};
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_CREATE_AH,
				 sizeof(req));

	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
						   CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
						  CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, 6);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}
int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			  bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_destroy_ah_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_destroy_ah req = {};
	int rc;

	/* Clean up the AH table in the device */
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DESTROY_AH,
				 sizeof(req));

	req.ah_cid = cpu_to_le32(ah->id);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	return rc;
}
/* MRW */
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct creq_deallocate_key_resp resp = {};
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
		return 0;
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
				 sizeof(req));

	req.mrw_flags = mrw->type;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		req.key = cpu_to_le32(mrw->rkey);
	else
		req.key = cpu_to_le32(mrw->lkey);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MRW memory */
	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mrw->hwq);

	return 0;
}
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_allocate_mrw_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_allocate_mrw req = {};
	unsigned long tmp;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_ALLOCATE_MRW,
				 sizeof(req));

	req.pd_id = cpu_to_le32(mrw->pd->id);
	req.mrw_flags = mrw->type;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
	     mrw->access_flags & BNXT_QPLIB_FR_PMR) ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
	tmp = (unsigned long)mrw;
	req.mrw_handle = cpu_to_le64(tmp);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp.xid);

	return 0;
}
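/*
 * For memory windows (type 1/2A/2B) the key returned by firmware in
 * resp.xid is stored as the rkey; for regular MRs it is stored as the
 * lkey. The same split is applied on the free path in
 * bnxt_qplib_free_mrw() when choosing which key to deallocate.
 */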
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_deregister_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_deregister_mr req = {};
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_DEREGISTER_MR,
				 sizeof(req));

	req.lkey = cpu_to_le32(mrw->lkey);
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), block);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res, &mrw->hwq);
	}

	return 0;
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      struct ib_umem *umem, int num_pbls, u32 buf_pg_size)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_register_mr_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_register_mr req = {};
	int pages, rc;
	u32 pg_size;
	u16 level;

	if (num_pbls) {
		pages = roundup_pow_of_two(num_pbls);
		/* Allocate memory for the non-leaf pages to store buf ptrs.
		 * Non-leaf pages always use system PAGE_SIZE.
		 */
		/* Free the hwq if it already exists, must be a rereg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res, &mr->hwq);
		hwq_attr.res = res;
		hwq_attr.depth = pages;
		hwq_attr.stride = sizeof(dma_addr_t);
		hwq_attr.type = HWQ_TYPE_MR;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.sginfo->umem = umem;
		hwq_attr.sginfo->npages = pages;
		hwq_attr.sginfo->pgsize = buf_pg_size;
		hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed\n");
			return -ENOMEM;
		}
	}

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_REGISTER_MR,
				 sizeof(req));

	/* Configure the request */
	if (mr->hwq.level == PBL_LVL_MAX) {
		/* No PBL provided, just use system PAGE_SIZE */
		level = 0;
		req.pbl = 0;
	} else {
		level = mr->hwq.level;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	}
	pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
					     CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
					    CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
	req.access = (mr->access_flags & 0xFFFF);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags))
		req.key = cpu_to_le32(mr->pd->id);
	req.flags = cpu_to_le16(mr->flags);
	req.mr_size = cpu_to_le64(mr->total_size);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto fail;

	if (_is_alloc_mr_unified(res->dattr->dev_cap_flags)) {
		mr->lkey = le32_to_cpu(resp.xid);
		mr->rkey = mr->lkey;
	}

	return 0;

fail:
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mr->hwq);
	return rc;
}
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl,
					int max_pg_ptrs)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int pg_ptrs, pages, rc;

	/* Re-calculate the max to fit the HWQ allocation model */
	pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
	pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
	if (!pages)
		pages++;

	if (pages > MAX_PBL_LVL_1_PGS)
		return -ENOMEM;

	sginfo.pgsize = PAGE_SIZE;

	hwq_attr.res = res;
	hwq_attr.depth = pg_ptrs;
	hwq_attr.stride = PAGE_SIZE;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
	if (!rc)
		frpl->max_pg_ptrs = pg_ptrs;

	return rc;
}
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_frpl *frpl)
{
	bnxt_qplib_free_hwq(res, &frpl->hwq);
	return 0;
}
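/*
 * QUERY_ROCE_STATS returns absolute counters. For the out-of-sequence
 * drop count the driver remembers the last value read in rcfw->oos_prev
 * and accumulates only the delta, masked with BNXT_QPLIB_OOS_COUNT_MASK,
 * which keeps the count reported to the stack monotonic if the
 * firmware-side counter wraps.
 */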
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
			      struct bnxt_qplib_roce_stats *stats)
{
	struct creq_query_roce_stats_resp resp = {};
	struct creq_query_roce_stats_resp_sb *sb;
	struct cmdq_query_roce_stats req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
	stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
	stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	stats->missing_resp = le64_to_cpu(sb->missing_resp);
	stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
	stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
	stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
	stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
	stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
	stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
	stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
	stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
	stats->dup_req = le64_to_cpu(sb->dup_req);
	stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
	stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
	stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
	stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
	stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
	stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
	stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
	stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
	stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
	stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
	stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
	stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
	stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
	stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
	stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
	stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
	stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
	stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
	stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
	stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
	stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
	stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
	stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
	stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
	stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
	if (!rcfw->init_oos_stats) {
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
		rcfw->init_oos_stats = 1;
	} else {
		stats->res_oos_drop_count +=
				(le64_to_cpu(sb->res_oos_drop_count) -
				 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
	}

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
			 struct bnxt_qplib_ext_stat *estat)
{
	struct creq_query_roce_stats_ext_resp resp = {};
	struct creq_query_roce_stats_ext_resp_sb *sb;
	struct cmdq_query_roce_stats_ext req = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;
	sb = sbuf.sb;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
				 sizeof(req));

	req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	req.function_id = cpu_to_le32(fid);
	req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto bail;

	estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
	estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
	estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
	estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
	estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
	estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
	estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
	estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
	estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
	estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
	estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
	estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
	estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
	estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
	estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
	estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
	estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
	estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
	estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
	estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
	estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);

bail:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
			  sbuf.sb, sbuf.dma_addr);
	return rc;
}
static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
				    struct bnxt_qplib_cc_param_ext *cc_ext)
{
	ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
	cc_ext->ext_mask = 0;
	ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
	ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
	ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
	ext_req->tr_update_mode = cc_ext->tr_update_mode;
	ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
	ext_req->fr_num_rtts = cc_ext->fr_rtt;
	ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
	ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
	ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
	ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
	ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
	ext_req->actual_cr_factor = cc_ext->cr_factor;
	ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
	ext_req->cp_bias_en = cc_ext->cp_bias_en;
	ext_req->cp_bias = cc_ext->cp_bias;
	ext_req->cnp_ecn = cc_ext->cnp_ecn;
	ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
	ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
	ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
	ext_req->cr_width = cc_ext->cr_width;
	ext_req->quota_period_min = cc_ext->min_quota;
	ext_req->quota_period_max = cc_ext->max_quota;
	ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
	ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
	ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
	ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
	ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
	ext_req->red_div = cc_ext->red_div;
	ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
	ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
	ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
	ext_req->use_rate_table = cc_ext->low_rate_en;
	ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
	ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
	ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
	ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
	ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
	ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
	ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
	ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
}
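/*
 * On Gen P5 and newer chips MODIFY_ROCE_CC is sent as a TLV command:
 * tlv_req carries the legacy base command behind a primary roce_tlv
 * header, followed by the gen1 extension filled in by
 * bnxt_qplib_fill_cc_gen1() behind a secondary TLV header. Older chips
 * send only the base command, so cmd and req_size are switched
 * accordingly before the message is posted.
 */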
int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_cc_param *cc_param)
{
	struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
	struct creq_modify_roce_cc_resp resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_modify_roce_cc *req;
	int req_size;
	void *cmd;
	int rc;

	/* Prepare the older base command */
	req = &tlv_req.base_req;
	cmd = req;
	req_size = sizeof(*req);
	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
				 sizeof(*req));
	req->modify_mask = cpu_to_le32(cc_param->mask);
	req->enable_cc = cc_param->enable;
	req->g = cc_param->g;
	req->num_phases_per_state = cc_param->nph_per_state;
	req->time_per_phase = cc_param->time_pph;
	req->pkts_per_phase = cc_param->pkts_pph;
	req->init_cr = cpu_to_le16(cc_param->init_cr);
	req->init_tr = cpu_to_le16(cc_param->init_tr);
	req->tos_dscp_tos_ecn = (cc_param->tos_dscp << CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
				(cc_param->tos_ecn & CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
	req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
	req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
	req->rtt = cpu_to_le16(cc_param->rtt);
	req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
	req->cc_mode = cc_param->cc_mode;
	req->inactivity_th = cpu_to_le16(cc_param->inact_th);

	/* For chip gen P5 onwards fill extended cmd and header */
	if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
		struct roce_tlv *hdr;
		u32 payload;
		u32 chunks;

		cmd = &tlv_req;
		req_size = sizeof(tlv_req);
		/* Prepare primary tlv header */
		hdr = &tlv_req.tlv_hdr;
		chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
		payload = sizeof(struct cmdq_modify_roce_cc);
		__roce_1st_tlv_prep(hdr, chunks, payload, true);
		/* Prepare secondary tlv header */
		hdr = (struct roce_tlv *)&tlv_req.ext_req;
		payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
			  sizeof(struct roce_tlv);
		__roce_ext_tlv_prep(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload, false, true);
		bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
	}

	bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
	return rc;
}
int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 res_type,
			    u32 xid, u32 resp_size, void *resp_va)
{
	struct creq_read_context resp = {};
	struct bnxt_qplib_cmdqmsg msg = {};
	struct cmdq_read_context req = {};
	struct bnxt_qplib_rcfw_sbuf sbuf;
	int rc;

	sbuf.size = resp_size;
	sbuf.sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf.size,
				     &sbuf.dma_addr, GFP_KERNEL);
	if (!sbuf.sb)
		return -ENOMEM;

	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
				 CMDQ_BASE_OPCODE_READ_CONTEXT, sizeof(req));
	req.resp_addr = cpu_to_le64(sbuf.dma_addr);
	req.resp_size = resp_size / BNXT_QPLIB_CMDQE_UNITS;

	req.xid = cpu_to_le32(xid);
	req.type = res_type;

	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
				sizeof(resp), 0);
	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
	if (rc)
		goto free_mem;

	memcpy(resp_va, sbuf.sb, resp_size);
free_mem:
	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
	return rc;
}