/* Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: Slow Path Operators
 */
#define dev_fmt(fmt) "QPLIB: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include "roce_hsi.h"

#include "qplib_res.h"
#include "qplib_rcfw.h"
#include "qplib_sp.h"
const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
						     0, 0, 0, 0, 0, 0, 0, 0 } };
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
				     char *fw_ver)
{
	struct cmdq_query_version req;
	struct creq_query_version_resp resp;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_VERSION, cmd_flags);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return;
	fw_ver[0] = resp.fw_maj;
	fw_ver[1] = resp.fw_minor;
	fw_ver[2] = resp.fw_bld;
	fw_ver[3] = resp.fw_rsvd;
}
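
/*
 * Note: every verb in this file follows the same RCFW slow-path pattern
 * seen above - build a cmdq_* request with RCFW_CMD_PREP(), post it with
 * bnxt_qplib_rcfw_send_message(), then read the result out of the
 * creq_*_resp completion (or out of a DMA-able side buffer when the
 * result is larger than a completion entry, as in QUERY_FUNC below).
 */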
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
			    struct bnxt_qplib_dev_attr *attr, bool vf)
{
	struct cmdq_query_func req;
	struct creq_query_func_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_func_resp_sb *sb;
	u16 cmd_flags = 0;
	u32 temp;
	u8 *tqm_alloc;
	int i, rc = 0;

	RCFW_CMD_PREP(req, QUERY_FUNC, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf) {
		dev_err(&rcfw->pdev->dev,
			"SP: QUERY_FUNC alloc side buffer failed\n");
		return -ENOMEM;
	}

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;

	/* Extract the context from the side buffer */
	attr->max_qp = le32_to_cpu(sb->max_qp);
	/* max_qp value reported by FW for PF doesn't include the QP1 for PF */
	if (!vf)
		attr->max_qp += 1;
	attr->max_qp_rd_atom =
		sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
	attr->max_qp_init_rd_atom =
		sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
		BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
	attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
	/*
	 * 128 WQEs need to be reserved for the HW (8916). Prevent
	 * reporting the max number.
	 */
	attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
	attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
			    6 : sb->max_sge;
	attr->max_cq = le32_to_cpu(sb->max_cq);
	attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
	attr->max_cq_sges = attr->max_qp_sges;
	attr->max_mr = le32_to_cpu(sb->max_mr);
	attr->max_mw = le32_to_cpu(sb->max_mw);

	attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
	attr->max_pd = 64 * 1024;
	attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
	attr->max_ah = le32_to_cpu(sb->max_ah);

	attr->max_srq = le16_to_cpu(sb->max_srq);
	attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
	attr->max_srq_sges = sb->max_srq_sge;
	attr->max_pkey = le32_to_cpu(sb->max_pkeys);
	/*
	 * Some versions of FW report more than 0xFFFF.
	 * Restrict it for now to 0xFFFF to avoid
	 * reporting a truncated value.
	 */
	if (attr->max_pkey > 0xFFFF) {
		/* ib_port_attr::pkey_tbl_len is u16 */
		attr->max_pkey = 0xFFFF;
	}

	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
	attr->l2_db_size = (sb->l2_db_space_size + 1) *
			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;

	bnxt_qplib_query_version(rcfw, attr->fw_ver);

	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
		tqm_alloc = (u8 *)&temp;
		attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
		attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
		attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
	}

	attr->is_atomic = false;
bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}
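
/*
 * Illustrative caller sketch (not part of this file; the ib_attr name is
 * hypothetical): a verbs-layer consumer would typically do
 *
 *	struct bnxt_qplib_dev_attr attr = {};
 *
 *	rc = bnxt_qplib_get_dev_attr(rcfw, &attr, false);
 *	if (!rc)
 *		ib_attr->max_qp_wr = attr.max_qp_wqes;
 *
 * i.e. clamp the limits it advertises to the values extracted from the
 * QUERY_FUNC side buffer.
 */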
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx)
{
	struct cmdq_set_func_resources req;
	struct creq_set_func_resources_resp resp;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, SET_FUNC_RESOURCES, cmd_flags);

	req.number_of_qp = cpu_to_le32(ctx->qpc_count);
	req.number_of_mrw = cpu_to_le32(ctx->mrw_count);
	req.number_of_srq = cpu_to_le32(ctx->srqc_count);
	req.number_of_cq = cpu_to_le32(ctx->cq_count);

	req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
	req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
	req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
	req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
	req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp,
					  NULL, 0);
	if (rc)
		dev_err(&res->pdev->dev, "Failed to set function resources\n");

	return rc;
}
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
			struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
			struct bnxt_qplib_gid *gid)
{
	if (index >= sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded SGID table max (%d)\n",
			index, sgid_tbl->max);
		return -EINVAL;
	}
	memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
	return 0;
}
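
/*
 * SGID reads are served entirely from the host-resident shadow table;
 * only the add/delete/modify operations below involve a firmware
 * round trip.
 */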
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int index;

	if (!sgid_tbl) {
		dev_err(&res->pdev->dev, "SGID table not allocated\n");
		return -EINVAL;
	}

	/* Do we need a sgid_lock here? */
	if (!sgid_tbl->active) {
		dev_err(&res->pdev->dev, "SGID table has no active entries\n");
		return -ENOMEM;
	}
	for (index = 0; index < sgid_tbl->max; index++) {
		if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
		    vlan_id == sgid_tbl->tbl[index].vlan_id)
			break;
	}
	if (index == sgid_tbl->max) {
		dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
		return -EINVAL;
	}
	/* Remove GID from the SGID table */
	if (update) {
		struct cmdq_delete_gid req;
		struct creq_delete_gid_resp resp;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
		if (sgid_tbl->hw_id[index] == 0xFFFF) {
			dev_err(&res->pdev->dev,
				"GID entry contains an invalid HW id\n");
			return -EINVAL;
		}
		req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
	}
	memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
	       sizeof(bnxt_qplib_gid_zero));
	sgid_tbl->tbl[index].vlan_id = 0xFFFF;
	sgid_tbl->vlan[index] = 0;
	sgid_tbl->active--;
	dev_dbg(&res->pdev->dev,
		"SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
		index, sgid_tbl->hw_id[index], sgid_tbl->active);
	sgid_tbl->hw_id[index] = (u16)-1;

	/* unlock */
	return 0;
}
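
/*
 * On delete, the shadow entry is zeroed and its vlan_id reset to 0xFFFF
 * so the slot can be reclaimed by bnxt_qplib_add_sgid(); hw_id[] keeps
 * the driver-index-to-firmware-GID-index mapping and is invalidated last.
 */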
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
			bool update, u32 *index)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	int i, free_idx;

	if (!sgid_tbl) {
		dev_err(&res->pdev->dev, "SGID table not allocated\n");
		return -EINVAL;
	}

	/* Do we need a sgid_lock here? */
	if (sgid_tbl->active == sgid_tbl->max) {
		dev_err(&res->pdev->dev, "SGID table is full\n");
		return -ENOMEM;
	}
	free_idx = sgid_tbl->max;
	for (i = 0; i < sgid_tbl->max; i++) {
		if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
		    sgid_tbl->tbl[i].vlan_id == vlan_id) {
			dev_dbg(&res->pdev->dev,
				"SGID entry already exists in entry %d!\n", i);
			*index = i;
			return -EALREADY;
		} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
				   sizeof(bnxt_qplib_gid_zero)) &&
			   free_idx == sgid_tbl->max) {
			free_idx = i;
		}
	}
	if (free_idx == sgid_tbl->max) {
		dev_err(&res->pdev->dev,
			"SGID table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	if (update) {
		struct cmdq_add_gid req;
		struct creq_add_gid_resp resp;
		u16 cmd_flags = 0;
		int rc;

		RCFW_CMD_PREP(req, ADD_GID, cmd_flags);

		req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
		req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
		req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
		req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
		/*
		 * driver should ensure that all RoCE traffic is always VLAN
		 * tagged if RoCE traffic is running on non-zero VLAN ID or
		 * RoCE traffic is running on non-zero Priority.
		 */
		if ((vlan_id != 0xFFFF) || res->prio) {
			if (vlan_id != 0xFFFF)
				req.vlan = cpu_to_le16
				(vlan_id & CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
			req.vlan |= cpu_to_le16
					(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
					 CMDQ_ADD_GID_VLAN_VLAN_EN);
		}

		/* MAC in network format */
		req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

		rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
						  (void *)&resp, NULL, 0);
		if (rc)
			return rc;
		sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
	}
	/* Add GID to the sgid_tbl */
	memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
	sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
	sgid_tbl->active++;
	if (vlan_id != 0xFFFF)
		sgid_tbl->vlan[free_idx] = 1;

	dev_dbg(&res->pdev->dev,
		"SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
		free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);

	*index = free_idx;
	/* unlock */
	return 0;
}
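
/*
 * GID byte order, spelled out: gid->data holds the 16-byte GID in
 * network order, while ADD_GID wants four big-endian words with the
 * word order reversed - hence
 *
 *	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
 *
 * so that a GID of words w0.w1.w2.w3 is carried as w3,w2,w1,w0.
 */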
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
			   struct bnxt_qplib_gid *gid, u16 gid_idx,
			   u8 *smac)
{
	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
						   struct bnxt_qplib_res,
						   sgid_tbl);
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct creq_modify_gid_resp resp;
	struct cmdq_modify_gid req;
	int rc;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MODIFY_GID, cmd_flags);

	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
	if (res->prio) {
		req.vlan |= cpu_to_le16
			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
			 CMDQ_ADD_GID_VLAN_VLAN_EN);
	}

	/* MAC in network format */
	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);

	req.gid_index = cpu_to_le16(gid_idx);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	return rc;
}
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
			u16 *pkey)
{
	if (index == 0xFFFF) {
		*pkey = 0xFFFF;
		return 0;
	}
	if (index >= pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"Index %d exceeded PKEY table max (%d)\n",
			index, pkey_tbl->max);
		return -EINVAL;
	}
	memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
	return 0;
}
int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update)
{
	int i, rc = 0;

	if (!pkey_tbl) {
		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
		return -EINVAL;
	}

	/* Do we need a pkey_lock here? */
	if (!pkey_tbl->active) {
		dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
		return -ENOMEM;
	}
	for (i = 0; i < pkey_tbl->max; i++) {
		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
			break;
	}
	if (i == pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"PKEY 0x%04x not found in the pkey table\n", *pkey);
		return -ENOMEM;
	}
	memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
	pkey_tbl->active--;

	/* unlock */
	return rc;
}
int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
			struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
			bool update)
{
	int i, free_idx, rc = 0;

	if (!pkey_tbl) {
		dev_err(&res->pdev->dev, "PKEY table not allocated\n");
		return -EINVAL;
	}

	/* Do we need a pkey_lock here? */
	if (pkey_tbl->active == pkey_tbl->max) {
		dev_err(&res->pdev->dev, "PKEY table is full\n");
		return -ENOMEM;
	}
	free_idx = pkey_tbl->max;
	for (i = 0; i < pkey_tbl->max; i++) {
		if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
			return -EALREADY;
		else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
			free_idx = i;
	}
	if (free_idx == pkey_tbl->max) {
		dev_err(&res->pdev->dev,
			"PKEY table is FULL but count is not MAX??\n");
		return -ENOMEM;
	}
	/* Add PKEY to the pkey_tbl */
	memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
	pkey_tbl->active++;

	/* unlock */
	return rc;
}
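
/*
 * Unlike the GID paths, the pkey add/del helpers above only maintain the
 * host-side table; no firmware command is issued for pkey changes.
 */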
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_create_ah req;
	struct creq_create_ah_resp resp;
	u16 cmd_flags = 0;
	u32 temp32[4];
	u16 temp16[3];
	int rc;

	RCFW_CMD_PREP(req, CREATE_AH, cmd_flags);

	memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
	req.dgid[0] = cpu_to_le32(temp32[0]);
	req.dgid[1] = cpu_to_le32(temp32[1]);
	req.dgid[2] = cpu_to_le32(temp32[2]);
	req.dgid[3] = cpu_to_le32(temp32[3]);

	req.type = ah->nw_type;
	req.hop_limit = ah->hop_limit;
	req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
	req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
					CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
					CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
	req.pd_id = cpu_to_le32(ah->pd->id);
	req.traffic_class = ah->traffic_class;

	/* MAC in network format */
	memcpy(temp16, ah->dmac, 6);
	req.dest_mac[0] = cpu_to_le16(temp16[0]);
	req.dest_mac[1] = cpu_to_le16(temp16[1]);
	req.dest_mac[2] = cpu_to_le16(temp16[2]);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, block);
	if (rc)
		return rc;

	ah->id = le32_to_cpu(resp.xid);
	return 0;
}
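
/*
 * CREATE_AH may be issued in blocking or non-blocking mode via @block;
 * in both cases the firmware-assigned AH id arrives in resp.xid and is
 * stashed in ah->id for the eventual DESTROY_AH.
 */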
void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
			   bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_destroy_ah req;
	struct creq_destroy_ah_resp resp;
	u16 cmd_flags = 0;

	/* Clean up the AH table in the device */
	RCFW_CMD_PREP(req, DESTROY_AH, cmd_flags);

	req.ah_cid = cpu_to_le32(ah->id);

	bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
				     block);
}
int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deallocate_key req;
	struct creq_deallocate_key_resp resp;
	u16 cmd_flags = 0;
	int rc;

	if (mrw->lkey == 0xFFFFFFFF) {
		dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
		return 0;
	}

	RCFW_CMD_PREP(req, DEALLOCATE_KEY, cmd_flags);

	req.mrw_flags = mrw->type;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		req.key = cpu_to_le32(mrw->rkey);
	else
		req.key = cpu_to_le32(mrw->lkey);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;

	/* Free the qplib's MRW memory */
	if (mrw->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mrw->hwq);

	return 0;
}
int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_allocate_mrw req;
	struct creq_allocate_mrw_resp resp;
	u16 cmd_flags = 0;
	unsigned long tmp;
	int rc;

	RCFW_CMD_PREP(req, ALLOCATE_MRW, cmd_flags);

	req.pd_id = cpu_to_le32(mrw->pd->id);
	req.mrw_flags = mrw->type;
	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
	     mrw->flags & BNXT_QPLIB_FR_PMR) ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
	    mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
		req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
	tmp = (unsigned long)mrw;
	req.mrw_handle = cpu_to_le64(tmp);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, 0);
	if (rc)
		return rc;

	if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1)  ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
	    (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
		mrw->rkey = le32_to_cpu(resp.xid);
	else
		mrw->lkey = le32_to_cpu(resp.xid);
	return 0;
}
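
/*
 * The xid returned by ALLOCATE_MRW is interpreted by type: memory
 * windows (type 1/2A/2B) store it as their rkey, everything else as the
 * lkey. The host mrw pointer itself rides along as the opaque
 * mrw_handle (presumably so completions can be matched back to the
 * host object).
 */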
int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
			 bool block)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_deregister_mr req;
	struct creq_deregister_mr_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DEREGISTER_MR, cmd_flags);

	req.lkey = cpu_to_le32(mrw->lkey);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		return rc;

	/* Free the qplib's MR memory */
	if (mrw->hwq.max_elements) {
		mrw->va = 0;
		mrw->total_size = 0;
		bnxt_qplib_free_hwq(res, &mrw->hwq);
	}

	return 0;
}
int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
		      u64 *pbl_tbl, int num_pbls, bool block, u32 buf_pg_size)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct creq_register_mr_resp resp;
	struct cmdq_register_mr req;
	int pg_ptrs, pages, i, rc;
	u16 cmd_flags = 0, level;
	dma_addr_t **pbl_ptr;
	u32 pg_size;

	if (num_pbls) {
		/* Allocate memory for the non-leaf pages to store buf ptrs.
		 * Non-leaf pages always uses system PAGE_SIZE
		 */
		pg_ptrs = roundup_pow_of_two(num_pbls);
		pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
		if (!pages)
			pages++;

		if (pages > MAX_PBL_LVL_1_PGS) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR: pages requested (0x%x) exceeded max (0x%x)\n",
				pages, MAX_PBL_LVL_1_PGS);
			return -ENOMEM;
		}
		/* Free the hwq if it already exist, must be a rereg */
		if (mr->hwq.max_elements)
			bnxt_qplib_free_hwq(res, &mr->hwq);
		/* Use system PAGE_SIZE */
		hwq_attr.res = res;
		hwq_attr.depth = pages;
		hwq_attr.stride = PAGE_SIZE;
		hwq_attr.type = HWQ_TYPE_MR;
		hwq_attr.sginfo = &sginfo;
		hwq_attr.sginfo->npages = pages;
		hwq_attr.sginfo->pgsize = PAGE_SIZE;
		hwq_attr.sginfo->pgshft = PAGE_SHIFT;
		rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
		if (rc) {
			dev_err(&res->pdev->dev,
				"SP: Reg MR memory allocation failed\n");
			return -ENOMEM;
		}
		/* Write to the hwq */
		pbl_ptr = (dma_addr_t **)mr->hwq.pbl_ptr;
		for (i = 0; i < num_pbls; i++)
			pbl_ptr[PTR_PG(i)][PTR_IDX(i)] =
				(pbl_tbl[i] & PAGE_MASK) | PTU_PTE_VALID;
	}

	RCFW_CMD_PREP(req, REGISTER_MR, cmd_flags);

	/* Configure the request */
	if (mr->hwq.level == PBL_LVL_MAX) {
		/* No PBL provided, just use system PAGE_SIZE */
		level = 0;
		req.pbl = 0;
		pg_size = PAGE_SIZE;
	} else {
		level = mr->hwq.level + 1;
		req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
	}
	pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
	req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
			       ((ilog2(pg_size) <<
				 CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
	req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
				 CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
				CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
	req.access = (mr->flags & 0xFFFF);
	req.va = cpu_to_le64(mr->va);
	req.key = cpu_to_le32(mr->lkey);
	req.mr_size = cpu_to_le64(mr->total_size);

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
					  (void *)&resp, NULL, block);
	if (rc)
		goto fail;

	return 0;

fail:
	if (mr->hwq.max_elements)
		bnxt_qplib_free_hwq(res, &mr->hwq);
	return rc;
}
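
/*
 * PBL sizing above, worked through under the assumption of 4K pages and
 * 8-byte pointers (MAX_PBL_LVL_1_PGS_SHIFT == 9 on such a setup):
 * num_pbls leaf pointers are rounded up to a power of two and converted
 * to the number of PAGE_SIZE pages needed to hold them, e.g.
 *
 *	1024 pointers -> 1024 >> 9 = 2 level-1 pages
 *
 * A single level-0 page can then address every level-1 page, which is
 * why req.pbl points at pbl[PBL_LVL_0] whenever a PBL exists.
 */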
int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
					struct bnxt_qplib_frpl *frpl,
					int max_pg_ptrs)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int pg_ptrs, pages, rc;

	/* Re-calculate the max to fit the HWQ allocation model */
	pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
	pages = pg_ptrs >> MAX_PBL_LVL_1_PGS_SHIFT;
	if (!pages)
		pages++;

	if (pages > MAX_PBL_LVL_1_PGS)
		return -ENOMEM;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.nopte = true;

	hwq_attr.res = res;
	hwq_attr.depth = pg_ptrs;
	hwq_attr.stride = PAGE_SIZE;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
	if (!rc)
		frpl->max_pg_ptrs = pg_ptrs;

	return rc;
}
int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
				       struct bnxt_qplib_frpl *frpl)
{
	bnxt_qplib_free_hwq(res, &frpl->hwq);
	return 0;
}
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
{
	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
	struct cmdq_map_tc_to_cos req;
	struct creq_map_tc_to_cos_resp resp;
	u16 cmd_flags = 0;

	RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
	req.cos0 = cpu_to_le16(cids[0]);
	req.cos1 = cpu_to_le16(cids[1]);

	return bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					    NULL, 0);
}
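
/*
 * MAP_TC_TO_COS programs the CoS queue ids used for RoCE traffic
 * classes; only cids[0] and cids[1] are consumed here.
 */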
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
			      struct bnxt_qplib_roce_stats *stats)
{
	struct cmdq_query_roce_stats req;
	struct creq_query_roce_stats_resp resp;
	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_roce_stats_resp_sb *sb;
	u16 cmd_flags = 0;
	int rc = 0;

	RCFW_CMD_PREP(req, QUERY_ROCE_STATS, cmd_flags);

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf) {
		dev_err(&rcfw->pdev->dev,
			"SP: QUERY_ROCE_STATS alloc side buffer failed\n");
		return -ENOMEM;
	}

	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (rc)
		goto bail;
	/* Extract the context from the side buffer */
	stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
	stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
	stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
	stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
	stats->missing_resp = le64_to_cpu(sb->missing_resp);
	stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
	stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
	stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
	stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
	stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
	stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
	stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
	stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
	stats->dup_req = le64_to_cpu(sb->dup_req);
	stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
	stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
	stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
	stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
	stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
	stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
	stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
	stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
	stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
	stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
	stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
	stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
	stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
	stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
	stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
	stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
	stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
	stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
	stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
	stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
	stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
	stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
	stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
	stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
	stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
	if (!rcfw->init_oos_stats) {
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
		rcfw->init_oos_stats = 1;
	} else {
		stats->res_oos_drop_count +=
				(le64_to_cpu(sb->res_oos_drop_count) -
				 rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
		rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
	}

bail:
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;
}
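
/*
 * res_oos_drop_count is accumulated as a masked delta against oos_prev
 * (BNXT_QPLIB_OOS_COUNT_MASK presumably guards against the firmware
 * counter wrapping); the first query only seeds oos_prev so that no
 * stale delta is reported.
 */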