/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
/* Free the pages of a page buffer list. DMA-coherent pages are released
 * only when they were allocated by the kernel (i.e. not user umem).
 */
static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "QPLIB: PBL free pg_arr[%d] empty?!",
					 i);
			pbl->pg_arr[i] = NULL;
		}
	}
	kfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	kfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
	pbl->pg_count = 0;
	pbl->pg_size = 0;
}
/* Allocate the page and DMA-address arrays backing a page buffer list.
 * Pages either come from a caller-supplied scatterlist (user memory) or
 * are DMA-allocated here.
 */
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
		       struct scatterlist *sghead, u32 pages, u32 pg_size)
{
	struct scatterlist *sg;
	bool is_umem = false;
	int i;

	/* page ptr arrays */
	pbl->pg_arr = kcalloc(pages, sizeof(void *), GFP_KERNEL);
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = kcalloc(pages, sizeof(dma_addr_t), GFP_KERNEL);
	if (!pbl->pg_map_arr) {
		kfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = pg_size;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
							     pbl->pg_size,
							     &pbl->pg_map_arr[i],
							     GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		i = 0;
		is_umem = true;
		for_each_sg(sghead, sg, pages, i) {
			pbl->pg_map_arr[i] = sg_dma_address(sg);
			pbl->pg_arr[i] = sg_virt(sg);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	}
	return 0;

fail:
	__free_pbl(pdev, pbl, is_umem);
	return -ENOMEM;
}
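/*
 * Illustrative sketch (not driver code): after __alloc_pbl(), pg_arr[] and
 * pg_map_arr[] are parallel arrays - pg_arr[i] is the CPU virtual address
 * of page i and pg_map_arr[i] its DMA bus address, e.g.:
 *
 *	struct bnxt_qplib_pbl pbl = {};
 *
 *	if (!__alloc_pbl(pdev, &pbl, NULL, 4, PAGE_SIZE)) {
 *		memset(pbl.pg_arr[0], 0, pbl.pg_size);   // CPU writes here
 *		program_hw_ring_base(pbl.pg_map_arr[0]); // device uses this
 *		__free_pbl(pdev, &pbl, false);
 *	}
 *
 * program_hw_ring_base() is a hypothetical helper, shown only to mark where
 * the DMA address would be consumed.
 */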
void bnxt_qplib_free_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(pdev, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(pdev, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
}
/* All HWQs are power of 2 in size */
int bnxt_qplib_alloc_init_hwq(struct pci_dev *pdev, struct bnxt_qplib_hwq *hwq,
			      struct scatterlist *sghead, int nmap,
			      u32 *elements, u32 element_size, u32 aux,
			      u32 pg_size, enum bnxt_qplib_hwq_type hwq_type)
{
	u32 pages, slots, size, aux_pages = 0, aux_size = 0;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	int i, rc;

	hwq->level = PBL_LVL_MAX;

	slots = roundup_pow_of_two(*elements);
	if (aux) {
		aux_size = roundup_pow_of_two(aux);
		aux_pages = (slots * aux_size) / pg_size;
		if ((slots * aux_size) % pg_size)
			aux_pages++;
	}
	size = roundup_pow_of_two(element_size);

	if (!sghead) {
		hwq->is_user = false;
		pages = (slots * size) / pg_size + aux_pages;
		if ((slots * size) % pg_size)
			pages++;
		if (!pages)
			return -EINVAL;
	} else {
		hwq->is_user = true;
		pages = nmap;
	}

	/* Alloc the 1st memory block; can be a PDL/PTL/PBL */
	if (sghead && (pages == MAX_PBL_LVL_0_PGS))
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], sghead,
				 pages, pg_size);
	else
		rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_0], NULL, 1, pg_size);
	if (rc)
		goto fail;

	hwq->level = PBL_LVL_0;

	if (pages > MAX_PBL_LVL_0_PGS) {
		if (pages > MAX_PBL_LVL_1_PGS) {
			/* 2 levels of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], NULL,
					 MAX_PBL_LVL_1_PGS_FOR_LVL_2, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PDE_VALID;
			hwq->level = PBL_LVL_1;

			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_2], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;

			/* Fill in lvl1 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_2;
		} else {
			u32 flag = hwq_type == HWQ_TYPE_L2_CMPL ? 0 :
						PTU_PTE_VALID;

			/* 1 level of indirection */
			rc = __alloc_pbl(pdev, &hwq->pbl[PBL_LVL_1], sghead,
					 pages, pg_size);
			if (rc)
				goto fail;
			/* Fill in lvl0 PBL */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			}
			if (hwq_type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
								  PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						    PTU_PTE_NEXT_TO_LAST;
			}
			hwq->level = PBL_LVL_1;
		}
	}
	spin_lock_init(&hwq->lock);
	hwq->prod = 0;
	hwq->cons = 0;
	*elements = hwq->max_elements = slots;
	hwq->element_size = size;

	/* For direct access to the elements */
	hwq->pbl_ptr = hwq->pbl[hwq->level].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[hwq->level].pg_map_arr;

	return 0;

fail:
	bnxt_qplib_free_hwq(pdev, hwq);
	return -ENOMEM;
}
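/*
 * Worked example (illustrative only; assumes 4K pages and 512 64-bit
 * page-table entries per page, i.e. MAX_PBL_LVL_0_PGS == 1 and
 * MAX_PBL_LVL_1_PGS == 512):
 *
 *	 256 slots x  16B elements =   4KB =    1 page  -> PBL_LVL_0
 *	 16K slots x  16B elements = 256KB =   64 pages -> PBL_LVL_1
 *	                                (one PBL page points at all 64)
 *	 64K slots x 256B elements =  16MB = 4096 pages -> PBL_LVL_2
 *	                                (PDL -> PBL pages -> data pages)
 */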
void bnxt_qplib_free_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(pdev, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(pdev, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(pdev, &ctx->tqm_tbl[i]);
	bnxt_qplib_free_hwq(pdev, &ctx->tqm_pde);
	bnxt_qplib_free_stats_ctx(pdev, &ctx->stats);
}
/*
 * Routine: bnxt_qplib_alloc_ctx
 * Description:
 *     Context tables are memories which are used by the chip fw.
 *     The 6 tables defined are:
 *             QPC ctx - holds QP states
 *             MRW ctx - holds memory region and window
 *             SRQ ctx - holds shared RQ states
 *             CQ ctx - holds completion queue states
 *             TQM ctx - holds Tx Queue Manager context
 *             TIM ctx - holds timer context
 *     Depending on the size of the tbl requested, either a 1 Page Buffer List
 *     or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *     instead.
 *     Table might be employed as follows:
 *             For 0      < ctx size <= 1 PAGE, 0 level of ind is used
 *             For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
 *             For 512    < ctx size <= MAX, 2 levels of ind is used
 * Returns:
 *     0 if success, else -ERRORS
 */
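/*
 * Usage sketch (illustrative counts only, not driver defaults):
 *
 *	struct bnxt_qplib_ctx ctx = {
 *		.qpc_count  = 64 * 1024,   // QP contexts
 *		.mrw_count  = 256 * 1024,  // MR/MW contexts
 *		.srqc_count = 16 * 1024,
 *		.cq_count   = 128 * 1024,
 *	};
 *
 *	if (bnxt_qplib_alloc_ctx(pdev, &ctx, ...))
 *		goto err;  // partially built tables are rolled back by
 *	                   // bnxt_qplib_free_ctx() on the fail path
 */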
int bnxt_qplib_alloc_ctx(struct pci_dev *pdev,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn)
{
	int i, j, k, rc = 0;
	int fnz_idx = -1;
	__le64 **pbl_ptr;

	if (virt_fn)
		goto stats_alloc;

	/* QPC Tables */
	ctx->qpc_tbl.max_elements = ctx->qpc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->qpc_tbl, NULL, 0,
				       &ctx->qpc_tbl.max_elements,
				       BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* MRW Tables */
	ctx->mrw_tbl.max_elements = ctx->mrw_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->mrw_tbl, NULL, 0,
				       &ctx->mrw_tbl.max_elements,
				       BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* SRQ Tables */
	ctx->srqc_tbl.max_elements = ctx->srqc_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->srqc_tbl, NULL, 0,
				       &ctx->srqc_tbl.max_elements,
				       BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* CQ Tables */
	ctx->cq_tbl.max_elements = ctx->cq_count;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->cq_tbl, NULL, 0,
				       &ctx->cq_tbl.max_elements,
				       BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE, 0,
				       PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	/* TQM Buffer */
	ctx->tqm_pde.max_elements = 512;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_pde, NULL, 0,
				       &ctx->tqm_pde.max_elements, sizeof(u64),
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!ctx->tqm_count[i])
			continue;
		ctx->tqm_tbl[i].max_elements = ctx->qpc_count *
					       ctx->tqm_count[i];
		rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tqm_tbl[i], NULL, 0,
					       &ctx->tqm_tbl[i].max_elements, 1,
					       0, PAGE_SIZE, HWQ_TYPE_CTX);
		if (rc)
			goto fail;
	}

	pbl_ptr = (__le64 **)ctx->tqm_pde.pbl_ptr;
	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		if (!ctx->tqm_tbl[i].max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i;	/* first non-zero index */
		switch (ctx->tqm_tbl[i].level) {
		case PBL_LVL_2:
			for (k = 0; k < ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_count;
			     k++)
				pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)] =
				  cpu_to_le64(
				    ctx->tqm_tbl[i].pbl[PBL_LVL_1].pg_map_arr[k]
				    | PTU_PTE_VALID);
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			pbl_ptr[PTR_PG(j)][PTR_IDX(j)] = cpu_to_le64(
				ctx->tqm_tbl[i].pbl[PBL_LVL_0].pg_map_arr[0] |
				PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	ctx->tqm_pde_level = ctx->tqm_tbl[fnz_idx].level == PBL_LVL_2 ?
			     PBL_LVL_2 : ctx->tqm_tbl[fnz_idx].level + 1;

	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	rc = bnxt_qplib_alloc_init_hwq(pdev, &ctx->tim_tbl, NULL, 0,
				       &ctx->tim_tbl.max_elements, 1,
				       0, PAGE_SIZE, HWQ_TYPE_CTX);
	if (rc)
		goto fail;

stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(pdev, ctx);
	return rc;
}
/* GUID routines */
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;	/* invert the universal/local bit */
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}
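/*
 * Example of the standard EUI-64 expansion (for illustration): the MAC
 * 00:10:18:01:02:03 becomes the GUID 02:10:18:ff:fe:01:02:03 - the
 * ff:fe marker is inserted between the OUI and device halves, and the
 * universal/local bit of the first octet is inverted.
 */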
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}
static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}
/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}
static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;	/* one bit per PD */
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	/* all bits set: every PD starts out free */
	memset((u8 *)pdt->tbl, 0xFF, bytes);
	return 0;
}
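/*
 * Note on the bitmap convention (illustration only): a set bit means
 * "free", so allocation is find_first_bit() + clear_bit() and freeing is
 * test_and_set_bit(), e.g.:
 *
 *	struct bnxt_qplib_pd pd;
 *
 *	if (!bnxt_qplib_alloc_pd(&res->pd_tbl, &pd))
 *		;	// pd.id is now reserved in the table
 *	bnxt_qplib_dealloc_pd(res, &res->pd_tbl, &pd);
 */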
/* DPIs */
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}
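/*
 * Illustrative layout (not normative): each DPI owns one PAGE_SIZE slice
 * of the doorbell BAR, so with 4K pages DPI n maps to
 *
 *	kernel va: dbr_bar_reg_iomem + n * 4096
 *	bus addr:  unmapped_dbr     + n * 4096  (handed to user-space mmap)
 */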
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
			 dpi->dpi);
		return -EINVAL;
	}
	dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}
static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev,
			"QPLIB: BAR region %d resc start failed", dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
			dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap_nocache(bar_reg_base + dbr_offset,
						  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"QPLIB: FP: DBR BAR region %d mapping failed",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;	/* one bit per DPI */
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	/* all bits set: every DPI starts out free */
	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	return -ENOMEM;
}
/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}
static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma)
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	stats->size = sizeof(struct ctx_hw_stats);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
		return -ENOMEM;
	}
	return 0;
}
/* Resource */
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}