/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: QPLib resource manager
 */
#define dev_fmt(fmt) "QPLIB: " fmt
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/inetdevice.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include "roce_hsi.h"
#include "qplib_res.h"
#include "qplib_sp.h"
#include "qplib_rcfw.h"
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats);
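
/* Release the pages behind a page buffer list (PBL). DMA-coherent pages
 * are freed only when the PBL was not built from user memory; the
 * bookkeeping arrays themselves are always vfreed.
 */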
static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
		       bool is_umem)
{
	struct pci_dev *pdev = res->pdev;
	int i;

	if (!is_umem) {
		for (i = 0; i < pbl->pg_count; i++) {
			if (pbl->pg_arr[i])
				dma_free_coherent(&pdev->dev, pbl->pg_size,
						  (void *)((unsigned long)
							   pbl->pg_arr[i] &
							   PAGE_MASK),
						  pbl->pg_map_arr[i]);
			else
				dev_warn(&pdev->dev,
					 "PBL free pg_arr[%d] empty?!\n", i);
			pbl->pg_arr[i] = NULL;
		}
	}
	vfree(pbl->pg_arr);
	pbl->pg_arr = NULL;
	vfree(pbl->pg_map_arr);
	pbl->pg_map_arr = NULL;
}
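
/* Populate the PBL's DMA address array from a user-memory scatterlist;
 * no kernel virtual addresses are kept for user-backed pages.
 */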
static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
					   struct bnxt_qplib_sg_info *sginfo)
{
	struct scatterlist *sghead = sginfo->sghead;
	struct sg_dma_page_iter sg_iter;
	int i = 0;

	for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
		pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
		pbl->pg_arr[i] = NULL;
		i++;
	}
}
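
/* Allocate one level of a PBL: DMA-coherent pages when no scatterlist is
 * supplied, otherwise the caller's user pages are referenced in place.
 */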
static int __alloc_pbl(struct bnxt_qplib_res *res,
		       struct bnxt_qplib_pbl *pbl,
		       struct bnxt_qplib_sg_info *sginfo)
{
	struct pci_dev *pdev = res->pdev;
	struct scatterlist *sghead;
	bool is_umem = false;
	u32 pages;
	int i;

	if (sginfo->nopte)
		return 0;
	pages = sginfo->npages;
	sghead = sginfo->sghead;
	/* page ptr arrays */
	pbl->pg_arr = vmalloc(pages * sizeof(void *));
	if (!pbl->pg_arr)
		return -ENOMEM;

	pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
	if (!pbl->pg_map_arr) {
		vfree(pbl->pg_arr);
		pbl->pg_arr = NULL;
		return -ENOMEM;
	}
	pbl->pg_count = 0;
	pbl->pg_size = sginfo->pgsize;

	if (!sghead) {
		for (i = 0; i < pages; i++) {
			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
							    pbl->pg_size,
							    &pbl->pg_map_arr[i],
							    GFP_KERNEL);
			if (!pbl->pg_arr[i])
				goto fail;
			pbl->pg_count++;
		}
	} else {
		is_umem = true;
		bnxt_qplib_fill_user_dma_pages(pbl, sginfo);
	}

	return 0;
fail:
	__free_pbl(res, pbl, is_umem);
	return -ENOMEM;
}
void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_hwq *hwq)
{
	int i;

	if (!hwq->max_elements)
		return;
	if (hwq->level >= PBL_LVL_MAX)
		return;

	for (i = 0; i < hwq->level + 1; i++) {
		if (i == hwq->level)
			__free_pbl(res, &hwq->pbl[i], hwq->is_user);
		else
			__free_pbl(res, &hwq->pbl[i], false);
	}

	hwq->level = PBL_LVL_MAX;
	hwq->max_elements = 0;
	hwq->element_size = 0;
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->cp_bit = 0;
}
/* All HWQs are power of 2 in size */
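
/* A hardware queue is described to the chip through up to three PBL
 * levels: PBL_LVL_0 alone when the queue fits in a single page, one level
 * of indirection while the PTEs fit in a single PBL page, and a page
 * directory plus PBLs (PBL_LVL_2) beyond that.
 */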
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
			      struct bnxt_qplib_hwq_attr *hwq_attr)
{
	u32 npages, aux_slots, pg_size, aux_pages = 0, aux_size = 0;
	struct bnxt_qplib_sg_info sginfo = {};
	u32 depth, stride, npbl, npde;
	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
	struct scatterlist *sghead = NULL;
	struct bnxt_qplib_res *res;
	struct pci_dev *pdev;
	int i, rc, lvl;

	res = hwq_attr->res;
	pdev = res->pdev;
	sghead = hwq_attr->sginfo->sghead;
	pg_size = hwq_attr->sginfo->pgsize;
	hwq->level = PBL_LVL_MAX;

	depth = roundup_pow_of_two(hwq_attr->depth);
	stride = roundup_pow_of_two(hwq_attr->stride);
	if (hwq_attr->aux_depth) {
		aux_slots = hwq_attr->aux_depth;
		aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
		aux_pages = (aux_slots * aux_size) / pg_size;
		if ((aux_slots * aux_size) % pg_size)
			aux_pages++;
	}

	if (!sghead) {
		hwq->is_user = false;
		npages = (depth * stride) / pg_size + aux_pages;
		if ((depth * stride) % pg_size)
			npages++;
		if (!npages)
			return -EINVAL;
		hwq_attr->sginfo->npages = npages;
	} else {
		hwq->is_user = true;
		npages = hwq_attr->sginfo->npages;
		npages = (npages * PAGE_SIZE) /
			  BIT_ULL(hwq_attr->sginfo->pgshft);
		if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
		     BIT_ULL(hwq_attr->sginfo->pgshft))
			npages++;
	}

	if (npages == MAX_PBL_LVL_0_PGS) {
		/* This request is Level 0, map PTE */
		rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
		if (rc)
			goto fail;
		hwq->level = PBL_LVL_0;
	}

	if (npages > MAX_PBL_LVL_0_PGS) {
		if (npages > MAX_PBL_LVL_1_PGS) {
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;
			/* 2 levels of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			npde = npbl >> MAX_PDL_LVL_SHIFT;
			if (npbl % BIT(MAX_PDL_LVL_SHIFT))
				npde++;
			/* Alloc PDE pages */
			sginfo.pgsize = npde * pg_size;
			sginfo.npages = 1;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);

			/* Alloc PBL pages */
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
			if (rc)
				goto fail;
			/* Fill PDL with PBL page pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			if (hwq_attr->type == HWQ_TYPE_MR) {
			/* For MR it is expected that we supply only 1 contiguous
			 * page i.e only 1 entry in the PDL that will contain
			 * all the PBLs for the user supplied memory region
			 */
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[0][i] = src_phys_ptr[i] |
						flag;
			} else {
				for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
				     i++)
					dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
						src_phys_ptr[i] |
						PTU_PDE_VALID;
			}
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_2;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBLs with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | PTU_PTE_VALID;
			}
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_2].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		} else { /* pages < 512 npbl = 1, npde = 0 */
			u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
				    0 : PTU_PTE_VALID;

			/* 1 level of indirection */
			npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
			if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
				npbl++;
			sginfo.npages = npbl;
			sginfo.pgsize = PAGE_SIZE;
			/* Alloc PBL page */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
			if (rc)
				goto fail;
			/* Alloc or init PTEs */
			rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
					 hwq_attr->sginfo);
			if (rc)
				goto fail;
			hwq->level = PBL_LVL_1;
			if (hwq_attr->sginfo->nopte)
				goto done;
			/* Fill PBL with PTE pointers */
			dst_virt_ptr =
				(dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
			src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
			for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
				dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
					src_phys_ptr[i] | flag;
			if (hwq_attr->type == HWQ_TYPE_QUEUE) {
				/* Find the last pg of the size */
				i = hwq->pbl[PBL_LVL_1].pg_count;
				dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
					PTU_PTE_LAST;
				if (i > 1)
					dst_virt_ptr[PTR_PG(i - 2)]
						    [PTR_IDX(i - 2)] |=
						PTU_PTE_NEXT_TO_LAST;
			}
		}
	}
done:
	hwq->prod = 0;
	hwq->cons = 0;
	hwq->pdev = pdev;
	hwq->depth = hwq_attr->depth;
	hwq->max_elements = depth;
	hwq->element_size = stride;
	/* For direct access to the elements */
	lvl = hwq->level;
	if (hwq_attr->sginfo->nopte && hwq->level)
		lvl = hwq->level - 1;
	hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
	hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
	spin_lock_init(&hwq->lock);

	return 0;
fail:
	bnxt_qplib_free_hwq(res, hwq);
	return -ENOMEM;
}
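
/* Free all firmware context tables set up by bnxt_qplib_alloc_ctx(). The
 * TQM queues are freed before their PDE, whose original level is restored
 * first.
 */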
void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx)
{
	int i;

	bnxt_qplib_free_hwq(res, &ctx->qpc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->mrw_tbl);
	bnxt_qplib_free_hwq(res, &ctx->srqc_tbl);
	bnxt_qplib_free_hwq(res, &ctx->cq_tbl);
	bnxt_qplib_free_hwq(res, &ctx->tim_tbl);
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
		bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.qtbl[i]);
	/* restore original pde level before destroy */
	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
}
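
/* Allocate the TQM page directory and one backing ring per non-zero
 * qcount entry, sized by the number of QP contexts.
 */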
static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	struct bnxt_qplib_tqm_ctx *tqmctx;
	int rc = 0;
	int i;

	tqmctx = &ctx->tqm_ctx;

	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;
	hwq_attr.res = res;
	hwq_attr.type = HWQ_TYPE_CTX;
	hwq_attr.depth = 512;
	hwq_attr.stride = sizeof(u64);
	/* Alloc pdl buffer */
	rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
	if (rc)
		goto fail;
	/* Save original pdl level */
	tqmctx->pde_level = tqmctx->pde.level;

	hwq_attr.stride = 1;
	for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
		if (!tqmctx->qcount[i])
			continue;
		hwq_attr.depth = ctx->qpc_count * tqmctx->qcount[i];
		rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
		if (rc)
			goto fail;
	}
fail:
	return rc;
}
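
/* Point each slot of the TQM PDE at the page tables of the corresponding
 * TQM ring, then derive the PDE level from the first populated ring.
 */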
static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
{
	struct bnxt_qplib_hwq *tbl;
	dma_addr_t *dma_ptr;
	__le64 **pbl_ptr, *ptr;
	int i, j, k;
	int fnz_idx = -1;
	int pg_count;

	pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;

	for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
	     i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
		tbl = &ctx->qtbl[i];
		if (!tbl->max_elements)
			continue;
		if (fnz_idx == -1)
			fnz_idx = i; /* first non-zero index */
		switch (tbl->level) {
		case PBL_LVL_2:
			pg_count = tbl->pbl[PBL_LVL_1].pg_count;
			for (k = 0; k < pg_count; k++) {
				ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
				dma_ptr = &tbl->pbl[PBL_LVL_1].pg_map_arr[k];
				*ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
			}
			break;
		case PBL_LVL_1:
		case PBL_LVL_0:
		default:
			ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
			*ptr = cpu_to_le64(tbl->pbl[PBL_LVL_0].pg_map_arr[0] |
					   PTU_PTE_VALID);
			break;
		}
	}
	if (fnz_idx == -1)
		fnz_idx = 0;
	/* update pde level as per page table programming */
	ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
			  ctx->qtbl[fnz_idx].level + 1;
}
static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
				      struct bnxt_qplib_ctx *ctx)
{
	int rc = 0;

	rc = bnxt_qplib_alloc_tqm_rings(res, ctx);
	if (rc)
		goto fail;

	bnxt_qplib_map_tqm_pgtbl(&ctx->tqm_ctx);
fail:
	return rc;
}
/*
 * Routine: bnxt_qplib_alloc_ctx
 *
 * Context tables are memories which are used by the chip fw.
 * The 6 tables defined are:
 * QPC ctx - holds QP states
 * MRW ctx - holds memory region and window
 * SRQ ctx - holds shared RQ states
 * CQ ctx - holds completion queue states
 * TQM ctx - holds Tx Queue Manager context
 * TIM ctx - holds timer context
 * Depending on the size of the tbl requested, either a 1 Page Buffer List
 * or a 1-to-2-stage indirection Page Directory List + 1 PBL is used
 *
 * Table might be employed as follows:
 * For 0 < ctx size <= 1 PAGE, 0 level of ind is used
 * For 1 PAGE < ctx size <= 512 entries size, 1 level of ind is used
 * For 512 < ctx size <= MAX, 2 levels of ind is used
 * Returns:
 * 0 if success, else -ERRORS
 */
int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
			 struct bnxt_qplib_ctx *ctx,
			 bool virt_fn, bool is_p5)
{
	struct bnxt_qplib_hwq_attr hwq_attr = {};
	struct bnxt_qplib_sg_info sginfo = {};
	int rc = 0;

	if (virt_fn || is_p5)
		goto stats_alloc;

	/* QPC Tables */
	sginfo.pgsize = PAGE_SIZE;
	sginfo.pgshft = PAGE_SHIFT;
	hwq_attr.sginfo = &sginfo;

	hwq_attr.res = res;
	hwq_attr.depth = ctx->qpc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
	hwq_attr.type = HWQ_TYPE_CTX;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->qpc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* MRW Tables */
	hwq_attr.depth = ctx->mrw_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->mrw_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* SRQ Tables */
	hwq_attr.depth = ctx->srqc_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->srqc_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* CQ Tables */
	hwq_attr.depth = ctx->cq_count;
	hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->cq_tbl, &hwq_attr);
	if (rc)
		goto fail;

	/* TQM Buffer */
	rc = bnxt_qplib_setup_tqm_rings(res, ctx);
	if (rc)
		goto fail;
	/* TIM Buffer */
	ctx->tim_tbl.max_elements = ctx->qpc_count * 16;
	hwq_attr.depth = ctx->qpc_count * 16;
	hwq_attr.stride = 1;
	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
	if (rc)
		goto fail;
stats_alloc:
	/* Stats */
	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, &ctx->stats);
	if (rc)
		goto fail;

	return 0;

fail:
	bnxt_qplib_free_ctx(res, ctx);
	return rc;
}
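
/* Derive the 64-bit node GUID from the netdev MAC address using the
 * standard MAC-48 to EUI-64 expansion.
 */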
void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
{
	u8 mac[ETH_ALEN];

	/* MAC-48 to EUI-64 mapping */
	memcpy(mac, dev_addr, ETH_ALEN);
	guid[0] = mac[0] ^ 2;
	guid[1] = mac[1];
	guid[2] = mac[2];
	guid[3] = 0xff;
	guid[4] = 0xfe;
	guid[5] = mac[3];
	guid[6] = mac[4];
	guid[7] = mac[5];
}
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	kfree(sgid_tbl->tbl);
	kfree(sgid_tbl->hw_id);
	kfree(sgid_tbl->ctx);
	kfree(sgid_tbl->vlan);
	sgid_tbl->tbl = NULL;
	sgid_tbl->hw_id = NULL;
	sgid_tbl->ctx = NULL;
	sgid_tbl->vlan = NULL;
	sgid_tbl->max = 0;
	sgid_tbl->active = 0;
}
static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     u16 max)
{
	sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
	if (!sgid_tbl->tbl)
		return -ENOMEM;

	sgid_tbl->hw_id = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!sgid_tbl->hw_id)
		goto out_free1;

	sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
	if (!sgid_tbl->ctx)
		goto out_free2;

	sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
	if (!sgid_tbl->vlan)
		goto out_free3;

	sgid_tbl->max = max;
	return 0;
out_free3:
	kfree(sgid_tbl->ctx);
	sgid_tbl->ctx = NULL;
out_free2:
	kfree(sgid_tbl->hw_id);
	sgid_tbl->hw_id = NULL;
out_free1:
	kfree(sgid_tbl->tbl);
	sgid_tbl->tbl = NULL;
	return -ENOMEM;
}
static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
					struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
	int i;

	for (i = 0; i < sgid_tbl->max; i++) {
		if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
			   sizeof(bnxt_qplib_gid_zero)))
			bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
					    sgid_tbl->tbl[i].vlan_id, true);
	}
	memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
	memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
	sgid_tbl->active = 0;
}
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
				     struct net_device *netdev)
{
	u32 i;

	for (i = 0; i < sgid_tbl->max; i++)
		sgid_tbl->tbl[i].vlan_id = 0xffff;

	memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	if (!pkey_tbl->tbl)
		dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
	else
		kfree(pkey_tbl->tbl);

	pkey_tbl->tbl = NULL;
	pkey_tbl->max = 0;
	pkey_tbl->active = 0;
}
static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl,
				     u16 max)
{
	pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
	if (!pkey_tbl->tbl)
		return -ENOMEM;

	pkey_tbl->max = max;
	return 0;
}
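
/* PDs */
/* PD ids are handed out from a bitmap in which a set bit means "free":
 * find_first_bit() picks the lowest available id and clear_bit() marks it
 * as in use.
 */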
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
	u32 bit_num;

	bit_num = find_first_bit(pdt->tbl, pdt->max);
	if (bit_num == pdt->max)
		return -ENOMEM;

	/* Found unused PD */
	clear_bit(bit_num, pdt->tbl);
	pd->id = bit_num;
	return 0;
}
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
			  struct bnxt_qplib_pd_tbl *pdt,
			  struct bnxt_qplib_pd *pd)
{
	if (test_and_set_bit(pd->id, pdt->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
			 pd->id);
		return -EINVAL;
	}
	pd->id = 0;
	return 0;
}
static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
{
	kfree(pdt->tbl);
	pdt->tbl = NULL;
	pdt->max = 0;
}
static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res,
				   struct bnxt_qplib_pd_tbl *pdt,
				   u32 max)
{
	u32 bytes;

	bytes = max >> 3;
	if (!bytes)
		bytes = 1;
	pdt->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!pdt->tbl)
		return -ENOMEM;

	pdt->max = max;
	memset((u8 *)pdt->tbl, 0xFF, bytes);

	return 0;
}
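
/* DPIs */
/* Each DPI owns one page of the doorbell BAR: dbr is the ioremapped
 * kernel address of that page and umdbr the unmapped (bus) address given
 * to user-space consumers.
 */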
int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
			 struct bnxt_qplib_dpi *dpi,
			 void *app)
{
	u32 bit_num;

	bit_num = find_first_bit(dpit->tbl, dpit->max);
	if (bit_num == dpit->max)
		return -ENOMEM;

	/* Found unused DPI */
	clear_bit(bit_num, dpit->tbl);
	dpit->app_tbl[bit_num] = app;

	dpi->dpi = bit_num;
	dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
	dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);

	return 0;
}
int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
			   struct bnxt_qplib_dpi_tbl *dpit,
			   struct bnxt_qplib_dpi *dpi)
{
	if (dpi->dpi >= dpit->max) {
		dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
		return -EINVAL;
	}
	if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
		dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
			 dpi->dpi);
		return -EINVAL;
	}

	dpit->app_tbl[dpi->dpi] = NULL;
	memset(dpi, 0, sizeof(*dpi));

	return 0;
}
static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit)
{
	kfree(dpit->tbl);
	kfree(dpit->app_tbl);
	if (dpit->dbr_bar_reg_iomem)
		pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	memset(dpit, 0, sizeof(*dpit));
}
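
/* Map the doorbell BAR beyond the offset owned by the L2 driver and size
 * the DPI table as one entry per remaining page.
 */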
static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
				    struct bnxt_qplib_dpi_tbl *dpit,
				    u32 dbr_offset)
{
	u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
	resource_size_t bar_reg_base;
	u32 dbr_len, bytes;

	if (dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
			dbr_bar_reg);
		return -EALREADY;
	}

	bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
	if (!bar_reg_base) {
		dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
			dbr_bar_reg);
		return -ENOMEM;
	}

	dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
	if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
		dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
		return -ENOMEM;
	}

	dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
					  dbr_len);
	if (!dpit->dbr_bar_reg_iomem) {
		dev_err(&res->pdev->dev,
			"FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
		return -ENOMEM;
	}

	dpit->unmapped_dbr = bar_reg_base + dbr_offset;
	dpit->max = dbr_len / PAGE_SIZE;

	dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
	if (!dpit->app_tbl)
		goto unmap_io;

	bytes = dpit->max >> 3;
	if (!bytes)
		bytes = 1;

	dpit->tbl = kmalloc(bytes, GFP_KERNEL);
	if (!dpit->tbl) {
		kfree(dpit->app_tbl);
		dpit->app_tbl = NULL;
		goto unmap_io;
	}

	memset((u8 *)dpit->tbl, 0xFF, bytes);

	return 0;

unmap_io:
	pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
	dpit->dbr_bar_reg_iomem = NULL;
	return -ENOMEM;
}
/* PKEYs */
static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
	pkey_tbl->active = 0;
}
static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
				     struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
	u16 pkey = 0xFFFF;

	memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);

	/* pkey default = 0xFFFF */
	bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
}
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	if (stats->dma)
		dma_free_coherent(&pdev->dev, stats->size,
				  stats->dma, stats->dma_map);

	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
}
static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
				      struct bnxt_qplib_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->fw_id = -1;
	/* 128 byte aligned context memory is required only for 57500.
	 * However making this unconditional, it does not harm previous
	 * versions of the chip.
	 */
	stats->size = ALIGN(sizeof(struct ctx_hw_stats), 128);
	stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
					&stats->dma_map, GFP_KERNEL);
	if (!stats->dma) {
		dev_err(&pdev->dev, "Stats DMA allocation failed\n");
		return -ENOMEM;
	}
	return 0;
}
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
	bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
	bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);

	return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
	bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
	bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
	bnxt_qplib_free_pd_tbl(&res->pd_tbl);
	bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
}
int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
			 struct net_device *netdev,
			 struct bnxt_qplib_dev_attr *dev_attr)
{
	int rc = 0;

	res->pdev = pdev;
	res->netdev = netdev;

	rc = bnxt_qplib_alloc_sgid_tbl(res, &res->sgid_tbl, dev_attr->max_sgid);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
	if (rc)
		goto fail;

	rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
	if (rc)
		goto fail;

	return 0;
fail:
	bnxt_qplib_free_res(res);
	return rc;
}