/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*      copyright notice, this list of conditions and the following
*      disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*      copyright notice, this list of conditions and the following
*      disclaimer in the documentation and/or other materials
*      provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
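
/*
 * Overview (editor's note, inferred from the code below): a PBLE is a
 * physical buffer list entry in the host memory cache (HMC).  Each entry
 * is 8 bytes wide, which is why counts and sizes are converted with
 * "<< 3" / ">> 3" throughout this file.  Backing memory is carved out in
 * chunks, one HMC segment descriptor (SD) at a time, and registered with
 * a genalloc pool that pairs each chunk's kernel virtual address with its
 * function-private memory (FPM) address; gen_pool_alloc() then hands out
 * virtual addresses and gen_pool_virt_to_phys() recovers the FPM address
 * from which a pble index is computed.
 */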
/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct list_head *clist;
        struct list_head *tlist;
        struct i40iw_chunk *chunk;
        struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

        list_for_each_safe(clist, tlist, &pinfo->clist) {
                chunk = list_entry(clist, struct i40iw_chunk, list);
                if (chunk->type == I40IW_VMALLOC)
                        i40iw_free_vmalloc_mem(dev->hw, chunk);
                kfree(chunk);
        }
        gen_pool_destroy(pinfo->pool);
}
/**
 * i40iw_hmc_init_pble - Initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_info *hmc_info;
        u32 fpm_idx = 0;

        hmc_info = dev->hmc_info;
        pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
        /* now start the pbles on a 4k boundary */
        if (pble_rsrc->fpm_base_addr & 0xfff)
                fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
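        /*
         * Worked example (editor's note): with a hypothetical
         * fpm_base_addr of 0x10128, the low bits are 0x128, so
         * fpm_idx = (0x1000 - 0x128) >> 3 = 0x1db entries are skipped
         * and next_fpm_addr below lands on the 4K boundary 0x11000.
         */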
        pble_rsrc->unallocated_pble =
                hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
        pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

        pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
        pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
        INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
        if (!pble_rsrc->pinfo.pool)
                goto error;

        if (add_pble_pool(dev, pble_rsrc))
                goto error;

        return 0;

error:
        i40iw_destroy_pble_pool(dev, pble_rsrc);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * get_sd_pd_idx - returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                 struct sd_pd_idx *idx)
{
        idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
        idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
        idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
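
/*
 * Editor's note: assuming the usual HMC geometry (I40IW_HMC_DIRECT_BP_SIZE
 * = 2MB, I40IW_HMC_PAGED_BP_SIZE = 4KB, I40IW_HMC_PD_CNT_IN_SD = 512), a
 * hypothetical fpm address of 0x301000 yields sd_idx = 1, pd_idx = 0x301
 * (769) and rel_pd_idx = 257, i.e. the 258th 4K page inside the second SD.
 */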
/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_add_page_info *info)
{
        enum i40iw_status_code ret_code = 0;
        struct sd_pd_idx *idx = &info->idx;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        u32 offset = 0;

        if (!sd_entry->valid) {
                if (dev->is_pf) {
                        ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                                            info->idx.sd_idx,
                                                            I40IW_SD_TYPE_DIRECT,
                                                            I40IW_HMC_DIRECT_BP_SIZE);
                        if (ret_code)
                                return ret_code;
                        chunk->type = I40IW_DMA_COHERENT;
                }
        }
        offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
                    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
        return 0;
}
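
/*
 * Editor's note: a direct SD is backed by a single 2MB DMA-coherent buffer
 * (allocated when the SD table entry is added), so the chunk above simply
 * points into that buffer at the proper page offset.  The vmalloc helpers
 * below implement the alternative: a paged SD whose 4K backing pages are
 * allocated separately and DMA-mapped one at a time.
 */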
/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        int i;

        if (!chunk->pg_cnt)
                goto done;
        for (i = 0; i < chunk->pg_cnt; i++)
                dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
        kfree(chunk->dmaaddrs);
        chunk->dmaaddrs = NULL;
        vfree(chunk->vaddr);
        chunk->vaddr = NULL;
        chunk->type = 0;
}
/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hardware address
 * @chunk: chunk to add
 * @pg_cnt: # of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
                                                    struct i40iw_chunk *chunk,
                                                    int pg_cnt)
{
        struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
        struct page *page;
        u8 *addr;
        u32 size;
        int i;

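        /*
         * Editor's note: "pg_cnt << 3" below sizes the dmaaddrs array on
         * the assumption that sizeof(dma_addr_t) == 8; kcalloc(pg_cnt,
         * sizeof(*chunk->dmaaddrs), GFP_KERNEL) would express the same
         * intent without the hidden constant.
         */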
        chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
        if (!chunk->dmaaddrs)
                return I40IW_ERR_NO_MEMORY;
        size = PAGE_SIZE * pg_cnt;
        chunk->vaddr = vmalloc(size);
        if (!chunk->vaddr) {
                kfree(chunk->dmaaddrs);
                chunk->dmaaddrs = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        chunk->size = size;
        addr = (u8 *)chunk->vaddr;
        for (i = 0; i < pg_cnt; i++) {
                page = vmalloc_to_page((void *)addr);
                if (!page)
                        break;
                chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
                                                  PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
                        break;
                addr += PAGE_SIZE;
        }

        chunk->pg_cnt = i;
        chunk->type = I40IW_VMALLOC;

        if (i == pg_cnt)
                return 0;

        i40iw_free_vmalloc_mem(hw, chunk);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
        return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}
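
/*
 * Editor's note: add_bp_pages() below backs one paged SD.  It vmallocs
 * the PBLE chunk, adds the SD and a PD table entry for each 4K page,
 * and, when running as a VF, asks the PF over the virtual channel to
 * commit the PBLE objects and program the backing pages on its behalf.
 */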
/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                           struct i40iw_add_page_info *info)
{
        u8 *addr;
        struct i40iw_dma_mem mem;
        struct i40iw_hmc_pd_entry *pd_entry;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_manage_vf_pble_info vf_pble_info;
        enum i40iw_status_code status = 0;
        u32 rel_pd_idx = info->idx.rel_pd_idx;
        u32 pd_idx = info->idx.pd_idx;
        u32 i;

        status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
        if (status)
                return I40IW_ERR_NO_MEMORY;
        status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                          info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
                                          I40IW_HMC_DIRECT_BP_SIZE);
        if (status) {
                i40iw_free_vmalloc_mem(dev->hw, chunk);
                return status;
        }
        if (!dev->is_pf) {
                status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
                                                     fpm_to_idx(pble_rsrc,
                                                                pble_rsrc->next_fpm_addr),
                                                     (info->pages << PBLE_512_SHIFT));
                if (status) {
                        i40iw_pr_err("failed to allocate PBLEs in the PF. Error %i\n", status);
                        i40iw_free_vmalloc_mem(dev->hw, chunk);
                        return status;
                }
        }
        addr = chunk->vaddr;
        for (i = 0; i < info->pages; i++) {
                mem.pa = chunk->dmaaddrs[i];
                mem.size = PAGE_SIZE;
                mem.va = (void *)(addr);
                pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
                if (!pd_entry->valid) {
                        status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
                        if (status)
                                goto error;
                        addr += PAGE_SIZE;
                } else {
                        i40iw_pr_err("pd entry is valid expecting to be invalid\n");
                }
        }
        if (!dev->is_pf) {
                vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
                vf_pble_info.inv_pd_ent = false;
                vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
                vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
                vf_pble_info.sd_index = info->idx.sd_idx;
                status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
                                                    &vf_pble_info, true);
                if (status) {
                        i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
                        goto error;
                }
        }
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        return 0;
error:
        i40iw_free_vmalloc_mem(dev->hw, chunk);
        return status;
}
/**
 * add_pble_pool - add a sd entry for pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_sd_entry *sd_entry;
        struct i40iw_hmc_info *hmc_info;
        struct i40iw_chunk *chunk;
        struct i40iw_add_page_info info;
        struct sd_pd_idx *idx = &info.idx;
        enum i40iw_status_code ret_code = 0;
        enum i40iw_sd_entry_type sd_entry_type;
        u64 sd_reg_val = 0;
        u32 pages;

        if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
                return I40IW_ERR_NO_MEMORY;
        if (pble_rsrc->next_fpm_addr & 0xfff) {
                i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
                return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
        }
        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return I40IW_ERR_NO_MEMORY;
        hmc_info = dev->hmc_info;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        get_sd_pd_idx(pble_rsrc, idx);
        sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
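        /*
         * Editor's note: the sizing below fills out the remainder of the
         * current SD: 512 4K pages for a fresh SD, fewer when rel_pd_idx
         * is already partway in, capped by the unallocated pble budget
         * (PBLE_512_SHIFT converts pbles to 512-pble pages).
         */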
        pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
                        idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
        pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
        if (!pages) {
                ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
                goto error;
        }
        info.chunk = chunk;
        info.hmc_info = hmc_info;
        info.pages = pages;
        info.sd_entry = sd_entry;
        if (!sd_entry->valid) {
                sd_entry_type = (!idx->rel_pd_idx &&
                                 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
                                 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
        } else {
                sd_entry_type = sd_entry->entry_type;
        }
        i40iw_debug(dev, I40IW_DEBUG_PBLE,
                    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
                    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
                    sd_entry_type, sd_entry->valid);

        if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
                ret_code = add_sd_direct(dev, pble_rsrc, &info);
        if (ret_code)
                sd_entry_type = I40IW_SD_TYPE_PAGED;
        else
                pble_rsrc->stats_direct_sds++;

        if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
                ret_code = add_bp_pages(dev, pble_rsrc, &info);
                if (ret_code)
                        goto error;
                else
                        pble_rsrc->stats_paged_sds++;
        }

        if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
                              (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
                i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
                ret_code = I40IW_ERR_NO_MEMORY;
                goto error;
        }
        pble_rsrc->next_fpm_addr += chunk->size;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
                    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (chunk->size >> 3);
        list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
                        sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
        if (sd_entry->valid)
                return 0;
        if (dev->is_pf) {
                ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
                                            sd_reg_val, idx->sd_idx,
                                            sd_entry->entry_type, true);
                if (ret_code) {
                        i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
                        goto error;
                }
        }

        sd_entry->valid = true;
        return 0;
error:
        kfree(chunk);
        return ret_code;
}
/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                      struct i40iw_pble_alloc *palloc)
{
        u32 i;
        struct gen_pool *pool;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf = lvl2->leaf;

        pool = pble_rsrc->pinfo.pool;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                if (leaf->addr)
                        gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
                else
                        break;
        }

        if (root->addr)
                gen_pool_free(pool, root->addr, (root->cnt << 3));

        kfree(palloc->level2.leaf);
        palloc->level2.leaf = NULL;
}
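
/*
 * Editor's note on the two-level layout: a level-2 allocation is a root
 * page of leaf indices plus up to 512-entry leaf pages that hold the
 * actual pbles.  get_lvl2_pble() below builds exactly that: the root
 * receives one 8-byte slot per leaf, and each slot is filled with the
 * corresponding leaf's pble index.
 */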
/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc,
                                            struct gen_pool *pool)
{
        u32 lf4k, lflast, total, i;
        u32 pblcnt = PBLE_PER_PAGE;
        u64 *addr;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf;

        /* number of full 512-entry (4K) leaves */
        lf4k = palloc->total_cnt >> 9;
        lflast = palloc->total_cnt % PBLE_PER_PAGE;
        total = (lflast == 0) ? lf4k : lf4k + 1;
        lvl2->leaf_cnt = total;

        leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
        if (!leaf)
                return I40IW_ERR_NO_MEMORY;
        lvl2->leaf = leaf;
        /* allocate pbles for the root */
        root->addr = gen_pool_alloc(pool, (total << 3));
        if (!root->addr) {
                kfree(lvl2->leaf);
                lvl2->leaf = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        root->idx = fpm_to_idx(pble_rsrc,
                               (u64)gen_pool_virt_to_phys(pool, root->addr));
        root->cnt = total;
        addr = (u64 *)root->addr;
        for (i = 0; i < total; i++, leaf++) {
                pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
                leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
                if (!leaf->addr)
                        goto error;
                leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
                leaf->cnt = pblcnt;
                *addr = (u64)leaf->idx;
                addr++;
        }
        palloc->level = I40IW_LEVEL_2;
        pble_rsrc->stats_lvl2++;
        return 0;
error:
        free_lvl2(pble_rsrc, palloc);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc)
{
        u64 *addr;
        struct gen_pool *pool;
        struct i40iw_pble_info *lvl1 = &palloc->level1;

        pool = pble_rsrc->pinfo.pool;
        addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));

        if (!addr)
                return I40IW_ERR_NO_MEMORY;

        palloc->level = I40IW_LEVEL_1;
        lvl1->addr = (unsigned long)addr;
        lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
                               (unsigned long)addr));
        lvl1->cnt = palloc->total_cnt;
        pble_rsrc->stats_lvl1++;
        return 0;
}
/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
                                                        struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                                        struct i40iw_pble_alloc *palloc,
                                                        struct gen_pool *pool)
{
        enum i40iw_status_code status = 0;

        status = get_lvl1_pble(dev, pble_rsrc, palloc);
        if (status && (palloc->total_cnt > PBLE_PER_PAGE))
                status = get_lvl2_pble(pble_rsrc, palloc, pool);
        return status;
}
/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
                                      struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                      struct i40iw_pble_alloc *palloc,
                                      u32 pble_cnt)
{
        struct gen_pool *pool;
        enum i40iw_status_code status = 0;
        u32 max_sds = 0;
        int i;

        pool = pble_rsrc->pinfo.pool;
        palloc->total_cnt = pble_cnt;
        palloc->level = I40IW_LEVEL_0;
        /* check first to see if we can get pbles without acquiring additional sds */
        status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
        if (!status)
                goto exit;
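        /*
         * Editor's note: assuming 2MB SDs, one SD covers 2MB / 8 bytes =
         * 2^18 pbles, so "total_cnt >> 18" below is the number of whole
         * SDs the request spans; one extra covers a partial SD, and the
         * loop grows the pool one SD at a time until the allocation fits.
         */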
        max_sds = (palloc->total_cnt >> 18) + 1;
        for (i = 0; i < max_sds; i++) {
                status = add_pble_pool(dev, pble_rsrc);
                if (status)
                        break;
                status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
                if (!status)
                        break;
        }
exit:
        if (!status)
                pble_rsrc->stats_alloc_ok++;
        else
                pble_rsrc->stats_alloc_fail++;

        return status;
}
/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                     struct i40iw_pble_alloc *palloc)
{
        struct gen_pool *pool;

        pool = pble_rsrc->pinfo.pool;
        if (palloc->level == I40IW_LEVEL_2)
                free_lvl2(pble_rsrc, palloc);
        else
                gen_pool_free(pool, palloc->level1.addr,
                              (palloc->level1.cnt << 3));
        pble_rsrc->stats_alloc_freed++;
}
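
/*
 * Editor's note: a minimal, hypothetical caller sketch.  A consumer such
 * as MR registration sizes palloc for the number of page addresses it
 * needs, writes them through palloc->level1.addr (or the level-2 leaves),
 * and programs the hardware with palloc->level1.idx:
 *
 *	struct i40iw_pble_alloc palloc = {};
 *
 *	if (!i40iw_get_pble(dev, pble_rsrc, &palloc, npages)) {
 *		// ... fill pbles, post work requests using palloc.level1.idx ...
 *		i40iw_free_pble(pble_rsrc, &palloc);
 *	}
 */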