/*******************************************************************************
*
* Copyright (c) 2015-2016 Intel Corporation.  All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses.  You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenFabrics.org BSD license below:
*
*   Redistribution and use in source and binary forms, with or
*   without modification, are permitted provided that the following
*   conditions are met:
*
*    - Redistributions of source code must retain the above
*        copyright notice, this list of conditions and the following
*        disclaimer.
*
*    - Redistributions in binary form must reproduce the above
*        copyright notice, this list of conditions and the following
*        disclaimer in the documentation and/or other materials
*        provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*******************************************************************************/
#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"
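
/*
 * This file manages PBLEs (physical buffer list entries): 8-byte entries
 * holding the physical addresses of the 4KB pages that back memory regions.
 * PBLE storage lives in the HMC function-private memory (fpm) object
 * I40IW_HMC_IW_PBLE and is handed out through a genalloc pool.
 */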
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
/**
 * i40iw_destroy_pble_pool - destroy pool during module unload
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 */
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct list_head *clist;
        struct list_head *tlist;
        struct i40iw_chunk *chunk;
        struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

        list_for_each_safe(clist, tlist, &pinfo->clist) {
                chunk = list_entry(clist, struct i40iw_chunk, list);
                if (chunk->type == I40IW_VMALLOC)
                        i40iw_free_vmalloc_mem(dev->hw, chunk);
                kfree(chunk);
        }

        if (pinfo->pool)
                gen_pool_destroy(pinfo->pool);
}
/**
 * i40iw_hmc_init_pble - initialize pble resources during module load
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_info *hmc_info;
        u32 fpm_idx = 0;

        hmc_info = dev->hmc_info;
        pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
        /* now start the pbles on a 4k boundary (each pble is 8 bytes) */
        if (pble_rsrc->fpm_base_addr & 0xfff)
                fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

        pble_rsrc->unallocated_pble =
            hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
        pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
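
        /*
         * Back the allocator with a genalloc pool. POOL_SHIFT is the
         * gen_pool min_alloc_order (assumed to be 3, i.e. one 8-byte pble
         * of granularity); nid -1 means any NUMA node.
         */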
        pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
        pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
        INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
        if (!pble_rsrc->pinfo.pool)
                goto error;

        if (add_pble_pool(dev, pble_rsrc))
                goto error;

        return 0;

error:
        i40iw_destroy_pble_pool(dev, pble_rsrc);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * get_sd_pd_idx - returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                 struct sd_pd_idx *idx)
{
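        /*
         * HMC layout: each SD (segment descriptor) covers 2MB of backing
         * store (I40IW_HMC_DIRECT_BP_SIZE) and holds up to 512 PDs
         * (I40IW_HMC_PD_CNT_IN_SD); each PD (page descriptor) covers one
         * 4KB page (I40IW_HMC_PAGED_BP_SIZE). rel_pd_idx is the PD's
         * position within its SD.
         */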
        idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
        idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
        idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
/**
 * add_sd_direct - add sd direct for pble
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_add_page_info *info)
{
        enum i40iw_status_code ret_code = 0;
        struct sd_pd_idx *idx = &info->idx;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        u32 offset = 0;

        if (!sd_entry->valid) {
                if (dev->is_pf) {
                        ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                                            info->idx.sd_idx,
                                                            I40IW_SD_TYPE_DIRECT,
                                                            I40IW_HMC_DIRECT_BP_SIZE);
                        if (ret_code)
                                return ret_code;
                        chunk->type = I40IW_DMA_COHERENT;
                }
        }
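        /*
         * For a direct SD the whole 2MB backing page is one DMA-coherent
         * allocation, so the chunk simply points into it at the PD offset.
         */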
        offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
        chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
                    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
        return 0;
}
/**
 * i40iw_free_vmalloc_mem - free vmalloc during close
 * @hw: hw struct
 * @chunk: chunk information for vmalloc
 */
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
        struct pci_dev *pcidev = hw->pcidev;
        int i;

        if (!chunk->pg_cnt)
                goto done;
        for (i = 0; i < chunk->pg_cnt; i++)
                dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
        kfree(chunk->dmaaddrs);
        chunk->dmaaddrs = NULL;
        vfree(chunk->vaddr);
        chunk->vaddr = NULL;
        chunk->type = 0;
}
/**
 * i40iw_get_vmalloc_mem - get 2M page for sd
 * @hw: hardware address
 * @chunk: chunk to add
 * @pg_cnt: # of 4K pages
 */
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
                                                    struct i40iw_chunk *chunk,
                                                    u32 pg_cnt)
{
        struct pci_dev *pcidev = hw->pcidev;
        struct page *page;
        u8 *addr;
        u32 size;
        u32 i;

        chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
        if (!chunk->dmaaddrs)
                return I40IW_ERR_NO_MEMORY;
        size = PAGE_SIZE * pg_cnt;
        chunk->vaddr = vmalloc(size);
        if (!chunk->vaddr) {
                kfree(chunk->dmaaddrs);
                chunk->dmaaddrs = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        chunk->size = size;
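        /*
         * vmalloc memory is virtually contiguous but physically scattered,
         * so each 4KB page must be looked up and DMA-mapped individually.
         */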
        addr = (u8 *)chunk->vaddr;
        for (i = 0; i < pg_cnt; i++) {
                page = vmalloc_to_page((void *)addr);
                if (!page)
                        break;
                chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
                                                  PAGE_SIZE, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
                        break;
                addr += PAGE_SIZE;
        }

        chunk->pg_cnt = i;
        chunk->type = I40IW_VMALLOC;

        if (i == pg_cnt)
                return 0;

        i40iw_free_vmalloc_mem(hw, chunk);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
        return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}
/**
 * add_bp_pages - add backing pages for sd
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
                                           struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                           struct i40iw_add_page_info *info)
{
        u8 *addr;
        struct i40iw_dma_mem mem;
        struct i40iw_hmc_pd_entry *pd_entry;
        struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
        struct i40iw_hmc_info *hmc_info = info->hmc_info;
        struct i40iw_chunk *chunk = info->chunk;
        struct i40iw_manage_vf_pble_info vf_pble_info;
        enum i40iw_status_code status = 0;
        u32 rel_pd_idx = info->idx.rel_pd_idx;
        u32 pd_idx = info->idx.pd_idx;
        u32 i;

        status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
        if (status)
                return I40IW_ERR_NO_MEMORY;
        status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
                                          info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
                                          I40IW_HMC_DIRECT_BP_SIZE);
        if (status)
                goto error;
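
        /*
         * A VF cannot program HMC backing pages itself; it asks the PF over
         * the virtual channel to allocate the PBLE objects on its behalf.
         */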
        if (!dev->is_pf) {
                status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
                                                     fpm_to_idx(pble_rsrc,
                                                                pble_rsrc->next_fpm_addr),
                                                     (info->pages << PBLE_512_SHIFT));
                if (status) {
                        i40iw_pr_err("allocate PBLEs in the PF. Error %i\n", status);
                        goto error;
                }
        }
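
        /* register each DMA-mapped 4KB page as a PD entry under this SD */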
        addr = chunk->vaddr;
        for (i = 0; i < info->pages; i++) {
                mem.pa = chunk->dmaaddrs[i];
                mem.size = PAGE_SIZE;
                mem.va = (void *)(addr);
                pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
                if (!pd_entry->valid) {
                        status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
                        if (status)
                                goto error;
                        addr += PAGE_SIZE;
                } else {
                        i40iw_pr_err("pd entry is valid expecting to be invalid\n");
                }
        }
        if (!dev->is_pf) {
                vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
                vf_pble_info.inv_pd_ent = false;
                vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
                vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
                vf_pble_info.sd_index = info->idx.sd_idx;
                status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
                                                    &vf_pble_info, true);
                if (status) {
                        i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
                        goto error;
                }
        }
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        return 0;
error:
        i40iw_free_vmalloc_mem(dev->hw, chunk);
        return status;
}
/**
 * add_pble_pool - add a sd entry for pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 */
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
        struct i40iw_hmc_sd_entry *sd_entry;
        struct i40iw_hmc_info *hmc_info;
        struct i40iw_chunk *chunk;
        struct i40iw_add_page_info info;
        struct sd_pd_idx *idx = &info.idx;
        enum i40iw_status_code ret_code = 0;
        enum i40iw_sd_entry_type sd_entry_type;
        u64 sd_reg_val = 0;
        u32 pages;

        if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
                return I40IW_ERR_NO_MEMORY;
        if (pble_rsrc->next_fpm_addr & 0xfff) {
                i40iw_pr_err("next fpm_addr %llx\n", pble_rsrc->next_fpm_addr);
                return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
        }
        chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
        if (!chunk)
                return I40IW_ERR_NO_MEMORY;
        hmc_info = dev->hmc_info;
        chunk->fpm_addr = pble_rsrc->next_fpm_addr;
        get_sd_pd_idx(pble_rsrc, idx);
        sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
        pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
                        idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
        pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
        info.chunk = chunk;
        info.hmc_info = hmc_info;
        info.pages = pages;
        info.sd_entry = sd_entry;
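        /*
         * Use a direct SD (one 2MB DMA-coherent page) only when the PF is
         * backing a full, SD-aligned 512-page range; otherwise fall back to
         * a paged SD built from individually mapped 4KB pages.
         */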
        if (!sd_entry->valid) {
                sd_entry_type = (!idx->rel_pd_idx &&
                                 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
                                 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
        } else {
                sd_entry_type = sd_entry->entry_type;
        }
        i40iw_debug(dev, I40IW_DEBUG_PBLE,
                    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
                    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
                    sd_entry_type, sd_entry->valid);

        if (sd_entry_type == I40IW_SD_TYPE_DIRECT)
                ret_code = add_sd_direct(dev, pble_rsrc, &info);
        if (ret_code)
                sd_entry_type = I40IW_SD_TYPE_PAGED;
        else
                pble_rsrc->stats_direct_sds++;

        if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
                ret_code = add_bp_pages(dev, pble_rsrc, &info);
                if (ret_code)
                        goto error;
                else
                        pble_rsrc->stats_paged_sds++;
        }
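
        /*
         * Publish the chunk to the genalloc pool. The fpm address is stored
         * as the "phys" cookie, so gen_pool_virt_to_phys() on a pool hit
         * returns an fpm address that fpm_to_idx() can convert to an index.
         */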
        if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
                              (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
                i40iw_pr_err("could not allocate memory by gen_pool_add_virt()\n");
                ret_code = I40IW_ERR_NO_MEMORY;
                goto error;
        }
        pble_rsrc->next_fpm_addr += chunk->size;
        i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
                    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
        pble_rsrc->unallocated_pble -= (chunk->size >> 3);
        list_add(&chunk->list, &pble_rsrc->pinfo.clist);
        sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
                        sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
        if (sd_entry->valid)
                return 0;
        if (dev->is_pf) {
                ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
                                            sd_reg_val, idx->sd_idx,
                                            sd_entry->entry_type, true);
                if (ret_code) {
                        i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
                        goto error;
                }
        }

        sd_entry->valid = true;
        return 0;
error:
        kfree(chunk);
        return ret_code;
}
/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                      struct i40iw_pble_alloc *palloc)
{
        u32 i;
        struct gen_pool *pool;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf = lvl2->leaf;

        pool = pble_rsrc->pinfo.pool;

        for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
                if (leaf->addr)
                        gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
                else
                        break;
        }

        if (root->addr)
                gen_pool_free(pool, root->addr, (root->cnt << 3));

        kfree(lvl2->leaf);
        lvl2->leaf = NULL;
}
/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 * @pool: pool pointer
 */
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc,
                                            struct gen_pool *pool)
{
        u32 lf4k, lflast, total, i;
        u32 pblcnt = PBLE_PER_PAGE;
        u64 *addr;
        struct i40iw_pble_level2 *lvl2 = &palloc->level2;
        struct i40iw_pble_info *root = &lvl2->root;
        struct i40iw_pble_info *leaf;

        /* number of full 512 (4K) leafs */
        lf4k = palloc->total_cnt >> 9;
        lflast = palloc->total_cnt % PBLE_PER_PAGE;
        total = (lflast == 0) ? lf4k : lf4k + 1;
        lvl2->leaf_cnt = total;

        leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
        if (!leaf)
                return I40IW_ERR_NO_MEMORY;
        lvl2->leaf = leaf;
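        /*
         * Two-level layout: the root is a page of PBLEs in which each entry
         * holds the pble index of one leaf page; each leaf holds up to 512
         * PBLEs of the actual buffer list.
         */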
        /* allocate pbles for the root */
        root->addr = gen_pool_alloc(pool, (total << 3));
        if (!root->addr) {
                kfree(lvl2->leaf);
                lvl2->leaf = NULL;
                return I40IW_ERR_NO_MEMORY;
        }
        root->idx = fpm_to_idx(pble_rsrc,
                               (u64)gen_pool_virt_to_phys(pool, root->addr));
        root->cnt = total;
        addr = (u64 *)root->addr;
        for (i = 0; i < total; i++, leaf++) {
                pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
                leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
                if (!leaf->addr)
                        goto error;
                leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));

                leaf->cnt = pblcnt;
                *addr = (u64)leaf->idx;
                addr++;
        }
        palloc->level = I40IW_LEVEL_2;
        pble_rsrc->stats_lvl2++;
        return 0;
error:
        free_lvl2(pble_rsrc, palloc);
        return I40IW_ERR_NO_MEMORY;
}
/**
 * get_lvl1_pble - get level 1 pble resource
 * @dev: hardware control device structure
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
                                            struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                            struct i40iw_pble_alloc *palloc)
{
        u64 *addr;
        struct gen_pool *pool;
        struct i40iw_pble_info *lvl1 = &palloc->level1;

        pool = pble_rsrc->pinfo.pool;
        addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));
        if (!addr)
                return I40IW_ERR_NO_MEMORY;

        palloc->level = I40IW_LEVEL_1;
        lvl1->addr = (unsigned long)addr;
        lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
                               (unsigned long)addr));
        lvl1->cnt = palloc->total_cnt;
        pble_rsrc->stats_lvl1++;
        return 0;
}
/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pool: pointer to general purpose special memory pool descriptor
 */
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
                                                        struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                                        struct i40iw_pble_alloc *palloc,
                                                        struct gen_pool *pool)
{
        enum i40iw_status_code status = 0;
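
        /*
         * Prefer a single contiguous level-1 range; fall back to a
         * two-level structure only when the request spans more than one
         * 4KB page of PBLEs (PBLE_PER_PAGE).
         */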
        status = get_lvl1_pble(dev, pble_rsrc, palloc);
        if (status && (palloc->total_cnt > PBLE_PER_PAGE))
                status = get_lvl2_pble(pble_rsrc, palloc, pool);
        return status;
}
/**
 * i40iw_get_pble - allocate pbles from the pool
 * @dev: i40iw_sc_dev struct
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 */
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
                                      struct i40iw_hmc_pble_rsrc *pble_rsrc,
                                      struct i40iw_pble_alloc *palloc,
                                      u32 pble_cnt)
{
        struct gen_pool *pool;
        enum i40iw_status_code status = 0;
        u32 max_sds = 0;
        int i;

        pool = pble_rsrc->pinfo.pool;
        palloc->total_cnt = pble_cnt;
        palloc->level = I40IW_LEVEL_0;
        /* check first to see if we can get pbles without acquiring additional sds */
        status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
        if (!status)
                goto exit;
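
        /*
         * Each additional SD yields 2MB of PBLE backing store, i.e. 2^18
         * 8-byte entries, so total_cnt >> 18 (plus one for any remainder)
         * bounds how many SDs this request can need.
         */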
        max_sds = (palloc->total_cnt >> 18) + 1;
        for (i = 0; i < max_sds; i++) {
                status = add_pble_pool(dev, pble_rsrc);
                if (status)
                        break;
                status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
                if (!status)
                        break;
        }
exit:
        if (!status)
                pble_rsrc->stats_alloc_ok++;
        else
                pble_rsrc->stats_alloc_fail++;

        return status;
}
/**
 * i40iw_free_pble - put pbles back into pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
                     struct i40iw_pble_alloc *palloc)
{
        struct gen_pool *pool;

        pool = pble_rsrc->pinfo.pool;
        if (palloc->level == I40IW_LEVEL_2)
                free_lvl2(pble_rsrc, palloc);
        else
                gen_pool_free(pool, palloc->level1.addr,
                              (palloc->level1.cnt << 3));
        pble_rsrc->stats_alloc_freed++;
}