// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *)pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
			struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
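	/*
	 * A pble is an 8-byte entry, so the byte gap to the next 4K
	 * boundary is converted to a pble count with a right shift of 3.
	 */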
	/* Start pble' on 4k boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
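	/*
	 * next_fpm_addr is decomposed into the segment descriptor (SD)
	 * index, the global 4K page descriptor (PD) index, and the PD
	 * index relative to the start of its SD.
	 */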
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;
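
	/* fpm addresses are byte offsets; each pble entry is 8 bytes */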
	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
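	/*
	 * Walk the chunk's DMA-backed 4K pages and add a PD table entry
	 * for each page that is not already mapped in this SD.
	 */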
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - add a sd entry type for sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;
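
	/*
	 * An SD can be programmed as DIRECT (one contiguous backing page)
	 * only when the allocation starts on an SD boundary and spans all
	 * of its PDs; otherwise it must be PAGED.
	 */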
	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add a sd entry for pble resource
 * @pble_rsrc: pble resource management
 */
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
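	/*
	 * Claim up to the rest of the current SD: the PDs left before the
	 * next SD boundary, capped by the unallocated pbles expressed in
	 * whole 4K pages (512 pbles per page).
	 */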
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
		IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
		     sd_entry->u.pd_table.pd_page_addr.pa :
		     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;
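
	/*
	 * Leaves are allocated in order, so a leaf without an address marks
	 * the point where allocation stopped and the walk can end early.
	 */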
	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;

	/* number of full 512 (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
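
	/*
	 * Grab a leaf's worth of pbles for each leaf and record each
	 * leaf's pble index in the root page.
	 */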
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
				lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @lvl: Bitmask for requested pble level
 */
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, u8 lvl)
{
	int status = 0;
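
	/*
	 * Prefer a single level 1 range; fall back to a level 2 (root +
	 * leaves) layout only when level 1 fails, level 2 is permitted by
	 * @lvl, and the request spans more than one page of pbles.
	 */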
	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || lvl == PBLE_LEVEL_1 || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: # of pbles requested
 * @lvl: requested pble level mask
 */
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   u8 lvl)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* check first to see if we can get pble's without acquiring
	 * additional sd's
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
	if (!status)
		goto exit;
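
	/*
	 * Each added SD can back at most 2^18 pbles (a 2MB backing page of
	 * 8-byte entries), so this bounds how many SDs are worth adding
	 * before retrying the allocation.
	 */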
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, lvl);
		/* if level1_only, only go through it once */
		if (!status || lvl)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}