/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
#define DMA_ADDR_T_SHIFT		12
#define BT_BA_SHIFT			32

#define HEM_INDEX_BUF			BIT(0)
#define HEM_INDEX_L0			BIT(1)
#define HEM_INDEX_L1			BIT(2)
struct hns_roce_hem_index {
	u64 buf;
	u64 l0;
	u64 l1;
	u32 inited; /* indicate which index is available */
};
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	int hop_num = 0;

	switch (type) {
	case HEM_TYPE_QPC:
		hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SRQC:
		hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_GMV:
		hop_num = hr_dev->caps.gmv_hop_num;
		break;
	default:
		return false;
	}

	return hop_num ? true : false;
}
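/*
 * Note: a zero hop_num in caps disables multi-hop addressing for that
 * table type, so it falls back to the single-chunk scheme handled by
 * hns_roce_table_get()/hns_roce_table_put() below. Tables configured
 * with the HNS_ROCE_HOP_NUM_0 sentinel (base address points directly
 * at the buffer) still report true here, since that sentinel is a
 * nonzero value.
 */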
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
				    u32 bt_chunk_num, u64 hem_max_num)
{
	u64 start_idx = round_down(hem_idx, bt_chunk_num);
	u64 check_max_num = start_idx + bt_chunk_num;
	u64 i;

	for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
		if (i != hem_idx && hem[i])
			return false;

	return true;
}
static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
{
	u64 start_idx = round_down(ba_idx, bt_chunk_num);
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (i != ba_idx && bt[start_idx + i])
			return false;

	return true;
}
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}
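/*
 * The returned bt_num is the number of base address table levels in use:
 * 3 means L0 -> L1 -> buffer, 2 means L0 -> buffer, and 1 means the
 * object index addresses the buffer directly. The check_whether_bt_num_*()
 * helpers (from hns_roce_hem.h) fold the table type into the decision,
 * because context tables (type < HEM_TYPE_MTT) and MTT/CQE tables count
 * hops differently for the same hop_num.
 */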
static int get_hem_table_config(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_mhop *mhop,
				u32 type)
{
	struct device *dev = hr_dev->dev;

	switch (type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
		mhop->hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_GMV:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
					     PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
					    PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
		mhop->hop_num = hr_dev->caps.gmv_hop_num;
		break;
	default:
		dev_err(dev, "table %u not support multi-hop addressing!\n",
			type);
		return -EINVAL;
	}

	return 0;
}
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 chunk_size;
	u32 table_idx;
	u32 bt_num;

	if (get_hem_table_config(hr_dev, mhop, table->type))
		return -EINVAL;

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
	 * MTT/CQE alloc hem for bt pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
		     (chunk_size / table->obj_size);
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "table %u not support hop_num = %u!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}
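/*
 * Example of the index split above: with a 4 KB bt_chunk_size,
 * chunk_ba_num = 4096 / BA_BYTE_LEN = 512 entries per BT chunk, so for a
 * 3-level table the chunk index decomposes (since 512 == 1 << 9) as:
 *
 *	l2_idx = table_idx & 0x1ff;		// bits 0..8
 *	l1_idx = (table_idx >> 9) & 0x1ff;	// bits 9..17
 *	l0_idx = table_idx >> 18;		// remaining bits
 */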
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Alloc memory one time. If failed, don't alloc small block
		 * memory, directly return fail.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}
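/*
 * Allocation strategy sketch: the loop above grabs the largest
 * power-of-two block that still fits the remaining page count, e.g. for
 * npages = 6 it allocates one order-2 block (4 pages) and then one
 * order-1 block (2 pages). Each block is requested in a single
 * dma_alloc_coherent() call with no fallback to smaller orders, so the
 * allocation either succeeds with a few large chunks or fails fast.
 */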
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
					  sg_dma_len(&chunk->mem[i]),
					  chunk->buf[i],
					  sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	__le32 bt_cmd_val[2];
	__le32 bt_cmd_h = 0;
	unsigned long flags;
	__le32 bt_cmd_l;
	int ret = 0;
	u64 bt_ba;
	long end;

	/* Find the HEM(Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
	case HEM_TYPE_MTPT:
	case HEM_TYPE_CQC:
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
		break;
	default:
		return ret;
	}

	roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate only over a single chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = HW_SYNC_TIMEOUT_MSECS;
		while (end > 0) {
			if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
				break;

			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
			end -= HW_SYNC_SLEEP_TIME_INTERVAL;
		}

		if (end <= 0) {
			dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
			spin_unlock_irqrestore(lock, flags);
			return -EBUSY;
		}

		bt_cmd_l = cpu_to_le32(bt_ba);
		roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}
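/*
 * The handshake above is specific to the hip06 register interface: the
 * driver polls ROCEE_BT_CMD_H until hardware clears the sync bit
 * (BT_CMD_SYNC_SHIFT), then writes the page-shifted base address split
 * across BT_CMD_L (low 32 bits) and BT_CMD_H (upper bits plus command
 * fields) as one 64-bit write. Later hardware generations instead
 * program HEM through the hr_dev->hw->set_hem() callback used by the
 * multi-hop paths below.
 */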
static int calc_hem_config(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	unsigned long mhop_obj = obj;
	u32 l0_idx, l1_idx, l2_idx;
	u32 chunk_ba_num;
	u32 bt_num;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
	if (ret)
		return ret;

	l0_idx = mhop->l0_idx;
	l1_idx = mhop->l1_idx;
	l2_idx = mhop->l2_idx;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	switch (bt_num) {
	case 3:
		index->l1 = l0_idx * chunk_ba_num + l1_idx;
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
			     l1_idx * chunk_ba_num + l2_idx;
		break;
	case 2:
		index->l0 = l0_idx;
		index->buf = l0_idx * chunk_ba_num + l1_idx;
		break;
	case 1:
		index->buf = l0_idx;
		break;
	default:
		ibdev_err(ibdev, "table %u not support mhop.hop_num = %u!\n",
			  table->type, mhop->hop_num);
		return -EINVAL;
	}

	if (unlikely(index->buf >= table->num_hem)) {
		ibdev_err(ibdev, "table %u exceed hem limit idx %llu, max %lu!\n",
			  table->type, index->buf, table->num_hem);
		return -EINVAL;
	}

	return 0;
}
static void free_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;

	if (index->inited & HEM_INDEX_BUF) {
		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
		table->hem[index->buf] = NULL;
	}

	if (index->inited & HEM_INDEX_L1) {
		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
				  table->bt_l1_dma_addr[index->l1]);
		table->bt_l1[index->l1] = NULL;
	}

	if (index->inited & HEM_INDEX_L0) {
		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
				  table->bt_l0_dma_addr[index->l0]);
		table->bt_l0[index->l0] = NULL;
	}
}
static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  struct hns_roce_hem_mhop *mhop,
			  struct hns_roce_hem_index *index)
{
	u32 bt_size = mhop->bt_chunk_size;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	gfp_t flag;
	u64 bt_ba;
	u32 size;
	int ret;

	/* alloc L1 BA's chunk */
	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
	     !table->bt_l0[index->l0]) {
		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l0_dma_addr[index->l0],
					    GFP_KERNEL);
		if (!table->bt_l0[index->l0]) {
			ret = -ENOMEM;
			goto out;
		}
		index->inited |= HEM_INDEX_L0;
	}

	/* alloc L2 BA's chunk */
	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
	    !table->bt_l1[index->l1])  {
		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
					    &table->bt_l1_dma_addr[index->l1],
					    GFP_KERNEL);
		if (!table->bt_l1[index->l1]) {
			ret = -ENOMEM;
			goto err_alloc_hem;
		}
		index->inited |= HEM_INDEX_L1;
		*(table->bt_l0[index->l0] + mhop->l1_idx) =
					       table->bt_l1_dma_addr[index->l1];
	}

	/*
	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
	 * alloc bt space chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
	flag = (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN;
	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
						    size, flag);
	if (!table->hem[index->buf]) {
		ret = -ENOMEM;
		goto err_alloc_hem;
	}

	index->inited |= HEM_INDEX_BUF;
	hns_roce_hem_first(table->hem[index->buf], &iter);
	bt_ba = hns_roce_hem_addr(&iter);
	if (table->type < HEM_TYPE_MTT) {
		if (mhop->hop_num == 2)
			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
		else if (mhop->hop_num == 1)
			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	} else if (mhop->hop_num == 2) {
		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
	}

	return 0;

err_alloc_hem:
	free_mhop_hem(hr_dev, table, mhop, index);
out:
	return ret;
}
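/*
 * Resulting address chain for a context table with hop_num == 2 (an
 * illustration of the stores above):
 *
 *	bt_l0[l0] --l1_idx--> bt_l1[l1] --l2_idx--> HEM buffer
 *
 * bt_l0[l0] holds the DMA address of the L1 chunk at slot l1_idx, and
 * bt_l1[l1] holds the HEM buffer's first DMA address at slot l2_idx.
 * Only the missing pieces are allocated; chunks shared with neighbouring
 * objects are reused, which is why index->inited records exactly what
 * this call created so the error path frees no more than that.
 */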
static int set_mhop_hem(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj,
			struct hns_roce_hem_mhop *mhop,
			struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int step_idx;
	int ret = 0;

	if (index->inited & HEM_INDEX_L0) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 0 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_L1) {
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
		if (ret) {
			ibdev_err(ibdev, "set HEM step 1 failed!\n");
			goto out;
		}
	}

	if (index->inited & HEM_INDEX_BUF) {
		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = mhop->hop_num;
		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
		if (ret)
			ibdev_err(ibdev, "set HEM step last failed!\n");
	}
out:
	return ret;
}
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return ret;
	}

	mutex_lock(&table->mutex);
	if (table->hem[index.buf]) {
		++table->hem[index.buf]->refcount;
		goto out;
	}

	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "alloc mhop hem failed!\n");
		goto out;
	}

	/* set HEM base address to hardware */
	if (table->type < HEM_TYPE_MTT) {
		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
		if (ret) {
			ibdev_err(ibdev, "set HEM address to HW failed!\n");
			goto err_alloc;
		}
	}

	++table->hem[index.buf]->refcount;
	goto out;

err_alloc:
	free_mhop_hem(hr_dev, table, &mhop, &index);
out:
	mutex_unlock(&table->mutex);
	return ret;
}
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;
	int ret = 0;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set HEM base address (128KB/page, pa) to hardware */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}
static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long obj,
			   struct hns_roce_hem_mhop *mhop,
			   struct hns_roce_hem_index *index)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 hop_num = mhop->hop_num;
	u32 chunk_ba_num;
	int step_idx;

	index->inited = HEM_INDEX_BUF;
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	if (check_whether_bt_num_2(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem))
			index->inited |= HEM_INDEX_L0;
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		if (hns_roce_check_hem_null(table->hem, index->buf,
					    chunk_ba_num, table->num_hem)) {
			index->inited |= HEM_INDEX_L1;
			if (hns_roce_check_bt_null(table->bt_l1, index->l1,
						   chunk_ba_num))
				index->inited |= HEM_INDEX_L0;
		}
	}

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == HNS_ROCE_HOP_NUM_0)
			step_idx = 0;
		else
			step_idx = hop_num;

		if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
			ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);

		if (index->inited & HEM_INDEX_L1)
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
				ibdev_warn(ibdev, "failed to clear HEM step 1.\n");

		if (index->inited & HEM_INDEX_L0)
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
				ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
	}
}
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_hem_index index = {};
	struct hns_roce_hem_mhop mhop = {};
	int ret;

	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
	if (ret) {
		ibdev_err(ibdev, "calc hem config failed!\n");
		return;
	}

	mutex_lock(&table->mutex);
	if (check_refcount && (--table->hem[index.buf]->refcount > 0)) {
		mutex_unlock(&table->mutex);
		return;
	}

	clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
	free_mhop_hem(hr_dev, table, &mhop, &index);

	mutex_unlock(&table->mutex);
}
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = (obj & (table->num_obj - 1)) /
	    (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	void *addr = NULL;
	u32 hem_idx = 0;
	int length;
	int i, j;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */

		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
			goto out;
		/* mtt mhop */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
				       mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		struct hns_roce_hem_mhop mhop = {};
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0;
		u32 hop_num;

		if (get_hem_table_config(hr_dev, &mhop, type))
			return -EINVAL;

		buf_chunk_size = mhop.buf_chunk_size;
		bt_chunk_size = mhop.bt_chunk_size;
		num_bt_l0 = mhop.ba_l0_num;
		hop_num = mhop.hop_num;

		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
					     bt_chunk_num;
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						 sizeof(*table->bt_l1_dma_addr),
						 GFP_KERNEL);

			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						 sizeof(*table->bt_l0_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}
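/*
 * Sizing example for the multi-hop branch (illustrative numbers): with
 * buf_chunk_size = 4 KB and obj_size = 256, a chunk holds 16 objects, so
 * nobj = 1 M objects needs num_hem = 65536 HEM chunks; with
 * bt_chunk_num = 512 a 3-level table then needs num_bt_l1 = 128 L1
 * chunks, while the L0 count comes from the caps (ba_l0_num) for context
 * tables or bt_chunk_num for MTT-like tables.
 */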
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	u64 obj;
	int i;

	if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
		return;
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
					mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hr_dev->hw->clear_hem(hr_dev, table,
			    i * table->table_chunk_size / table->obj_size, 0))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->cqc_timer_table);
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);

	if (hr_dev->caps.gmv_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);

	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
}
struct roce_hem_item {
	struct list_head list; /* link all hems in the same bt level */
	struct list_head sibling; /* link all hems in last hop for mtt */
	void *addr;
	dma_addr_t dma_addr;
	size_t count; /* max ba numbers */
	int start; /* start buf offset in this hem */
	int end; /* end buf offset in this hem */
};
static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
						 int start, int end,
						 int count, bool exist_bt,
						 int bt_level)
{
	struct roce_hem_item *hem;

	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
	if (!hem)
		return NULL;

	if (exist_bt) {
		hem->addr = dma_alloc_coherent(hr_dev->dev,
					       count * BA_BYTE_LEN,
					       &hem->dma_addr, GFP_KERNEL);
		if (!hem->addr) {
			kfree(hem);
			return NULL;
		}
	}

	hem->count = count;
	hem->start = start;
	hem->end = end;
	INIT_LIST_HEAD(&hem->list);
	INIT_LIST_HEAD(&hem->sibling);

	return hem;
}
static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, bool exist_bt)
{
	if (exist_bt)
		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
				  hem->addr, hem->dma_addr);
	kfree(hem);
}
static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
{
	struct roce_hem_item *hem, *temp_hem;

	list_for_each_entry_safe(hem, temp_hem, head, list) {
		list_del(&hem->list);
		hem_list_free_item(hr_dev, hem, exist_bt);
	}
}
static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
			     u64 table_addr)
{
	*(u64 *)(base_addr) = table_addr;
}
/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, void *cpu_addr,
			       u64 phy_addr)
{
	hem->addr = cpu_addr;
	hem->dma_addr = (dma_addr_t)phy_addr;
}
static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem,
					     int offset)
{
	return (hem->start <= offset && offset <= hem->end);
}
static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
						  int page_offset)
{
	struct roce_hem_item *hem, *temp_hem;
	struct roce_hem_item *found = NULL;

	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
		if (hem_list_page_is_in_range(hem, page_offset)) {
			found = hem;
			break;
		}
	}

	return found;
}
static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
{
	/*
	 * hopnum  base address table levels
	 * 0       H0(buf)
	 * 1       L0 -> buf
	 * 2       L0 -> L1 -> buf
	 * 3       L0 -> L1 -> L2 -> buf
	 */
	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
}
/*
 * calc base address entries num
 * @hopnum: num of multihop addressing
 * @bt_level: base address table level
 * @unit: ba entries per bt page
 */
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
	u32 step;
	int max;
	int i;

	if (hopnum <= bt_level)
		return 0;
	/*
	 * hopnum  bt_level  range
	 * 1       0         unit
	 * ------------
	 * 2       0         unit * unit
	 * 2       1         unit
	 * ------------
	 * 3       0         unit * unit * unit
	 * 3       1         unit * unit
	 * 3       2         unit
	 */
	step = 1;
	max = hopnum - bt_level;
	for (i = 0; i < max; i++)
		step = step * unit;

	return step;
}
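/*
 * For instance, with unit = 512 (a 4 KB BT page of 8-byte BAs):
 * hem_list_calc_ba_range(3, 1, 512) = 512 * 512 = 262144, i.e. one L1
 * table spans 256 K bottom-level entries, while
 * hem_list_calc_ba_range(3, 2, 512) = 512, one L2 table's worth.
 */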
/*
 * calc the root ba entries which could cover all regions
 * @regions: buf region array
 * @region_cnt: array size of @regions
 * @unit: ba entries per bt page
 */
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit)
{
	struct hns_roce_buf_region *r;
	int total = 0;
	int step;
	int i;

	for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
		if (r->hopnum > 1) {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step > 0)
				total += (r->count + step - 1) / step;
		} else {
			total += r->count;
		}
	}

	return total;
}
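/*
 * Example: a region with hopnum = 2 and count = 1000 needs
 * ceil(1000 / step) root entries, where step = unit (one L1 table per
 * root slot), i.e. 2 entries when unit = 512. A hopnum 0/1 region
 * instead contributes one root entry per buffer page (r->count).
 */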
static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
				 const struct hns_roce_buf_region *r, int unit,
				 int offset, struct list_head *mid_bt,
				 struct list_head *btm_bt)
{
	struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
	struct roce_hem_item *cur, *pre;
	const int hopnum = r->hopnum;
	int start_aligned;
	int distance;
	int ret = 0;
	int max_ofs;
	int level;
	u32 step;
	int end;

	if (hopnum <= 1)
		return 0;

	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
		return -EINVAL;
	}

	if (offset < r->offset) {
		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
			offset, r->offset);
		return -EINVAL;
	}

	distance = offset - r->offset;
	max_ofs = r->offset + r->count - 1;
	for (level = 0; level < hopnum; level++)
		INIT_LIST_HEAD(&temp_list[level]);

	/* config L1 bt to last bt and link them to corresponding parent */
	for (level = 1; level < hopnum; level++) {
		cur = hem_list_search_item(&mid_bt[level], offset);
		if (cur) {
			hem_ptrs[level] = cur;
			continue;
		}

		step = hem_list_calc_ba_range(hopnum, level, unit);
		if (step < 1) {
			ret = -EINVAL;
			goto err_exit;
		}

		start_aligned = (distance / step) * step + r->offset;
		end = min_t(int, start_aligned + step - 1, max_ofs);
		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
					  true, level);
		if (!cur) {
			ret = -ENOMEM;
			goto err_exit;
		}
		hem_ptrs[level] = cur;
		list_add(&cur->list, &temp_list[level]);
		if (hem_list_is_bottom_bt(hopnum, level))
			list_add(&cur->sibling, &temp_list[0]);

		/* link bt to parent bt */
		if (level > 1) {
			pre = hem_ptrs[level - 1];
			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
			hem_list_link_bt(hr_dev, pre->addr + step,
					 cur->dma_addr);
		}
	}

	list_splice(&temp_list[0], btm_bt);
	for (level = 1; level < hopnum; level++)
		list_splice(&temp_list[level], &mid_bt[level]);

	return 0;

err_exit:
	for (level = 1; level < hopnum; level++)
		hem_list_free_all(hr_dev, &temp_list[level], true);

	return ret;
}
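/*
 * The parent-slot arithmetic above: at a given level the child table
 * covers "step" bottom-level entries, so (cur->start - pre->start) / step
 * is the child's slot index inside its parent table, and multiplying by
 * BA_BYTE_LEN turns that into a byte offset. E.g. with unit = 512,
 * hopnum = 3, level = 2: step = 512, so a child starting 1024 entries
 * into the parent's range lands in slot 2, byte offset 16.
 */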
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list, int unit,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt)
{
	struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
	struct roce_hem_item *hem, *temp_hem, *root_hem;
	const struct hns_roce_buf_region *r;
	struct list_head temp_root;
	struct list_head temp_btm;
	void *cpu_base;
	u64 phy_base;
	int ret = 0;
	int ba_num;
	int offset;
	int total;
	int step;
	int i;

	r = &regions[0];
	root_hem = hem_list_search_item(&hem_list->root_bt, r->offset);
	if (root_hem)
		return 0;

	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
	if (ba_num < 1)
		return -ENOMEM;

	INIT_LIST_HEAD(&temp_root);
	offset = r->offset;
	/* point to the last region */
	r = &regions[region_cnt - 1];
	root_hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
				       ba_num, true, 0);
	if (!root_hem)
		return -ENOMEM;
	list_add(&root_hem->list, &temp_root);

	hem_list->root_ba = root_hem->dma_addr;

	INIT_LIST_HEAD(&temp_btm);
	for (i = 0; i < region_cnt; i++)
		INIT_LIST_HEAD(&temp_list[i]);

	total = 0;
	for (i = 0; i < region_cnt && total < ba_num; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		/* all regions' mid[x][0] shared the root_bt's trunk */
		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;

		/* if hopnum is 0 or 1, cut a new fake hem from the root bt
		 * whose address is shared with all regions.
		 */
		if (hem_list_is_bottom_bt(r->hopnum, 0)) {
			hem = hem_list_alloc_item(hr_dev, r->offset,
						  r->offset + r->count - 1,
						  r->count, false, 0);
			if (!hem) {
				ret = -ENOMEM;
				goto err_exit;
			}
			hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
			list_add(&hem->list, &temp_list[i]);
			list_add(&hem->sibling, &temp_btm);
			total += r->count;
		} else {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step < 1) {
				ret = -EINVAL;
				goto err_exit;
			}
			/* if exist mid bt, link L1 to L0 */
			list_for_each_entry_safe(hem, temp_hem,
						 &hem_list->mid_bt[i][1], list) {
				offset = (hem->start - r->offset) / step *
					 BA_BYTE_LEN;
				hem_list_link_bt(hr_dev, cpu_base + offset,
						 hem->dma_addr);
				total++;
			}
		}
	}

	list_splice(&temp_btm, &hem_list->btm_bt);
	list_splice(&temp_root, &hem_list->root_bt);
	for (i = 0; i < region_cnt; i++)
		list_splice(&temp_list[i], &hem_list->mid_bt[i][0]);

	return 0;

err_exit:
	for (i = 0; i < region_cnt; i++)
		hem_list_free_all(hr_dev, &temp_list[i], false);

	hem_list_free_all(hr_dev, &temp_root, true);

	return ret;
}
/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt, unsigned int bt_pg_shift)
{
	const struct hns_roce_buf_region *r;
	int ofs, end;
	int unit;
	int ret;
	int i;

	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
			region_cnt);
		return -EINVAL;
	}

	unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		end = r->offset + r->count;
		for (ofs = r->offset; ofs < end; ofs += unit) {
			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
						    hem_list->mid_bt[i],
						    &hem_list->btm_bt);
			if (ret) {
				dev_err(hr_dev->dev,
					"alloc hem trunk fail ret=%d!\n", ret);
				goto err_alloc;
			}
		}
	}

	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
				     region_cnt);
	if (ret)
		dev_err(hr_dev->dev, "alloc hem root fail ret=%d!\n", ret);
	else
		return 0;

err_alloc:
	hns_roce_hem_list_release(hr_dev, hem_list);

	return ret;
}
void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list)
{
	int i, j;

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
					  j != 0);

	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	hem_list->root_ba = 0;
}
void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
{
	int i, j;

	INIT_LIST_HEAD(&hem_list->root_bt);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
}
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr)
{
	struct list_head *head = &hem_list->btm_bt;
	struct roce_hem_item *hem, *temp_hem;
	void *cpu_base = NULL;
	u64 phy_base = 0;
	int nr = 0;

	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
		if (hem_list_page_is_in_range(hem, offset)) {
			nr = offset - hem->start;
			cpu_base = hem->addr + nr * BA_BYTE_LEN;
			phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
			nr = hem->end + 1 - offset;
			break;
		}
	}

	if (mtt_cnt)
		*mtt_cnt = nr;

	if (phy_addr)
		*phy_addr = phy_base;

	return cpu_base;
}
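/*
 * Typical use (a hypothetical caller): walk an MTT range in bursts,
 * where each call returns the kernel address of the first BA for
 * "offset" and, via *mtt_cnt, how many consecutive BAs stay inside the
 * same bottom-level table:
 *
 *	while (done < npage) {
 *		ba = hns_roce_hem_list_find_mtt(hr_dev, hem_list,
 *						start + done, &nr, NULL);
 *		if (!ba || !nr)
 *			break;
 *		done += nr;	// consume up to nr entries starting at ba
 *	}
 */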
;