/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
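/*
 * The user-visible MR/MW key is derived from the MTPT table index by an
 * 8-bit rotate within 32 bits; key_to_hw_index() is the inverse rotate.
 * Illustrative example (not from this file): index 0x1 maps to key 0x100,
 * and rotating that key back yields index 0x1 again.
 */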
static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
static int hns_roce_hw_create_mpt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_CREATE_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
int hns_roce_hw_destroy_mpt(struct hns_roce_dev *hr_dev,
			    struct hns_roce_cmd_mailbox *mailbox,
			    unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_DESTROY_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
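/*
 * MTT segments are managed by a small power-of-two buddy allocator:
 * bits[o] marks which blocks of 2^o segments are free and num_free[o]
 * counts them.  An allocation of a low order splits a larger free block,
 * setting the bit of each buddy that is left behind so it can satisfy
 * later requests.
 */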
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;
	return 0;
}
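/*
 * Freeing walks back up the orders: as long as the buddy of the freed
 * block is also free, the pair is merged into a single block of the next
 * higher order before it is marked free.
 */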
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}
static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}
static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}
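/*
 * Each MTT type (WQE, CQE, SRQ WQE and IDX) has its own buddy allocator
 * and HEM table.  hns_roce_alloc_mtt_range() picks the pair that matches
 * the requested type, reserves 2^order contiguous segments from the buddy
 * and then pins the corresponding HEM range.
 */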
static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	switch (mtt_type) {
	case MTT_TYPE_WQE:
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
		break;
	case MTT_TYPE_CQE:
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
		break;
	case MTT_TYPE_SRQWQE:
		buddy = &mr_table->mtt_srqwqe_buddy;
		table = &mr_table->mtt_srqwqe_table;
		break;
	case MTT_TYPE_IDX:
		buddy = &mr_table->mtt_idx_buddy;
		table = &mr_table->mtt_idx_table;
		break;
	default:
		dev_err(hr_dev->dev, "Unsupported MTT table type: %d\n",
			mtt_type);
		return -EINVAL;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret)
		return ret;

	ret = hns_roce_table_get_range(hr_dev, table, *seg,
				       *seg + (1 << order) - 1);
	if (ret) {
		hns_roce_buddy_free(buddy, *seg, order);
		return ret;
	}

	return 0;
}
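/*
 * mtt->order is the number of segment doublings needed to cover npages
 * entries.  As a rough example, assuming HNS_ROCE_MTT_ENTRY_PER_SEG is 8,
 * npages == 20 yields order 2, i.e. four contiguous segments holding up
 * to 32 entries.
 */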
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* A page count of zero corresponds to a DMA memory registration */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: a page_shift of zero means a fast memory registration */
	mtt->page_shift = page_shift;

	/* Compute the number of MTT segments needed */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate the MTT entries */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret)
		return -ENOMEM;

	return 0;
}
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_CQE:
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_SRQWQE:
		hns_roce_buddy_free(&mr_table->mtt_srqwqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_srqwqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	case MTT_TYPE_IDX:
		hns_roce_buddy_free(&mr_table->mtt_idx_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_idx_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, mtt cleanup failed\n",
			mtt->mtt_type);
		break;
	}
}
static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u64 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				bt_idx = i * pbl_bt_sz / BA_BYTE_LEN + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "not supported: mhop_num=%d, err_loop_index=%d.",
			 mhop_num, err_loop_index);
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}
static int pbl_1hop_alloc(struct hns_roce_dev *hr_dev, int npages,
			  struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
	struct device *dev = hr_dev->dev;

	if (npages > pbl_bt_sz / 8) {
		dev_err(dev, "npages %d is larger than buf_pg_sz!",
			npages);
		return -EINVAL;
	}
	mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
					 &(mr->pbl_dma_addr),
					 GFP_KERNEL);
	if (!mr->pbl_buf)
		return -ENOMEM;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_dma_addr;
	mr->pbl_hop_num = 1;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
	return 0;
}
static int pbl_2hop_alloc(struct hns_roce_dev *hr_dev, int npages,
			  struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 size;
	int i;

	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	/* alloc L1 BT */
	for (i = 0; i < pbl_bt_sz / 8; i++) {
		if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
			size = pbl_bt_sz;
		} else {
			npages_allocated = i * (pbl_bt_sz / 8);
			size = (npages - npages_allocated) * 8;
		}
		mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
					    &(mr->pbl_l1_dma_addr[i]),
					    GFP_KERNEL);
		if (!mr->pbl_bt_l1[i]) {
			hns_roce_loop_free(hr_dev, mr, 1, i, 0);
			return -ENOMEM;
		}

		*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

		pbl_bt_cnt++;
		if (pbl_bt_cnt >= pbl_last_bt_num)
			break;
	}

	mr->l0_chunk_last_num = i + 1;

	return 0;
}
static int pbl_3hop_alloc(struct hns_roce_dev *hr_dev, int npages,
			  struct hns_roce_mr *mr, u32 pbl_bt_sz)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;
	int i;
	int j = 0;

	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
				      sizeof(*mr->pbl_l2_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l2_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
				sizeof(*mr->pbl_bt_l2),
				GFP_KERNEL);
	if (!mr->pbl_bt_l2)
		goto err_kcalloc_bt_l2;

	/* alloc L1, L2 BT */
	for (i = 0; i < pbl_bt_sz / 8; i++) {
		mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
					    &(mr->pbl_l1_dma_addr[i]),
					    GFP_KERNEL);
		if (!mr->pbl_bt_l1[i]) {
			hns_roce_loop_free(hr_dev, mr, 1, i, 0);
			goto err_dma_alloc_l0;
		}

		*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

		for (j = 0; j < pbl_bt_sz / 8; j++) {
			bt_idx = i * pbl_bt_sz / 8 + j;

			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				npages_allocated = bt_idx *
						   (pbl_bt_sz / 8);
				size = (npages - npages_allocated) * 8;
			}
			mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
				      dev, size,
				      &(mr->pbl_l2_dma_addr[bt_idx]),
				      GFP_KERNEL);
			if (!mr->pbl_bt_l2[bt_idx]) {
				hns_roce_loop_free(hr_dev, mr, 2, i, j);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l1[i] + j) =
				mr->pbl_l2_dma_addr[bt_idx];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num) {
				mr_alloc_done = 1;
				break;
			}
		}

		if (mr_alloc_done)
			break;
	}

	mr->l0_chunk_last_num = i + 1;
	mr->l1_chunk_last_num = j + 1;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

	return -ENOMEM;
}
/* PBL multi-hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	u32 pbl_bt_sz;
	u32 mhop_num;

	mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	if (mhop_num == 1)
		return pbl_1hop_alloc(hr_dev, npages, mr, pbl_bt_sz);

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	/* alloc L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_kcalloc_l2_dma;

	if (mhop_num == 2) {
		if (pbl_2hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
			goto err_kcalloc_l2_dma;
	}

	if (mhop_num == 3) {
		if (pbl_3hop_alloc(hr_dev, npages, mr, pbl_bt_sz))
			goto err_kcalloc_l2_dma;
	}

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}
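/*
 * hns_roce_mr_alloc() reserves an MTPT index, derives the key from it and
 * prepares the PBL for the region: no PBL for a whole-address-space DMA MR
 * (size == ~0ULL), a flat coherent buffer when pbl_hop_num is 0, or the
 * multi-hop tables built above otherwise.
 */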
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* PD number the MR belongs to */
	mr->access = access;			/* MR access permissions */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	if (size == ~0ULL) {
		mr->pbl_buf = NULL;
		mr->pbl_dma_addr = 0;
		/* PBL multi-hop addressing parameters */
		mr->pbl_bt_l2 = NULL;
		mr->pbl_bt_l1 = NULL;
		mr->pbl_bt_l0 = NULL;
		mr->pbl_l2_dma_addr = NULL;
		mr->pbl_l1_dma_addr = NULL;
		mr->pbl_l0_dma_addr = 0;
	} else {
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev,
							 npages * BA_BYTE_LEN,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf)
				return -ENOMEM;
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		}
	}

	return ret;
}
static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = mr->pbl_size;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * BA_BYTE_LEN),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				npages_allocated =
					i * (pbl_bt_sz / BA_BYTE_LEN);

				dma_free_coherent(dev,
				      (npages - npages_allocated) * BA_BYTE_LEN,
				      mr->pbl_bt_l1[i],
				      mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / BA_BYTE_LEN; j++) {
				bt_idx = i * (pbl_bt_sz / BA_BYTE_LEN) + j;

				if ((i == mr->l0_chunk_last_num - 1) &&
				    j == mr->l1_chunk_last_num - 1) {
					npages_allocated = bt_idx *
						(pbl_bt_sz / BA_BYTE_LEN);

					dma_free_coherent(dev,
					      (npages - npages_allocated) *
					      BA_BYTE_LEN,
					      mr->pbl_bt_l2[bt_idx],
					      mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;
	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}
static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mr->key) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		if (mr->type == MR_TYPE_MR)
			npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev,
					  (unsigned int)(npages * BA_BYTE_LEN),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	if (mr->enabled)
		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}
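/*
 * Enabling an MR follows the usual mailbox pattern: pin the MTPT entry in
 * HEM, fill a mailbox with the MPT context via write_mtpt() or
 * frmr_write_mtpt(), and then issue CREATE_MPT so the hardware picks it up.
 */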
static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	if (mr->type != MR_TYPE_FRMR)
		ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	else
		ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
	if (ret) {
		dev_err(dev, "Write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}
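/*
 * A chunk written here must stay within one base-address page of the MTT
 * table and start on a segment boundary; hns_roce_write_mtt() below splits
 * larger updates into chunks that respect these limits.
 */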
static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 bt_page_size;
	u32 i;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		table = &hr_dev->mr_table.mtt_table;
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		table = &hr_dev->mr_table.mtt_cqe_table;
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		table = &hr_dev->mr_table.mtt_srqwqe_table;
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		table = &hr_dev->mr_table.mtt_idx_table;
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		return -EINVAL;
	}

	/* All MTTs must fit in the same page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg +
				   start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}
static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_CQE:
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_SRQWQE:
		bt_page_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz + PAGE_SHIFT);
		break;
	case MTT_TYPE_IDX:
		bt_page_size = 1 << (hr_dev->caps.idx_ba_pg_sz + PAGE_SHIFT);
		break;
	default:
		dev_err(hr_dev->dev,
			"Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}
	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}

	if (hr_dev->caps.num_srqwqe_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_srqwqe_buddy,
					  ilog2(hr_dev->caps.num_srqwqe_segs));
		if (ret)
			goto err_buddy_srqwqe;
	}

	if (hr_dev->caps.num_idx_segs) {
		ret = hns_roce_buddy_init(&mr_table->mtt_idx_buddy,
					  ilog2(hr_dev->caps.num_idx_segs));
		if (ret)
			goto err_buddy_idx;
	}

	return 0;

err_buddy_idx:
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);

err_buddy_srqwqe:
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (hr_dev->caps.num_idx_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_idx_buddy);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_buddy_cleanup(&mr_table->mtt_srqwqe_buddy);
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}
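/*
 * A DMA MR covers the whole address space: it is allocated with
 * size == ~0ULL so that no PBL is built and only the MPT context is
 * written to the hardware.
 */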
struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_DMA;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct sg_dma_page_iter sg_iter;
	unsigned int order;
	int npage = 0;
	int ret = 0;
	int i;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	switch (mtt->mtt_type) {
	case MTT_TYPE_WQE:
		order = hr_dev->caps.mtt_ba_pg_sz;
		break;
	case MTT_TYPE_CQE:
		order = hr_dev->caps.cqe_ba_pg_sz;
		break;
	case MTT_TYPE_SRQWQE:
		order = hr_dev->caps.srqwqe_ba_pg_sz;
		break;
	case MTT_TYPE_IDX:
		order = hr_dev->caps.idx_ba_pg_sz;
		break;
	default:
		dev_err(dev, "Unsupported mtt type %d, write mtt failed\n",
			mtt->mtt_type);
		return -EINVAL;
	}

	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
			if (page_addr & ((1 << mtt->page_shift) - 1)) {
				dev_err(dev,
					"page_addr is not aligned to page_shift %d!\n",
					mtt->page_shift);
				ret = -EINVAL;
				goto out;
			}
			pages[i++] = page_addr;
		}
		npage++;
		if (i == bt_page_size / sizeof(u64)) {
			ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);
			if (ret)
				goto out;
			n += i;
			i = 0;
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}
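/*
 * For a user MR the umem pages are copied into whichever PBL layout was
 * chosen at allocation time: the flat buffer for hop numbers 0 and 1, or
 * the lowest-level (L1/L2) base-address tables for two and three hops.
 */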
static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0, j = 0;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		page_addr = sg_page_iter_dma_address(&sg_iter);
		if (!hr_dev->caps.pbl_hop_num) {
			/* for hip06, page addr is aligned to 4K */
			mr->pbl_buf[i++] = page_addr >> 12;
		} else if (hr_dev->caps.pbl_hop_num == 1) {
			mr->pbl_buf[i++] = page_addr;
		} else {
			if (hr_dev->caps.pbl_hop_num == 2)
				mr->pbl_bt_l1[i][j] = page_addr;
			else if (hr_dev->caps.pbl_hop_num == 3)
				mr->pbl_bt_l2[i][j] = page_addr;

			j++;
			if (j >= (pbl_bt_sz / BA_BYTE_LEN)) {
				i++;
				j = 0;
			}
		}
	}

	/* Memory barrier */
	mb();

	return 0;
}
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->device, start, length, access_flags);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				" MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		u64 pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) /
			  BA_BYTE_LEN;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				" MR len %lld err. MR page num is limited to %lld!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	mr->type = MR_TYPE_MR;

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
static int rereg_mr_trans(struct ib_mr *ibmr, int flags,
			  u64 start, u64 length,
			  u64 virt_addr, int mr_access_flags,
			  struct hns_roce_cmd_mailbox *mailbox,
			  u32 pdn, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct device *dev = hr_dev->dev;
	int npages;
	int ret;

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num)
			hns_roce_mhop_free(hr_dev, mr);
		else
			dma_free_coherent(dev, npages * 8,
					  mr->pbl_buf, mr->pbl_dma_addr);
	}
	ib_umem_release(mr->umem);

	mr->umem = ib_umem_get(ibmr->device, start, length, mr_access_flags);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		mr->umem = NULL;
		return -ENOMEM;
	}
	npages = ib_umem_page_count(mr->umem);

	if (hr_dev->caps.pbl_hop_num) {
		ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		if (ret)
			goto release_umem;
	} else {
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf) {
			ret = -ENOMEM;
			goto release_umem;
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret)
		goto release_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret) {
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * 8,
						  mr->pbl_buf,
						  mr->pbl_dma_addr);
		}

		goto release_umem;
	}

	return 0;

release_umem:
	ib_umem_release(mr->umem);
	return ret;
}
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw_destroy_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "DESTROY_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		ret = rereg_mr_trans(ibmr, flags,
				     start, length,
				     virt_addr, mr_access_flags,
				     mailbox, pdn, udata);
		if (ret)
			goto free_cmd_mbox;
	} else {
		ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
						   mr_access_flags, virt_addr,
						   length, mailbox->buf);
		if (ret)
			goto free_cmd_mbox;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "CREATE_MPT failed (%d)\n", ret);
		ib_umem_release(mr->umem);
		goto free_cmd_mbox;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}
int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		ib_umem_release(mr->umem);
		kfree(mr);
	}

	return ret;
}
struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
				u32 max_num_sg, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	u64 length;
	u32 page_size;
	int ret;

	page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
	length = max_num_sg * page_size;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
		dev_err(dev, "max_num_sg larger than %d\n",
			HNS_ROCE_FRMR_MAX_PA);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->type = MR_TYPE_FRMR;

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
				0, max_num_sg, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->ibmr.length = length;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}
static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->pbl_buf[mr->npages++] = addr;

	return 0;
}

int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		       unsigned int *sg_offset)
{
	struct hns_roce_mr *mr = to_hr_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
}
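/*
 * Memory windows reuse the MTPT bitmap and table that back MRs; only the
 * MPT context written by mw_write_mtpt() differs.
 */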
static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mw *mw)
{
	struct device *dev = hr_dev->dev;
	int ret;

	if (mw->enabled) {
		ret = hns_roce_hw_destroy_mpt(hr_dev, NULL,
					      key_to_hw_index(mw->rkey) &
					      (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "MW DESTROY_MPT failed (%d)\n", ret);

		hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
				   key_to_hw_index(mw->rkey));
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mw->rkey), BITMAP_NO_RR);
}
static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mw *mw)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
	int ret;

	/* prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
	if (ret) {
		dev_err(dev, "MW write mtpt fail!\n");
		goto err_page;
	}

	ret = hns_roce_hw_create_mpt(hr_dev, mailbox,
				     mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "MW CREATE_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mw->enabled = 1;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);

	return ret;
}
struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
				struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
	struct hns_roce_mw *mw;
	unsigned long index = 0;
	int ret;

	mw = kmalloc(sizeof(*mw), GFP_KERNEL);
	if (!mw)
		return ERR_PTR(-ENOMEM);

	/* Allocate a key for mw from bitmap */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret)
		goto err_bitmap;

	mw->rkey = hw_index_to_key(index);

	mw->ibmw.rkey = mw->rkey;
	mw->ibmw.type = type;
	mw->pdn = to_hr_pd(ib_pd)->pdn;
	mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	ret = hns_roce_mw_enable(hr_dev, mw);
	if (ret)
		goto err_mw;

	return &mw->ibmw;

err_mw:
	hns_roce_mw_free(hr_dev, mw);

err_bitmap:
	kfree(mw);

	return ERR_PTR(ret);
}
int hns_roce_dealloc_mw(struct ib_mw *ibmw)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
	struct hns_roce_mw *mw = to_hr_mw(ibmw);

	hns_roce_mw_free(hr_dev, mw);
	kfree(mw);

	return 0;
}
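/*
 * The hns_roce_mtr_* helpers below manage buffer translation through a
 * hem_list of base-address tables rather than the MTT buddies above; they
 * are used by queues that describe their buffers as regions.
 */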
void hns_roce_mtr_init(struct hns_roce_mtr *mtr, int bt_pg_shift,
		       int buf_pg_shift)
{
	hns_roce_hem_list_init(&mtr->hem_list, bt_pg_shift);
	mtr->buf_pg_shift = buf_pg_shift;
}

void hns_roce_mtr_cleanup(struct hns_roce_dev *hr_dev,
			  struct hns_roce_mtr *mtr)
{
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
}
static int hns_roce_write_mtr(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtr *mtr, dma_addr_t *bufs,
			      struct hns_roce_buf_region *r)
{
	int offset;
	int count;
	int npage;
	u64 *mtts;
	int end;
	int i;

	offset = r->offset;
	end = offset + r->count;
	npage = 0;
	while (offset < end) {
		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset, &count, NULL);
		if (!mtts)
			return -ENOBUFS;

		/* Save page addr, low 12 bits : 0 */
		for (i = 0; i < count; i++) {
			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
				mtts[i] = bufs[npage] >> PAGE_ADDR_SHIFT;
			else
				mtts[i] = bufs[npage];

			npage++;
		}
		offset += count;
	}

	return 0;
}
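/*
 * Typical use of the mtr interface (illustrative sketch only):
 *
 *	hns_roce_mtr_init(mtr, bt_pg_shift, buf_pg_shift);
 *	ret = hns_roce_mtr_attach(hr_dev, mtr, bufs, regions, region_cnt);
 *	...
 *	n = hns_roce_mtr_find(hr_dev, mtr, 0, mtt_buf, mtt_max, &base_addr);
 *	...
 *	hns_roce_mtr_cleanup(hr_dev, mtr);
 */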
int hns_roce_mtr_attach(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
			dma_addr_t **bufs, struct hns_roce_buf_region *regions,
			int region_cnt)
{
	struct hns_roce_buf_region *r;
	int ret;
	int i;

	ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list, regions,
					region_cnt, mtr->buf_pg_shift);
	if (ret)
		return ret;

	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		ret = hns_roce_write_mtr(hr_dev, mtr, bufs[i], r);
		if (ret) {
			dev_err(hr_dev->dev,
				"write mtr[%d/%d] err %d, offset=%d.\n",
				i, region_cnt, ret, r->offset);
			goto err_write;
		}
	}

	return 0;

err_write:
	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);

	return ret;
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
	u64 *mtts = mtt_buf;
	int mtt_count;
	int total = 0;
	u64 *addr;
	int npage;
	int left;

	if (mtts == NULL || mtt_max < 1)
		goto done;

	left = mtt_max;
	while (left > 0) {
		mtt_count = 0;
		addr = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
						  offset + total,
						  &mtt_count, NULL);
		if (!addr || !mtt_count)
			goto done;

		npage = min(mtt_count, left);
		memcpy(&mtts[total], addr, BA_BYTE_LEN * npage);
		left -= npage;
		total += npage;
	}

done:
	if (base_addr)
		*base_addr = mtr->hem_list.root_ba;

	return total;
}