// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include <rdma/hns-abi.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

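/*
 * Dispatch an asynchronous hardware event to the SRQ it belongs to.  The
 * SRQ is looked up in the srq_table xarray under xa_lock and pinned with a
 * reference count so it cannot be freed while its event callback runs; the
 * final reference drop completes srq->free, which hns_roce_srq_free()
 * waits on before tearing the SRQ down.
 */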
void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_srq *srq;

	xa_lock(&srq_table->xa);
	srq = xa_load(&srq_table->xa, srqn & (hr_dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);
	xa_unlock(&srq_table->xa);

	if (!srq) {
		dev_warn(hr_dev->dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
				  enum hns_roce_event event_type)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct ib_srq *ibsrq = &srq->ibsrq;
	struct ib_event event;

	if (ibsrq->event_handler) {
		event.device = ibsrq->device;
		event.element.srq = ibsrq;
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
			event.event = IB_EVENT_SRQ_LIMIT_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
			event.event = IB_EVENT_SRQ_ERR;
			break;
		default:
			dev_err(hr_dev->dev,
				"hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
				event_type, srq->srqn);
			return;
		}

		ibsrq->event_handler(&event, ibsrq->srq_context);
	}
}

static int hns_roce_hw_create_srq(struct hns_roce_dev *dev,
				  struct hns_roce_cmd_mailbox *mailbox,
				  unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, mailbox->dma, 0, srq_num, 0,
				 HNS_ROCE_CMD_CREATE_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_hw_destroy_srq(struct hns_roce_dev *dev,
				   struct hns_roce_cmd_mailbox *mailbox,
				   unsigned long srq_num)
{
	return hns_roce_cmd_mbox(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
				 mailbox ? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

static int hns_roce_srq_alloc(struct hns_roce_dev *hr_dev, u32 pdn, u32 cqn,
			      u16 xrcd, struct hns_roce_mtt *hr_mtt,
			      u64 db_rec_addr, struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	struct hns_roce_cmd_mailbox *mailbox;
	dma_addr_t dma_handle_wqe;
	dma_addr_t dma_handle_idx;
	u64 *mtts_wqe;
	u64 *mtts_idx;
	int ret;

	/* Get the physical address of srq buf */
	mtts_wqe = hns_roce_table_find(hr_dev,
				       &hr_dev->mr_table.mtt_srqwqe_table,
				       srq->mtt.first_seg,
				       &dma_handle_wqe);
	if (!mtts_wqe) {
		dev_err(hr_dev->dev, "Failed to find mtt for srq buf.\n");
		return -EINVAL;
	}

	/* Get physical address of idx que buf */
	mtts_idx = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_idx_table,
				       srq->idx_que.mtt.first_seg,
				       &dma_handle_idx);
	if (!mtts_idx) {
		dev_err(hr_dev->dev,
			"Failed to find mtt for srq idx queue buf.\n");
		return -EINVAL;
	}

	ret = hns_roce_bitmap_alloc(&srq_table->bitmap, &srq->srqn);
	if (ret) {
		dev_err(hr_dev->dev,
			"Failed to alloc a bit from srq bitmap.\n");
		return -ENOMEM;
	}

	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
	if (ret)
		goto err_out;

	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
	if (ret)
		goto err_put;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_xa;
	}

	hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,
			       mtts_wqe, mtts_idx, dma_handle_wqe,
			       dma_handle_idx);

	ret = hns_roce_hw_create_srq(hr_dev, mailbox, srq->srqn);
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	if (ret)
		goto err_xa;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return ret;

err_xa:
	xa_erase(&srq_table->xa, srq->srqn);

err_put:
	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);

err_out:
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
	return ret;
}

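/*
 * Destroy the hardware SRQ context and release the software state.  The
 * hardware context is destroyed, the xarray entry is removed so the event
 * path can no longer find the SRQ, and the initial reference taken in
 * hns_roce_srq_alloc() is dropped; we then wait for any in-flight event
 * handlers before freeing the HEM entry and returning the SRQ number to
 * the bitmap.
 */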
static void hns_roce_srq_free(struct hns_roce_dev *hr_dev,
			      struct hns_roce_srq *srq)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
	int ret;

	ret = hns_roce_hw_destroy_srq(hr_dev, NULL, srq->srqn);
	if (ret)
		dev_err(hr_dev->dev, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
			ret, srq->srqn);

	xa_erase(&srq_table->xa, srq->srqn);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
	hns_roce_bitmap_free(&srq_table->bitmap, srq->srqn, BITMAP_NO_RR);
}

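/*
 * Set up a userspace-backed SRQ: pin the WQE buffer and the index queue
 * buffer whose addresses userspace passed in struct hns_roce_ib_create_srq,
 * and write their page addresses into the SRQWQE and IDX MTT tables.
 */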
static int create_user_srq(struct hns_roce_srq *srq, struct ib_udata *udata,
			   int srq_buf_size)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	struct hns_roce_ib_create_srq ucmd;
	struct hns_roce_buf *buf;
	int ret;

	if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
		return -EFAULT;

	srq->umem =
		ib_umem_get(srq->ibsrq.device, ucmd.buf_addr, srq_buf_size, 0);
	if (IS_ERR(srq->umem))
		return PTR_ERR(srq->umem);

	buf = &srq->buf;
	buf->npages = (ib_umem_page_count(srq->umem) +
		       (1 << hr_dev->caps.srqwqe_buf_pg_sz) - 1) /
		      (1 << hr_dev->caps.srqwqe_buf_pg_sz);
	buf->page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
	ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
				&srq->mtt);
	if (ret)
		goto err_user_buf;

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->mtt, srq->umem);
	if (ret)
		goto err_user_srq_mtt;

	/* config index queue BA */
	srq->idx_que.umem = ib_umem_get(srq->ibsrq.device, ucmd.que_addr,
					srq->idx_que.buf_size, 0);
	if (IS_ERR(srq->idx_que.umem)) {
		dev_err(hr_dev->dev, "ib_umem_get error for index queue\n");
		ret = PTR_ERR(srq->idx_que.umem);
		goto err_user_srq_mtt;
	}

	buf = &srq->idx_que.idx_buf;
	buf->npages = DIV_ROUND_UP(ib_umem_page_count(srq->idx_que.umem),
				   1 << hr_dev->caps.idx_buf_pg_sz);
	buf->page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
	ret = hns_roce_mtt_init(hr_dev, buf->npages, buf->page_shift,
				&srq->idx_que.mtt);
	if (ret) {
		dev_err(hr_dev->dev, "hns_roce_mtt_init error for idx que\n");
		goto err_user_idx_mtt;
	}

	ret = hns_roce_ib_umem_write_mtt(hr_dev, &srq->idx_que.mtt,
					 srq->idx_que.umem);
	if (ret) {
		dev_err(hr_dev->dev,
			"hns_roce_ib_umem_write_mtt error for idx que\n");
		goto err_user_idx_buf;
	}

	return 0;

err_user_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_user_idx_mtt:
	ib_umem_release(srq->idx_que.umem);

err_user_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_user_buf:
	ib_umem_release(srq->umem);

	return ret;
}

static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
				   u32 page_shift)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct hns_roce_idx_que *idx_que = &srq->idx_que;

	idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
	if (!idx_que->bitmap)
		return -ENOMEM;

	idx_que->buf_size = srq->idx_que.buf_size;

	if (hns_roce_buf_alloc(hr_dev, idx_que->buf_size, (1 << page_shift) * 2,
			       &idx_que->idx_buf, page_shift)) {
		bitmap_free(idx_que->bitmap);
		return -ENOMEM;
	}

	return 0;
}

static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(srq->ibsrq.device);
	u32 page_shift = PAGE_SHIFT + hr_dev->caps.srqwqe_buf_pg_sz;
	int ret;

	if (hns_roce_buf_alloc(hr_dev, srq_buf_size, (1 << page_shift) * 2,
			       &srq->buf, page_shift))
		return -ENOMEM;

	srq->head = 0;
	srq->tail = srq->wqe_cnt - 1;

	ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
				&srq->mtt);
	if (ret)
		goto err_kernel_buf;

	ret = hns_roce_buf_write_mtt(hr_dev, &srq->mtt, &srq->buf);
	if (ret)
		goto err_kernel_srq_mtt;

	page_shift = PAGE_SHIFT + hr_dev->caps.idx_buf_pg_sz;
	ret = hns_roce_create_idx_que(srq->ibsrq.pd, srq, page_shift);
	if (ret) {
		dev_err(hr_dev->dev, "Create idx queue fail(%d)!\n", ret);
		goto err_kernel_srq_mtt;
	}

	/* Init mtt table for idx_que */
	ret = hns_roce_mtt_init(hr_dev, srq->idx_que.idx_buf.npages,
				srq->idx_que.idx_buf.page_shift,
				&srq->idx_que.mtt);
	if (ret)
		goto err_kernel_create_idx;

	/* Write buffer address into the mtt table */
	ret = hns_roce_buf_write_mtt(hr_dev, &srq->idx_que.mtt,
				     &srq->idx_que.idx_buf);
	if (ret)
		goto err_kernel_idx_buf;

	srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (!srq->wrid) {
		ret = -ENOMEM;
		goto err_kernel_idx_buf;
	}

	return 0;

err_kernel_idx_buf:
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);

err_kernel_create_idx:
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size,
			  &srq->idx_que.idx_buf);
	kfree(srq->idx_que.bitmap);

err_kernel_srq_mtt:
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

err_kernel_buf:
	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);

	return ret;
}

static void destroy_user_srq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_srq *srq)
{
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	ib_umem_release(srq->idx_que.umem);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
	ib_umem_release(srq->umem);
}

static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
			       struct hns_roce_srq *srq, int srq_buf_size)
{
	kvfree(srq->wrid);
	hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	hns_roce_buf_free(hr_dev, srq->idx_que.buf_size, &srq->idx_que.idx_buf);
	kfree(srq->idx_que.bitmap);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);
	hns_roce_buf_free(hr_dev, srq_buf_size, &srq->buf);
}

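/*
 * Verbs entry point for SRQ creation.  The queue geometry is derived from
 * the requested attributes before any buffers are set up: wqe_cnt is
 * max_wr + 1 rounded up to a power of two, the WQE size is
 * max(16, 16 * max_sge) rounded up to a power of two, and the WQE buffer
 * is wqe_cnt * srq_desc_size bytes.  For example, max_wr = 15 and
 * max_sge = 2 give wqe_cnt = 16, a 32 byte descriptor and a 512 byte WQE
 * buffer; the index queue holds one HNS_ROCE_IDX_QUE_ENTRY_SZ sized entry
 * per WQE.
 */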
int hns_roce_create_srq(struct ib_srq *ib_srq,
			struct ib_srq_init_attr *init_attr,
			struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
	struct hns_roce_ib_create_srq_resp resp = {};
	struct hns_roce_srq *srq = to_hr_srq(ib_srq);
	int srq_desc_size;
	int srq_buf_size;
	int ret = 0;
	u32 cqn;

	/* Check the actual SRQ wqe and SRQ sge num */
	if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
	    init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
		return -EINVAL;

	mutex_init(&srq->mutex);
	spin_lock_init(&srq->lock);

	srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
	srq->max_gs = init_attr->attr.max_sge;

	srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));

	srq->wqe_shift = ilog2(srq_desc_size);

	srq_buf_size = srq->wqe_cnt * srq_desc_size;

	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
	srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
	srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
	srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;

	if (udata) {
		ret = create_user_srq(srq, udata, srq_buf_size);
		if (ret) {
			dev_err(hr_dev->dev, "Create user srq failed\n");
			goto err_srq;
		}
	} else {
		ret = create_kernel_srq(srq, srq_buf_size);
		if (ret) {
			dev_err(hr_dev->dev, "Create kernel srq failed\n");
			goto err_srq;
		}
	}

	cqn = ib_srq_has_cq(init_attr->srq_type) ?
	      to_hr_cq(init_attr->ext.cq)->cqn : 0;

	srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;

	ret = hns_roce_srq_alloc(hr_dev, to_hr_pd(ib_srq->pd)->pdn, cqn, 0,
				 &srq->mtt, 0, srq);
	if (ret)
		goto err_wrid;

	srq->event = hns_roce_ib_srq_event;
	resp.srqn = srq->srqn;

	if (udata) {
		if (ib_copy_to_udata(udata, &resp,
				     min(udata->outlen, sizeof(resp)))) {
			ret = -EFAULT;
			goto err_srqc_alloc;
		}
	}

	return 0;

err_srqc_alloc:
	hns_roce_srq_free(hr_dev, srq);

err_wrid:
	if (udata)
		destroy_user_srq(hr_dev, srq);
	else
		destroy_kernel_srq(hr_dev, srq, srq_buf_size);

err_srq:
	return ret;
}

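/*
 * Verbs entry point for SRQ destruction.  The hardware context is torn
 * down first (including waiting for outstanding event handlers), then the
 * MTTs, kernel buffers and user memory registrations are released;
 * ib_umem_release() is a no-op for the NULL umem pointers of a kernel SRQ,
 * so both paths share the tail of the function.
 */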
void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);

	hns_roce_srq_free(hr_dev, srq);
	hns_roce_mtt_cleanup(hr_dev, &srq->mtt);

	if (udata) {
		hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
	} else {
		kvfree(srq->wrid);
		hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
				  &srq->buf);
	}

	ib_umem_release(srq->idx_que.umem);
	ib_umem_release(srq->umem);
}

int hns_roce_init_srq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;

	xa_init(&srq_table->xa);

	return hns_roce_bitmap_init(&srq_table->bitmap, hr_dev->caps.num_srqs,
				    hr_dev->caps.num_srqs - 1,
				    hr_dev->caps.reserved_srqs, 0);
}

void hns_roce_cleanup_srq_table(struct hns_roce_dev *hr_dev)
{
	hns_roce_bitmap_cleanup(&hr_dev->srq_table.bitmap);
}