// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018 Hisilicon Limited.
 */

#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
11 void hns_roce_srq_event(struct hns_roce_dev
*hr_dev
, u32 srqn
, int event_type
)
13 struct hns_roce_srq_table
*srq_table
= &hr_dev
->srq_table
;
14 struct hns_roce_srq
*srq
;
16 xa_lock(&srq_table
->xa
);
17 srq
= xa_load(&srq_table
->xa
, srqn
& (hr_dev
->caps
.num_srqs
- 1));
19 atomic_inc(&srq
->refcount
);
20 xa_unlock(&srq_table
->xa
);
23 dev_warn(hr_dev
->dev
, "Async event for bogus SRQ %08x\n", srqn
);
27 srq
->event(srq
, event_type
);
29 if (atomic_dec_and_test(&srq
->refcount
))
33 static void hns_roce_ib_srq_event(struct hns_roce_srq
*srq
,
34 enum hns_roce_event event_type
)
36 struct hns_roce_dev
*hr_dev
= to_hr_dev(srq
->ibsrq
.device
);
37 struct ib_srq
*ibsrq
= &srq
->ibsrq
;
38 struct ib_event event
;
40 if (ibsrq
->event_handler
) {
41 event
.device
= ibsrq
->device
;
42 event
.element
.srq
= ibsrq
;
44 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH
:
45 event
.event
= IB_EVENT_SRQ_LIMIT_REACHED
;
47 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR
:
48 event
.event
= IB_EVENT_SRQ_ERR
;
52 "hns_roce:Unexpected event type 0x%x on SRQ %06lx\n",
53 event_type
, srq
->srqn
);
57 ibsrq
->event_handler(&event
, ibsrq
->srq_context
);
61 static int hns_roce_hw_create_srq(struct hns_roce_dev
*dev
,
62 struct hns_roce_cmd_mailbox
*mailbox
,
63 unsigned long srq_num
)
65 return hns_roce_cmd_mbox(dev
, mailbox
->dma
, 0, srq_num
, 0,
66 HNS_ROCE_CMD_CREATE_SRQ
,
67 HNS_ROCE_CMD_TIMEOUT_MSECS
);
70 static int hns_roce_hw_destroy_srq(struct hns_roce_dev
*dev
,
71 struct hns_roce_cmd_mailbox
*mailbox
,
72 unsigned long srq_num
)
74 return hns_roce_cmd_mbox(dev
, 0, mailbox
? mailbox
->dma
: 0, srq_num
,
75 mailbox
? 0 : 1, HNS_ROCE_CMD_DESTROY_SRQ
,
76 HNS_ROCE_CMD_TIMEOUT_MSECS
);
79 static int alloc_srqc(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
,
80 u32 pdn
, u32 cqn
, u16 xrcd
, u64 db_rec_addr
)
82 struct hns_roce_srq_table
*srq_table
= &hr_dev
->srq_table
;
83 struct ib_device
*ibdev
= &hr_dev
->ib_dev
;
84 struct hns_roce_cmd_mailbox
*mailbox
;
85 u64 mtts_wqe
[MTT_MIN_COUNT
] = { 0 };
86 u64 mtts_idx
[MTT_MIN_COUNT
] = { 0 };
87 dma_addr_t dma_handle_wqe
= 0;
88 dma_addr_t dma_handle_idx
= 0;
91 /* Get the physical address of srq buf */
92 ret
= hns_roce_mtr_find(hr_dev
, &srq
->buf_mtr
, 0, mtts_wqe
,
93 ARRAY_SIZE(mtts_wqe
), &dma_handle_wqe
);
95 ibdev_err(ibdev
, "failed to find mtr for SRQ WQE, ret = %d.\n",
100 /* Get physical address of idx que buf */
101 ret
= hns_roce_mtr_find(hr_dev
, &srq
->idx_que
.mtr
, 0, mtts_idx
,
102 ARRAY_SIZE(mtts_idx
), &dma_handle_idx
);
104 ibdev_err(ibdev
, "failed to find mtr for SRQ idx, ret = %d.\n",
109 ret
= hns_roce_bitmap_alloc(&srq_table
->bitmap
, &srq
->srqn
);
112 "failed to alloc SRQ number, ret = %d.\n", ret
);
116 ret
= hns_roce_table_get(hr_dev
, &srq_table
->table
, srq
->srqn
);
118 ibdev_err(ibdev
, "failed to get SRQC table, ret = %d.\n", ret
);
122 ret
= xa_err(xa_store(&srq_table
->xa
, srq
->srqn
, srq
, GFP_KERNEL
));
124 ibdev_err(ibdev
, "failed to store SRQC, ret = %d.\n", ret
);
128 mailbox
= hns_roce_alloc_cmd_mailbox(hr_dev
);
129 if (IS_ERR_OR_NULL(mailbox
)) {
131 ibdev_err(ibdev
, "failed to alloc mailbox for SRQC.\n");
135 hr_dev
->hw
->write_srqc(hr_dev
, srq
, pdn
, xrcd
, cqn
, mailbox
->buf
,
136 mtts_wqe
, mtts_idx
, dma_handle_wqe
,
139 ret
= hns_roce_hw_create_srq(hr_dev
, mailbox
, srq
->srqn
);
140 hns_roce_free_cmd_mailbox(hr_dev
, mailbox
);
142 ibdev_err(ibdev
, "failed to config SRQC, ret = %d.\n", ret
);
146 atomic_set(&srq
->refcount
, 1);
147 init_completion(&srq
->free
);
151 xa_erase(&srq_table
->xa
, srq
->srqn
);
154 hns_roce_table_put(hr_dev
, &srq_table
->table
, srq
->srqn
);
157 hns_roce_bitmap_free(&srq_table
->bitmap
, srq
->srqn
, BITMAP_NO_RR
);
161 static void free_srqc(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
)
163 struct hns_roce_srq_table
*srq_table
= &hr_dev
->srq_table
;
166 ret
= hns_roce_hw_destroy_srq(hr_dev
, NULL
, srq
->srqn
);
168 dev_err(hr_dev
->dev
, "DESTROY_SRQ failed (%d) for SRQN %06lx\n",
171 xa_erase(&srq_table
->xa
, srq
->srqn
);
173 if (atomic_dec_and_test(&srq
->refcount
))
174 complete(&srq
->free
);
175 wait_for_completion(&srq
->free
);
177 hns_roce_table_put(hr_dev
, &srq_table
->table
, srq
->srqn
);
178 hns_roce_bitmap_free(&srq_table
->bitmap
, srq
->srqn
, BITMAP_NO_RR
);
181 static int alloc_srq_buf(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
,
182 struct ib_udata
*udata
, unsigned long addr
)
184 struct ib_device
*ibdev
= &hr_dev
->ib_dev
;
185 struct hns_roce_buf_attr buf_attr
= {};
188 srq
->wqe_shift
= ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE
,
192 buf_attr
.page_shift
= hr_dev
->caps
.srqwqe_buf_pg_sz
+ HNS_HW_PAGE_SHIFT
;
193 buf_attr
.region
[0].size
= to_hr_hem_entries_size(srq
->wqe_cnt
,
195 buf_attr
.region
[0].hopnum
= hr_dev
->caps
.srqwqe_hop_num
;
196 buf_attr
.region_count
= 1;
197 buf_attr
.fixed_page
= true;
199 err
= hns_roce_mtr_create(hr_dev
, &srq
->buf_mtr
, &buf_attr
,
200 hr_dev
->caps
.srqwqe_ba_pg_sz
+
201 HNS_HW_PAGE_SHIFT
, udata
, addr
);
204 "failed to alloc SRQ buf mtr, ret = %d.\n", err
);
209 static void free_srq_buf(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
)
211 hns_roce_mtr_destroy(hr_dev
, &srq
->buf_mtr
);
214 static int alloc_srq_idx(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
,
215 struct ib_udata
*udata
, unsigned long addr
)
217 struct hns_roce_idx_que
*idx_que
= &srq
->idx_que
;
218 struct ib_device
*ibdev
= &hr_dev
->ib_dev
;
219 struct hns_roce_buf_attr buf_attr
= {};
222 srq
->idx_que
.entry_shift
= ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ
);
224 buf_attr
.page_shift
= hr_dev
->caps
.idx_buf_pg_sz
+ HNS_HW_PAGE_SHIFT
;
225 buf_attr
.region
[0].size
= to_hr_hem_entries_size(srq
->wqe_cnt
,
226 srq
->idx_que
.entry_shift
);
227 buf_attr
.region
[0].hopnum
= hr_dev
->caps
.idx_hop_num
;
228 buf_attr
.region_count
= 1;
229 buf_attr
.fixed_page
= true;
231 err
= hns_roce_mtr_create(hr_dev
, &idx_que
->mtr
, &buf_attr
,
232 hr_dev
->caps
.idx_ba_pg_sz
+ HNS_HW_PAGE_SHIFT
,
236 "failed to alloc SRQ idx mtr, ret = %d.\n", err
);
241 idx_que
->bitmap
= bitmap_zalloc(srq
->wqe_cnt
, GFP_KERNEL
);
242 if (!idx_que
->bitmap
) {
243 ibdev_err(ibdev
, "failed to alloc SRQ idx bitmap.\n");
251 hns_roce_mtr_destroy(hr_dev
, &idx_que
->mtr
);
256 static void free_srq_idx(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
)
258 struct hns_roce_idx_que
*idx_que
= &srq
->idx_que
;
260 bitmap_free(idx_que
->bitmap
);
261 idx_que
->bitmap
= NULL
;
262 hns_roce_mtr_destroy(hr_dev
, &idx_que
->mtr
);
265 static int alloc_srq_wrid(struct hns_roce_dev
*hr_dev
, struct hns_roce_srq
*srq
)
268 srq
->tail
= srq
->wqe_cnt
- 1;
269 srq
->wrid
= kvmalloc_array(srq
->wqe_cnt
, sizeof(u64
), GFP_KERNEL
);
276 static void free_srq_wrid(struct hns_roce_srq
*srq
)
282 int hns_roce_create_srq(struct ib_srq
*ib_srq
,
283 struct ib_srq_init_attr
*init_attr
,
284 struct ib_udata
*udata
)
286 struct hns_roce_dev
*hr_dev
= to_hr_dev(ib_srq
->device
);
287 struct hns_roce_ib_create_srq_resp resp
= {};
288 struct hns_roce_srq
*srq
= to_hr_srq(ib_srq
);
289 struct ib_device
*ibdev
= &hr_dev
->ib_dev
;
290 struct hns_roce_ib_create_srq ucmd
= {};
294 if (init_attr
->srq_type
!= IB_SRQT_BASIC
&&
295 init_attr
->srq_type
!= IB_SRQT_XRC
)
298 /* Check the actual SRQ wqe and SRQ sge num */
299 if (init_attr
->attr
.max_wr
>= hr_dev
->caps
.max_srq_wrs
||
300 init_attr
->attr
.max_sge
> hr_dev
->caps
.max_srq_sges
)
303 mutex_init(&srq
->mutex
);
304 spin_lock_init(&srq
->lock
);
306 srq
->wqe_cnt
= roundup_pow_of_two(init_attr
->attr
.max_wr
+ 1);
307 srq
->max_gs
= init_attr
->attr
.max_sge
;
310 ret
= ib_copy_from_udata(&ucmd
, udata
,
311 min(udata
->inlen
, sizeof(ucmd
)));
313 ibdev_err(ibdev
, "failed to copy SRQ udata, ret = %d.\n",
319 ret
= alloc_srq_buf(hr_dev
, srq
, udata
, ucmd
.buf_addr
);
322 "failed to alloc SRQ buffer, ret = %d.\n", ret
);
326 ret
= alloc_srq_idx(hr_dev
, srq
, udata
, ucmd
.que_addr
);
328 ibdev_err(ibdev
, "failed to alloc SRQ idx, ret = %d.\n", ret
);
333 ret
= alloc_srq_wrid(hr_dev
, srq
);
335 ibdev_err(ibdev
, "failed to alloc SRQ wrid, ret = %d.\n",
341 cqn
= ib_srq_has_cq(init_attr
->srq_type
) ?
342 to_hr_cq(init_attr
->ext
.cq
)->cqn
: 0;
343 srq
->db_reg_l
= hr_dev
->reg_base
+ SRQ_DB_REG
;
345 ret
= alloc_srqc(hr_dev
, srq
, to_hr_pd(ib_srq
->pd
)->pdn
, cqn
, 0, 0);
348 "failed to alloc SRQ context, ret = %d.\n", ret
);
352 srq
->event
= hns_roce_ib_srq_event
;
353 resp
.srqn
= srq
->srqn
;
356 ret
= ib_copy_to_udata(udata
, &resp
,
357 min(udata
->outlen
, sizeof(resp
)));
365 free_srqc(hr_dev
, srq
);
369 free_srq_idx(hr_dev
, srq
);
371 free_srq_buf(hr_dev
, srq
);
375 int hns_roce_destroy_srq(struct ib_srq
*ibsrq
, struct ib_udata
*udata
)
377 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibsrq
->device
);
378 struct hns_roce_srq
*srq
= to_hr_srq(ibsrq
);
380 free_srqc(hr_dev
, srq
);
381 free_srq_idx(hr_dev
, srq
);
383 free_srq_buf(hr_dev
, srq
);
387 int hns_roce_init_srq_table(struct hns_roce_dev
*hr_dev
)
389 struct hns_roce_srq_table
*srq_table
= &hr_dev
->srq_table
;
391 xa_init(&srq_table
->xa
);
393 return hns_roce_bitmap_init(&srq_table
->bitmap
, hr_dev
->caps
.num_srqs
,
394 hr_dev
->caps
.num_srqs
- 1,
395 hr_dev
->caps
.reserved_srqs
, 0);
398 void hns_roce_cleanup_srq_table(struct hns_roce_dev
*hr_dev
)
400 hns_roce_bitmap_cleanup(&hr_dev
->srq_table
.bitmap
);