/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include <rdma/hns-abi.h>

#define SQP_NUM				(2 * HNS_ROCE_MAX_PORTS)
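
/*
 * Dispatch an asynchronous hardware event to the QP it refers to. The QP is
 * looked up under the xarray lock and reference-counted so that it cannot be
 * freed while its event callback runs.
 */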
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
        struct device *dev = hr_dev->dev;
        struct hns_roce_qp *qp;

        xa_lock(&hr_dev->qp_table_xa);
        qp = __hns_roce_qp_lookup(hr_dev, qpn);
        if (qp)
                atomic_inc(&qp->refcount);
        xa_unlock(&hr_dev->qp_table_xa);

        if (!qp) {
                dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
                return;
        }

        qp->event(qp, (enum hns_roce_event)event_type);

        if (atomic_dec_and_test(&qp->refcount))
                complete(&qp->free);
}
static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
                                 enum hns_roce_event type)
{
        struct ib_event event;
        struct ib_qp *ibqp = &hr_qp->ibqp;

        if (ibqp->event_handler) {
                event.device = ibqp->device;
                event.element.qp = ibqp;
                switch (type) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        event.event = IB_EVENT_PATH_MIG;
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        event.event = IB_EVENT_COMM_EST;
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        event.event = IB_EVENT_SQ_DRAINED;
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        event.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        event.event = IB_EVENT_QP_FATAL;
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        event.event = IB_EVENT_PATH_MIG_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        event.event = IB_EVENT_QP_REQ_ERR;
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        event.event = IB_EVENT_QP_ACCESS_ERR;
                        break;
                default:
                        dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
                                type, hr_qp->qpn);
                        return;
                }
                ibqp->event_handler(&event, ibqp->qp_context);
        }
}
static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
                                     int align, unsigned long *base)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
                                           base) ? -ENOMEM : 0;
}
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
        switch (state) {
        case IB_QPS_RESET:
                return HNS_ROCE_QP_STATE_RST;
        case IB_QPS_INIT:
                return HNS_ROCE_QP_STATE_INIT;
        case IB_QPS_RTR:
                return HNS_ROCE_QP_STATE_RTR;
        case IB_QPS_RTS:
                return HNS_ROCE_QP_STATE_RTS;
        case IB_QPS_SQD:
                return HNS_ROCE_QP_STATE_SQD;
        case IB_QPS_ERR:
                return HNS_ROCE_QP_STATE_ERR;
        default:
                return HNS_ROCE_QP_NUM_STATE;
        }
}
static int hns_roce_gsi_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                                 struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;
        atomic_set(&hr_qp->refcount, 1);
        init_completion(&hr_qp->free);

        ret = xa_err(xa_store_irq(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1),
                                  hr_qp, GFP_KERNEL));
        if (ret)
                dev_err(hr_dev->dev, "QPC xa_store failed\n");

        return ret;
}
static int hns_roce_qp_alloc(struct hns_roce_dev *hr_dev, unsigned long qpn,
                             struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        struct device *dev = hr_dev->dev;
        int ret;

        if (!qpn)
                return -EINVAL;

        hr_qp->qpn = qpn;

        /* Alloc memory for QPC */
        ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "QPC table get failed\n");
                goto err_out;
        }

        /* Alloc memory for IRRL */
        ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        if (ret) {
                dev_err(dev, "IRRL table get failed\n");
                goto err_put_qp;
        }

        if (hr_dev->caps.trrl_entry_sz) {
                /* Alloc memory for TRRL */
                ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "TRRL table get failed\n");
                        goto err_put_irrl;
                }
        }

        if (hr_dev->caps.sccc_entry_sz) {
                /* Alloc memory for SCC CTX */
                ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
                                         hr_qp->qpn);
                if (ret) {
                        dev_err(dev, "SCC CTX table get failed\n");
                        goto err_put_trrl;
                }
        }

        ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
        if (ret)
                goto err_put_sccc;

        return 0;

err_put_sccc:
        if (hr_dev->caps.sccc_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->sccc_table,
                                   hr_qp->qpn);

err_put_trrl:
        if (hr_dev->caps.trrl_entry_sz)
                hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
        hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
        hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
        return ret;
}
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct xarray *xa = &hr_dev->qp_table_xa;
        unsigned long flags;

        xa_lock_irqsave(xa, flags);
        __xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
        xa_unlock_irqrestore(xa, flags);
}
void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (atomic_dec_and_test(&hr_qp->refcount))
                complete(&hr_qp->free);
        wait_for_completion(&hr_qp->free);

        if ((hr_qp->ibqp.qp_type) != IB_QPT_GSI) {
                if (hr_dev->caps.trrl_entry_sz)
                        hns_roce_table_put(hr_dev, &qp_table->trrl_table,
                                           hr_qp->qpn);
                hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
        }
}
void hns_roce_release_range_qp(struct hns_roce_dev *hr_dev, int base_qpn,
                               int cnt)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

        if (base_qpn < hr_dev->caps.reserved_qps)
                return;

        hns_roce_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt, BITMAP_RR);
}
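
/*
 * Validate the requested receive queue size against the device limits and
 * round it up to the WQE and SGE counts the hardware actually uses. When the
 * QP is associated with an SRQ, the RQ is sized to zero.
 */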
static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
                                struct ib_qp_cap *cap, bool is_user, int has_rq,
                                struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 max_cnt;

        /* Check the validity of QP support capacity */
        if (cap->max_recv_wr > hr_dev->caps.max_wqes ||
            cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
                dev_err(dev, "RQ WR or sge error!max_recv_wr=%d max_recv_sge=%d\n",
                        cap->max_recv_wr, cap->max_recv_sge);
                return -EINVAL;
        }

        /* If srq exist, set zero for relative number of rq */
        if (!has_rq) {
                hr_qp->rq.wqe_cnt = 0;
                hr_qp->rq.max_gs = 0;
                cap->max_recv_wr = 0;
                cap->max_recv_sge = 0;
        } else {
                if (is_user && (!cap->max_recv_wr || !cap->max_recv_sge)) {
                        dev_err(dev, "user space no need config max_recv_wr max_recv_sge\n");
                        return -EINVAL;
                }

                if (hr_dev->caps.min_wqes)
                        max_cnt = max(cap->max_recv_wr, hr_dev->caps.min_wqes);
                else
                        max_cnt = cap->max_recv_wr;

                hr_qp->rq.wqe_cnt = roundup_pow_of_two(max_cnt);

                if ((u32)hr_qp->rq.wqe_cnt > hr_dev->caps.max_wqes) {
                        dev_err(dev, "while setting rq size, rq.wqe_cnt too large\n");
                        return -EINVAL;
                }

                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
                if (hr_dev->caps.max_rq_sg <= 2)
                        hr_qp->rq.wqe_shift =
                                ilog2(hr_dev->caps.max_rq_desc_sz);
                else
                        hr_qp->rq.wqe_shift =
                                ilog2(hr_dev->caps.max_rq_desc_sz *
                                      hr_qp->rq.max_gs);
        }

        cap->max_recv_wr = hr_qp->rq.wqe_cnt;
        cap->max_recv_sge = hr_qp->rq.max_gs;

        return 0;
}
static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
                                        struct ib_qp_cap *cap,
                                        struct hns_roce_ib_create_qp *ucmd)
{
        u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
        u8 max_sq_stride = ilog2(roundup_sq_stride);

        /* Sanity check SQ size before proceeding */
        if (ucmd->log_sq_stride > max_sq_stride ||
            ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
                ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
                return -EINVAL;
        }

        if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
                ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
                          cap->max_send_sge);
                return -EINVAL;
        }

        return 0;
}
static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
                                     struct ib_qp_cap *cap,
                                     struct hns_roce_qp *hr_qp,
                                     struct hns_roce_ib_create_qp *ucmd)
{
        u32 ex_sge_num;
        u32 page_size;
        u32 max_cnt;
        int ret;

        if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
            hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
                return -EINVAL;

        ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
        if (ret) {
                ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
                return ret;
        }

        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;

        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        if (hr_qp->sq.max_gs > 2)
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        (hr_qp->sq.max_gs - 2));

        if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "The extended sge cnt error! sge_cnt=%d\n",
                                  hr_qp->sge.sge_cnt);
                        return -EINVAL;
                }
        }

        hr_qp->sge.sge_shift = 4;
        ex_sge_num = hr_qp->sge.sge_cnt;

        /* Get buf size, SQ and RQ are aligned to page size */
        if (hr_dev->caps.max_sq_sg <= 2) {
                hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                                   round_up((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);

                hr_qp->sq.offset = 0;
                hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), PAGE_SIZE);
        } else {
                page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
                hr_qp->sge.sge_cnt = ex_sge_num ?
                   max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num) : 0;
                hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), page_size) +
                                   round_up((hr_qp->sge.sge_cnt <<
                                             hr_qp->sge.sge_shift), page_size) +
                                   round_up((hr_qp->sq.wqe_cnt <<
                                             hr_qp->sq.wqe_shift), page_size);

                hr_qp->sq.offset = 0;
                if (ex_sge_num) {
                        hr_qp->sge.offset = round_up((hr_qp->sq.wqe_cnt <<
                                                      hr_qp->sq.wqe_shift),
                                                     page_size);
                        hr_qp->rq.offset = hr_qp->sge.offset +
                                           round_up((hr_qp->sge.sge_cnt <<
                                                     hr_qp->sge.sge_shift),
                                                    page_size);
                } else {
                        hr_qp->rq.offset = round_up((hr_qp->sq.wqe_cnt <<
                                                     hr_qp->sq.wqe_shift),
                                                    page_size);
                }
        }

        return 0;
}
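
/*
 * Describe the WQE buffer as up to three regions (SQ, extended SGE, RQ),
 * each with its own hop number, so that the MTR can map them separately.
 */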
static int split_wqe_buf_region(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *hr_qp,
                                struct hns_roce_buf_region *regions,
                                int region_max, int page_shift)
{
        int page_size = 1 << page_shift;
        bool is_extend_sge;
        int region_cnt = 0;
        int buf_size;
        int buf_cnt;

        if (hr_qp->buff_size < 1 || region_max < 1)
                return region_cnt;

        if (hr_qp->sge.sge_cnt > 0)
                is_extend_sge = true;
        else
                is_extend_sge = false;

        /* sq region */
        if (is_extend_sge)
                buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
        else
                buf_size = hr_qp->rq.offset - hr_qp->sq.offset;

        if (buf_size > 0 && region_cnt < region_max) {
                buf_cnt = DIV_ROUND_UP(buf_size, page_size);
                hns_roce_init_buf_region(&regions[region_cnt],
                                         hr_dev->caps.wqe_sq_hop_num,
                                         hr_qp->sq.offset / page_size,
                                         buf_cnt);
                region_cnt++;
        }

        /* sge region */
        if (is_extend_sge) {
                buf_size = hr_qp->rq.offset - hr_qp->sge.offset;
                if (buf_size > 0 && region_cnt < region_max) {
                        buf_cnt = DIV_ROUND_UP(buf_size, page_size);
                        hns_roce_init_buf_region(&regions[region_cnt],
                                                 hr_dev->caps.wqe_sge_hop_num,
                                                 hr_qp->sge.offset / page_size,
                                                 buf_cnt);
                        region_cnt++;
                }
        }

        /* rq region */
        buf_size = hr_qp->buff_size - hr_qp->rq.offset;
        if (buf_size > 0) {
                buf_cnt = DIV_ROUND_UP(buf_size, page_size);
                hns_roce_init_buf_region(&regions[region_cnt],
                                         hr_dev->caps.wqe_rq_hop_num,
                                         hr_qp->rq.offset / page_size,
                                         buf_cnt);
                region_cnt++;
        }

        return region_cnt;
}
static int calc_wqe_bt_page_shift(struct hns_roce_dev *hr_dev,
                                  struct hns_roce_buf_region *regions,
                                  int region_cnt)
{
        int bt_pg_shift;
        int ba_num;
        int ret;

        bt_pg_shift = PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz;

        /* all root ba entries must in one bt page */
        do {
                ba_num = (1 << bt_pg_shift) / BA_BYTE_LEN;
                ret = hns_roce_hem_list_calc_root_ba(regions, region_cnt,
                                                     ba_num);
                if (ret <= ba_num)
                        break;

                bt_pg_shift++;
        } while (ret > ba_num);

        return bt_pg_shift - PAGE_SHIFT;
}
static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
                                struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;

        if (hr_qp->sq.max_gs > 2) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        (hr_qp->sq.max_gs - 2));
                hr_qp->sge.sge_shift = 4;
        }

        /* ud sqwqe's sge use extend sge */
        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        hr_qp->sq.max_gs);
                hr_qp->sge.sge_shift = 4;
        }

        if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
                                hr_qp->sge.sge_cnt);
                        return -EINVAL;
                }
        }

        return 0;
}
static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
                                       struct ib_qp_cap *cap,
                                       struct hns_roce_qp *hr_qp)
{
        struct device *dev = hr_dev->dev;
        u32 page_size;
        u32 max_cnt;
        int size;
        int ret;

        if (cap->max_send_wr > hr_dev->caps.max_wqes ||
            cap->max_send_sge > hr_dev->caps.max_sq_sg ||
            cap->max_inline_data > hr_dev->caps.max_sq_inline) {
                dev_err(dev, "SQ WR or sge or inline data error!\n");
                return -EINVAL;
        }

        hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);

        if (hr_dev->caps.min_wqes)
                max_cnt = max(cap->max_send_wr, hr_dev->caps.min_wqes);
        else
                max_cnt = cap->max_send_wr;

        hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
        if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
                dev_err(dev, "while setting kernel sq size, sq.wqe_cnt too large\n");
                return -EINVAL;
        }

        /* Get data_seg numbers */
        max_cnt = max(1U, cap->max_send_sge);
        if (hr_dev->caps.max_sq_sg <= 2)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;

        ret = set_extend_sge_param(hr_dev, hr_qp);
        if (ret) {
                dev_err(dev, "set extend sge parameters fail\n");
                return ret;
        }

        /* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
        page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
        hr_qp->sq.offset = 0;
        size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);

        if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
                hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
                                         (u32)hr_qp->sge.sge_cnt);
                hr_qp->sge.offset = size;
                size += round_up(hr_qp->sge.sge_cnt << hr_qp->sge.sge_shift,
                                 page_size);
        }

        hr_qp->rq.offset = size;
        size += round_up((hr_qp->rq.wqe_cnt << hr_qp->rq.wqe_shift), page_size);
        hr_qp->buff_size = size;

        /* Get wr and sge number which send */
        cap->max_send_wr = hr_qp->sq.wqe_cnt;
        cap->max_send_sge = hr_qp->sq.max_gs;

        /* We don't support inline sends for kernel QPs (yet) */
        cap->max_inline_data = 0;

        return 0;
}
static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
                return 0;

        return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
        if (attr->qp_type == IB_QPT_XRC_INI ||
            attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
            !attr->cap.max_recv_wr)
                return 0;

        return 1;
}
static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
                               struct ib_qp_init_attr *init_attr)
{
        u32 max_recv_sge = init_attr->cap.max_recv_sge;
        struct hns_roce_rinl_wqe *wqe_list;
        u32 wqe_cnt = hr_qp->rq.wqe_cnt;
        int i;

        /* allocate recv inline buf */
        wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
                           GFP_KERNEL);
        if (!wqe_list)
                goto err;

        /* Allocate a continuous buffer for all inline sge we need */
        wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
                                      sizeof(struct hns_roce_rinl_sge)),
                                      GFP_KERNEL);
        if (!wqe_list[0].sg_list)
                goto err_wqe_list;

        /* Assign buffers of sg_list to each inline wqe */
        for (i = 1; i < wqe_cnt; i++)
                wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

        hr_qp->rq_inl_buf.wqe_list = wqe_list;
        hr_qp->rq_inl_buf.wqe_cnt = wqe_cnt;

        return 0;

err_wqe_list:
        kfree(wqe_list);

err:
        return -ENOMEM;
}
static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
        kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
        kfree(hr_qp->rq_inl_buf.wqe_list);
}
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
                           struct hns_roce_qp *hr_qp,
                           struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
        struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
        unsigned long flags;

        hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
        hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

        spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
        hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

        list_add_tail(&hr_qp->node, &hr_dev->qp_list);
        if (hr_send_cq)
                list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
        if (hr_recv_cq)
                list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

        hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
        spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}
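
/*
 * Common QP creation path for kernel and userspace QPs: size the work
 * queues, build and map the WQE buffer, set up record doorbells where
 * supported, allocate the QPN and context resources, then publish the QP
 * on the device and CQ lists.
 */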
static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
                                     struct ib_pd *ib_pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata, unsigned long sqpn,
                                     struct hns_roce_qp *hr_qp)
{
        dma_addr_t *buf_list[ARRAY_SIZE(hr_qp->regions)] = { NULL };
        struct device *dev = hr_dev->dev;
        struct hns_roce_ib_create_qp ucmd;
        struct hns_roce_ib_create_qp_resp resp = {};
        struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct hns_roce_ucontext, ibucontext);
        struct hns_roce_buf_region *r;
        unsigned long qpn = 0;
        u32 page_shift;
        int buf_count;
        int ret;
        int i;

        mutex_init(&hr_qp->mutex);
        spin_lock_init(&hr_qp->sq.lock);
        spin_lock_init(&hr_qp->rq.lock);

        hr_qp->state = IB_QPS_RESET;

        hr_qp->ibqp.qp_type = init_attr->qp_type;

        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
                hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
        else
                hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

        ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, udata,
                                   hns_roce_qp_has_rq(init_attr), hr_qp);
        if (ret) {
                dev_err(dev, "hns_roce_set_rq_size failed\n");
                goto err_out;
        }

        if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
            hns_roce_qp_has_rq(init_attr)) {
                ret = alloc_rq_inline_buf(hr_qp, init_attr);
                if (ret) {
                        dev_err(dev, "allocate receive inline buffer failed\n");
                        goto err_out;
                }
        }

        page_shift = PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
        if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
                        dev_err(dev, "ib_copy_from_udata error for create qp\n");
                        ret = -EFAULT;
                        goto err_alloc_rq_inline_buf;
                }

                ret = hns_roce_set_user_sq_size(hr_dev, &init_attr->cap, hr_qp,
                                                &ucmd);
                if (ret) {
                        dev_err(dev, "hns_roce_set_user_sq_size error for create qp\n");
                        goto err_alloc_rq_inline_buf;
                }

                hr_qp->umem = ib_umem_get(ib_pd->device, ucmd.buf_addr,
                                          hr_qp->buff_size, 0);
                if (IS_ERR(hr_qp->umem)) {
                        dev_err(dev, "ib_umem_get error for create qp\n");
                        ret = PTR_ERR(hr_qp->umem);
                        goto err_alloc_rq_inline_buf;
                }
                hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
                                hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
                                page_shift);
                ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
                                              hr_qp->region_cnt);
                if (ret) {
                        dev_err(dev, "alloc buf_list error for create qp\n");
                        goto err_alloc_list;
                }

                for (i = 0; i < hr_qp->region_cnt; i++) {
                        r = &hr_qp->regions[i];
                        buf_count = hns_roce_get_umem_bufs(hr_dev,
                                        buf_list[i], r->count, r->offset,
                                        hr_qp->umem, page_shift);
                        if (buf_count != r->count) {
                                dev_err(dev,
                                        "get umem buf err, expect %d,ret %d.\n",
                                        r->count, buf_count);
                                ret = -ENOBUFS;
                                goto err_get_bufs;
                        }
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                    (udata->inlen >= sizeof(ucmd)) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_sq(init_attr)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd.sdb_addr,
                                                   &hr_qp->sdb);
                        if (ret) {
                                dev_err(dev, "sq record doorbell map failed!\n");
                                goto err_get_bufs;
                        }

                        /* indicate kernel supports sq record db */
                        resp.cap_flags |= HNS_ROCE_SUPPORT_SQ_RECORD_DB;
                        hr_qp->sdb_en = 1;
                }

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_db_map_user(uctx, udata, ucmd.db_addr,
                                                   &hr_qp->rdb);
                        if (ret) {
                                dev_err(dev, "rq record doorbell map failed!\n");
                                goto err_sq_dbmap;
                        }

                        /* indicate kernel supports rq record db */
                        resp.cap_flags |= HNS_ROCE_SUPPORT_RQ_RECORD_DB;
                        hr_qp->rdb_en = 1;
                }
        } else {
                if (init_attr->create_flags &
                    IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_alloc_rq_inline_buf;
                }

                if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO) {
                        dev_err(dev, "init_attr->create_flags error!\n");
                        ret = -EINVAL;
                        goto err_alloc_rq_inline_buf;
                }

                /* Set SQ size */
                ret = hns_roce_set_kernel_sq_size(hr_dev, &init_attr->cap,
                                                  hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_set_kernel_sq_size error!\n");
                        goto err_alloc_rq_inline_buf;
                }

                /* QP doorbell register address */
                hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;
                hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
                                     DB_REG_OFFSET * hr_dev->priv_uar.index;

                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    hns_roce_qp_has_rq(init_attr)) {
                        ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
                        if (ret) {
                                dev_err(dev, "rq record doorbell alloc failed!\n");
                                goto err_alloc_rq_inline_buf;
                        }
                        *hr_qp->rdb.db_record = 0;
                        hr_qp->rdb_en = 1;
                }

                /* Allocate QP buf */
                if (hns_roce_buf_alloc(hr_dev, hr_qp->buff_size,
                                       (1 << page_shift) * 2,
                                       &hr_qp->hr_buf, page_shift)) {
                        dev_err(dev, "hns_roce_buf_alloc error!\n");
                        ret = -ENOMEM;
                        goto err_db;
                }
                hr_qp->region_cnt = split_wqe_buf_region(hr_dev, hr_qp,
                                hr_qp->regions, ARRAY_SIZE(hr_qp->regions),
                                page_shift);
                ret = hns_roce_alloc_buf_list(hr_qp->regions, buf_list,
                                              hr_qp->region_cnt);
                if (ret) {
                        dev_err(dev, "alloc buf_list error for create qp!\n");
                        goto err_alloc_list;
                }

                for (i = 0; i < hr_qp->region_cnt; i++) {
                        r = &hr_qp->regions[i];
                        buf_count = hns_roce_get_kmem_bufs(hr_dev,
                                        buf_list[i], r->count, r->offset,
                                        &hr_qp->hr_buf);
                        if (buf_count != r->count) {
                                dev_err(dev,
                                        "get kmem buf err, expect %d,ret %d.\n",
                                        r->count, buf_count);
                                ret = -ENOBUFS;
                                goto err_get_bufs;
                        }
                }

                hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
                                         GFP_KERNEL);
                if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
                        ret = -ENOMEM;
                        goto err_get_bufs;
                }

                if (hr_qp->rq.wqe_cnt) {
                        hr_qp->rq.wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64),
                                                 GFP_KERNEL);
                        if (ZERO_OR_NULL_PTR(hr_qp->rq.wrid)) {
                                ret = -ENOMEM;
                                goto err_sq_wrid;
                        }
                }
        }

        if (sqpn) {
                qpn = sqpn;
        } else {
                /* Get QPN */
                ret = hns_roce_reserve_range_qp(hr_dev, 1, 1, &qpn);
                if (ret) {
                        dev_err(dev, "hns_roce_reserve_range_qp alloc qpn error\n");
                        goto err_wrid;
                }
        }

        hr_qp->wqe_bt_pg_shift = calc_wqe_bt_page_shift(hr_dev, hr_qp->regions,
                                                        hr_qp->region_cnt);
        hns_roce_mtr_init(&hr_qp->mtr, PAGE_SHIFT + hr_qp->wqe_bt_pg_shift,
                          page_shift);
        ret = hns_roce_mtr_attach(hr_dev, &hr_qp->mtr, buf_list,
                                  hr_qp->regions, hr_qp->region_cnt);
        if (ret) {
                dev_err(dev, "mtr attach error for create qp\n");
                goto err_mtr;
        }

        if (init_attr->qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                /* In v1 engine, GSI QP context in RoCE engine's register */
                ret = hns_roce_gsi_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        } else {
                ret = hns_roce_qp_alloc(hr_dev, qpn, hr_qp);
                if (ret) {
                        dev_err(dev, "hns_roce_qp_alloc failed!\n");
                        goto err_qpn;
                }
        }

        if (sqpn)
                hr_qp->doorbell_qpn = 1;
        else
                hr_qp->doorbell_qpn = (u32)hr_qp->qpn;

        if (udata) {
                ret = ib_copy_to_udata(udata, &resp,
                                       min(udata->outlen, sizeof(resp)));
                if (ret)
                        goto err_qp;
        }

        if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
                ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
                if (ret)
                        goto err_qp;
        }

        hr_qp->event = hns_roce_ib_qp_event;

        add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq, init_attr->recv_cq);

        hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);

        return 0;

err_qp:
        if (init_attr->qp_type == IB_QPT_GSI &&
            hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                hns_roce_qp_remove(hr_dev, hr_qp);
        else
                hns_roce_qp_free(hr_dev, hr_qp);

err_qpn:
        if (!sqpn)
                hns_roce_release_range_qp(hr_dev, qpn, 1);

err_mtr:
        hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);

err_wrid:
        if (udata) {
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_rq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
        } else {
                if (hr_qp->rq.wqe_cnt)
                        kfree(hr_qp->rq.wrid);
        }

err_sq_dbmap:
        if (udata)
                if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
                    (udata->inlen >= sizeof(ucmd)) &&
                    (udata->outlen >= sizeof(resp)) &&
                    hns_roce_qp_has_sq(init_attr))
                        hns_roce_db_unmap_user(uctx, &hr_qp->sdb);

err_sq_wrid:
        if (!udata)
                kfree(hr_qp->sq.wrid);

err_get_bufs:
        hns_roce_free_buf_list(buf_list, hr_qp->region_cnt);

err_alloc_list:
        if (!hr_qp->umem)
                hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
        ib_umem_release(hr_qp->umem);

err_db:
        if (!udata && hns_roce_qp_has_rq(init_attr) &&
            (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB))
                hns_roce_free_db(hr_dev, &hr_qp->rdb);

err_alloc_rq_inline_buf:
        if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
            hns_roce_qp_has_rq(init_attr))
                free_rq_inline_buf(hr_qp);

err_out:
        return ret;
}
struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                                 struct ib_qp_init_attr *init_attr,
                                 struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
        struct ib_device *ibdev = &hr_dev->ib_dev;
        struct hns_roce_qp *hr_qp;
        int ret;

        switch (init_attr->qp_type) {
        case IB_QPT_RC: {
                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, 0,
                                                hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "Create QP 0x%06lx failed(%d)\n",
                                  hr_qp->qpn, ret);
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                hr_qp->ibqp.qp_num = hr_qp->qpn;
                break;
        }
        case IB_QPT_GSI: {
                /* Userspace is not allowed to create special QPs: */
                if (udata) {
                        ibdev_err(ibdev, "not support usr space GSI\n");
                        return ERR_PTR(-EINVAL);
                }

                hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
                if (!hr_qp)
                        return ERR_PTR(-ENOMEM);

                hr_qp->port = init_attr->port_num - 1;
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];

                /* when hw version is v1, the sqpn is allocated */
                if (hr_dev->caps.max_sq_sg <= 2)
                        hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                             hr_dev->iboe.phy_port[hr_qp->port];
                else
                        hr_qp->ibqp.qp_num = 1;

                ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata,
                                                hr_qp->ibqp.qp_num, hr_qp);
                if (ret) {
                        ibdev_err(ibdev, "Create GSI QP failed!\n");
                        kfree(hr_qp);
                        return ERR_PTR(ret);
                }

                break;
        }
        default:{
                ibdev_err(ibdev, "not support QP type %d\n",
                          init_attr->qp_type);
                return ERR_PTR(-EINVAL);
        }
        }

        return &hr_qp->ibqp;
}
int to_hr_qp_type(int qp_type)
{
        int transport_type;

        if (qp_type == IB_QPT_RC)
                transport_type = SERV_TYPE_RC;
        else if (qp_type == IB_QPT_UC)
                transport_type = SERV_TYPE_UC;
        else if (qp_type == IB_QPT_UD)
                transport_type = SERV_TYPE_UD;
        else if (qp_type == IB_QPT_GSI)
                transport_type = SERV_TYPE_UD;
        else
                transport_type = -1;

        return transport_type;
}
static int check_mtu_validate(struct hns_roce_dev *hr_dev,
                              struct hns_roce_qp *hr_qp,
                              struct ib_qp_attr *attr, int attr_mask)
{
        enum ib_mtu active_mtu;
        int p;

        p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
        active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

        if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
            attr->path_mtu > hr_dev->caps.max_mtu) ||
            attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
                ibdev_err(&hr_dev->ib_dev,
                          "attr path_mtu(%d)invalid while modify qp",
                          attr->path_mtu);
                return -EINVAL;
        }

        return 0;
}
static int hns_roce_check_qp_attr(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                  int attr_mask)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        int p;

        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > hr_dev->caps.num_ports)) {
                ibdev_err(&hr_dev->ib_dev,
                          "attr port_num invalid.attr->port_num=%d\n",
                          attr->port_num);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PKEY_INDEX) {
                p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
                if (attr->pkey_index >= hr_dev->caps.pkey_table_len[p]) {
                        ibdev_err(&hr_dev->ib_dev,
                                  "attr pkey_index invalid.attr->pkey_index=%d\n",
                                  attr->pkey_index);
                        return -EINVAL;
                }
        }

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > hr_dev->caps.max_qp_init_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "attr max_rd_atomic invalid.attr->max_rd_atomic=%d\n",
                          attr->max_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > hr_dev->caps.max_qp_dest_rdma) {
                ibdev_err(&hr_dev->ib_dev,
                          "attr max_dest_rd_atomic invalid.attr->max_dest_rd_atomic=%d\n",
                          attr->max_dest_rd_atomic);
                return -EINVAL;
        }

        if (attr_mask & IB_QP_PATH_MTU)
                return check_mtu_validate(hr_dev, hr_qp, attr, attr_mask);

        return 0;
}
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                       int attr_mask, struct ib_udata *udata)
{
        struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
        struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int ret = -EINVAL;

        mutex_lock(&hr_qp->mutex);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                    attr->cur_qp_state : (enum ib_qp_state)hr_qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (ibqp->uobject &&
            (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
                if (hr_qp->sdb_en == 1) {
                        hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

                        if (hr_qp->rdb_en == 1)
                                hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
                } else {
                        ibdev_warn(&hr_dev->ib_dev,
                                   "flush cqe is not supported in userspace!\n");
                        goto out;
                }
        }

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask)) {
                ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
                goto out;
        }

        ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
        if (ret)
                goto out;

        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                if (hr_dev->caps.min_wqes) {
                        ret = -EPERM;
                        ibdev_err(&hr_dev->ib_dev,
                                  "cur_state=%d new_state=%d\n", cur_state,
                                  new_state);
                } else {
                        ret = 0;
                }

                goto out;
        }

        ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
                                    new_state);

out:
        mutex_unlock(&hr_qp->mutex);

        return ret;
}
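
/*
 * Lock a QP's send and recv CQs together. To avoid ABBA deadlocks the CQ
 * with the lower CQN is always taken first; NULL or identical CQs only get
 * sparse lock annotations.
 */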
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
                       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __acquire(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                spin_lock_irq(&recv_cq->lock);
                __acquire(&send_cq->lock);
        } else if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock_irq(&recv_cq->lock);
                spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
        }
}
void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
                         struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
                         __releases(&recv_cq->lock)
{
        if (unlikely(send_cq == NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                __release(&send_cq->lock);
        } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
                __release(&recv_cq->lock);
                spin_unlock(&send_cq->lock);
        } else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
                __release(&send_cq->lock);
                spin_unlock(&recv_cq->lock);
        } else if (send_cq == recv_cq) {
                __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
                spin_unlock(&send_cq->lock);
                spin_unlock_irq(&recv_cq->lock);
        }
}
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, offset);
}

void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *get_send_wqe(struct hns_roce_qp *hr_qp, int n)
{
        return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n)
{
        return hns_roce_buf_offset(&hr_qp->hr_buf, hr_qp->sge.offset +
                                   (n << hr_qp->sge.sge_shift));
}
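
/*
 * Check whether posting nreq more WQEs would overflow the work queue. The
 * head/tail distance is re-read under the CQ lock before reporting overflow,
 * since completions may have freed slots concurrently.
 */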
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
                          struct ib_cq *ib_cq)
{
        struct hns_roce_cq *hr_cq;
        u32 cur;

        cur = hr_wq->head - hr_wq->tail;
        if (likely(cur + nreq < hr_wq->wqe_cnt))
                return false;

        hr_cq = to_hr_cq(ib_cq);
        spin_lock(&hr_cq->lock);
        cur = hr_wq->head - hr_wq->tail;
        spin_unlock(&hr_cq->lock);

        return cur + nreq >= hr_wq->wqe_cnt;
}
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
        int reserved_from_top = 0;
        int reserved_from_bot;
        int ret;

        mutex_init(&qp_table->scc_mutex);
        xa_init(&hr_dev->qp_table_xa);

        reserved_from_bot = hr_dev->caps.reserved_qps;

        ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
                                   hr_dev->caps.num_qps - 1, reserved_from_bot,
                                   reserved_from_top);
        if (ret) {
                dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
                        ret);
                return ret;
        }

        return 0;
}
void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
        hns_roce_bitmap_cleanup(&hr_dev->qp_table.bitmap);
}
);