/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_hem.h"

static void flush_work_handle(struct work_struct *work)
{
	struct hns_roce_work *flush_work = container_of(work,
					struct hns_roce_work, work);
	struct hns_roce_qp *hr_qp = container_of(flush_work,
					struct hns_roce_qp, flush_work);
	struct device *dev = flush_work->hr_dev->dev;
	struct ib_qp_attr attr;
	int attr_mask;
	int ret;

	attr_mask = IB_QP_STATE;
	attr.qp_state = IB_QPS_ERR;

	if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {
		ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);
		if (ret)
			dev_err(dev, "Modify QP to error state failed(%d) during CQE flush\n",
				ret);
	}

	/*
	 * make sure we signal QP destroy leg that flush QP was completed
	 * so that it can safely proceed ahead now and destroy QP
	 */
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
}

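/* Each QP owns one hns_roce_work for flushing CQEs. The work takes an extra
 * reference on the QP before it is queued so the QP cannot be freed while the
 * flush is pending; flush_work_handle() above drops that reference.
 */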
void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_work *flush_work = &hr_qp->flush_work;

	flush_work->hr_dev = hr_dev;
	INIT_WORK(&flush_work->work, flush_work_handle);
	atomic_inc(&hr_qp->refcount);
	queue_work(hr_dev->irq_workq, &flush_work->work);
}

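/* Async event dispatch: the QP is looked up and referenced under the xarray
 * lock so it cannot disappear while the event is delivered. On hardware other
 * than HNS_ROCE_HW_VER1, fatal work-queue errors also move the QP to the
 * error state and schedule a CQE flush.
 */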
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_qp *qp;

	xa_lock(&hr_dev->qp_table_xa);
	qp = __hns_roce_qp_lookup(hr_dev, qpn);
	if (qp)
		atomic_inc(&qp->refcount);
	xa_unlock(&hr_dev->qp_table_xa);

	if (!qp) {
		dev_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
	    (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
	     event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR)) {
		qp->state = IB_QPS_ERR;
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	}

	qp->event(qp, (enum hns_roce_event)event_type);

	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,
				 enum hns_roce_event type)
{
	struct ib_qp *ibqp = &hr_qp->ibqp;
	struct ib_event event;

	if (ibqp->event_handler) {
		event.device = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			dev_dbg(ibqp->device->dev.parent, "roce_ib: Unexpected event type %d on QP %06lx\n",
				type, hr_qp->qpn);
			return;
		}
		ibqp->event_handler(&event, ibqp->qp_context);
	}
}

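/* QPN allocation is spread across HNS_ROCE_QP_BANK_NUM banks: the low three
 * bits of a QPN select the bank and the remaining bits come from that bank's
 * IDA. New QPs are steered to the bank with the fewest QPNs in use.
 */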
static u8 get_least_load_bankid_for_qp(struct hns_roce_bank *bank)
{
	u32 least_load = bank[0].inuse;
	u8 bankid = 0;
	u32 bankcnt;
	u8 i;

	for (i = 1; i < HNS_ROCE_QP_BANK_NUM; i++) {
		bankcnt = bank[i].inuse;
		if (bankcnt < least_load) {
			least_load = bankcnt;
			bankid = i;
		}
	}

	return bankid;
}

static int alloc_qpn_with_bankid(struct hns_roce_bank *bank, u8 bankid,
				 unsigned long *qpn)
{
	int id;

	id = ida_alloc_range(&bank->ida, bank->next, bank->max, GFP_KERNEL);
	if (id < 0) {
		id = ida_alloc_range(&bank->ida, bank->min, bank->max,
				     GFP_KERNEL);
		if (id < 0)
			return id;
	}

	/* the QPN should keep increasing until the max value is reached. */
	bank->next = (id + 1) > bank->max ? bank->min : id + 1;

	/* the lower 3 bits is bankid */
	*qpn = (id << 3) | bankid;

	return 0;
}

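/* Example of the QPN layout above: an IDA id of 5 in bank 3 yields
 * qpn = (5 << 3) | 3 = 43; free_qpn() later recovers the bank from the low
 * bits (43 & 7 = 3) and returns 43 >> 3 = 5 to that bank's IDA.
 */
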
static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned long num = 0;
	u8 bankid;
	int ret;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
		/* when hw version is v1, the sqpn is allocated */
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
			num = HNS_ROCE_MAX_PORTS +
			      hr_dev->iboe.phy_port[hr_qp->port];
		else
			num = 1;

		hr_qp->doorbell_qpn = 1;
	} else {
		spin_lock(&qp_table->bank_lock);
		bankid = get_least_load_bankid_for_qp(qp_table->bank);

		ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid,
					    &num);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to alloc QPN, ret = %d\n", ret);
			spin_unlock(&qp_table->bank_lock);
			return ret;
		}

		qp_table->bank[bankid].inuse++;
		spin_unlock(&qp_table->bank_lock);

		hr_qp->doorbell_qpn = (u32)num;
	}

	hr_qp->qpn = num;

	return 0;
}

enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:
		return HNS_ROCE_QP_STATE_RST;
	case IB_QPS_INIT:
		return HNS_ROCE_QP_STATE_INIT;
	case IB_QPS_RTR:
		return HNS_ROCE_QP_STATE_RTR;
	case IB_QPS_RTS:
		return HNS_ROCE_QP_STATE_RTS;
	case IB_QPS_SQD:
		return HNS_ROCE_QP_STATE_SQD;
	case IB_QPS_ERR:
		return HNS_ROCE_QP_STATE_ERR;
	default:
		return HNS_ROCE_QP_NUM_STATE;
	}
}

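/* The device-level qp_list and the per-CQ sq_list/rq_list below are the lists
 * walked by the software CQE-flush path, so a QP is linked into them while
 * holding both qp_list_lock and the CQ locks.
 */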
static void add_qp_to_list(struct hns_roce_dev *hr_dev,
			   struct hns_roce_qp *hr_qp,
			   struct ib_cq *send_cq, struct ib_cq *recv_cq)
{
	struct hns_roce_cq *hr_send_cq, *hr_recv_cq;
	unsigned long flags;

	hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;
	hr_recv_cq = recv_cq ? to_hr_cq(recv_cq) : NULL;

	spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
	hns_roce_lock_cqs(hr_send_cq, hr_recv_cq);

	list_add_tail(&hr_qp->node, &hr_dev->qp_list);
	if (hr_send_cq)
		list_add_tail(&hr_qp->sq_node, &hr_send_cq->sq_list);
	if (hr_recv_cq)
		list_add_tail(&hr_qp->rq_node, &hr_recv_cq->rq_list);

	hns_roce_unlock_cqs(hr_send_cq, hr_recv_cq);
	spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
}

static int hns_roce_qp_store(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp,
			     struct ib_qp_init_attr *init_attr)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	ret = xa_err(xa_store_irq(xa, hr_qp->qpn, hr_qp, GFP_KERNEL));
	if (ret)
		dev_err(hr_dev->dev, "Failed to xa store for QPC\n");
	else
		/* add QP to device's QP list for softwc */
		add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,
			       init_attr->recv_cq);

	return ret;
}

static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	struct device *dev = hr_dev->dev;
	int ret;

	if (!hr_qp->qpn)
		return -EINVAL;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return 0;

	/* Alloc memory for QPC */
	ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get QPC table\n");
		goto err_out;
	}

	/* Alloc memory for IRRL */
	ret = hns_roce_table_get(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
	if (ret) {
		dev_err(dev, "Failed to get IRRL table\n");
		goto err_put_qp;
	}

	if (hr_dev->caps.trrl_entry_sz) {
		/* Alloc memory for TRRL */
		ret = hns_roce_table_get(hr_dev, &qp_table->trrl_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get TRRL table\n");
			goto err_put_irrl;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		/* Alloc memory for SCC CTX */
		ret = hns_roce_table_get(hr_dev, &qp_table->sccc_table,
					 hr_qp->qpn);
		if (ret) {
			dev_err(dev, "Failed to get SCC CTX table\n");
			goto err_put_trrl;
		}
	}

	return 0;

err_put_trrl:
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);

err_put_irrl:
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);

err_put_qp:
	hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);

err_out:
	return ret;
}

void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct xarray *xa = &hr_dev->qp_table_xa;
	unsigned long flags;

	list_del(&hr_qp->node);
	list_del(&hr_qp->sq_node);
	list_del(&hr_qp->rq_node);

	xa_lock_irqsave(xa, flags);
	__xa_erase(xa, hr_qp->qpn & (hr_dev->caps.num_qps - 1));
	xa_unlock_irqrestore(xa, flags);
}

static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;

	/* In v1 engine, GSI QP context is saved in the RoCE hw's register */
	if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
	    hr_dev->hw_rev == HNS_ROCE_HW_VER1)
		return;

	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
	hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
}

static inline u8 get_qp_bankid(unsigned long qpn)
{
	/* The lower 3 bits of QPN are used to hash to different banks */
	return (u8)(qpn & GENMASK(2, 0));
}

static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	u8 bankid;

	if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
		return;

	if (hr_qp->qpn < hr_dev->caps.reserved_qps)
		return;

	bankid = get_qp_bankid(hr_qp->qpn);

	ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3);

	spin_lock(&hr_dev->qp_table.bank_lock);
	hr_dev->qp_table.bank[bankid].inuse--;
	spin_unlock(&hr_dev->qp_table.bank_lock);
}

static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
		       struct hns_roce_qp *hr_qp, int has_rq)
{
	u32 cnt;

	/* If srq exist, set zero for relative number of rq */
	if (!has_rq) {
		hr_qp->rq.wqe_cnt = 0;
		hr_qp->rq.max_gs = 0;
		hr_qp->rq_inl_buf.wqe_cnt = 0;
		cap->max_recv_wr = 0;
		cap->max_recv_sge = 0;

		return 0;
	}

	/* Check the validity of QP support capacity */
	if (!cap->max_recv_wr || cap->max_recv_wr > hr_dev->caps.max_wqes ||
	    cap->max_recv_sge > hr_dev->caps.max_rq_sg) {
		ibdev_err(&hr_dev->ib_dev, "RQ config error, depth=%u, sge=%d\n",
			  cap->max_recv_wr, cap->max_recv_sge);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_recv_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(&hr_dev->ib_dev, "rq depth %u too large\n",
			  cap->max_recv_wr);
		return -EINVAL;
	}

	hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge));

	if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
	else
		hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
					    hr_qp->rq.max_gs);

	hr_qp->rq.wqe_cnt = cnt;
	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE)
		hr_qp->rq_inl_buf.wqe_cnt = cnt;
	else
		hr_qp->rq_inl_buf.wqe_cnt = 0;

	cap->max_recv_wr = cnt;
	cap->max_recv_sge = hr_qp->rq.max_gs;

	return 0;
}

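/* Example of the RQ sizing above: a request for 100 recv WRs is rounded up to
 * the next power of two, 128 (bounded below by caps.min_wqes and above by
 * caps.max_wqes), and the rounded depth and SGE count are written back into
 * @cap so the caller sees the real queue size.
 */
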
static u32 get_wqe_ext_sge_cnt(struct hns_roce_qp *qp)
{
	/* GSI/UD QP only has extended sge */
	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD)
		return qp->sq.max_gs;

	if (qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
		return qp->sq.max_gs - HNS_ROCE_SGE_IN_WQE;

	return 0;
}

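/* RC WQEs can carry HNS_ROCE_SGE_IN_WQE SGEs inline; only the excess spills
 * into the extended SGE area computed above. GSI/UD WQEs keep every SGE in
 * the extended area, which is why they always report sq.max_gs here.
 */
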
static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
			      struct hns_roce_qp *hr_qp, struct ib_qp_cap *cap)
{
	u32 total_sge_cnt;
	u32 wqe_sge_cnt;

	hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;

	if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
		hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
		return;
	}

	hr_qp->sq.max_gs = max(1U, cap->max_send_sge);

	wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);

	/* If the number of extended sge is not zero, they MUST use the
	 * space of HNS_HW_PAGE_SIZE at least.
	 */
	if (wqe_sge_cnt) {
		total_sge_cnt = roundup_pow_of_two(sq_wqe_cnt * wqe_sge_cnt);
		hr_qp->sge.sge_cnt = max(total_sge_cnt,
				(u32)HNS_HW_PAGE_SIZE / HNS_ROCE_SGE_SIZE);
	}
}

static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
					struct ib_qp_cap *cap,
					struct hns_roce_ib_create_qp *ucmd)
{
	u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
	u8 max_sq_stride = ilog2(roundup_sq_stride);

	/* Sanity check SQ size before proceeding */
	if (ucmd->log_sq_stride > max_sq_stride ||
	    ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ stride size.\n");
		return -EINVAL;
	}

	if (cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(&hr_dev->ib_dev, "failed to check SQ SGE size %u.\n",
			  cap->max_send_sge);
		return -EINVAL;
	}

	return 0;
}

static int set_user_sq_size(struct hns_roce_dev *hr_dev,
			    struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp,
			    struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt = 0;
	int ret;

	if (check_shl_overflow(1, ucmd->log_sq_bb_count, &cnt) ||
	    cnt > hr_dev->caps.max_wqes)
		return -EINVAL;

	ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to check user SQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
	hr_qp->sq.wqe_cnt = cnt;

	return 0;
}

static int set_wqe_buf_attr(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp,
			    struct hns_roce_buf_attr *buf_attr)
{
	int buf_size;
	int idx = 0;

	hr_qp->buff_size = 0;

	/* SQ WQE */
	hr_qp->sq.offset = 0;
	buf_size = to_hr_hem_entries_size(hr_qp->sq.wqe_cnt,
					  hr_qp->sq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* extend SGE WQE in SQ */
	hr_qp->sge.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->sge.sge_cnt,
					  hr_qp->sge.sge_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_sge_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	/* RQ WQE */
	hr_qp->rq.offset = hr_qp->buff_size;
	buf_size = to_hr_hem_entries_size(hr_qp->rq.wqe_cnt,
					  hr_qp->rq.wqe_shift);
	if (buf_size > 0 && idx < ARRAY_SIZE(buf_attr->region)) {
		buf_attr->region[idx].size = buf_size;
		buf_attr->region[idx].hopnum = hr_dev->caps.wqe_rq_hop_num;
		idx++;
		hr_qp->buff_size += buf_size;
	}

	if (hr_qp->buff_size < 1)
		return -EINVAL;

	buf_attr->page_shift = HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_buf_pg_sz;
	buf_attr->fixed_page = true;
	buf_attr->region_count = idx;

	return 0;
}

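/* The resulting WQE buffer is laid out as consecutive regions in this order:
 * SQ WQEs, extended SGEs, then RQ WQEs. Each region is sized in HEM entries
 * and recorded in @buf_attr so the MTR can map it with its own hop number.
 */
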
static int set_kernel_sq_size(struct hns_roce_dev *hr_dev,
			      struct ib_qp_cap *cap, struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u32 cnt;

	if (!cap->max_send_wr || cap->max_send_wr > hr_dev->caps.max_wqes ||
	    cap->max_send_sge > hr_dev->caps.max_sq_sg) {
		ibdev_err(ibdev,
			  "failed to check SQ WR or SGE num, ret = %d.\n",
			  -EINVAL);
		return -EINVAL;
	}

	cnt = roundup_pow_of_two(max(cap->max_send_wr, hr_dev->caps.min_wqes));
	if (cnt > hr_dev->caps.max_wqes) {
		ibdev_err(ibdev, "failed to check WQE num, WQE num = %u.\n",
			  cnt);
		return -EINVAL;
	}

	hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
	hr_qp->sq.wqe_cnt = cnt;

	set_ext_sge_param(hr_dev, cnt, hr_qp, cap);

	/* sync the parameters of kernel QP to user's configuration */
	cap->max_send_wr = cnt;
	cap->max_send_sge = hr_qp->sq.max_gs;

	return 0;
}

static int hns_roce_qp_has_sq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_TGT || !attr->cap.max_send_wr)
		return 0;

	return 1;
}

static int hns_roce_qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}

static int alloc_rq_inline_buf(struct hns_roce_qp *hr_qp,
			       struct ib_qp_init_attr *init_attr)
{
	u32 max_recv_sge = init_attr->cap.max_recv_sge;
	u32 wqe_cnt = hr_qp->rq_inl_buf.wqe_cnt;
	struct hns_roce_rinl_wqe *wqe_list;
	int i;

	/* allocate recv inline buf */
	wqe_list = kcalloc(wqe_cnt, sizeof(struct hns_roce_rinl_wqe),
			   GFP_KERNEL);
	if (!wqe_list)
		goto err;

	/* Allocate a continuous buffer for all inline sge we need */
	wqe_list[0].sg_list = kcalloc(wqe_cnt, (max_recv_sge *
					sizeof(struct hns_roce_rinl_sge)),
				      GFP_KERNEL);
	if (!wqe_list[0].sg_list)
		goto err_wqe_list;

	/* Assign buffers of sg_list to each inline wqe */
	for (i = 1; i < wqe_cnt; i++)
		wqe_list[i].sg_list = &wqe_list[0].sg_list[i * max_recv_sge];

	hr_qp->rq_inl_buf.wqe_list = wqe_list;

	return 0;

err_wqe_list:
	kfree(wqe_list);

err:
	return -ENOMEM;
}

static void free_rq_inline_buf(struct hns_roce_qp *hr_qp)
{
	if (hr_qp->rq_inl_buf.wqe_list)
		kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
	kfree(hr_qp->rq_inl_buf.wqe_list);
}

static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata, unsigned long addr)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_buf_attr buf_attr = {};
	int ret;

	if (!udata && hr_qp->rq_inl_buf.wqe_cnt) {
		ret = alloc_rq_inline_buf(hr_qp, init_attr);
		if (ret) {
			ibdev_err(ibdev,
				  "failed to alloc inline buf, ret = %d.\n",
				  ret);
			return ret;
		}
	} else {
		hr_qp->rq_inl_buf.wqe_list = NULL;
	}

	ret = set_wqe_buf_attr(hr_dev, hr_qp, &buf_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to split WQE buf, ret = %d.\n", ret);
		goto err_inline;
	}
	ret = hns_roce_mtr_create(hr_dev, &hr_qp->mtr, &buf_attr,
				  HNS_HW_PAGE_SHIFT + hr_dev->caps.mtt_ba_pg_sz,
				  udata, addr);
	if (ret) {
		ibdev_err(ibdev, "failed to create WQE mtr, ret = %d.\n", ret);
		goto err_inline;
	}

	return 0;

err_inline:
	free_rq_inline_buf(hr_qp);

	return ret;
}

static void free_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
	hns_roce_mtr_destroy(hr_dev, &hr_qp->mtr);
	free_rq_inline_buf(hr_qp);
}

static inline bool user_qp_has_sdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp,
				   struct hns_roce_ib_create_qp *ucmd)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SQ_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_sq(init_attr) &&
		udata->inlen >= offsetofend(typeof(*ucmd), sdb_addr));
}

static inline bool user_qp_has_rdb(struct hns_roce_dev *hr_dev,
				   struct ib_qp_init_attr *init_attr,
				   struct ib_udata *udata,
				   struct hns_roce_ib_create_qp_resp *resp)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		udata->outlen >= offsetofend(typeof(*resp), cap_flags) &&
		hns_roce_qp_has_rq(init_attr));
}

static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
				     struct ib_qp_init_attr *init_attr)
{
	return ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) &&
		hns_roce_qp_has_rq(init_attr));
}

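/* The three helpers above decide whether record doorbells are used: a user QP
 * needs the corresponding capability flag plus a udata layout new enough to
 * carry sdb_addr/cap_flags, while a kernel QP only needs the capability and
 * an RQ.
 */
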
static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_qp_init_attr *init_attr,
		       struct ib_udata *udata,
		       struct hns_roce_ib_create_qp *ucmd,
		       struct hns_roce_ib_create_qp_resp *resp)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SDI_MODE)
		hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;

	if (udata) {
		if (user_qp_has_sdb(hr_dev, init_attr, udata, resp, ucmd)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->sdb_addr,
						   &hr_qp->sdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user SQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_SQ_RECORD_DB;
		}

		if (user_qp_has_rdb(hr_dev, init_attr, udata, resp)) {
			ret = hns_roce_db_map_user(uctx, udata, ucmd->db_addr,
						   &hr_qp->rdb);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to map user RQ doorbell, ret = %d.\n",
					  ret);
				goto err_sdb;
			}
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
			resp->cap_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	} else {
		/* QP doorbell register address */
		hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;
		hr_qp->rq.db_reg_l = hr_dev->reg_base + hr_dev->odb_offset +
				     DB_REG_OFFSET * hr_dev->priv_uar.index;

		if (kernel_qp_has_rdb(hr_dev, init_attr)) {
			ret = hns_roce_alloc_db(hr_dev, &hr_qp->rdb, 0);
			if (ret) {
				ibdev_err(ibdev,
					  "failed to alloc kernel RQ doorbell, ret = %d.\n",
					  ret);
				goto err_out;
			}
			*hr_qp->rdb.db_record = 0;
			hr_qp->en_flags |= HNS_ROCE_QP_CAP_RQ_RECORD_DB;
		}
	}

	return 0;

err_sdb:
	if (udata && hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
		hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
err_out:
	return ret;
}

static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
		       struct ib_udata *udata)
{
	struct hns_roce_ucontext *uctx = rdma_udata_to_drv_context(
		udata, struct hns_roce_ucontext, ibucontext);

	if (udata) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
			hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
	} else {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
			hns_roce_free_db(hr_dev, &hr_qp->rdb);
	}
}

static int alloc_kernel_wrid(struct hns_roce_dev *hr_dev,
			     struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	u64 *sq_wrid = NULL;
	u64 *rq_wrid = NULL;
	int ret;

	sq_wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(sq_wrid)) {
		ibdev_err(ibdev, "failed to alloc SQ wrid.\n");
		return -ENOMEM;
	}

	if (hr_qp->rq.wqe_cnt) {
		rq_wrid = kcalloc(hr_qp->rq.wqe_cnt, sizeof(u64), GFP_KERNEL);
		if (ZERO_OR_NULL_PTR(rq_wrid)) {
			ibdev_err(ibdev, "failed to alloc RQ wrid.\n");
			ret = -ENOMEM;
			goto err_sq;
		}
	}

	hr_qp->sq.wrid = sq_wrid;
	hr_qp->rq.wrid = rq_wrid;
	return 0;

err_sq:
	kfree(sq_wrid);

	return ret;
}

static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
{
	kfree(hr_qp->rq.wrid);
	kfree(hr_qp->sq.wrid);
}

static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			struct ib_qp_init_attr *init_attr,
			struct ib_udata *udata,
			struct hns_roce_ib_create_qp *ucmd)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	int ret;

	hr_qp->ibqp.qp_type = init_attr->qp_type;

	if (init_attr->cap.max_inline_data > hr_dev->caps.max_sq_inline)
		init_attr->cap.max_inline_data = hr_dev->caps.max_sq_inline;

	hr_qp->max_inline_data = init_attr->cap.max_inline_data;

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
	else
		hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;

	ret = set_rq_size(hr_dev, &init_attr->cap, hr_qp,
			  hns_roce_qp_has_rq(init_attr));
	if (ret) {
		ibdev_err(ibdev, "failed to set user RQ size, ret = %d.\n",
			  ret);
		return ret;
	}

	if (udata) {
		ret = ib_copy_from_udata(ucmd, udata,
					 min(udata->inlen, sizeof(*ucmd)));
		if (ret) {
			ibdev_err(ibdev,
				  "failed to copy QP ucmd, ret = %d\n", ret);
			return ret;
		}

		ret = set_user_sq_size(hr_dev, &init_attr->cap, hr_qp, ucmd);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set user SQ size, ret = %d.\n",
				  ret);
	} else {
		ret = set_kernel_sq_size(hr_dev, &init_attr->cap, hr_qp);
		if (ret)
			ibdev_err(ibdev,
				  "failed to set kernel SQ size, ret = %d.\n",
				  ret);
	}

	return ret;
}

static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
				     struct ib_pd *ib_pd,
				     struct ib_qp_init_attr *init_attr,
				     struct ib_udata *udata,
				     struct hns_roce_qp *hr_qp)
{
	struct hns_roce_ib_create_qp_resp resp = {};
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_ib_create_qp ucmd;
	int ret;

	mutex_init(&hr_qp->mutex);
	spin_lock_init(&hr_qp->sq.lock);
	spin_lock_init(&hr_qp->rq.lock);

	hr_qp->state = IB_QPS_RESET;
	hr_qp->flush_flag = 0;

	if (init_attr->create_flags)
		return -EOPNOTSUPP;

	ret = set_qp_param(hr_dev, hr_qp, init_attr, udata, &ucmd);
	if (ret) {
		ibdev_err(ibdev, "failed to set QP param, ret = %d.\n", ret);
		return ret;
	}

	if (!udata) {
		ret = alloc_kernel_wrid(hr_dev, hr_qp);
		if (ret) {
			ibdev_err(ibdev, "failed to alloc wrid, ret = %d.\n",
				  ret);
			return ret;
		}
	}

	ret = alloc_qp_db(hr_dev, hr_qp, init_attr, udata, &ucmd, &resp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP doorbell, ret = %d.\n",
			  ret);
		goto err_wrid;
	}

	ret = alloc_qp_buf(hr_dev, hr_qp, init_attr, udata, ucmd.buf_addr);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP buffer, ret = %d.\n", ret);
		goto err_db;
	}

	ret = alloc_qpn(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QPN, ret = %d.\n", ret);
		goto err_buf;
	}

	ret = alloc_qpc(hr_dev, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "failed to alloc QP context, ret = %d.\n",
			  ret);
		goto err_qpn;
	}

	ret = hns_roce_qp_store(hr_dev, hr_qp, init_attr);
	if (ret) {
		ibdev_err(ibdev, "failed to store QP, ret = %d.\n", ret);
		goto err_qpc;
	}

	if (udata) {
		ret = ib_copy_to_udata(udata, &resp,
				       min(udata->outlen, sizeof(resp)));
		if (ret) {
			ibdev_err(ibdev, "copy qp resp failed!\n");
			goto err_store;
		}
	}

	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) {
		ret = hr_dev->hw->qp_flow_control_init(hr_dev, hr_qp);
		if (ret)
			goto err_store;
	}

	hr_qp->ibqp.qp_num = hr_qp->qpn;
	hr_qp->event = hns_roce_ib_qp_event;
	atomic_set(&hr_qp->refcount, 1);
	init_completion(&hr_qp->free);

	return 0;

err_store:
	hns_roce_qp_remove(hr_dev, hr_qp);
err_qpc:
	free_qpc(hr_dev, hr_qp);
err_qpn:
	free_qpn(hr_dev, hr_qp);
err_buf:
	free_qp_buf(hr_dev, hr_qp);
err_db:
	free_qp_db(hr_dev, hr_qp, udata);
err_wrid:
	free_kernel_wrid(hr_qp);
	return ret;
}

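/* Teardown note: hns_roce_create_qp_common() ends with the QP refcount set to
 * 1; hns_roce_qp_destroy() drops that reference and then waits on the
 * completion, so it only proceeds once event and flush-work users are gone.
 */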
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
			 struct ib_udata *udata)
{
	if (atomic_dec_and_test(&hr_qp->refcount))
		complete(&hr_qp->free);
	wait_for_completion(&hr_qp->free);

	free_qpc(hr_dev, hr_qp);
	free_qpn(hr_dev, hr_qp);
	free_qp_buf(hr_dev, hr_qp);
	free_kernel_wrid(hr_qp);
	free_qp_db(hr_dev, hr_qp, udata);

	kfree(hr_qp);
}

static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
			 bool is_user)
{
	switch (type) {
	case IB_QPT_UD:
		if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
		    is_user)
			goto out;
		break;
	case IB_QPT_RC:
	case IB_QPT_GSI:
		break;
	default:
		goto out;
	}

	return 0;

out:
	ibdev_err(&hr_dev->ib_dev, "not support QP type %d\n", type);

	return -EOPNOTSUPP;
}

struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
				 struct ib_qp_init_attr *init_attr,
				 struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *hr_qp;
	int ret;

	ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
	if (ret)
		return ERR_PTR(ret);

	hr_qp = kzalloc(sizeof(*hr_qp), GFP_KERNEL);
	if (!hr_qp)
		return ERR_PTR(-ENOMEM);

	if (init_attr->qp_type == IB_QPT_GSI) {
		hr_qp->port = init_attr->port_num - 1;
		hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
	}

	ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
	if (ret) {
		ibdev_err(ibdev, "Create QP type 0x%x failed(%d)\n",
			  init_attr->qp_type, ret);

		kfree(hr_qp);
		return ERR_PTR(ret);
	}

	return &hr_qp->ibqp;
}

int to_hr_qp_type(int qp_type)
{
	int transport_type;

	if (qp_type == IB_QPT_RC)
		transport_type = SERV_TYPE_RC;
	else if (qp_type == IB_QPT_UC)
		transport_type = SERV_TYPE_UC;
	else if (qp_type == IB_QPT_UD)
		transport_type = SERV_TYPE_UD;
	else if (qp_type == IB_QPT_GSI)
		transport_type = SERV_TYPE_UD;
	else
		transport_type = -1;

	return transport_type;
}

static int check_mtu_validate(struct hns_roce_dev *hr_dev,
			      struct hns_roce_qp *hr_qp,
			      struct ib_qp_attr *attr, int attr_mask)
{
	enum ib_mtu active_mtu;
	int p;

	p = attr_mask & IB_QP_PORT ? (attr->port_num - 1) : hr_qp->port;
	active_mtu = iboe_get_mtu(hr_dev->iboe.netdevs[p]->mtu);

	if ((hr_dev->caps.max_mtu >= IB_MTU_2048 &&
	    attr->path_mtu > hr_dev->caps.max_mtu) ||
	    attr->path_mtu < IB_MTU_256 || attr->path_mtu > active_mtu) {
		ibdev_err(&hr_dev->ib_dev,
			  "attr path_mtu(%d)invalid while modify qp",
			  attr->path_mtu);
		return -EINVAL;
	}

	return 0;
}

*ibqp
, struct ib_qp_attr
*attr
,
1176 struct hns_roce_dev
*hr_dev
= to_hr_dev(ibqp
->device
);
1177 struct hns_roce_qp
*hr_qp
= to_hr_qp(ibqp
);
1180 if ((attr_mask
& IB_QP_PORT
) &&
1181 (attr
->port_num
== 0 || attr
->port_num
> hr_dev
->caps
.num_ports
)) {
1182 ibdev_err(&hr_dev
->ib_dev
, "invalid attr, port_num = %u.\n",
1187 if (attr_mask
& IB_QP_PKEY_INDEX
) {
1188 p
= attr_mask
& IB_QP_PORT
? (attr
->port_num
- 1) : hr_qp
->port
;
1189 if (attr
->pkey_index
>= hr_dev
->caps
.pkey_table_len
[p
]) {
1190 ibdev_err(&hr_dev
->ib_dev
,
1191 "invalid attr, pkey_index = %u.\n",
1197 if (attr_mask
& IB_QP_MAX_QP_RD_ATOMIC
&&
1198 attr
->max_rd_atomic
> hr_dev
->caps
.max_qp_init_rdma
) {
1199 ibdev_err(&hr_dev
->ib_dev
,
1200 "invalid attr, max_rd_atomic = %u.\n",
1201 attr
->max_rd_atomic
);
1205 if (attr_mask
& IB_QP_MAX_DEST_RD_ATOMIC
&&
1206 attr
->max_dest_rd_atomic
> hr_dev
->caps
.max_qp_dest_rdma
) {
1207 ibdev_err(&hr_dev
->ib_dev
,
1208 "invalid attr, max_dest_rd_atomic = %u.\n",
1209 attr
->max_dest_rd_atomic
);
1213 if (attr_mask
& IB_QP_PATH_MTU
)
1214 return check_mtu_validate(hr_dev
, hr_qp
, attr
, attr_mask
);
int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		       int attr_mask, struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int ret = -EINVAL;

	mutex_lock(&hr_qp->mutex);

	if (attr_mask & IB_QP_CUR_STATE && attr->cur_qp_state != hr_qp->state)
		goto out;

	cur_state = hr_qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->uobject &&
	    (attr_mask & IB_QP_STATE) && new_state == IB_QPS_ERR) {
		if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB) {
			hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);

			if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
				hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
		} else {
			ibdev_warn(&hr_dev->ib_dev,
				   "flush cqe is not supported in userspace!\n");
			goto out;
		}
	}

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
		goto out;
	}

	ret = hns_roce_check_qp_attr(ibqp, attr, attr_mask);
	if (ret)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
			ret = -EPERM;
			ibdev_err(&hr_dev->ib_dev,
				  "RST2RST state is not supported\n");
		} else {
			ret = 0;
		}

		goto out;
	}

	ret = hr_dev->hw->modify_qp(ibqp, attr, attr_mask, cur_state,
				    new_state);

out:
	mutex_unlock(&hr_qp->mutex);

	return ret;
}

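/* When two different CQs must be locked together they are always taken in
 * ascending CQN order (and released in the reverse order below), so the
 * send/recv CQ pair cannot deadlock against another QP that uses the same
 * CQs in the opposite roles.
 */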
void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
		       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__acquire(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		spin_lock_irq(&recv_cq->lock);
		__acquire(&send_cq->lock);
	} else if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
			 struct hns_roce_cq *recv_cq) __releases(&send_cq->lock)
			 __releases(&recv_cq->lock)
{
	if (unlikely(send_cq == NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		__release(&send_cq->lock);
	} else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (unlikely(send_cq == NULL && recv_cq != NULL)) {
		__release(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	} else if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
	return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}

void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->rq.offset + (n << hr_qp->rq.wqe_shift));
}

void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
}

void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n)
{
	return get_wqe(hr_qp, hr_qp->sge.offset + (n << hr_qp->sge.sge_shift));
}

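/* head and tail are free-running counters, so "head - tail" is the number of
 * outstanding WQEs even after wrap-around. The overflow check below is
 * repeated under the CQ lock to pick up completions that may have just
 * advanced tail.
 */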
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, u32 nreq,
			  struct ib_cq *ib_cq)
{
	struct hns_roce_cq *hr_cq;
	u32 cur;

	cur = hr_wq->head - hr_wq->tail;
	if (likely(cur + nreq < hr_wq->wqe_cnt))
		return false;

	hr_cq = to_hr_cq(ib_cq);
	spin_lock(&hr_cq->lock);
	cur = hr_wq->head - hr_wq->tail;
	spin_unlock(&hr_cq->lock);

	return cur + nreq >= hr_wq->wqe_cnt;
}

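/* Reserved QPNs live at the bottom of the range: they are pre-marked as in
 * use in their banks and each bank's "min" is raised past them so the IDA
 * never hands them out.
 */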
int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
	unsigned int reserved_from_bot;
	unsigned int i;

	mutex_init(&qp_table->scc_mutex);
	xa_init(&hr_dev->qp_table_xa);

	reserved_from_bot = hr_dev->caps.reserved_qps;

	for (i = 0; i < reserved_from_bot; i++) {
		hr_dev->qp_table.bank[get_qp_bankid(i)].inuse++;
		hr_dev->qp_table.bank[get_qp_bankid(i)].min++;
	}

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++) {
		ida_init(&hr_dev->qp_table.bank[i].ida);
		hr_dev->qp_table.bank[i].max = hr_dev->caps.num_qps /
					       HNS_ROCE_QP_BANK_NUM - 1;
		hr_dev->qp_table.bank[i].next = hr_dev->qp_table.bank[i].min;
	}

	return 0;
}

void hns_roce_cleanup_qp_table(struct hns_roce_dev *hr_dev)
{
	int i;

	for (i = 0; i < HNS_ROCE_QP_BANK_NUM; i++)
		ida_destroy(&hr_dev->qp_table.bank[i].ida);
}