/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

/*
 * mapped-value = 1 + real-value
 * The real hns wr opcode values start from 0. To distinguish initialized
 * entries from uninitialized ones in the map, 1 is added to the actual value
 * when the mapping is defined, so a valid entry can be identified by its
 * mapped value being greater than 0.
 */
#define HR_OPC_MAP(ib_key, hr_key) \
	[IB_WR_ ## ib_key] = 1 + HNS_ROCE_V2_WQE_OP_ ## hr_key

static const u32 hns_roce_op_code[] = {
	HR_OPC_MAP(RDMA_WRITE, RDMA_WRITE),
	HR_OPC_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE_WITH_IMM),
	HR_OPC_MAP(SEND, SEND),
	HR_OPC_MAP(SEND_WITH_IMM, SEND_WITH_IMM),
	HR_OPC_MAP(RDMA_READ, RDMA_READ),
	HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP),
	HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD),
	HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV),
	HR_OPC_MAP(LOCAL_INV, LOCAL_INV),
	HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP),
	HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD),
	HR_OPC_MAP(REG_MR, FAST_REG_PMR),
};

static u32 to_hr_opcode(u32 ib_opcode)
{
	if (ib_opcode >= ARRAY_SIZE(hns_roce_op_code))
		return HNS_ROCE_V2_WQE_OP_MASK;

	return hns_roce_op_code[ib_opcode] ? hns_roce_op_code[ib_opcode] - 1 :
					     HNS_ROCE_V2_WQE_OP_MASK;
}
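
/*
 * Illustration of the map above: hns_roce_op_code[IB_WR_RDMA_WRITE] holds
 * 1 + HNS_ROCE_V2_WQE_OP_RDMA_WRITE, so to_hr_opcode(IB_WR_RDMA_WRITE)
 * returns the real hardware opcode after subtracting 1. An IB opcode with no
 * HR_OPC_MAP() entry leaves a zero in the array, and to_hr_opcode() returns
 * HNS_ROCE_V2_WQE_OP_MASK for it, just as it does for out-of-range opcodes.
 */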

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 void *wqe, const struct ib_reg_wr *wr)
{
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);
	struct hns_roce_wqe_frmr_seg *fseg = wqe;

	/* use ib_access_flags */
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RR_S,
		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_RW_S,
		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_FRMR_WQE_BYTE_4_LW_S,
		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

	/* Data structure reuse may lead to confusion */
	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_bit(fseg->mode_buf_pg_sz,
		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

static void set_atomic_seg(const struct ib_send_wr *wr, void *wqe,
			   struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			   int valid_num_sge)
{
	struct hns_roce_wqe_atomic_seg *aseg;

	set_data_seg_v2(wqe, wr->sg_list);
	aseg = wqe + sizeof(struct hns_roce_v2_wqe_data_seg);

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(atomic_wr(wr)->swap);
		aseg->cmp_data = cpu_to_le64(atomic_wr(wr)->compare_add);
	} else {
		aseg->fetchadd_swap_data =
			cpu_to_le64(atomic_wr(wr)->compare_add);
		aseg->cmp_data = 0;
	}

	roce_set_field(rc_sq_wqe->byte_16, V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
}
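
/*
 * Note on the layout built above: the single data SGE is written first and
 * the atomic segment immediately follows it in the WQE. For
 * IB_WR_ATOMIC_CMP_AND_SWP the swap operand goes into fetchadd_swap_data and
 * the compare operand into cmp_data; for IB_WR_ATOMIC_FETCH_AND_ADD only the
 * add operand is needed, so cmp_data is cleared.
 */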

static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind, int valid_num_sge)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = valid_num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/*
	 * Check whether wr->num_sge sges are in the same page. If not, we
	 * should calculate how many sges in the first page and the second
	 * page.
	 */
	dseg = hns_roce_get_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}

		dseg = hns_roce_get_extend_sge(qp,
					       (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}
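
/*
 * The extended SGEs filled above live in a separate ring that is not
 * guaranteed to be page-contiguous, so a run of SGEs may have to be split at
 * a page boundary: fi_sge_num is how many descriptors still fit between the
 * current write position and the end of the page, and the remaining
 * se_sge_num descriptors are written starting from the re-looked-up position
 * on the next page.
 */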

static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     void *wqe, unsigned int *sge_ind,
			     int valid_num_sge)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int j = 0;
	int i;

	if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
		if (le32_to_cpu(rc_sq_wqe->msg_len) >
		    hr_dev->caps.max_sq_inline) {
			ibdev_err(ibdev, "inline len(1-%d)=%d, illegal",
				  hr_dev->caps.max_sq_inline,
				  le32_to_cpu(rc_sq_wqe->msg_len));
			return -EINVAL;
		}

		if (wr->opcode == IB_WR_RDMA_READ) {
			ibdev_err(ibdev, "inline data is not supported for RDMA READ!\n");
			return -EINVAL;
		}

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(wqe, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			wqe += wr->sg_list[i].length;
		}

		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
			     1);
	} else {
		if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
			for (i = 0; i < wr->num_sge; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}
		} else {
			roce_set_field(rc_sq_wqe->byte_20,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				       V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				       (*sge_ind) & (qp->sge.sge_cnt - 1));

			for (i = 0; i < wr->num_sge &&
			     j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
					j++;
				}
			}

			set_extend_sge(qp, wr, sge_ind, valid_num_sge);
		}

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
	}

	return 0;
}

static int check_send_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct ib_qp *ibqp = &hr_qp->ibqp;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		ibdev_err(ibdev, "QP type 0x%x is not supported!\n",
			  ibqp->qp_type);
		return -EOPNOTSUPP;
	} else if (unlikely(hr_qp->state == IB_QPS_RESET ||
		   hr_qp->state == IB_QPS_INIT ||
		   hr_qp->state == IB_QPS_RTR)) {
		ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
			  hr_qp->state);
		return -EINVAL;
	} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
		ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
			  hr_dev->state);
		return -EIO;
	}

	return 0;
}

static inline int calc_wr_sge_num(const struct ib_send_wr *wr, u32 *sge_len)
{
	int valid_num = 0;
	u32 len = 0;
	int i;

	for (i = 0; i < wr->num_sge; i++) {
		if (likely(wr->sg_list[i].length)) {
			len += wr->sg_list[i].length;
			valid_num++;
		}
	}

	*sge_len = len;
	return valid_num;
}
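
/*
 * Zero-length SGEs are skipped here, so the returned count (and the SGE
 * number programmed into the WQE) can be smaller than wr->num_sge, and
 * *sge_len accumulates the message length over non-empty buffers only.
 */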

static inline int set_ud_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(qp->ibqp.device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	int valid_num_sge;
	u32 msg_len = 0;
	bool loopback;
	u8 *smac;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
		       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
		       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
		       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
	roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
		       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
		       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S, ah->av.mac[4]);
	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
		       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S, ah->av.mac[5]);

	/* MAC loopback */
	smac = (u8 *)hr_dev->dev_addr[qp->port];
	loopback = ether_addr_equal_unaligned(ah->av.mac, smac) ? 1 : 0;

	roce_set_bit(ud_sq_wqe->byte_40,
		     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

	roce_set_field(ud_sq_wqe->byte_4,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
		       HNS_ROCE_V2_WQE_OP_SEND);

	ud_sq_wqe->msg_len = cpu_to_le32(msg_len);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		ud_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
		break;
	default:
		ud_sq_wqe->immtdata = 0;
		break;
	}

	/* Set sig attr */
	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	/* Set se attr */
	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(ud_sq_wqe->byte_4, V2_UD_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_PD_M,
		       V2_UD_SEND_WQE_BYTE_16_PD_S, to_hr_pd(qp->ibqp.pd)->pdn);

	roce_set_field(ud_sq_wqe->byte_16, V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
		       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);

	roce_set_field(ud_sq_wqe->byte_20,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
		       V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
		       curr_idx & (qp->sge.sge_cnt - 1));

	roce_set_field(ud_sq_wqe->byte_24, V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
		       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
	ud_sq_wqe->qkey = cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			  qp->qkey : ud_wr(wr)->remote_qkey);
	roce_set_field(ud_sq_wqe->byte_32, V2_UD_SEND_WQE_BYTE_32_DQPN_M,
		       V2_UD_SEND_WQE_BYTE_32_DQPN_S, ud_wr(wr)->remote_qpn);

	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_VLAN_M,
		       V2_UD_SEND_WQE_BYTE_36_VLAN_S, ah->av.vlan_id);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
		       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S, ah->av.hop_limit);
	roce_set_field(ud_sq_wqe->byte_36, V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
		       V2_UD_SEND_WQE_BYTE_36_TCLASS_S, ah->av.tclass);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
		       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S, ah->av.flowlabel);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_SL_M,
		       V2_UD_SEND_WQE_BYTE_40_SL_S, ah->av.sl);
	roce_set_field(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_PORTN_M,
		       V2_UD_SEND_WQE_BYTE_40_PORTN_S, qp->port);

	roce_set_bit(ud_sq_wqe->byte_40, V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
		     ah->av.vlan_en ? 1 : 0);
	roce_set_field(ud_sq_wqe->byte_48, V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
		       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S, ah->av.gid_index);

	memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN_V2);

	set_extend_sge(qp, wr, &curr_idx, valid_num_sge);

	*sge_idx = curr_idx;

	return 0;
}
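
/*
 * In hns_roce_v2_post_send() below, this UD path is taken only for GSI QPs.
 * Since set_extend_sge() reserves in-WQE SGE slots only for RC/UC QPs, every
 * SGE of a UD WQE goes through the extended SGE buffer, which is why
 * set_extend_sge() is called unconditionally here and the start index is
 * recorded in byte_20 above.
 */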

static inline int set_rc_wqe(struct hns_roce_qp *qp,
			     const struct ib_send_wr *wr,
			     void *wqe, unsigned int *sge_idx,
			     unsigned int owner_bit)
{
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
	unsigned int curr_idx = *sge_idx;
	int valid_num_sge;
	u32 msg_len = 0;
	int ret = 0;

	valid_num_sge = calc_wr_sge_num(wr, &msg_len);
	memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));

	rc_sq_wqe->msg_len = cpu_to_le32(msg_len);

	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->immtdata = cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
		break;
	case IB_WR_SEND_WITH_INV:
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	default:
		rc_sq_wqe->immtdata = 0;
		break;
	}

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_FENCE_S,
		     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SE_S,
		     (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_CQE_S,
		     (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

	roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OWNER_S,
		     owner_bit);

	wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
	switch (wr->opcode) {
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		rc_sq_wqe->rkey = cpu_to_le32(rdma_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(rdma_wr(wr)->remote_addr);
		break;
	case IB_WR_LOCAL_INV:
		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
		rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey);
		break;
	case IB_WR_REG_MR:
		set_frmr_seg(rc_sq_wqe, wqe, reg_wr(wr));
		break;
	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		rc_sq_wqe->rkey = cpu_to_le32(atomic_wr(wr)->rkey);
		rc_sq_wqe->va = cpu_to_le64(atomic_wr(wr)->remote_addr);
		break;
	default:
		break;
	}

	roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
		       V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
		       to_hr_opcode(wr->opcode));

	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
	    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
		set_atomic_seg(wr, wqe, rc_sq_wqe, valid_num_sge);
	else if (wr->opcode != IB_WR_REG_MR)
		ret = set_rwqe_data_seg(&qp->ibqp, wr, rc_sq_wqe,
					wqe, &curr_idx, valid_num_sge);

	*sge_idx = curr_idx;

	return ret;
}

static inline void update_sq_db(struct hns_roce_dev *hr_dev,
				struct hns_roce_qp *qp)
{
	/*
	 * Hip08 hardware cannot flush the WQEs in SQ if the QP state gets
	 * into errored mode. Hence, as a workaround to this hardware
	 * limitation, the driver needs to assist in flushing. But the
	 * flushing operation uses a mailbox to convey the QP state to the
	 * hardware, and that can sleep due to the mutex protection around
	 * the mailbox calls. Hence, use the deferred flush for now.
	 */
	if (qp->state == IB_QPS_ERR) {
		if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
			init_flush_work(hr_dev, qp);
	} else {
		struct hns_roce_v2_db sq_db = {};

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);

		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
	}
}
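
/*
 * The doorbell written above packs the QP number into the tag field, the
 * doorbell command (HNS_ROCE_V2_SQ_DB) into the cmd field, and the new SQ
 * producer index plus the service level into the parameter field. The index
 * is kept modulo twice the WQE count, the usual ring-buffer scheme that
 * keeps a completely full ring distinguishable from an empty one.
 */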

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	unsigned long flags = 0;
	unsigned int owner_bit;
	unsigned int sge_idx;
	unsigned int wqe_idx;
	void *wqe = NULL;
	int nreq;
	int ret;

	spin_lock_irqsave(&qp->sq.lock, flags);

	ret = check_send_valid(hr_dev, qp);
	if (ret) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	sge_idx = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			ibdev_err(ibdev, "num_sge=%d > qp->sq.max_gs=%d\n",
				  wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;
		owner_bit =
			~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);

		/* Build the WQE according to the QP type */
		if (ibqp->qp_type == IB_QPT_GSI)
			ret = set_ud_wqe(qp, wr, wqe, &sge_idx, owner_bit);
		else if (ibqp->qp_type == IB_QPT_RC)
			ret = set_rc_wqe(qp, wr, wqe, &sge_idx, owner_bit);

		if (ret) {
			*bad_wr = wr;
			goto out;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		qp->next_sge = sge_idx;
		/* Memory barrier */
		wmb();
		update_sq_db(hr_dev, qp);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
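
/*
 * The owner bit computed in the loop above flips every time the SQ wraps:
 * bit ilog2(wqe_cnt) of (head + nreq) changes once per lap around the ring,
 * and its inversion is stamped into each WQE so that hardware can tell newly
 * posted WQEs apart from stale ones left over from the previous lap.
 */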

static int check_recv_valid(struct hns_roce_dev *hr_dev,
			    struct hns_roce_qp *hr_qp)
{
	if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
		return -EIO;
	else if (hr_qp->state == IB_QPS_RESET)
		return -EINVAL;

	return 0;
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct ib_device *ibdev = &hr_dev->ib_dev;
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	unsigned long flags;
	void *wqe = NULL;
	u32 wqe_idx;
	int nreq;
	int ret;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	ret = check_recv_valid(hr_dev, hr_qp);
	if (ret) {
		*bad_wr = wr;
		nreq = 0;
		goto out;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			ibdev_err(ibdev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
				  wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* RQ supports inline data */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
			hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
				(u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					(void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		/*
		 * Hip08 hardware cannot flush the WQEs in RQ if the QP state
		 * gets into errored mode. Hence, as a workaround to this
		 * hardware limitation, the driver needs to assist in
		 * flushing. But the flushing operation uses a mailbox to
		 * convey the QP state to the hardware, and that can sleep
		 * due to the mutex protection around the mailbox calls.
		 * Hence, use the deferred flush for now.
		 */
		if (hr_qp->state == IB_QPS_ERR) {
			if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG,
					      &hr_qp->flush_flag))
				init_flush_work(hr_dev, hr_qp);
		} else {
			*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;
		}
	}

	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can roll back the operation like
	 * notifying hardware to free resources, and the HNAE3_INIT_CLIENT
	 * related process will exit with error to notify the NIC driver to
	 * reschedule the soft reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can roll back the operation like notifying hardware
	 * to free resources, and the HNAE3_INIT_CLIENT related process will
	 * exit with error to notify the NIC driver to reschedule the soft
	 * reset process once again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage; /* the current instance stage */
	unsigned long reset_stage; /* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Get information about reset from the NIC driver or the RoCE driver
	 * itself. The meanings of the following variables coming from the NIC
	 * driver are:
	 * reset_cnt -- The count of completed hardware resets.
	 * hw_resetting -- Whether the hardware device is resetting now.
	 * sw_resetting -- Whether the NIC's software reset process is running
	 * now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}

static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
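
/*
 * Worked example: with desc_num = 1024, next_to_use = 10 and
 * next_to_clean = 8, used = (10 - 8 + 1024) % 1024 = 2 and the function
 * returns 1021. One slot is deliberately left unused so that a completely
 * full ring (producer one slot behind consumer) remains distinguishable
 * from an empty one (producer equal to consumer).
 */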

static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

	return head == priv->cmq.csq.next_to_use;
}

static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}
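
/*
 * ROCEE_TX_CMQ_HEAD_REG is advanced by the hardware as it consumes
 * descriptors, so the loop above zeroes every slot between the software
 * consumer index (next_to_clean) and the hardware head, wrapping at
 * desc_num, and returns how many descriptors were reclaimed.
 */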

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this submission;
	 * it will be used when hardware writes back.
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is synchronous, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* get the result of hardware write back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = le16_to_cpu(desc[handle].retval);
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}
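
/*
 * Summary of the synchronous CMQ protocol implemented above: the caller's
 * descriptors are copied into the CSQ ring, the tail register is written to
 * hand them to firmware, and completion is detected by polling the head
 * register in 1 us steps, up to priv->cmq.tx_timeout iterations. On
 * completion the descriptors are copied back so the caller sees the
 * firmware's return value and any response data; -EAGAIN means the firmware
 * never caught up with the tail within the timeout.
 */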

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	int retval;
	int ret;

	ret = hns_roce_v2_rst_process_cmd(hr_dev);
	if (ret == CMD_RST_PRC_SUCCESS)
		return 0;
	if (ret == CMD_RST_PRC_EBUSY)
		return -EBUSY;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		retval = hns_roce_v2_rst_process_cmd(hr_dev);
		if (retval == CMD_RST_PRC_SUCCESS)
			return 0;
		else if (retval == CMD_RST_PRC_EBUSY)
			return -EBUSY;
	}

	return ret;
}
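
/*
 * Note the reset interaction here: when a reset has made the command queue
 * unusable, CMD_RST_PRC_SUCCESS turns the command into a silent no-op (the
 * hardware state is going away anyway), while CMD_RST_PRC_EBUSY maps to
 * -EBUSY so the caller can retry after the reset settles. A failure from
 * __hns_roce_cmq_send() is re-checked against the reset state, since a
 * reset racing with the command is a likely cause of the failure.
 */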

static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
		return true;

	return false;
}

static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
				      int flag)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_cnt;
	unsigned long end;
	bool sw_resetting;
	bool hw_resetting;

	instance_stage = handle->rinfo.instance_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
	} else if (hw_resetting) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (!ops->get_hw_reset_stat(handle)) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after reset.\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (ops->ae_dev_reset_cnt(handle) !=
			    hr_dev->reset_cnt) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after sw reset\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed because of unfinished sw reset\n");
	} else {
		if (retval && !flag)
			dev_warn(hr_dev->dev,
				 "Func clear read failed, ret = %d.\n", retval);

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	}
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (hns_roce_func_clr_chk_rst(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (hns_roce_func_clr_chk_rst(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

out:
	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						  PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						  PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
						  PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
						  PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}

static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
				      true);

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc.data;

	hr_dev->caps.qpc_timer_bt_num =
		roce_get_field(req_a->qpc_timer_bt_idx_num,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
			       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
		roce_get_field(req_a->cqc_timer_bt_idx_num,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
			       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev, int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id, VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S, vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	roce_set_field(req_a->vf_qpc_bt_idx_num,
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_qpc_bt_idx_num,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
		       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S, HNS_ROCE_VF_QPC_BT_NUM);

	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_srqc_bt_idx_num,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
		       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
		       HNS_ROCE_VF_SRQC_BT_NUM);

	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
	roce_set_field(req_a->vf_cqc_bt_idx_num,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
		       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S, HNS_ROCE_VF_CQC_BT_NUM);

	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
	roce_set_field(req_a->vf_mpt_bt_idx_num,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
		       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S, HNS_ROCE_VF_MPT_BT_NUM);

	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_IDX_M,
		       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
	roce_set_field(req_a->vf_eqc_bt_idx_num, VF_RES_A_DATA_5_VF_EQC_NUM_M,
		       VF_RES_A_DATA_5_VF_EQC_NUM_S, HNS_ROCE_VF_EQC_NUM);

	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_IDX_M,
		       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
	roce_set_field(req_b->vf_smac_idx_num, VF_RES_B_DATA_1_VF_SMAC_NUM_M,
		       VF_RES_B_DATA_1_VF_SMAC_NUM_S, HNS_ROCE_VF_SMAC_NUM);

	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_IDX_M,
		       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
	roce_set_field(req_b->vf_sgid_idx_num, VF_RES_B_DATA_2_VF_SGID_NUM_M,
		       VF_RES_B_DATA_2_VF_SGID_NUM_S, HNS_ROCE_VF_SGID_NUM);

	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_QID_IDX_M,
		       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
	roce_set_field(req_b->vf_qid_idx_sl_num, VF_RES_B_DATA_3_VF_SL_NUM_M,
		       VF_RES_B_DATA_3_VF_SL_NUM_S, HNS_ROCE_VF_SL_NUM);

	roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
	roce_set_field(req_b->vf_sccc_idx_num, VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
		       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
		       HNS_ROCE_VF_SCCC_BT_NUM);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
		       HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static void set_default_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;

	caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz = HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ;
	caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_srqs = 0;
	caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;

	caps->qpc_ba_pg_sz = 0;
	caps->qpc_buf_pg_sz = 0;
	caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz = 0;
	caps->srqc_buf_pg_sz = 0;
	caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz = 0;
	caps->cqc_buf_pg_sz = 0;
	caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz = 0;
	caps->mpt_buf_pg_sz = 0;
	caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mtt_ba_pg_sz = 0;
	caps->mtt_buf_pg_sz = 0;
	caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num = HNS_ROCE_SQWQE_HOP_NUM;
	caps->wqe_sge_hop_num = HNS_ROCE_EXT_SGE_HOP_NUM;
	caps->wqe_rq_hop_num = HNS_ROCE_RQWQE_HOP_NUM;
	caps->cqe_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_256K;
	caps->cqe_buf_pg_sz = 0;
	caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz = 0;
	caps->srqwqe_buf_pg_sz = 0;
	caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz = 0;
	caps->idx_buf_pg_sz = 0;
	caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
	caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
		      HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
		      HNS_ROCE_CAP_FLAG_RQ_INLINE |
		      HNS_ROCE_CAP_FLAG_RECORD_DB |
		      HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;

	if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC | HNS_ROCE_CAP_FLAG_MW |
			       HNS_ROCE_CAP_FLAG_SRQ | HNS_ROCE_CAP_FLAG_FRMR |
			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

		caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
		caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
		caps->qpc_timer_ba_pg_sz = 0;
		caps->qpc_timer_buf_pg_sz = 0;
		caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
		caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
		caps->cqc_timer_ba_pg_sz = 0;
		caps->cqc_timer_buf_pg_sz = 0;
		caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

		caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
		caps->sccc_ba_pg_sz = 0;
		caps->sccc_buf_pg_sz = 0;
		caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
	}
}

static void calc_pg_sz(int obj_num, int obj_size, int hop_num, int ctx_bt_num,
		       int *buf_page_size, int *bt_page_size, u32 hem_type)
{
	u64 obj_per_chunk;
	int bt_chunk_size = 1 << PAGE_SHIFT;
	int buf_chunk_size = 1 << PAGE_SHIFT;
	int obj_per_chunk_default = buf_chunk_size / obj_size;

	*buf_page_size = 0;
	*bt_page_size = 0;

	switch (hop_num) {
	case 3:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 2:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				(bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case 1:
		obj_per_chunk = ctx_bt_num * (bt_chunk_size / BA_BYTE_LEN) *
				obj_per_chunk_default;
		break;
	case HNS_ROCE_HOP_NUM_0:
		obj_per_chunk = ctx_bt_num * obj_per_chunk_default;
		break;
	default:
		pr_err("Table %d does not support hop_num = %d!\n", hem_type,
		       hop_num);
		return;
	}

	if (hem_type >= HEM_TYPE_MTT)
		*bt_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
	else
		*buf_page_size = ilog2(DIV_ROUND_UP(obj_num, obj_per_chunk));
}
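
/*
 * Worked example (assuming 4 KB base pages and BA_BYTE_LEN of 8 bytes per
 * base address): with hop_num = 2, each BT chunk holds 4096 / 8 = 512
 * addresses, so obj_per_chunk = ctx_bt_num * 512 * 512 * (4096 / obj_size).
 * Note that the value stored in *bt_page_size / *buf_page_size is not a
 * byte count but a page-size shift relative to the base page: ilog2() of
 * the number of base-page-sized chunks needed to cover obj_num objects.
 */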

static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[HNS_ROCE_QUERY_PF_CAPS_CMD_NUM];
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct hns_roce_query_pf_caps_a *resp_a;
	struct hns_roce_query_pf_caps_b *resp_b;
	struct hns_roce_query_pf_caps_c *resp_c;
	struct hns_roce_query_pf_caps_d *resp_d;
	struct hns_roce_query_pf_caps_e *resp_e;
	int ctx_hop_num;
	int pbl_hop_num;
	int ret;
	int i;

	for (i = 0; i < HNS_ROCE_QUERY_PF_CAPS_CMD_NUM; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_CAPS_NUM,
					      true);
		if (i < (HNS_ROCE_QUERY_PF_CAPS_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, HNS_ROCE_QUERY_PF_CAPS_CMD_NUM);
	if (ret)
		return ret;

	resp_a = (struct hns_roce_query_pf_caps_a *)desc[0].data;
	resp_b = (struct hns_roce_query_pf_caps_b *)desc[1].data;
	resp_c = (struct hns_roce_query_pf_caps_c *)desc[2].data;
	resp_d = (struct hns_roce_query_pf_caps_d *)desc[3].data;
	resp_e = (struct hns_roce_query_pf_caps_e *)desc[4].data;

	caps->local_ca_ack_delay = resp_a->local_ca_ack_delay;
	caps->max_sq_sg = le16_to_cpu(resp_a->max_sq_sg);
	caps->max_sq_inline = le16_to_cpu(resp_a->max_sq_inline);
	caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
	caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
	caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
	caps->num_cqc_timer = le16_to_cpu(resp_a->num_cqc_timer);
	caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
	caps->num_aeq_vectors = resp_a->num_aeq_vectors;
	caps->num_other_vectors = resp_a->num_other_vectors;
	caps->max_sq_desc_sz = resp_a->max_sq_desc_sz;
	caps->max_rq_desc_sz = resp_a->max_rq_desc_sz;
	caps->max_srq_desc_sz = resp_a->max_srq_desc_sz;
	caps->cq_entry_sz = resp_a->cq_entry_sz;

	caps->mtpt_entry_sz = resp_b->mtpt_entry_sz;
	caps->irrl_entry_sz = resp_b->irrl_entry_sz;
	caps->trrl_entry_sz = resp_b->trrl_entry_sz;
	caps->cqc_entry_sz = resp_b->cqc_entry_sz;
	caps->srqc_entry_sz = resp_b->srqc_entry_sz;
	caps->idx_entry_sz = resp_b->idx_entry_sz;
	caps->sccc_entry_sz = resp_b->scc_ctx_entry_sz;
	caps->max_mtu = resp_b->max_mtu;
	caps->qpc_entry_sz = le16_to_cpu(resp_b->qpc_entry_sz);
	caps->min_cqes = resp_b->min_cqes;
	caps->min_wqes = resp_b->min_wqes;
	caps->page_size_cap = le32_to_cpu(resp_b->page_size_cap);
	caps->pkey_table_len[0] = resp_b->pkey_table_len;
	caps->phy_num_uars = resp_b->phy_num_uars;
	ctx_hop_num = resp_b->ctx_hop_num;
	pbl_hop_num = resp_b->pbl_hop_num;

	caps->num_pds = 1 << roce_get_field(resp_c->cap_flags_num_pds,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_M,
					    V2_QUERY_PF_CAPS_C_NUM_PDS_S);
	caps->flags = roce_get_field(resp_c->cap_flags_num_pds,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_M,
				     V2_QUERY_PF_CAPS_C_CAP_FLAGS_S);
	caps->num_cqs = 1 << roce_get_field(resp_c->max_gid_num_cqs,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_M,
					    V2_QUERY_PF_CAPS_C_NUM_CQS_S);
	caps->gid_table_len[0] = roce_get_field(resp_c->max_gid_num_cqs,
						V2_QUERY_PF_CAPS_C_MAX_GID_M,
						V2_QUERY_PF_CAPS_C_MAX_GID_S);
	caps->max_cqes = 1 << roce_get_field(resp_c->cq_depth,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_M,
					     V2_QUERY_PF_CAPS_C_CQ_DEPTH_S);
	caps->num_mtpts = 1 << roce_get_field(resp_c->num_mrws,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_M,
					      V2_QUERY_PF_CAPS_C_NUM_MRWS_S);
	caps->num_qps = 1 << roce_get_field(resp_c->ord_num_qps,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_M,
					    V2_QUERY_PF_CAPS_C_NUM_QPS_S);
	caps->max_qp_init_rdma = roce_get_field(resp_c->ord_num_qps,
						V2_QUERY_PF_CAPS_C_MAX_ORD_M,
						V2_QUERY_PF_CAPS_C_MAX_ORD_S);
	caps->max_qp_dest_rdma = caps->max_qp_init_rdma;
	caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
	caps->num_srqs = 1 << roce_get_field(resp_d->wq_hop_num_max_srqs,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_M,
					     V2_QUERY_PF_CAPS_D_NUM_SRQS_S);
	caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
	caps->ceqe_depth = 1 << roce_get_field(resp_d->num_ceqs_ceq_depth,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_CEQ_DEPTH_S);
	caps->num_comp_vectors = roce_get_field(resp_d->num_ceqs_ceq_depth,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_M,
						V2_QUERY_PF_CAPS_D_NUM_CEQS_S);
	caps->aeqe_depth = 1 << roce_get_field(resp_d->arm_st_aeq_depth,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_M,
					       V2_QUERY_PF_CAPS_D_AEQ_DEPTH_S);
	caps->default_aeq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
						  V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_M,
						  V2_QUERY_PF_CAPS_D_AEQ_ARM_ST_S);
	caps->default_ceq_arm_st = roce_get_field(resp_d->arm_st_aeq_depth,
						  V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_M,
						  V2_QUERY_PF_CAPS_D_CEQ_ARM_ST_S);
	caps->reserved_pds = roce_get_field(resp_d->num_uars_rsv_pds,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_M,
					    V2_QUERY_PF_CAPS_D_RSV_PDS_S);
	caps->num_uars = 1 << roce_get_field(resp_d->num_uars_rsv_pds,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_M,
					     V2_QUERY_PF_CAPS_D_NUM_UARS_S);
	caps->reserved_qps = roce_get_field(resp_d->rsv_uars_rsv_qps,
1827 V2_QUERY_PF_CAPS_D_RSV_QPS_M,
1828 V2_QUERY_PF_CAPS_D_RSV_QPS_S);
1829 caps->reserved_uars = roce_get_field(resp_d->rsv_uars_rsv_qps,
1830 V2_QUERY_PF_CAPS_D_RSV_UARS_M,
1831 V2_QUERY_PF_CAPS_D_RSV_UARS_S);
1832 caps->reserved_mrws = roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1833 V2_QUERY_PF_CAPS_E_RSV_MRWS_M,
1834 V2_QUERY_PF_CAPS_E_RSV_MRWS_S);
1835 caps->chunk_sz = 1 << roce_get_field(resp_e->chunk_size_shift_rsv_mrws,
1836 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_M,
1837 V2_QUERY_PF_CAPS_E_CHUNK_SIZE_SHIFT_S);
1838 caps->reserved_cqs = roce_get_field(resp_e->rsv_cqs,
1839 V2_QUERY_PF_CAPS_E_RSV_CQS_M,
1840 V2_QUERY_PF_CAPS_E_RSV_CQS_S);
1841 caps->reserved_srqs = roce_get_field(resp_e->rsv_srqs,
1842 V2_QUERY_PF_CAPS_E_RSV_SRQS_M,
1843 V2_QUERY_PF_CAPS_E_RSV_SRQS_S);
1844 caps->reserved_lkey = roce_get_field(resp_e->rsv_lkey,
1845 V2_QUERY_PF_CAPS_E_RSV_LKEYS_M,
1846 V2_QUERY_PF_CAPS_E_RSV_LKEYS_S);
1847 caps->default_ceq_max_cnt = le16_to_cpu(resp_e->ceq_max_cnt);
1848 caps->default_ceq_period = le16_to_cpu(resp_e->ceq_period);
1849 caps->default_aeq_max_cnt = le16_to_cpu(resp_e->aeq_max_cnt);
1850 caps->default_aeq_period = le16_to_cpu(resp_e->aeq_period);
1852 caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
1853 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
1854 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
1855 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1856 caps->mtt_ba_pg_sz = 0;
1857 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1858 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1859 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1861 caps->qpc_hop_num = ctx_hop_num;
1862 caps->srqc_hop_num = ctx_hop_num;
1863 caps->cqc_hop_num = ctx_hop_num;
1864 caps->mpt_hop_num = ctx_hop_num;
1865 caps->mtt_hop_num = pbl_hop_num;
1866 caps->cqe_hop_num = pbl_hop_num;
1867 caps->srqwqe_hop_num = pbl_hop_num;
1868 caps->idx_hop_num = pbl_hop_num;
1869 caps->wqe_sq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1870 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_M,
1871 V2_QUERY_PF_CAPS_D_SQWQE_HOP_NUM_S);
1872 caps->wqe_sge_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1873 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_M,
1874 V2_QUERY_PF_CAPS_D_EX_SGE_HOP_NUM_S);
1875 caps->wqe_rq_hop_num = roce_get_field(resp_d->wq_hop_num_max_srqs,
1876 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_M,
1877 V2_QUERY_PF_CAPS_D_RQWQE_HOP_NUM_S);
1879 calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num,
1880 caps->qpc_bt_num, &caps->qpc_buf_pg_sz, &caps->qpc_ba_pg_sz,
1881 HEM_TYPE_QPC);
1882 calc_pg_sz(caps->num_mtpts, caps->mtpt_entry_sz, caps->mpt_hop_num,
1883 caps->mpt_bt_num, &caps->mpt_buf_pg_sz, &caps->mpt_ba_pg_sz,
1884 HEM_TYPE_MTPT);
1885 calc_pg_sz(caps->num_cqs, caps->cqc_entry_sz, caps->cqc_hop_num,
1886 caps->cqc_bt_num, &caps->cqc_buf_pg_sz, &caps->cqc_ba_pg_sz,
1887 HEM_TYPE_CQC);
1888 calc_pg_sz(caps->num_srqs, caps->srqc_entry_sz, caps->srqc_hop_num,
1889 caps->srqc_bt_num, &caps->srqc_buf_pg_sz,
1890 &caps->srqc_ba_pg_sz, HEM_TYPE_SRQC);
1892 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
1893 caps->sccc_hop_num = ctx_hop_num;
1894 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1895 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
1897 calc_pg_sz(caps->num_qps, caps->sccc_entry_sz,
1898 caps->sccc_hop_num, caps->sccc_bt_num,
1899 &caps->sccc_buf_pg_sz, &caps->sccc_ba_pg_sz,
1900 HEM_TYPE_SCCC);
1901 calc_pg_sz(caps->num_cqc_timer, caps->cqc_timer_entry_sz,
1902 caps->cqc_timer_hop_num, caps->cqc_timer_bt_num,
1903 &caps->cqc_timer_buf_pg_sz,
1904 &caps->cqc_timer_ba_pg_sz, HEM_TYPE_CQC_TIMER);
1907 calc_pg_sz(caps->num_cqe_segs, caps->mtt_entry_sz, caps->cqe_hop_num,
1908 1, &caps->cqe_buf_pg_sz, &caps->cqe_ba_pg_sz, HEM_TYPE_CQE);
1909 calc_pg_sz(caps->num_srqwqe_segs, caps->mtt_entry_sz,
1910 caps->srqwqe_hop_num, 1, &caps->srqwqe_buf_pg_sz,
1911 &caps->srqwqe_ba_pg_sz, HEM_TYPE_SRQWQE);
1912 calc_pg_sz(caps->num_idx_segs, caps->idx_entry_sz, caps->idx_hop_num,
1913 1, &caps->idx_buf_pg_sz, &caps->idx_ba_pg_sz, HEM_TYPE_IDX);
1915 return 0;
1918 static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
1920 struct hns_roce_caps *caps = &hr_dev->caps;
1921 int ret;
1923 ret = hns_roce_cmq_query_hw_info(hr_dev);
1924 if (ret) {
1925 dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
1926 ret);
1927 return ret;
1930 ret = hns_roce_query_fw_ver(hr_dev);
1931 if (ret) {
1932 dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
1933 ret);
1934 return ret;
1937 ret = hns_roce_config_global_param(hr_dev);
1938 if (ret) {
1939 dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
1940 ret);
1941 return ret;
1944 /* Get the resources owned by each PF */
1945 ret = hns_roce_query_pf_resource(hr_dev);
1946 if (ret) {
1947 dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
1948 ret);
1949 return ret;
1952 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B) {
1953 ret = hns_roce_query_pf_timer_resource(hr_dev);
1954 if (ret) {
1955 dev_err(hr_dev->dev,
1956 "Query pf timer resource fail, ret = %d.\n",
1957 ret);
1958 return ret;
1961 ret = hns_roce_set_vf_switch_param(hr_dev, 0);
1962 if (ret) {
1963 dev_err(hr_dev->dev,
1964 "Set function switch param fail, ret = %d.\n",
1965 ret);
1966 return ret;
1970 hr_dev->vendor_part_id = hr_dev->pci_dev->device;
1971 hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
1973 caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
1974 caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
1975 caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
1976 caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
1978 caps->pbl_ba_pg_sz = HNS_ROCE_BA_PG_SZ_SUPPORTED_16K;
1979 caps->pbl_buf_pg_sz = 0;
1980 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
1981 caps->eqe_ba_pg_sz = 0;
1982 caps->eqe_buf_pg_sz = 0;
1983 caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
1984 caps->tsq_buf_pg_sz = 0;
1986 ret = hns_roce_query_pf_caps(hr_dev);
1987 if (ret)
1988 set_default_caps(hr_dev);
1990 ret = hns_roce_alloc_vf_resource(hr_dev);
1991 if (ret) {
1992 dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
1993 ret);
1994 return ret;
1997 ret = hns_roce_v2_set_bt(hr_dev);
1998 if (ret)
1999 dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
2000 ret);
2002 return ret;
2005 static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
2006 enum hns_roce_link_table_type type)
2008 struct hns_roce_cmq_desc desc[2];
2009 struct hns_roce_cfg_llm_a *req_a =
2010 (struct hns_roce_cfg_llm_a *)desc[0].data;
2011 struct hns_roce_cfg_llm_b *req_b =
2012 (struct hns_roce_cfg_llm_b *)desc[1].data;
2013 struct hns_roce_v2_priv *priv = hr_dev->priv;
2014 struct hns_roce_link_table *link_tbl;
2015 struct hns_roce_link_table_entry *entry;
2016 enum hns_roce_opcode_type opcode;
2017 u32 page_num;
2018 int i;
2020 switch (type) {
2021 case TSQ_LINK_TABLE:
2022 link_tbl = &priv->tsq;
2023 opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
2024 break;
2025 case TPQ_LINK_TABLE:
2026 link_tbl = &priv->tpq;
2027 opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
2028 break;
2029 default:
2030 return -EINVAL;
2033 page_num = link_tbl->npages;
2034 entry = link_tbl->table.buf;
2035 memset(req_a, 0, sizeof(*req_a));
2036 memset(req_b, 0, sizeof(*req_b));
2038 for (i = 0; i < 2; i++) {
2039 hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);
2041 if (i == 0)
2042 desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2043 else
2044 desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
2046 if (i == 0) {
2047 req_a->base_addr_l =
2048 cpu_to_le32(link_tbl->table.map & 0xffffffff);
2049 req_a->base_addr_h =
2050 cpu_to_le32(link_tbl->table.map >> 32);
2051 roce_set_field(req_a->depth_pgsz_init_en,
2052 CFG_LLM_QUE_DEPTH_M, CFG_LLM_QUE_DEPTH_S,
2053 link_tbl->npages);
2054 roce_set_field(req_a->depth_pgsz_init_en,
2055 CFG_LLM_QUE_PGSZ_M, CFG_LLM_QUE_PGSZ_S,
2056 link_tbl->pg_sz);
2057 req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
2058 req_a->head_ba_h_nxtptr =
2059 cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
2060 roce_set_field(req_a->head_ptr, CFG_LLM_HEAD_PTR_M,
2061 CFG_LLM_HEAD_PTR_S, 0);
2062 } else {
2063 req_b->tail_ba_l =
2064 cpu_to_le32(entry[page_num - 1].blk_ba0);
2065 roce_set_field(req_b->tail_ba_h, CFG_LLM_TAIL_BA_H_M,
2066 CFG_LLM_TAIL_BA_H_S,
2067 entry[page_num - 1].blk_ba1_nxt_ptr &
2068 HNS_ROCE_LINK_TABLE_BA1_M);
2069 roce_set_field(req_b->tail_ptr, CFG_LLM_TAIL_PTR_M,
2070 CFG_LLM_TAIL_PTR_S,
2071 (entry[page_num - 2].blk_ba1_nxt_ptr &
2072 HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
2073 HNS_ROCE_LINK_TABLE_NXT_PTR_S);
2076 roce_set_field(req_a->depth_pgsz_init_en, CFG_LLM_INIT_EN_M,
2077 CFG_LLM_INIT_EN_S, 1);
2079 return hns_roce_cmq_send(hr_dev, desc, 2);
2082 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
2083 enum hns_roce_link_table_type type)
2085 struct hns_roce_v2_priv *priv = hr_dev->priv;
2086 struct hns_roce_link_table *link_tbl;
2087 struct hns_roce_link_table_entry *entry;
2088 struct device *dev = hr_dev->dev;
2089 u32 buf_chk_sz;
2090 dma_addr_t t;
2091 int func_num = 1;
2092 int pg_num_a;
2093 int pg_num_b;
2094 int pg_num;
2095 int size;
2096 int i;
2098 switch (type) {
2099 case TSQ_LINK_TABLE:
2100 link_tbl = &priv->tsq;
2101 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
2102 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
2103 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
2104 break;
2105 case TPQ_LINK_TABLE:
2106 link_tbl = &priv->tpq;
2107 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
2108 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
2109 pg_num_b = 2 * 4 * func_num + 2;
2110 break;
2111 default:
2112 return -EINVAL;
2115 pg_num = max(pg_num_a, pg_num_b);
2116 size = pg_num * sizeof(struct hns_roce_link_table_entry);
2118 link_tbl->table.buf = dma_alloc_coherent(dev, size,
2119 &link_tbl->table.map,
2120 GFP_KERNEL);
2121 if (!link_tbl->table.buf)
2122 goto out;
2124 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
2125 GFP_KERNEL);
2126 if (!link_tbl->pg_list)
2127 goto err_kcalloc_failed;
2129 entry = link_tbl->table.buf;
2130 for (i = 0; i < pg_num; ++i) {
2131 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
2132 &t, GFP_KERNEL);
2133 if (!link_tbl->pg_list[i].buf)
2134 goto err_alloc_buf_failed;
2136 link_tbl->pg_list[i].map = t;
2138 entry[i].blk_ba0 = (u32)(t >> 12);
2139 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
2141 if (i < (pg_num - 1))
2142 entry[i].blk_ba1_nxt_ptr |=
2143 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
2146 link_tbl->npages = pg_num;
2147 link_tbl->pg_sz = buf_chk_sz;
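/*
 * Packing sketch for one link-table entry (illustrative DMA address,
 * assuming the 12-bit shift reflects 4 KB alignment): for
 * t = 0x0000123456789000, blk_ba0 = (u32)(t >> 12) = 0x23456789 and
 * blk_ba1_nxt_ptr = (u32)(t >> 44) = 0x1, before the next-page index is
 * OR-ed in at HNS_ROCE_LINK_TABLE_NXT_PTR_S for all but the last entry.
 */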
2149 return hns_roce_config_link_table(hr_dev, type);
2151 err_alloc_buf_failed:
2152 for (i -= 1; i >= 0; i--)
2153 dma_free_coherent(dev, buf_chk_sz,
2154 link_tbl->pg_list[i].buf,
2155 link_tbl->pg_list[i].map);
2156 kfree(link_tbl->pg_list);
2158 err_kcalloc_failed:
2159 dma_free_coherent(dev, size, link_tbl->table.buf,
2160 link_tbl->table.map);
2162 out:
2163 return -ENOMEM;
2166 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
2167 struct hns_roce_link_table *link_tbl)
2169 struct device *dev = hr_dev->dev;
2170 int size;
2171 int i;
2173 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
2175 for (i = 0; i < link_tbl->npages; ++i)
2176 if (link_tbl->pg_list[i].buf)
2177 dma_free_coherent(dev, link_tbl->pg_sz,
2178 link_tbl->pg_list[i].buf,
2179 link_tbl->pg_list[i].map);
2180 kfree(link_tbl->pg_list);
2182 dma_free_coherent(dev, size, link_tbl->table.buf,
2183 link_tbl->table.map);
2186 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
2188 struct hns_roce_v2_priv *priv = hr_dev->priv;
2189 int qpc_count, cqc_count;
2190 int ret, i;
2192 /* TSQ includes SQ doorbell and ack doorbell */
2193 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
2194 if (ret) {
2195 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
2196 return ret;
2199 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
2200 if (ret) {
2201 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
2202 goto err_tpq_init_failed;
2205 /* Alloc memory for QPC Timer buffer space chunk */
2206 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
2207 qpc_count++) {
2208 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
2209 qpc_count);
2210 if (ret) {
2211 dev_err(hr_dev->dev, "QPC Timer get failed\n");
2212 goto err_qpc_timer_failed;
2216 /* Alloc memory for CQC Timer buffer space chunk */
2217 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
2218 cqc_count++) {
2219 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
2220 cqc_count);
2221 if (ret) {
2222 dev_err(hr_dev->dev, "CQC Timer get failed\n");
2223 goto err_cqc_timer_failed;
2227 return 0;
2229 err_cqc_timer_failed:
2230 for (i = 0; i < cqc_count; i++)
2231 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2233 err_qpc_timer_failed:
2234 for (i = 0; i < qpc_count; i++)
2235 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2237 hns_roce_free_link_table(hr_dev, &priv->tpq);
2239 err_tpq_init_failed:
2240 hns_roce_free_link_table(hr_dev, &priv->tsq);
2242 return ret;
2245 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2247 struct hns_roce_v2_priv *priv = hr_dev->priv;
2249 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B)
2250 hns_roce_function_clear(hr_dev);
2252 hns_roce_free_link_table(hr_dev, &priv->tpq);
2253 hns_roce_free_link_table(hr_dev, &priv->tsq);
2256 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2258 struct hns_roce_cmq_desc desc;
2259 struct hns_roce_mbox_status *mb_st =
2260 (struct hns_roce_mbox_status *)desc.data;
2261 enum hns_roce_cmd_return_status status;
2263 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2265 status = hns_roce_cmq_send(hr_dev, &desc, 1);
2266 if (status)
2267 return status;
2269 return le32_to_cpu(mb_st->mb_status_hw_run);
2272 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2274 u32 status = hns_roce_query_mbox_status(hr_dev);
2276 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2279 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2281 u32 status = hns_roce_query_mbox_status(hr_dev);
2283 return status & HNS_ROCE_HW_MB_STATUS_MASK;
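/*
 * Layout sketch of mb_status_hw_run as implied by the two helpers above
 * (inferred from the mask and shift, not from a datasheet): the low bits
 * hold the mailbox completion status, and the bit at
 * HNS_ROCE_HW_RUN_BIT_SHIFT is set while the hardware is still busy, so
 * "pending" and "complete" are two views of the same register word.
 */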
2286 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2287 u64 out_param, u32 in_modifier, u8 op_modifier,
2288 u16 op, u16 token, int event)
2290 struct hns_roce_cmq_desc desc;
2291 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2293 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2295 mb->in_param_l = cpu_to_le32(in_param);
2296 mb->in_param_h = cpu_to_le32(in_param >> 32);
2297 mb->out_param_l = cpu_to_le32(out_param);
2298 mb->out_param_h = cpu_to_le32(out_param >> 32);
2299 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2300 mb->token_event_en = cpu_to_le32(event << 16 | token);
2302 return hns_roce_cmq_send(hr_dev, &desc, 1);
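/*
 * Field-packing example for the mailbox descriptor above (illustrative
 * values only): in_modifier = 0x12 and op = 0x34 give
 * cmd_tag = 0x12 << 8 | 0x34 = 0x1234, while event = 1 and token = 0x5678
 * give token_event_en = 1 << 16 | 0x5678 = 0x15678.
 */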
2305 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2306 u64 out_param, u32 in_modifier, u8 op_modifier,
2307 u16 op, u16 token, int event)
2309 struct device *dev = hr_dev->dev;
2310 unsigned long end;
2311 int ret;
2313 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2314 while (hns_roce_v2_cmd_pending(hr_dev)) {
2315 if (time_after(jiffies, end)) {
2316 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2317 (int)end);
2318 return -EAGAIN;
2320 cond_resched();
2323 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2324 op_modifier, op, token, event);
2325 if (ret)
2326 dev_err(dev, "Post mailbox failed (%d)\n", ret);
2328 return ret;
2331 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2332 unsigned long timeout)
2334 struct device *dev = hr_dev->dev;
2335 unsigned long end;
2336 u32 status;
2338 end = msecs_to_jiffies(timeout) + jiffies;
2339 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2340 cond_resched();
2342 if (hns_roce_v2_cmd_pending(hr_dev)) {
2343 dev_err(dev, "[cmd_poll] hw run cmd timed out!\n");
2344 return -ETIMEDOUT;
2347 status = hns_roce_v2_cmd_complete(hr_dev);
2348 if (status != 0x1) {
2349 if (status == CMD_RST_PRC_EBUSY)
2350 return status;
2352 dev_err(dev, "mailbox status 0x%x!\n", status);
2353 return -EBUSY;
2356 return 0;
2359 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2360 int gid_index, const union ib_gid *gid,
2361 enum hns_roce_sgid_type sgid_type)
2363 struct hns_roce_cmq_desc desc;
2364 struct hns_roce_cfg_sgid_tb *sgid_tb =
2365 (struct hns_roce_cfg_sgid_tb *)desc.data;
2366 u32 *p;
2368 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2370 roce_set_field(sgid_tb->table_idx_rsv, CFG_SGID_TB_TABLE_IDX_M,
2371 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2372 roce_set_field(sgid_tb->vf_sgid_type_rsv, CFG_SGID_TB_VF_SGID_TYPE_M,
2373 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2375 p = (u32 *)&gid->raw[0];
2376 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2378 p = (u32 *)&gid->raw[4];
2379 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2381 p = (u32 *)&gid->raw[8];
2382 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2384 p = (u32 *)&gid->raw[0xc];
2385 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2387 return hns_roce_cmq_send(hr_dev, &desc, 1);
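/*
 * GID word mapping sketch (my reading of the four raw loads above,
 * assuming a little-endian host): raw GID bytes 0-3 land in vf_sgid_l,
 * 4-7 in vf_sgid_ml, 8-11 in vf_sgid_mh and 12-15 in vf_sgid_h, with the
 * lowest-numbered byte of each group becoming the least significant byte
 * of the little-endian command word.
 */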
2390 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2391 int gid_index, const union ib_gid *gid,
2392 const struct ib_gid_attr *attr)
2394 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2395 int ret;
2397 if (!gid || !attr)
2398 return -EINVAL;
2400 if (attr->gid_type == IB_GID_TYPE_ROCE)
2401 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2403 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2404 if (ipv6_addr_v4mapped((void *)gid))
2405 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2406 else
2407 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2410 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2411 if (ret)
2412 ibdev_err(&hr_dev->ib_dev,
2413 "failed to configure sgid table, ret = %d!\n",
2414 ret);
2416 return ret;
2419 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2420 u8 *addr)
2422 struct hns_roce_cmq_desc desc;
2423 struct hns_roce_cfg_smac_tb *smac_tb =
2424 (struct hns_roce_cfg_smac_tb *)desc.data;
2425 u16 reg_smac_h;
2426 u32 reg_smac_l;
2428 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2430 reg_smac_l = *(u32 *)(&addr[0]);
2431 reg_smac_h = *(u16 *)(&addr[4]);
2433 memset(smac_tb, 0, sizeof(*smac_tb));
2434 roce_set_field(smac_tb->tb_idx_rsv,
2435 CFG_SMAC_TB_IDX_M,
2436 CFG_SMAC_TB_IDX_S, phy_port);
2437 roce_set_field(smac_tb->vf_smac_h_rsv,
2438 CFG_SMAC_TB_VF_SMAC_H_M,
2439 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2440 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2442 return hns_roce_cmq_send(hr_dev, &desc, 1);
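/*
 * MAC packing example (illustrative address, assuming a little-endian
 * host as the raw u32/u16 loads above imply): for
 * addr = 00:11:22:33:44:55, reg_smac_l reads as 0x33221100 and
 * reg_smac_h as 0x5544, i.e. the first four octets in the low word and
 * the last two in the high half-word.
 */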
2445 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2446 struct hns_roce_mr *mr)
2448 struct sg_dma_page_iter sg_iter;
2449 u64 page_addr;
2450 u64 *pages;
2451 int i;
2453 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2454 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2455 roce_set_field(mpt_entry->byte_48_mode_ba,
2456 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2457 upper_32_bits(mr->pbl_ba >> 3));
2459 pages = (u64 *)__get_free_page(GFP_KERNEL);
2460 if (!pages)
2461 return -ENOMEM;
2463 i = 0;
2464 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2465 page_addr = sg_page_iter_dma_address(&sg_iter);
2466 pages[i] = page_addr >> 6;
2468 /* Record the first two entries directly in the MTPT table */
2469 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2470 goto found;
2471 i++;
2473 found:
2474 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2475 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2476 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2478 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2479 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2480 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2481 roce_set_field(mpt_entry->byte_64_buf_pa1,
2482 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2483 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2484 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2486 free_page((unsigned long)pages);
2488 return 0;
2491 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2492 unsigned long mtpt_idx)
2494 struct hns_roce_v2_mpt_entry *mpt_entry;
2495 int ret;
2497 mpt_entry = mb_buf;
2498 memset(mpt_entry, 0, sizeof(*mpt_entry));
2500 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2501 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2502 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2503 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2504 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2505 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2506 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2507 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2508 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2509 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2510 V2_MPT_BYTE_4_PD_S, mr->pd);
2512 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2513 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2514 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2515 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2516 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2517 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2518 mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2519 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2520 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2521 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2522 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2523 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2524 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2526 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2527 mr->type == MR_TYPE_MR ? 0 : 1);
2528 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2529 1);
2531 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2532 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2533 mpt_entry->lkey = cpu_to_le32(mr->key);
2534 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2535 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2537 if (mr->type == MR_TYPE_DMA)
2538 return 0;
2540 ret = set_mtpt_pbl(mpt_entry, mr);
2542 return ret;
2545 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2546 struct hns_roce_mr *mr, int flags,
2547 u32 pdn, int mr_access_flags, u64 iova,
2548 u64 size, void *mb_buf)
2550 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2551 int ret = 0;
2553 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2554 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2556 if (flags & IB_MR_REREG_PD) {
2557 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2558 V2_MPT_BYTE_4_PD_S, pdn);
2559 mr->pd = pdn;
2562 if (flags & IB_MR_REREG_ACCESS) {
2563 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2564 V2_MPT_BYTE_8_BIND_EN_S,
2565 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2566 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2567 V2_MPT_BYTE_8_ATOMIC_EN_S,
2568 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2569 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2570 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2571 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2572 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2573 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2574 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2577 if (flags & IB_MR_REREG_TRANS) {
2578 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2579 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2580 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2581 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2583 mr->iova = iova;
2584 mr->size = size;
2586 ret = set_mtpt_pbl(mpt_entry, mr);
2589 return ret;
2592 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2594 struct hns_roce_v2_mpt_entry *mpt_entry;
2596 mpt_entry = mb_buf;
2597 memset(mpt_entry, 0, sizeof(*mpt_entry));
2599 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2600 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2601 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2602 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2603 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2604 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2605 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2606 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2607 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2608 V2_MPT_BYTE_4_PD_S, mr->pd);
2610 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2611 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2612 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2614 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2615 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2616 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2617 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2619 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2621 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2622 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2623 V2_MPT_BYTE_48_PBL_BA_H_S,
2624 upper_32_bits(mr->pbl_ba >> 3));
2626 roce_set_field(mpt_entry->byte_64_buf_pa1,
2627 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2628 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2629 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2631 return 0;
2634 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2636 struct hns_roce_v2_mpt_entry *mpt_entry;
2638 mpt_entry = mb_buf;
2639 memset(mpt_entry, 0, sizeof(*mpt_entry));
2641 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2642 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2643 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2644 V2_MPT_BYTE_4_PD_S, mw->pdn);
2645 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2646 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2647 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
2648 mw->pbl_hop_num);
2649 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2650 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2651 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2652 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2654 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2655 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2657 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2658 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2659 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2660 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2661 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2663 roce_set_field(mpt_entry->byte_64_buf_pa1,
2664 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2665 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2666 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2668 mpt_entry->lkey = cpu_to_le32(mw->rkey);
2670 return 0;
2673 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2675 return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2678 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2680 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2682 /* A CQE is valid when its owner bit differs from the MSB of cons_idx */
2683 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2684 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
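/*
 * Owner-bit example (illustrative numbers, not from a trace): with
 * cq_depth = 256 and ib_cq.cqe = 255, cons_index n = 300 selects slot
 * 300 & 255 = 44 and !!(300 & 256) = 1, so that CQE is returned only
 * while its owner bit is 0; after the producer wraps and flips the bit,
 * the same test holds with inverted polarity.
 */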
2687 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2689 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2692 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2694 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2697 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2699 /* always called with interrupts disabled. */
2700 spin_lock(&srq->lock);
2702 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2703 srq->tail++;
2705 spin_unlock(&srq->lock);
2708 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2710 *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
2713 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2714 struct hns_roce_srq *srq)
2716 struct hns_roce_v2_cqe *cqe, *dest;
2717 u32 prod_index;
2718 int nfreed = 0;
2719 int wqe_index;
2720 u8 owner_bit;
2722 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2723 ++prod_index) {
2724 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2725 break;
2728 /*
2729 * Now sweep backwards through the CQ, removing CQ entries
2730 * that match our QP by overwriting them with the next entries.
2731 */
2732 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2733 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2734 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2735 V2_CQE_BYTE_16_LCL_QPN_S) &
2736 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2737 if (srq &&
2738 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2739 wqe_index = roce_get_field(cqe->byte_4,
2740 V2_CQE_BYTE_4_WQE_INDX_M,
2741 V2_CQE_BYTE_4_WQE_INDX_S);
2742 hns_roce_free_srq_wqe(srq, wqe_index);
2744 ++nfreed;
2745 } else if (nfreed) {
2746 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2747 hr_cq->ib_cq.cqe);
2748 owner_bit = roce_get_bit(dest->byte_4,
2749 V2_CQE_BYTE_4_OWNER_S);
2750 memcpy(dest, cqe, sizeof(*cqe));
2751 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2752 owner_bit);
2756 if (nfreed) {
2757 hr_cq->cons_index += nfreed;
2758 /*
2759 * Make sure the update of the buffer contents is done before
2760 * updating the consumer index.
2761 */
2762 wmb();
2763 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2767 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2768 struct hns_roce_srq *srq)
2770 spin_lock_irq(&hr_cq->lock);
2771 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2772 spin_unlock_irq(&hr_cq->lock);
2775 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2776 struct hns_roce_cq *hr_cq, void *mb_buf,
2777 u64 *mtts, dma_addr_t dma_handle)
2779 struct hns_roce_v2_cq_context *cq_context;
2781 cq_context = mb_buf;
2782 memset(cq_context, 0, sizeof(*cq_context));
2784 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2785 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2786 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2787 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2788 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2789 V2_CQC_BYTE_4_SHIFT_S, ilog2(hr_cq->cq_depth));
2790 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2791 V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
2793 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2794 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2796 cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
2798 roce_set_field(cq_context->byte_16_hop_addr,
2799 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2800 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2801 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2802 roce_set_field(cq_context->byte_16_hop_addr,
2803 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2804 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2805 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2807 cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
2808 roce_set_field(cq_context->byte_24_pgsz_addr,
2809 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2810 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2811 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
2812 roce_set_field(cq_context->byte_24_pgsz_addr,
2813 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2814 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2815 hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2816 roce_set_field(cq_context->byte_24_pgsz_addr,
2817 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2818 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2819 hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2821 cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
2823 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2824 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2826 if (hr_cq->db_en)
2827 roce_set_bit(cq_context->byte_44_db_record,
2828 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2830 roce_set_field(cq_context->byte_44_db_record,
2831 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2832 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2833 ((u32)hr_cq->db.dma) >> 1);
2834 cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
2836 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2837 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2838 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2839 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2840 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2841 V2_CQC_BYTE_56_CQ_PERIOD_M,
2842 V2_CQC_BYTE_56_CQ_PERIOD_S,
2843 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
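/*
 * Doorbell-record address split example (illustrative DMA address): for
 * db.dma = 0x123456789, the low word 0x23456789 is stored shifted right
 * by one as 0x11a2b3c4 in DB_RECORD_ADDR, and db_record_addr receives
 * the upper word 0x1; dropping the low bit relies on the record being at
 * least 2-byte aligned, which this sketch assumes.
 */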
2846 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2847 enum ib_cq_notify_flags flags)
2849 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2850 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2851 u32 notification_flag;
2852 __le32 doorbell[2];
2854 doorbell[0] = 0;
2855 doorbell[1] = 0;
2857 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2858 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2859 /*
2860 * flags = 0: Notification Flag = 1, next
2861 * flags = 1: Notification Flag = 0, solicited
2862 */
2863 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2864 hr_cq->cqn);
2865 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2866 HNS_ROCE_V2_CQ_DB_NTR);
2867 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2868 V2_CQ_DB_PARAMETER_CONS_IDX_S,
2869 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2870 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2871 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2872 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2873 notification_flag);
2875 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2877 return 0;
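/*
 * Consumer-index masking example for the doorbell above (assumed sizes):
 * with cq_depth = 256 the mask is (256 << 1) - 1 = 511, so cons_index
 * 600 is reported as 600 & 511 = 88; keeping one bit beyond the queue
 * size presumably lets the hardware distinguish a full ring from an
 * empty one.
 */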
2880 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2881 struct hns_roce_qp **cur_qp,
2882 struct ib_wc *wc)
2884 struct hns_roce_rinl_sge *sge_list;
2885 u32 wr_num, wr_cnt, sge_num;
2886 u32 sge_cnt, data_len, size;
2887 void *wqe_buf;
2889 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2890 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2891 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2893 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2894 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2895 wqe_buf = hns_roce_get_recv_wqe(*cur_qp, wr_cnt);
2896 data_len = wc->byte_len;
2898 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2899 size = min(sge_list[sge_cnt].len, data_len);
2900 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2902 data_len -= size;
2903 wqe_buf += size;
2906 if (data_len) {
2907 wc->status = IB_WC_LOC_LEN_ERR;
2908 return -EAGAIN;
2911 return 0;
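/*
 * Copy-loop example (invented lengths): for wc->byte_len = 100 and two
 * SGEs of 64 bytes each, the loop copies 64 bytes into the first SGE and
 * the remaining 36 into the second, leaving data_len = 0; had the SGEs
 * totalled less than 100 bytes, the leftover data_len would trigger
 * IB_WC_LOC_LEN_ERR.
 */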
2914 static int sw_comp(struct hns_roce_qp *hr_qp, struct hns_roce_wq *wq,
2915 int num_entries, struct ib_wc *wc)
2917 unsigned int left;
2918 int npolled = 0;
2920 left = wq->head - wq->tail;
2921 if (left == 0)
2922 return 0;
2924 left = min_t(unsigned int, (unsigned int)num_entries, left);
2925 while (npolled < left) {
2926 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2927 wc->status = IB_WC_WR_FLUSH_ERR;
2928 wc->vendor_err = 0;
2929 wc->qp = &hr_qp->ibqp;
2931 wq->tail++;
2932 wc++;
2933 npolled++;
2936 return npolled;
2939 static int hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries,
2940 struct ib_wc *wc)
2942 struct hns_roce_qp *hr_qp;
2943 int npolled = 0;
2945 list_for_each_entry(hr_qp, &hr_cq->sq_list, sq_node) {
2946 npolled += sw_comp(hr_qp, &hr_qp->sq,
2947 num_entries - npolled, wc + npolled);
2948 if (npolled >= num_entries)
2949 goto out;
2952 list_for_each_entry(hr_qp, &hr_cq->rq_list, rq_node) {
2953 npolled += sw_comp(hr_qp, &hr_qp->rq,
2954 num_entries - npolled, wc + npolled);
2955 if (npolled >= num_entries)
2956 goto out;
2959 out:
2960 return npolled;
2963 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2964 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2966 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2967 struct hns_roce_srq *srq = NULL;
2968 struct hns_roce_v2_cqe *cqe;
2969 struct hns_roce_qp *hr_qp;
2970 struct hns_roce_wq *wq;
2971 int is_send;
2972 u16 wqe_ctr;
2973 u32 opcode;
2974 u32 status;
2975 int qpn;
2976 int ret;
2978 /* Find cqe according to consumer index */
2979 cqe = next_cqe_sw_v2(hr_cq);
2980 if (!cqe)
2981 return -EAGAIN;
2983 ++hr_cq->cons_index;
2984 /* Memory barrier */
2985 rmb();
2987 /* 0->SQ, 1->RQ */
2988 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2990 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2991 V2_CQE_BYTE_16_LCL_QPN_S);
2993 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2994 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2995 if (unlikely(!hr_qp)) {
2996 ibdev_err(&hr_dev->ib_dev,
2997 "CQ %06lx with entry for unknown QPN %06x\n",
2998 hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
2999 return -EINVAL;
3001 *cur_qp = hr_qp;
3004 hr_qp = *cur_qp;
3005 wc->qp = &(*cur_qp)->ibqp;
3006 wc->vendor_err = 0;
3008 if (is_send) {
3009 wq = &(*cur_qp)->sq;
3010 if ((*cur_qp)->sq_signal_bits) {
3011 /*
3012 * If sq_signal_bits is set, first update the tail pointer
3013 * to the WQE that the current CQE corresponds to.
3014 */
3016 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3017 V2_CQE_BYTE_4_WQE_INDX_M,
3018 V2_CQE_BYTE_4_WQE_INDX_S);
3019 wq->tail += (wqe_ctr - (u16)wq->tail) &
3020 (wq->wqe_cnt - 1);
3023 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3024 ++wq->tail;
3025 } else if ((*cur_qp)->ibqp.srq) {
3026 srq = to_hr_srq((*cur_qp)->ibqp.srq);
3027 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
3028 V2_CQE_BYTE_4_WQE_INDX_M,
3029 V2_CQE_BYTE_4_WQE_INDX_S);
3030 wc->wr_id = srq->wrid[wqe_ctr];
3031 hns_roce_free_srq_wqe(srq, wqe_ctr);
3032 } else {
3033 /* Update tail pointer, record wr_id */
3034 wq = &(*cur_qp)->rq;
3035 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
3036 ++wq->tail;
3039 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
3040 V2_CQE_BYTE_4_STATUS_S);
3041 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
3042 case HNS_ROCE_CQE_V2_SUCCESS:
3043 wc->status = IB_WC_SUCCESS;
3044 break;
3045 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
3046 wc->status = IB_WC_LOC_LEN_ERR;
3047 break;
3048 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
3049 wc->status = IB_WC_LOC_QP_OP_ERR;
3050 break;
3051 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
3052 wc->status = IB_WC_LOC_PROT_ERR;
3053 break;
3054 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
3055 wc->status = IB_WC_WR_FLUSH_ERR;
3056 break;
3057 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
3058 wc->status = IB_WC_MW_BIND_ERR;
3059 break;
3060 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
3061 wc->status = IB_WC_BAD_RESP_ERR;
3062 break;
3063 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
3064 wc->status = IB_WC_LOC_ACCESS_ERR;
3065 break;
3066 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
3067 wc->status = IB_WC_REM_INV_REQ_ERR;
3068 break;
3069 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
3070 wc->status = IB_WC_REM_ACCESS_ERR;
3071 break;
3072 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
3073 wc->status = IB_WC_REM_OP_ERR;
3074 break;
3075 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
3076 wc->status = IB_WC_RETRY_EXC_ERR;
3077 break;
3078 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
3079 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
3080 break;
3081 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
3082 wc->status = IB_WC_REM_ABORT_ERR;
3083 break;
3084 default:
3085 wc->status = IB_WC_GENERAL_ERR;
3086 break;
3089 /*
3090 * Hip08 hardware cannot flush the WQEs in the SQ/RQ if the QP gets
3091 * into the error state. Hence, as a workaround to this hardware
3092 * limitation, the driver needs to assist in flushing. But the flush
3093 * operation uses a mailbox to convey the QP state to the hardware,
3094 * which can sleep due to the mutex protection around the mailbox calls.
3095 * Hence, use the deferred flush for now. Once a wc error is detected,
3096 * the flush operation is needed.
3097 */
3098 if (wc->status != IB_WC_SUCCESS &&
3099 wc->status != IB_WC_WR_FLUSH_ERR) {
3100 ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
3101 status & HNS_ROCE_V2_CQE_STATUS_MASK);
3103 if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
3104 init_flush_work(hr_dev, hr_qp);
3106 return 0;
3109 if (wc->status == IB_WC_WR_FLUSH_ERR)
3110 return 0;
3112 if (is_send) {
3113 wc->wc_flags = 0;
3114 /* SQ corresponding to CQE */
3115 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3116 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
3117 case HNS_ROCE_SQ_OPCODE_SEND:
3118 wc->opcode = IB_WC_SEND;
3119 break;
3120 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
3121 wc->opcode = IB_WC_SEND;
3122 break;
3123 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
3124 wc->opcode = IB_WC_SEND;
3125 wc->wc_flags |= IB_WC_WITH_IMM;
3126 break;
3127 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
3128 wc->opcode = IB_WC_RDMA_READ;
3129 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3130 break;
3131 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
3132 wc->opcode = IB_WC_RDMA_WRITE;
3133 break;
3134 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
3135 wc->opcode = IB_WC_RDMA_WRITE;
3136 wc->wc_flags |= IB_WC_WITH_IMM;
3137 break;
3138 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
3139 wc->opcode = IB_WC_LOCAL_INV;
3140 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
3141 break;
3142 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
3143 wc->opcode = IB_WC_COMP_SWAP;
3144 wc->byte_len = 8;
3145 break;
3146 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
3147 wc->opcode = IB_WC_FETCH_ADD;
3148 wc->byte_len = 8;
3149 break;
3150 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
3151 wc->opcode = IB_WC_MASKED_COMP_SWAP;
3152 wc->byte_len = 8;
3153 break;
3154 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
3155 wc->opcode = IB_WC_MASKED_FETCH_ADD;
3156 wc->byte_len = 8;
3157 break;
3158 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
3159 wc->opcode = IB_WC_REG_MR;
3160 break;
3161 case HNS_ROCE_SQ_OPCODE_BIND_MW:
3162 wc->opcode = IB_WC_REG_MR;
3163 break;
3164 default:
3165 wc->status = IB_WC_GENERAL_ERR;
3166 break;
3168 } else {
3169 /* RQ corresponding to CQE */
3170 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
3172 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
3173 V2_CQE_BYTE_4_OPCODE_S);
3174 switch (opcode & 0x1f) {
3175 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
3176 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
3177 wc->wc_flags = IB_WC_WITH_IMM;
3178 wc->ex.imm_data =
3179 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3180 break;
3181 case HNS_ROCE_V2_OPCODE_SEND:
3182 wc->opcode = IB_WC_RECV;
3183 wc->wc_flags = 0;
3184 break;
3185 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
3186 wc->opcode = IB_WC_RECV;
3187 wc->wc_flags = IB_WC_WITH_IMM;
3188 wc->ex.imm_data =
3189 cpu_to_be32(le32_to_cpu(cqe->immtdata));
3190 break;
3191 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
3192 wc->opcode = IB_WC_RECV;
3193 wc->wc_flags = IB_WC_WITH_INVALIDATE;
3194 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
3195 break;
3196 default:
3197 wc->status = IB_WC_GENERAL_ERR;
3198 break;
3201 if ((wc->qp->qp_type == IB_QPT_RC ||
3202 wc->qp->qp_type == IB_QPT_UC) &&
3203 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
3204 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
3205 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
3206 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
3207 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
3208 if (ret)
3209 return -EAGAIN;
3212 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
3213 V2_CQE_BYTE_32_SL_S);
3214 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
3215 V2_CQE_BYTE_32_RMT_QPN_M,
3216 V2_CQE_BYTE_32_RMT_QPN_S);
3217 wc->slid = 0;
3218 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
3219 V2_CQE_BYTE_32_GRH_S) ?
3220 IB_WC_GRH : 0);
3221 wc->port_num = roce_get_field(cqe->byte_32,
3222 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
3223 wc->pkey_index = 0;
3225 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
3226 wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
3227 V2_CQE_BYTE_28_VID_M,
3228 V2_CQE_BYTE_28_VID_S);
3229 wc->wc_flags |= IB_WC_WITH_VLAN;
3230 } else {
3231 wc->vlan_id = 0xffff;
3234 wc->network_hdr_type = roce_get_field(cqe->byte_28,
3235 V2_CQE_BYTE_28_PORT_TYPE_M,
3236 V2_CQE_BYTE_28_PORT_TYPE_S);
3239 return 0;
3242 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
3243 struct ib_wc *wc)
3245 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3246 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3247 struct hns_roce_qp *cur_qp = NULL;
3248 unsigned long flags;
3249 int npolled;
3251 spin_lock_irqsave(&hr_cq->lock, flags);
3253 /*
3254 * When the device starts to reset, the state is RST_DOWN. At this time,
3255 * there may still be some valid CQEs in the hardware that have not been
3256 * polled. Therefore, it is not allowed to switch to software mode
3257 * immediately. When the state changes to UNINIT, CQEs no longer exist
3258 * in the hardware, and then we switch to software mode.
3259 */
3260 if (hr_dev->state == HNS_ROCE_DEVICE_STATE_UNINIT) {
3261 npolled = hns_roce_v2_sw_poll_cq(hr_cq, num_entries, wc);
3262 goto out;
3265 for (npolled = 0; npolled < num_entries; ++npolled) {
3266 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
3267 break;
3270 if (npolled) {
3271 /* Memory barrier */
3272 wmb();
3273 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
3276 out:
3277 spin_unlock_irqrestore(&hr_cq->lock, flags);
3279 return npolled;
3282 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
3283 int step_idx)
3285 int op;
3287 if (type == HEM_TYPE_SCCC && step_idx)
3288 return -EINVAL;
3290 switch (type) {
3291 case HEM_TYPE_QPC:
3292 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3293 break;
3294 case HEM_TYPE_MTPT:
3295 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3296 break;
3297 case HEM_TYPE_CQC:
3298 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3299 break;
3300 case HEM_TYPE_SRQC:
3301 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3302 break;
3303 case HEM_TYPE_SCCC:
3304 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3305 break;
3306 case HEM_TYPE_QPC_TIMER:
3307 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3308 break;
3309 case HEM_TYPE_CQC_TIMER:
3310 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3311 break;
3312 default:
3313 dev_warn(hr_dev->dev,
3314 "Table %d not to be written by mailbox!\n", type);
3315 return -EINVAL;
3318 return op + step_idx;
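/*
 * The "op + step_idx" arithmetic above assumes the BT0/BT1/BT2 mailbox
 * opcodes for each table type are consecutive; e.g. for HEM_TYPE_QPC,
 * step_idx = 1 would select the command one past
 * HNS_ROCE_CMD_WRITE_QPC_BT0, i.e. the BT1 variant.
 */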
3321 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3322 struct hns_roce_hem_table *table, int obj,
3323 int step_idx)
3325 struct hns_roce_cmd_mailbox *mailbox;
3326 struct hns_roce_hem_iter iter;
3327 struct hns_roce_hem_mhop mhop;
3328 struct hns_roce_hem *hem;
3329 unsigned long mhop_obj = obj;
3330 int i, j, k;
3331 int ret = 0;
3332 u64 hem_idx = 0;
3333 u64 l1_idx = 0;
3334 u64 bt_ba = 0;
3335 u32 chunk_ba_num;
3336 u32 hop_num;
3337 int op;
3339 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3340 return 0;
3342 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3343 i = mhop.l0_idx;
3344 j = mhop.l1_idx;
3345 k = mhop.l2_idx;
3346 hop_num = mhop.hop_num;
3347 chunk_ba_num = mhop.bt_chunk_size / 8;
3349 if (hop_num == 2) {
3350 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3351 k;
3352 l1_idx = i * chunk_ba_num + j;
3353 } else if (hop_num == 1) {
3354 hem_idx = i * chunk_ba_num + j;
3355 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3356 hem_idx = i;
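/*
 * Index example (assumed geometry): with a 4 KB bt_chunk_size,
 * chunk_ba_num = 4096 / 8 = 512, so for hop_num = 2 and
 * (i, j, k) = (1, 2, 3), hem_idx = 1 * 512 * 512 + 2 * 512 + 3 = 263171
 * and l1_idx = 1 * 512 + 2 = 514.
 */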
3359 op = get_op_for_set_hem(hr_dev, table->type, step_idx);
3360 if (op == -EINVAL)
3361 return 0;
3363 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3364 if (IS_ERR(mailbox))
3365 return PTR_ERR(mailbox);
3367 if (table->type == HEM_TYPE_SCCC)
3368 obj = mhop.l0_idx;
3370 if (check_whether_last_step(hop_num, step_idx)) {
3371 hem = table->hem[hem_idx];
3372 for (hns_roce_hem_first(hem, &iter);
3373 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3374 bt_ba = hns_roce_hem_addr(&iter);
3376 /* configure the ba, tag, and op */
3377 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
3378 obj, 0, op,
3379 HNS_ROCE_CMD_TIMEOUT_MSECS);
3381 } else {
3382 if (step_idx == 0)
3383 bt_ba = table->bt_l0_dma_addr[i];
3384 else if (step_idx == 1 && hop_num == 2)
3385 bt_ba = table->bt_l1_dma_addr[l1_idx];
3387 /* configure the ba, tag, and op */
3388 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3389 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3392 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3393 return ret;
3396 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3397 struct hns_roce_hem_table *table, int obj,
3398 int step_idx)
3400 struct device *dev = hr_dev->dev;
3401 struct hns_roce_cmd_mailbox *mailbox;
3402 int ret;
3403 u16 op = 0xff;
3405 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3406 return 0;
3408 switch (table->type) {
3409 case HEM_TYPE_QPC:
3410 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3411 break;
3412 case HEM_TYPE_MTPT:
3413 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3414 break;
3415 case HEM_TYPE_CQC:
3416 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3417 break;
3418 case HEM_TYPE_SCCC:
3419 case HEM_TYPE_QPC_TIMER:
3420 case HEM_TYPE_CQC_TIMER:
3421 break;
3422 case HEM_TYPE_SRQC:
3423 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3424 break;
3425 default:
3426 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3427 table->type);
3428 return 0;
3431 if (table->type == HEM_TYPE_SCCC ||
3432 table->type == HEM_TYPE_QPC_TIMER ||
3433 table->type == HEM_TYPE_CQC_TIMER)
3434 return 0;
3436 op += step_idx;
3438 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3439 if (IS_ERR(mailbox))
3440 return PTR_ERR(mailbox);
3442 /* configure the tag and op */
3443 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3444 HNS_ROCE_CMD_TIMEOUT_MSECS);
3446 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3447 return ret;
3450 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3451 struct hns_roce_v2_qp_context *context,
3452 struct hns_roce_qp *hr_qp)
3454 struct hns_roce_cmd_mailbox *mailbox;
3455 int ret;
3457 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3458 if (IS_ERR(mailbox))
3459 return PTR_ERR(mailbox);
3461 memcpy(mailbox->buf, context, sizeof(*context) * 2);
3463 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3464 HNS_ROCE_CMD_MODIFY_QPC,
3465 HNS_ROCE_CMD_TIMEOUT_MSECS);
3467 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3469 return ret;
3472 static void set_access_flags(struct hns_roce_qp *hr_qp,
3473 struct hns_roce_v2_qp_context *context,
3474 struct hns_roce_v2_qp_context *qpc_mask,
3475 const struct ib_qp_attr *attr, int attr_mask)
3477 u8 dest_rd_atomic;
3478 u32 access_flags;
3480 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3481 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3483 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3484 attr->qp_access_flags : hr_qp->atomic_rd_en;
3486 if (!dest_rd_atomic)
3487 access_flags &= IB_ACCESS_REMOTE_WRITE;
3489 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3490 !!(access_flags & IB_ACCESS_REMOTE_READ));
3491 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3493 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3494 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3495 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3497 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3498 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3499 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3500 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S,
3501 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3502 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_EXT_ATE_S, 0);
3505 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3506 struct hns_roce_v2_qp_context *context,
3507 struct hns_roce_v2_qp_context *qpc_mask)
3509 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3510 roce_set_field(context->byte_4_sqpn_tst,
3511 V2_QPC_BYTE_4_SGE_SHIFT_M,
3512 V2_QPC_BYTE_4_SGE_SHIFT_S,
3513 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3514 else
3515 roce_set_field(context->byte_4_sqpn_tst,
3516 V2_QPC_BYTE_4_SGE_SHIFT_M,
3517 V2_QPC_BYTE_4_SGE_SHIFT_S,
3518 hr_qp->sq.max_gs >
3519 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3520 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3522 roce_set_field(context->byte_20_smac_sgid_idx,
3523 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3524 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3526 roce_set_field(context->byte_20_smac_sgid_idx,
3527 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3528 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3529 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
3530 hr_qp->ibqp.srq) ? 0 :
3531 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
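/*
 * Shift example (invented sizes): a QP with sq.wqe_cnt = 1024 gets
 * SQ_SHIFT = ilog2(1024) = 10, while an SRQ-attached or XRC QP gets
 * RQ_SHIFT = 0, presumably because its receive queue lives outside the
 * QP itself.
 */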
3534 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3535 const struct ib_qp_attr *attr,
3536 int attr_mask,
3537 struct hns_roce_v2_qp_context *context,
3538 struct hns_roce_v2_qp_context *qpc_mask)
3540 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3541 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3543 /*
3544 * In the v2 engine, software passes the context and context mask to
3545 * the hardware when modifying a QP. If software needs to modify some
3546 * fields of the context, it should set all bits of the relevant fields
3547 * in the context mask to 0 at the same time, else set them to 0x1.
3548 */
3549 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3550 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3552 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3553 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3555 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3556 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3558 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3559 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3561 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3563 /* When there is no VLAN, the VLAN id field must be set to 0xfff */
3564 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3565 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3567 if (hr_qp->rdb_en)
3568 roce_set_bit(context->byte_68_rq_db,
3569 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
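/*
 * The record doorbell DMA address is split: the low 32 bits (shifted
 * right by 1) go into the RQ_DB_RECORD_ADDR field, the upper 32 bits
 * into rq_db_record_addr.
 */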
3571 roce_set_field(context->byte_68_rq_db,
3572 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3573 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3574 ((u32)hr_qp->rdb.dma) >> 1);
3575 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3577 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3578 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3580 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3581 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3582 if (ibqp->srq) {
3583 roce_set_field(context->byte_76_srqn_op_en,
3584 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3585 to_hr_srq(ibqp->srq)->srqn);
3586 roce_set_bit(context->byte_76_srqn_op_en,
3587 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3588 }
3590 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3591 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3593 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3595 hr_qp->access_flags = attr->qp_access_flags;
3596 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3597 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3598 }
3600 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3601 const struct ib_qp_attr *attr, int attr_mask,
3602 struct hns_roce_v2_qp_context *context,
3603 struct hns_roce_v2_qp_context *qpc_mask)
3604 {
3605 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3607 /*
3608 * In the v2 engine, software passes both the context and the context
3609 * mask to hardware when modifying a QP. To modify some fields of the
3610 * context, software must clear all bits of those fields in the context
3611 * mask to 0 at the same time; all other mask bits stay set to 0x1.
3612 */
3613 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3614 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3615 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3616 V2_QPC_BYTE_4_TST_S, 0);
3618 if (attr_mask & IB_QP_ACCESS_FLAGS) {
3619 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3620 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3621 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3622 0);
3624 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3625 !!(attr->qp_access_flags &
3626 IB_ACCESS_REMOTE_WRITE));
3627 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3628 0);
3630 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3631 !!(attr->qp_access_flags &
3632 IB_ACCESS_REMOTE_ATOMIC));
3633 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3634 0);
3635 roce_set_bit(context->byte_76_srqn_op_en,
3636 V2_QPC_BYTE_76_EXT_ATE_S,
3637 !!(attr->qp_access_flags &
3638 IB_ACCESS_REMOTE_ATOMIC));
3639 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3640 V2_QPC_BYTE_76_EXT_ATE_S, 0);
3641 } else {
3642 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3643 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3644 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3645 0);
3647 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3648 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3649 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3650 0);
3652 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3653 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3654 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3655 0);
3656 roce_set_bit(context->byte_76_srqn_op_en,
3657 V2_QPC_BYTE_76_EXT_ATE_S,
3658 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3659 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3660 V2_QPC_BYTE_76_EXT_ATE_S, 0);
3661 }
3663 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3664 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3665 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3666 V2_QPC_BYTE_16_PD_S, 0);
3668 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3669 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3670 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3671 V2_QPC_BYTE_80_RX_CQN_S, 0);
3673 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3674 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3675 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3676 V2_QPC_BYTE_252_TX_CQN_S, 0);
3678 if (ibqp->srq) {
3679 roce_set_bit(context->byte_76_srqn_op_en,
3680 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3681 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3682 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3683 roce_set_field(context->byte_76_srqn_op_en,
3684 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3685 to_hr_srq(ibqp->srq)->srqn);
3686 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3687 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3688 }
3690 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3691 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3692 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3693 V2_QPC_BYTE_4_SQPN_S, 0);
3695 if (attr_mask & IB_QP_DEST_QPN) {
3696 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3697 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3698 roce_set_field(qpc_mask->byte_56_dqpn_err,
3699 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3700 }
3701 }
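/*
 * Validate the RQ WQE buffer translations found by hns_roce_mtr_find():
 * at least one entry is required, and at least MTT_MIN_COUNT entries
 * when the buffer extends beyond the first RQ page.
 */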
3703 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3704 struct hns_roce_qp *hr_qp, int mtt_cnt,
3705 u32 page_size)
3706 {
3707 struct ib_device *ibdev = &hr_dev->ib_dev;
3709 if (hr_qp->rq.wqe_cnt < 1)
3710 return true;
3712 if (mtt_cnt < 1) {
3713 ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n",
3714 hr_qp->qpn);
3715 return false;
3716 }
3718 if (mtt_cnt < MTT_MIN_COUNT &&
3719 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3720 ibdev_err(ibdev,
3721 "failed to find next RQWQE buf ba of QP(0x%lx)\n",
3722 hr_qp->qpn);
3723 return false;
3724 }
3726 return true;
3727 }
3729 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3730 const struct ib_qp_attr *attr, int attr_mask,
3731 struct hns_roce_v2_qp_context *context,
3732 struct hns_roce_v2_qp_context *qpc_mask)
3733 {
3734 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3735 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3736 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3737 struct ib_device *ibdev = &hr_dev->ib_dev;
3738 u64 mtts[MTT_MIN_COUNT] = { 0 };
3739 dma_addr_t dma_handle_3;
3740 dma_addr_t dma_handle_2;
3741 u64 wqe_sge_ba;
3742 u32 page_size;
3743 u8 port_num;
3744 u64 *mtts_3;
3745 u64 *mtts_2;
3746 int count;
3747 u8 *dmac;
3748 u8 *smac;
3749 int port;
3751 /* Search qp buf's mtts */
3752 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3753 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3754 hr_qp->rq.offset / page_size, mtts,
3755 MTT_MIN_COUNT, &wqe_sge_ba);
3756 if (!ibqp->srq)
3757 if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3758 return -EINVAL;
3760 /* Search IRRL's mtts */
3761 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3762 hr_qp->qpn, &dma_handle_2);
3763 if (!mtts_2) {
3764 ibdev_err(ibdev, "failed to find QP irrl_table\n");
3765 return -EINVAL;
3766 }
3768 /* Search TRRL's mtts */
3769 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3770 hr_qp->qpn, &dma_handle_3);
3771 if (!mtts_3) {
3772 ibdev_err(ibdev, "failed to find QP trrl_table\n");
3773 return -EINVAL;
3774 }
3776 if (attr_mask & IB_QP_ALT_PATH) {
3777 ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
3778 attr_mask);
3779 return -EINVAL;
3780 }
3782 dmac = (u8 *)attr->ah_attr.roce.dmac;
3783 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3784 qpc_mask->wqe_sge_ba = 0;
3786 /*
3787 * In the v2 engine, software passes both the context and the context
3788 * mask to hardware when modifying a QP. To modify some fields of the
3789 * context, software must clear all bits of those fields in the context
3790 * mask to 0 at the same time; all other mask bits stay set to 0x1.
3791 */
3792 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3793 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3794 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3795 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3797 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3798 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3799 hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3800 0 : hr_dev->caps.wqe_sq_hop_num);
3801 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3802 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3804 roce_set_field(context->byte_20_smac_sgid_idx,
3805 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3806 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3807 ((ibqp->qp_type == IB_QPT_GSI) ||
3808 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3809 hr_dev->caps.wqe_sge_hop_num : 0);
3810 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3811 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3812 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3814 roce_set_field(context->byte_20_smac_sgid_idx,
3815 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3816 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3817 hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3818 0 : hr_dev->caps.wqe_rq_hop_num);
3819 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3820 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3821 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3823 roce_set_field(context->byte_16_buf_ba_pg_sz,
3824 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3825 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3826 hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
3827 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3828 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3829 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3831 roce_set_field(context->byte_16_buf_ba_pg_sz,
3832 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3833 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3834 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3835 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3836 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3837 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3839 context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
3840 qpc_mask->rq_cur_blk_addr = 0;
3842 roce_set_field(context->byte_92_srq_info,
3843 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3844 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3845 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3846 roce_set_field(qpc_mask->byte_92_srq_info,
3847 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3848 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3850 context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
3851 qpc_mask->rq_nxt_blk_addr = 0;
3853 roce_set_field(context->byte_104_rq_sge,
3854 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3855 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3856 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
3857 roce_set_field(qpc_mask->byte_104_rq_sge,
3858 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3859 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
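/*
 * The TRRL base address is split across three fields: dma_handle_3 >> 4
 * in byte_132, >> (16 + 4) in trrl_ba and >> (32 + 16 + 4) in byte_140.
 */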
3861 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3862 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3863 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3864 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3865 context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
3866 qpc_mask->trrl_ba = 0;
3867 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3868 V2_QPC_BYTE_140_TRRL_BA_S,
3869 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3870 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3871 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3873 context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
3874 qpc_mask->irrl_ba = 0;
3875 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3876 V2_QPC_BYTE_208_IRRL_BA_S,
3877 dma_handle_2 >> (32 + 6));
3878 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3879 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3881 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3882 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3884 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3885 hr_qp->sq_signal_bits);
3886 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3887 0);
3889 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3891 smac = (u8 *)hr_dev->dev_addr[port];
3892 /* When dmac equals smac, or loop_idc is 1, loopback should be enabled */
3893 if (ether_addr_equal_unaligned(dmac, smac) ||
3894 hr_dev->loop_idc == 0x1) {
3895 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3896 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3897 }
3899 if (attr_mask & IB_QP_DEST_QPN) {
3900 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3901 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3902 roce_set_field(qpc_mask->byte_56_dqpn_err,
3903 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3904 }
3906 /* Configure GID index */
3907 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3908 roce_set_field(context->byte_20_smac_sgid_idx,
3909 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
3910 hns_get_gid_index(hr_dev, port_num - 1,
3911 grh->sgid_index));
3912 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3913 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
3914 memcpy(&(context->dmac), dmac, sizeof(u32));
3915 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3916 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3917 qpc_mask->dmac = 0;
3918 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3919 V2_QPC_BYTE_52_DMAC_S, 0);
3921 /* mtu * (2 ^ LP_PKTN_INI) should not be bigger than the max message length, 64KB */
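/*
 * e.g. with a 4096-byte path MTU the field could go up to 4, since
 * 4096 * 2^4 = 64KB; here it is simply left at 0.
 */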
3922 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3923 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3924 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3925 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3927 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3928 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3929 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3930 else if (attr_mask & IB_QP_PATH_MTU)
3931 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3932 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3934 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3935 V2_QPC_BYTE_24_MTU_S, 0);
3937 roce_set_field(context->byte_84_rq_ci_pi,
3938 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3939 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3940 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3941 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3942 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3944 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3945 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3946 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3947 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3948 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3949 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3950 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3951 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3952 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3953 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3955 context->rq_rnr_timer = 0;
3956 qpc_mask->rq_rnr_timer = 0;
3958 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3959 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3960 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3961 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3963 /* RoCEE sends 2 ^ lp_sgen_ini segments each time */
3964 roce_set_field(context->byte_168_irrl_idx,
3965 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3966 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3967 roce_set_field(qpc_mask->byte_168_irrl_idx,
3968 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3969 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3971 return 0;
3972 }
3974 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3975 const struct ib_qp_attr *attr, int attr_mask,
3976 struct hns_roce_v2_qp_context *context,
3977 struct hns_roce_v2_qp_context *qpc_mask)
3978 {
3979 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3980 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3981 struct ib_device *ibdev = &hr_dev->ib_dev;
3982 u64 sge_cur_blk = 0;
3983 u64 sq_cur_blk = 0;
3984 u32 page_size;
3985 int count;
3987 /* Search qp buf's mtts */
3988 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3989 if (count < 1) {
3990 ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
3991 hr_qp->qpn);
3992 return -EINVAL;
3993 }
3995 if (hr_qp->sge.offset) {
3996 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3997 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3998 hr_qp->sge.offset / page_size,
3999 &sge_cur_blk, 1, NULL);
4000 if (count < 1) {
4001 ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
4002 hr_qp->qpn);
4003 return -EINVAL;
4004 }
4005 }
4007 /* Alternate path and path migration are not supported */
4008 if (attr_mask & (IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE)) {
4009 ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
4010 return -EINVAL;
4011 }
4013 /*
4014 * In the v2 engine, software passes both the context and the context
4015 * mask to hardware when modifying a QP. To modify some fields of the
4016 * context, software must clear all bits of those fields in the context
4017 * mask to 0 at the same time; all other mask bits stay set to 0x1.
4018 */
4019 context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
4020 roce_set_field(context->byte_168_irrl_idx,
4021 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4022 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
4023 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
4024 qpc_mask->sq_cur_blk_addr = 0;
4025 roce_set_field(qpc_mask->byte_168_irrl_idx,
4026 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
4027 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
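/*
 * The current-SGE-block address is only meaningful when the QP uses an
 * extended SGE region, i.e. for GSI QPs or when sq.max_gs exceeds
 * HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; otherwise 0 is programmed.
 */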
4029 context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
4030 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
4031 cpu_to_le32(sge_cur_blk >>
4032 PAGE_ADDR_SHIFT) : 0;
4033 roce_set_field(context->byte_184_irrl_idx,
4034 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4035 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
4036 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
4037 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
4038 (sge_cur_blk >>
4039 (32 + PAGE_ADDR_SHIFT)) : 0);
4040 qpc_mask->sq_cur_sge_blk_addr = 0;
4041 roce_set_field(qpc_mask->byte_184_irrl_idx,
4042 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
4043 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
4045 context->rx_sq_cur_blk_addr =
4046 cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
4047 roce_set_field(context->byte_232_irrl_sge,
4048 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4049 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
4050 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
4051 qpc_mask->rx_sq_cur_blk_addr = 0;
4052 roce_set_field(qpc_mask->byte_232_irrl_sge,
4053 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
4054 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
4056 /*
4057 * Clear some fields in the context: the default value of every field
4058 * is already zero, so they need not be set to 0 again; only the
4059 * relevant fields of the context mask must be cleared to 0.
4060 */
4061 roce_set_field(qpc_mask->byte_232_irrl_sge,
4062 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
4063 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
4065 roce_set_field(qpc_mask->byte_240_irrl_tail,
4066 V2_QPC_BYTE_240_RX_ACK_MSN_M,
4067 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4069 roce_set_field(qpc_mask->byte_248_ack_psn,
4070 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4071 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4072 roce_set_bit(qpc_mask->byte_248_ack_psn,
4073 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4074 roce_set_field(qpc_mask->byte_248_ack_psn,
4075 V2_QPC_BYTE_248_IRRL_PSN_M,
4076 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4078 roce_set_field(qpc_mask->byte_240_irrl_tail,
4079 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4080 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4082 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4083 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4084 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4086 roce_set_bit(qpc_mask->byte_248_ack_psn,
4087 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4089 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4090 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4092 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4093 V2_QPC_BYTE_212_LSN_S, 0x100);
4094 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4095 V2_QPC_BYTE_212_LSN_S, 0);
4097 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4098 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4100 return 0;
4101 }
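/*
 * Returns true for the transitions that need no context field
 * programming in hns_roce_v2_set_abs_fields(): any non-RESET state to
 * ERR or RESET, RTS/SQD to RTS/SQD, and SQE to RTS.
 */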
4103 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4104 enum ib_qp_state new_state)
4105 {
4107 if ((cur_state != IB_QPS_RESET &&
4108 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4109 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4110 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4111 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4112 return true;
4114 return false;
4115 }
4118 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4119 const struct ib_qp_attr *attr,
4120 int attr_mask,
4121 struct hns_roce_v2_qp_context *context,
4122 struct hns_roce_v2_qp_context *qpc_mask)
4123 {
4124 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4125 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4126 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4127 struct ib_device *ibdev = &hr_dev->ib_dev;
4128 const struct ib_gid_attr *gid_attr = NULL;
4129 int is_roce_protocol;
4130 u16 vlan_id = 0xffff;
4131 bool is_udp = false;
4132 u8 ib_port;
4133 u8 hr_port;
4134 int ret;
4136 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4137 hr_port = ib_port - 1;
4138 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4139 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4141 if (is_roce_protocol) {
4142 gid_attr = attr->ah_attr.grh.sgid_attr;
4143 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4144 if (ret)
4145 return ret;
4147 if (gid_attr)
4148 is_udp = (gid_attr->gid_type ==
4149 IB_GID_TYPE_ROCE_UDP_ENCAP);
4150 }
4152 if (vlan_id < VLAN_N_VID) {
4153 roce_set_bit(context->byte_76_srqn_op_en,
4154 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4155 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4156 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4157 roce_set_bit(context->byte_168_irrl_idx,
4158 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4159 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4160 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4161 }
4163 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4164 V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4165 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4166 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4168 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4169 ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
4170 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4171 return -EINVAL;
4172 }
4174 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4175 ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
4176 return -EINVAL;
4177 }
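/*
 * 0x12b7 is 4791, the IANA-assigned RoCE v2 UDP port; it is used here
 * as a fixed UDP source port for RoCE v2 (UDP-encapsulated) GIDs.
 */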
4179 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4180 V2_QPC_BYTE_52_UDPSPN_S,
4181 is_udp ? 0x12b7 : 0);
4183 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4184 V2_QPC_BYTE_52_UDPSPN_S, 0);
4186 roce_set_field(context->byte_20_smac_sgid_idx,
4187 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4188 grh->sgid_index);
4190 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4191 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4193 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4194 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4195 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4196 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
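/*
 * For RoCE v2 on HIP08 revision B and later, the traffic class is
 * programmed shifted right by 2, i.e. its 6-bit DSCP part; earlier
 * revisions take the full 8-bit value.
 */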
4198 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP08_B && is_udp)
4199 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4200 V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4201 else
4202 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4203 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
4204 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4205 V2_QPC_BYTE_24_TC_S, 0);
4206 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4207 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4208 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4209 V2_QPC_BYTE_28_FL_S, 0);
4210 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4211 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4212 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4213 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
4214 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4215 V2_QPC_BYTE_28_SL_S, 0);
4216 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4218 return 0;
4219 }
4221 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4222 const struct ib_qp_attr *attr,
4223 int attr_mask,
4224 enum ib_qp_state cur_state,
4225 enum ib_qp_state new_state,
4226 struct hns_roce_v2_qp_context *context,
4227 struct hns_roce_v2_qp_context *qpc_mask)
4228 {
4229 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4230 int ret = 0;
4232 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4233 memset(qpc_mask, 0, sizeof(*qpc_mask));
4234 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4235 qpc_mask);
4236 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4237 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4238 qpc_mask);
4239 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4240 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4241 qpc_mask);
4242 if (ret)
4243 goto out;
4244 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4245 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4246 qpc_mask);
4247 if (ret)
4248 goto out;
4249 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4250 /* Nothing */
4252 } else {
4253 ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
4254 ret = -EINVAL;
4255 goto out;
4256 }
4258 out:
4259 return ret;
4260 }
4262 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4263 const struct ib_qp_attr *attr,
4264 int attr_mask,
4265 struct hns_roce_v2_qp_context *context,
4266 struct hns_roce_v2_qp_context *qpc_mask)
4267 {
4268 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4269 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4270 int ret = 0;
4272 if (attr_mask & IB_QP_AV) {
4273 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4274 qpc_mask);
4275 if (ret)
4276 return ret;
4277 }
4279 if (attr_mask & IB_QP_TIMEOUT) {
4280 if (attr->timeout < 31) {
4281 roce_set_field(context->byte_28_at_fl,
4282 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4283 attr->timeout);
4284 roce_set_field(qpc_mask->byte_28_at_fl,
4285 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4286 0);
4287 } else {
4288 ibdev_warn(&hr_dev->ib_dev,
4289 "Local ACK timeout shall be 0 to 30.\n");
4293 if (attr_mask & IB_QP_RETRY_CNT) {
4294 roce_set_field(context->byte_212_lsn,
4295 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4296 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4297 attr->retry_cnt);
4298 roce_set_field(qpc_mask->byte_212_lsn,
4299 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4300 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4302 roce_set_field(context->byte_212_lsn,
4303 V2_QPC_BYTE_212_RETRY_CNT_M,
4304 V2_QPC_BYTE_212_RETRY_CNT_S, attr->retry_cnt);
4305 roce_set_field(qpc_mask->byte_212_lsn,
4306 V2_QPC_BYTE_212_RETRY_CNT_M,
4307 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4308 }
4310 if (attr_mask & IB_QP_RNR_RETRY) {
4311 roce_set_field(context->byte_244_rnr_rxack,
4312 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4313 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4314 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4315 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4316 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4318 roce_set_field(context->byte_244_rnr_rxack,
4319 V2_QPC_BYTE_244_RNR_CNT_M,
4320 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4321 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4322 V2_QPC_BYTE_244_RNR_CNT_M,
4323 V2_QPC_BYTE_244_RNR_CNT_S, 0);
4324 }
4326 /* RC&UC&UD required attr */
4327 if (attr_mask & IB_QP_SQ_PSN) {
4328 roce_set_field(context->byte_172_sq_psn,
4329 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4330 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4331 roce_set_field(qpc_mask->byte_172_sq_psn,
4332 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4333 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4335 roce_set_field(context->byte_196_sq_psn,
4336 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4337 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4338 roce_set_field(qpc_mask->byte_196_sq_psn,
4339 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4340 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4342 roce_set_field(context->byte_220_retry_psn_msn,
4343 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4344 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4345 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4346 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4347 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
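/*
 * byte_224 carries the upper bits of sq_psn, shifted down by
 * V2_QPC_BYTE_220_RETRY_MSG_PSN_S (the start bit of the low part
 * programmed into byte_220 above).
 */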
4349 roce_set_field(context->byte_224_retry_msg,
4350 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4351 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4352 attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4353 roce_set_field(qpc_mask->byte_224_retry_msg,
4354 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4355 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4357 roce_set_field(context->byte_224_retry_msg,
4358 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4359 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4360 attr->sq_psn);
4361 roce_set_field(qpc_mask->byte_224_retry_msg,
4362 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4363 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4365 roce_set_field(context->byte_244_rnr_rxack,
4366 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4367 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4368 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4369 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4370 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4371 }
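/*
 * The read/atomic depths are stored as log2 values: fls(n - 1) rounds
 * n up to the next power of two, e.g. max_rd_atomic = 8 gives
 * fls(7) = 3, i.e. 2^3 = 8 resources.
 */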
4373 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4374 attr->max_dest_rd_atomic) {
4375 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4376 V2_QPC_BYTE_140_RR_MAX_S,
4377 fls(attr->max_dest_rd_atomic - 1));
4378 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4379 V2_QPC_BYTE_140_RR_MAX_S, 0);
4380 }
4382 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4383 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4384 V2_QPC_BYTE_208_SR_MAX_S,
4385 fls(attr->max_rd_atomic - 1));
4386 roce_set_field(qpc_mask->byte_208_irrl,
4387 V2_QPC_BYTE_208_SR_MAX_M,
4388 V2_QPC_BYTE_208_SR_MAX_S, 0);
4389 }
4391 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4392 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4394 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4395 roce_set_field(context->byte_80_rnr_rx_cqn,
4396 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4397 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4398 attr->min_rnr_timer);
4399 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4400 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4401 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4402 }
4404 /* RC&UC required attr */
4405 if (attr_mask & IB_QP_RQ_PSN) {
4406 roce_set_field(context->byte_108_rx_reqepsn,
4407 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4408 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4409 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4410 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4411 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4413 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4414 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4415 roce_set_field(qpc_mask->byte_152_raq,
4416 V2_QPC_BYTE_152_RAQ_PSN_M,
4417 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4418 }
4420 if (attr_mask & IB_QP_QKEY) {
4421 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4422 qpc_mask->qkey_xrcd = 0;
4423 hr_qp->qkey = attr->qkey;
4424 }
4426 return ret;
4427 }
4429 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4430 const struct ib_qp_attr *attr,
4431 int attr_mask)
4432 {
4433 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4434 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4436 if (attr_mask & IB_QP_ACCESS_FLAGS)
4437 hr_qp->atomic_rd_en = attr->qp_access_flags;
4439 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4440 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4441 if (attr_mask & IB_QP_PORT) {
4442 hr_qp->port = attr->port_num - 1;
4443 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4444 }
4445 }
4447 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4448 const struct ib_qp_attr *attr,
4449 int attr_mask, enum ib_qp_state cur_state,
4450 enum ib_qp_state new_state)
4451 {
4452 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4453 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4454 struct hns_roce_v2_qp_context ctx[2];
4455 struct hns_roce_v2_qp_context *context = ctx;
4456 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4457 struct ib_device *ibdev = &hr_dev->ib_dev;
4458 unsigned long sq_flag = 0;
4459 unsigned long rq_flag = 0;
4460 int ret;
4462 /*
4463 * In the v2 engine, software passes both the context and the context
4464 * mask to hardware when modifying a QP. To modify some fields of the
4465 * context, software must clear all bits of those fields in the context
4466 * mask to 0 at the same time; all other mask bits stay set to 0x1.
4467 */
4468 memset(context, 0, sizeof(*context));
4469 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
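/*
 * Filling qpc_mask with 0xff initially marks every field as
 * "unchanged"; each helper called below clears the mask bits of
 * exactly the fields it programs in the paired context.
 */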
4470 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4471 new_state, context, qpc_mask);
4472 if (ret)
4473 goto out;
4475 /* When the QP moves to the ERR state, outstanding SQ and RQ WQEs must be flushed */
4476 if (new_state == IB_QPS_ERR) {
4477 spin_lock_irqsave(&hr_qp->sq.lock, sq_flag);
4478 hr_qp->state = IB_QPS_ERR;
4479 roce_set_field(context->byte_160_sq_ci_pi,
4480 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4481 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4482 hr_qp->sq.head);
4483 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4484 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4485 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4486 spin_unlock_irqrestore(&hr_qp->sq.lock, sq_flag);
4488 if (!ibqp->srq) {
4489 spin_lock_irqsave(&hr_qp->rq.lock, rq_flag);
4490 roce_set_field(context->byte_84_rq_ci_pi,
4491 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4492 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4493 hr_qp->rq.head);
4494 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4495 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4496 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4497 spin_unlock_irqrestore(&hr_qp->rq.lock, rq_flag);
4498 }
4499 }
4501 /* Configure the optional fields */
4502 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4503 qpc_mask);
4504 if (ret)
4505 goto out;
4507 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4508 ibqp->srq ? 1 : 0);
4509 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4510 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4512 /* Every state transition must update the QP state field */
4513 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4514 V2_QPC_BYTE_60_QP_ST_S, new_state);
4515 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4516 V2_QPC_BYTE_60_QP_ST_S, 0);
4518 /* SW pass context to HW */
4519 ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
4520 if (ret) {
4521 ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
4522 goto out;
4523 }
4525 hr_qp->state = new_state;
4527 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4529 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4530 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4531 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4532 if (ibqp->send_cq != ibqp->recv_cq)
4533 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4534 hr_qp->qpn, NULL);
4536 hr_qp->rq.head = 0;
4537 hr_qp->rq.tail = 0;
4538 hr_qp->sq.head = 0;
4539 hr_qp->sq.tail = 0;
4540 hr_qp->next_sge = 0;
4541 if (hr_qp->rq.wqe_cnt)
4542 *hr_qp->rdb.db_record = 0;
4543 }
4545 out:
4546 return ret;
4547 }
4549 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4550 {
4551 switch (state) {
4552 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4553 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4554 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4555 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4556 case HNS_ROCE_QP_ST_SQ_DRAINING:
4557 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4558 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4559 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4560 default: return -1;
4561 }
4562 }
4564 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4565 struct hns_roce_qp *hr_qp,
4566 struct hns_roce_v2_qp_context *hr_context)
4567 {
4568 struct hns_roce_cmd_mailbox *mailbox;
4569 int ret;
4571 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4572 if (IS_ERR(mailbox))
4573 return PTR_ERR(mailbox);
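/*
 * Unlike HNS_ROCE_CMD_MODIFY_QPC, which passes the mailbox as the
 * input parameter, the query passes mailbox->dma as the out_param so
 * that hardware writes the QPC into the mailbox buffer.
 */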
4575 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4576 HNS_ROCE_CMD_QUERY_QPC,
4577 HNS_ROCE_CMD_TIMEOUT_MSECS);
4578 if (ret)
4579 goto out;
4581 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4583 out:
4584 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4585 return ret;
4586 }
4588 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4589 int qp_attr_mask,
4590 struct ib_qp_init_attr *qp_init_attr)
4591 {
4592 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4593 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4594 struct hns_roce_v2_qp_context context = {};
4595 struct ib_device *ibdev = &hr_dev->ib_dev;
4596 int tmp_qp_state;
4597 int state;
4598 int ret;
4600 memset(qp_attr, 0, sizeof(*qp_attr));
4601 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4603 mutex_lock(&hr_qp->mutex);
4605 if (hr_qp->state == IB_QPS_RESET) {
4606 qp_attr->qp_state = IB_QPS_RESET;
4607 ret = 0;
4608 goto done;
4609 }
4611 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4612 if (ret) {
4613 ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
4614 ret = -EINVAL;
4615 goto out;
4616 }
4618 state = roce_get_field(context.byte_60_qpst_tempid,
4619 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4620 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4621 if (tmp_qp_state == -1) {
4622 ibdev_err(ibdev, "Illegal ib_qp_state\n");
4623 ret = -EINVAL;
4624 goto out;
4625 }
4626 hr_qp->state = (u8)tmp_qp_state;
4627 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4628 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4629 V2_QPC_BYTE_24_MTU_M,
4630 V2_QPC_BYTE_24_MTU_S);
4631 qp_attr->path_mig_state = IB_MIG_ARMED;
4632 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4633 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4634 qp_attr->qkey = le32_to_cpu(context.qkey_xrcd);
4636 qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4637 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4638 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4639 qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4640 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4641 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4642 qp_attr->dest_qp_num = roce_get_field(context.byte_56_dqpn_err,
4643 V2_QPC_BYTE_56_DQPN_M,
4644 V2_QPC_BYTE_56_DQPN_S);
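/* Rebuild qp_access_flags from the RRE/RWE/ATE enable bits of the QPC */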
4645 qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4646 V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4647 ((roce_get_bit(context.byte_76_srqn_op_en,
4648 V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4649 ((roce_get_bit(context.byte_76_srqn_op_en,
4650 V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4652 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4653 hr_qp->ibqp.qp_type == IB_QPT_UC) {
4654 struct ib_global_route *grh =
4655 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4657 rdma_ah_set_sl(&qp_attr->ah_attr,
4658 roce_get_field(context.byte_28_at_fl,
4659 V2_QPC_BYTE_28_SL_M,
4660 V2_QPC_BYTE_28_SL_S));
4661 grh->flow_label = roce_get_field(context.byte_28_at_fl,
4662 V2_QPC_BYTE_28_FL_M,
4663 V2_QPC_BYTE_28_FL_S);
4664 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4665 V2_QPC_BYTE_20_SGID_IDX_M,
4666 V2_QPC_BYTE_20_SGID_IDX_S);
4667 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4668 V2_QPC_BYTE_24_HOP_LIMIT_M,
4669 V2_QPC_BYTE_24_HOP_LIMIT_S);
4670 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4671 V2_QPC_BYTE_24_TC_M,
4672 V2_QPC_BYTE_24_TC_S);
4674 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4675 }
4677 qp_attr->port_num = hr_qp->port + 1;
4678 qp_attr->sq_draining = 0;
4679 qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4680 V2_QPC_BYTE_208_SR_MAX_M,
4681 V2_QPC_BYTE_208_SR_MAX_S);
4682 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4683 V2_QPC_BYTE_140_RR_MAX_M,
4684 V2_QPC_BYTE_140_RR_MAX_S);
4685 qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4686 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4687 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4688 qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4689 V2_QPC_BYTE_28_AT_M,
4690 V2_QPC_BYTE_28_AT_S);
4691 qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4692 V2_QPC_BYTE_212_RETRY_CNT_M,
4693 V2_QPC_BYTE_212_RETRY_CNT_S);
4694 qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
4696 done:
4697 qp_attr->cur_qp_state = qp_attr->qp_state;
4698 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4699 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4701 if (!ibqp->uobject) {
4702 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4703 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4704 } else {
4705 qp_attr->cap.max_send_wr = 0;
4706 qp_attr->cap.max_send_sge = 0;
4707 }
4709 qp_init_attr->cap = qp_attr->cap;
4711 out:
4712 mutex_unlock(&hr_qp->mutex);
4713 return ret;
4714 }
4716 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4717 struct hns_roce_qp *hr_qp,
4718 struct ib_udata *udata)
4719 {
4720 struct ib_device *ibdev = &hr_dev->ib_dev;
4721 struct hns_roce_cq *send_cq, *recv_cq;
4722 unsigned long flags;
4723 int ret = 0;
4725 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4726 /* Modify qp to reset before destroying qp */
4727 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4728 hr_qp->state, IB_QPS_RESET);
4729 if (ret)
4730 ibdev_err(ibdev,
4731 "failed to modify QP to RST, ret = %d\n",
4732 ret);
4733 }
4735 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
4736 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
4738 spin_lock_irqsave(&hr_dev->qp_list_lock, flags);
4739 hns_roce_lock_cqs(send_cq, recv_cq);
4741 if (!udata) {
4742 if (recv_cq)
4743 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn,
4744 (hr_qp->ibqp.srq ?
4745 to_hr_srq(hr_qp->ibqp.srq) :
4746 NULL));
4748 if (send_cq && send_cq != recv_cq)
4749 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4750 }
4753 hns_roce_qp_remove(hr_dev, hr_qp);
4755 hns_roce_unlock_cqs(send_cq, recv_cq);
4756 spin_unlock_irqrestore(&hr_dev->qp_list_lock, flags);
4758 return ret;
4759 }
4761 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4762 {
4763 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4764 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4765 int ret;
4767 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4768 if (ret)
4769 ibdev_err(&hr_dev->ib_dev,
4770 "failed to destroy QP 0x%06lx, ret = %d\n",
4771 hr_qp->qpn, ret);
4773 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
4775 return 0;
4776 }
4778 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4779 struct hns_roce_qp *hr_qp)
4780 {
4781 struct ib_device *ibdev = &hr_dev->ib_dev;
4782 struct hns_roce_sccc_clr_done *resp;
4783 struct hns_roce_sccc_clr *clr;
4784 struct hns_roce_cmq_desc desc;
4785 int ret, i;
4787 mutex_lock(&hr_dev->qp_table.scc_mutex);
4789 /* set scc ctx clear done flag */
4790 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4791 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4792 if (ret) {
4793 ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
4794 goto out;
4795 }
4797 /* clear scc context */
4798 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4799 clr = (struct hns_roce_sccc_clr *)desc.data;
4800 clr->qpn = cpu_to_le32(hr_qp->qpn);
4801 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4802 if (ret) {
4803 ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
4804 goto out;
4805 }
4807 /* query whether the scc context clear is done */
4808 resp = (struct hns_roce_sccc_clr_done *)desc.data;
4809 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4810 hns_roce_cmq_setup_basic_desc(&desc,
4811 HNS_ROCE_OPC_QUERY_SCCC, true);
4812 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4813 if (ret) {
4814 ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
4815 ret);
4816 goto out;
4817 }
4819 if (resp->clr_done)
4820 goto out;
4822 msleep(20);
4823 }
4825 ibdev_err(ibdev, "Query of SCC clr done flag timed out.\n");
4826 ret = -ETIMEDOUT;
4828 out:
4829 mutex_unlock(&hr_dev->qp_table.scc_mutex);
4830 return ret;
4831 }
4833 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4834 {
4835 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4836 struct hns_roce_v2_cq_context *cq_context;
4837 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4838 struct hns_roce_v2_cq_context *cqc_mask;
4839 struct hns_roce_cmd_mailbox *mailbox;
4840 int ret;
4842 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4843 if (IS_ERR(mailbox))
4844 return PTR_ERR(mailbox);
4846 cq_context = mailbox->buf;
4847 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4849 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
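/*
 * As with QPC updates, the second context in the mailbox is a mask:
 * filled with 0xff, then cleared for the two fields being modified.
 */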
4851 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4852 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4853 cq_count);
4854 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4855 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4856 0);
4857 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4858 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4859 cq_period);
4860 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4861 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4862 0);
4864 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4865 HNS_ROCE_CMD_MODIFY_CQC,
4866 HNS_ROCE_CMD_TIMEOUT_MSECS);
4867 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4868 if (ret)
4869 ibdev_err(&hr_dev->ib_dev,
4870 "failed to process cmd when modifying CQ, ret = %d\n",
4871 ret);
4873 return ret;
4874 }
4876 static void hns_roce_irq_work_handle(struct work_struct *work)
4877 {
4878 struct hns_roce_work *irq_work =
4879 container_of(work, struct hns_roce_work, work);
4880 struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
4881 u32 qpn = irq_work->qpn;
4882 u32 cqn = irq_work->cqn;
4884 switch (irq_work->event_type) {
4885 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4886 ibdev_info(ibdev, "Path migration succeeded.\n");
4887 break;
4888 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4889 ibdev_warn(ibdev, "Path migration failed.\n");
4890 break;
4891 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4892 break;
4893 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4894 ibdev_warn(ibdev, "Send queue drained.\n");
4895 break;
4896 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4897 ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
4898 qpn, irq_work->sub_type);
4899 break;
4900 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4901 ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
4902 qpn);
4903 break;
4904 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4905 ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
4906 qpn, irq_work->sub_type);
4907 break;
4908 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4909 ibdev_warn(ibdev, "SRQ limit reached.\n");
4910 break;
4911 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4912 ibdev_warn(ibdev, "SRQ last wqe reached.\n");
4913 break;
4914 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4915 ibdev_err(ibdev, "SRQ catas error.\n");
4916 break;
4917 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4918 ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
4919 break;
4920 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4921 ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
4922 break;
4923 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4924 ibdev_warn(ibdev, "DB overflow.\n");
4925 break;
4926 case HNS_ROCE_EVENT_TYPE_FLR:
4927 ibdev_warn(ibdev, "Function level reset.\n");
4928 break;
4929 default:
4930 break;
4931 }
4933 kfree(irq_work);
4934 }
4936 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4937 struct hns_roce_eq *eq,
4938 u32 qpn, u32 cqn)
4939 {
4940 struct hns_roce_work *irq_work;
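/* Called from EQ interrupt context, so the allocation must not sleep */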
4942 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4943 if (!irq_work)
4944 return;
4946 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4947 irq_work->hr_dev = hr_dev;
4948 irq_work->qpn = qpn;
4949 irq_work->cqn = cqn;
4950 irq_work->event_type = eq->event_type;
4951 irq_work->sub_type = eq->sub_type;
4952 queue_work(hr_dev->irq_workq, &(irq_work->work));
4953 }
4955 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4956 {
4957 struct hns_roce_dev *hr_dev = eq->hr_dev;
4958 __le32 doorbell[2] = {};
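/*
 * doorbell[0] carries the command (and, for CEQs, the EQN tag);
 * doorbell[1] carries the consumer index masked by
 * HNS_ROCE_V2_CONS_IDX_M.
 */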
4960 if (eq->type_flag == HNS_ROCE_AEQ) {
4961 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4962 HNS_ROCE_V2_EQ_DB_CMD_S,
4963 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4964 HNS_ROCE_EQ_DB_CMD_AEQ :
4965 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4966 } else {
4967 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4968 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4970 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4971 HNS_ROCE_V2_EQ_DB_CMD_S,
4972 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4973 HNS_ROCE_EQ_DB_CMD_CEQ :
4974 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4975 }
4977 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4978 HNS_ROCE_V2_EQ_DB_PARA_S,
4979 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4981 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4982 }
4984 static inline void *get_eqe_buf(struct hns_roce_eq *eq, unsigned long offset)
4985 {
4986 u32 buf_chk_sz;
4988 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4989 if (eq->buf.nbufs == 1)
4990 return eq->buf.direct.buf + offset % buf_chk_sz;
4991 else
4992 return eq->buf.page_list[offset / buf_chk_sz].buf +
4993 offset % buf_chk_sz;
4994 }
4996 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
4997 {
4998 struct hns_roce_aeqe *aeqe;
5000 aeqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
5001 HNS_ROCE_AEQ_ENTRY_SIZE);
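/*
 * The owner bit written by hardware flips on every pass through the
 * ring; an entry is valid when its owner bit differs from the software
 * pass parity, !!(cons_index & entries).
 */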
5002 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5003 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5004 }
5006 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5007 struct hns_roce_eq *eq)
5008 {
5009 struct device *dev = hr_dev->dev;
5010 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5011 int aeqe_found = 0;
5012 int event_type;
5013 int sub_type;
5014 u32 srqn;
5015 u32 qpn;
5016 u32 cqn;
5018 while (aeqe) {
5019 /* Make sure we read the AEQ entry after we have checked the
5020 * ownership bit
5021 */
5022 dma_rmb();
5024 event_type = roce_get_field(aeqe->asyn,
5025 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5026 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5027 sub_type = roce_get_field(aeqe->asyn,
5028 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5029 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5030 qpn = roce_get_field(aeqe->event.qp_event.qp,
5031 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5032 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5033 cqn = roce_get_field(aeqe->event.cq_event.cq,
5034 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5035 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5036 srqn = roce_get_field(aeqe->event.srq_event.srq,
5037 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5038 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5040 switch (event_type) {
5041 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5042 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5043 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5044 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5045 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5046 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5047 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5048 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5049 hns_roce_qp_event(hr_dev, qpn, event_type);
5050 break;
5051 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5052 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5053 hns_roce_srq_event(hr_dev, srqn, event_type);
5054 break;
5055 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5056 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5057 hns_roce_cq_event(hr_dev, cqn, event_type);
5058 break;
5059 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5060 break;
5061 case HNS_ROCE_EVENT_TYPE_MB:
5062 hns_roce_cmd_event(hr_dev,
5063 le16_to_cpu(aeqe->event.cmd.token),
5064 aeqe->event.cmd.status,
5065 le64_to_cpu(aeqe->event.cmd.out_param));
5066 break;
5067 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5068 break;
5069 case HNS_ROCE_EVENT_TYPE_FLR:
5070 break;
5071 default:
5072 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5073 event_type, eq->eqn, eq->cons_index);
5074 break;
5075 }
5077 eq->event_type = event_type;
5078 eq->sub_type = sub_type;
5079 ++eq->cons_index;
5080 aeqe_found = 1;
5082 if (eq->cons_index > (2 * eq->entries - 1))
5083 eq->cons_index = 0;
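/*
 * cons_index wraps at twice the queue depth so that
 * (cons_index & entries) flips parity on each lap, matching the owner
 * bit check in next_aeqe_sw_v2().
 */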
5085 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5087 aeqe = next_aeqe_sw_v2(eq);
5088 }
5090 set_eq_cons_index_v2(eq);
5091 return aeqe_found;
5092 }
5094 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5095 {
5096 struct hns_roce_ceqe *ceqe;
5098 ceqe = get_eqe_buf(eq, (eq->cons_index & (eq->entries - 1)) *
5099 HNS_ROCE_CEQ_ENTRY_SIZE);
5100 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5101 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5102 }
5104 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5105 struct hns_roce_eq *eq)
5106 {
5107 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5108 int ceqe_found = 0;
5109 u32 cqn;
5111 while (ceqe) {
5112 /* Make sure we read the CEQ entry after we have checked the
5113 * ownership bit
5114 */
5115 dma_rmb();
5117 cqn = roce_get_field(ceqe->comp, HNS_ROCE_V2_CEQE_COMP_CQN_M,
5118 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5120 hns_roce_cq_completion(hr_dev, cqn);
5122 ++eq->cons_index;
5123 ceqe_found = 1;
5125 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1))
5126 eq->cons_index = 0;
5128 ceqe = next_ceqe_sw_v2(eq);
5129 }
5131 set_eq_cons_index_v2(eq);
5133 return ceqe_found;
5134 }
5136 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5137 {
5138 struct hns_roce_eq *eq = eq_ptr;
5139 struct hns_roce_dev *hr_dev = eq->hr_dev;
5140 int int_work = 0;
5142 if (eq->type_flag == HNS_ROCE_CEQ)
5143 /* Completion event interrupt */
5144 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5145 else
5146 /* Asynchronous event interrupt */
5147 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5149 return IRQ_RETVAL(int_work);
5150 }
5152 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5153 {
5154 struct hns_roce_dev *hr_dev = dev_id;
5155 struct device *dev = hr_dev->dev;
5156 int int_work = 0;
5157 u32 int_st;
5158 u32 int_en;
5160 /* Abnormal interrupt */
5161 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5162 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5164 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5165 struct pci_dev *pdev = hr_dev->pci_dev;
5166 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5167 const struct hnae3_ae_ops *ops = ae_dev->ops;
5169 dev_err(dev, "AEQ overflow!\n");
5171 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5172 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5174 /* Set reset level for reset_event() */
5175 if (ops->set_default_reset_request)
5176 ops->set_default_reset_request(ae_dev,
5177 HNAE3_FUNC_RESET);
5178 if (ops->reset_event)
5179 ops->reset_event(pdev, NULL);
5181 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5182 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5184 int_work = 1;
5185 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5186 dev_err(dev, "BUS ERR!\n");
5188 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5189 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5191 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5192 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5194 int_work = 1;
5195 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5196 dev_err(dev, "OTHER ERR!\n");
5198 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5199 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5201 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5202 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5204 int_work = 1;
5205 } else
5206 dev_err(dev, "No abnormal irq found!\n");
5208 return IRQ_RETVAL(int_work);
5209 }
5211 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5212 int eq_num, int enable_flag)
5213 {
5214 int i;
5216 if (enable_flag == EQ_ENABLE) {
5217 for (i = 0; i < eq_num; i++)
5218 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5219 i * EQ_REG_OFFSET,
5220 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5222 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5223 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5224 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5225 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5226 } else {
5227 for (i = 0; i < eq_num; i++)
5228 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5229 i * EQ_REG_OFFSET,
5230 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5232 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5233 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5234 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5235 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5236 }
5237 }
5239 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5240 {
5241 struct device *dev = hr_dev->dev;
5242 int ret;
5244 if (eqn < hr_dev->caps.num_comp_vectors)
5245 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5246 0, HNS_ROCE_CMD_DESTROY_CEQC,
5247 HNS_ROCE_CMD_TIMEOUT_MSECS);
5248 else
5249 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5250 0, HNS_ROCE_CMD_DESTROY_AEQC,
5251 HNS_ROCE_CMD_TIMEOUT_MSECS);
5252 if (ret)
5253 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0)
		hns_roce_mtr_cleanup(hr_dev, &eq->mtr);
	hns_roce_buf_free(hr_dev, eq->buf.size, &eq->buf);
}

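/*
 * Fill the EQ context (EQC) mailbox buffer from the software EQ state. The
 * buffer base address is spread across several context words because the EQC
 * packs bit ranges of the 64-bit DMA addresses (e.g. eqe_ba[34:3] and
 * eqe_ba[63:35]) into separate fields.
 */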
static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq,
				void *mb_buf)
{
	struct hns_roce_eq_context *eqc;
	u64 ba[MTT_MIN_COUNT] = { 0 };
	int count;

	eqc = mb_buf;
	memset(eqc, 0, sizeof(struct hns_roce_eq_context));

	/* init eqc */
	eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
	eq->hop_num = hr_dev->caps.eqe_hop_num;
	eq->cons_index = 0;
	eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
	eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
	eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
	eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
	eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
	eq->shift = ilog2((unsigned int)eq->entries);

	/* if not multi-hop, the eqe buffer uses only one chunk */
	if (!eq->hop_num || eq->hop_num == HNS_ROCE_HOP_NUM_0) {
		eq->eqe_ba = eq->buf.direct.map;
		eq->cur_eqe_ba = eq->eqe_ba;
		if (eq->buf.npages > 1)
			eq->nxt_eqe_ba = eq->eqe_ba + (1 << eq->eqe_buf_pg_sz);
		else
			eq->nxt_eqe_ba = eq->eqe_ba;
	} else {
		count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, ba,
					  MTT_MIN_COUNT, &eq->eqe_ba);
		eq->cur_eqe_ba = ba[0];
		if (count > 1)
			eq->nxt_eqe_ba = ba[1];
		else
			eq->nxt_eqe_ba = ba[0];
	}

	/* set eqc state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQ_ST_M, HNS_ROCE_EQC_EQ_ST_S,
		       HNS_ROCE_V2_EQ_STATE_VALID);

	/* set eqe hop num */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_HOP_NUM_M,
		       HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);

	/* set eqc over_ignore */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_OVER_IGNORE_M,
		       HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);

	/* set eqc coalesce */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_COALESCE_M,
		       HNS_ROCE_EQC_COALESCE_S, eq->coalesce);

	/* set eqc arm_state */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_ARM_ST_M,
		       HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);

	/* set eqn */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQN_M, HNS_ROCE_EQC_EQN_S,
		       eq->eqn);

	/* set eqe_cnt */
	roce_set_field(eqc->byte_4, HNS_ROCE_EQC_EQE_CNT_M,
		       HNS_ROCE_EQC_EQE_CNT_S, HNS_ROCE_EQ_INIT_EQE_CNT);

	/* set eqe_ba_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BA_PG_SZ_M,
		       HNS_ROCE_EQC_BA_PG_SZ_S,
		       eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);

	/* set eqe_buf_pg_sz */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_BUF_PG_SZ_M,
		       HNS_ROCE_EQC_BUF_PG_SZ_S,
		       eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);

	/* set eq_producer_idx */
	roce_set_field(eqc->byte_8, HNS_ROCE_EQC_PROD_INDX_M,
		       HNS_ROCE_EQC_PROD_INDX_S, HNS_ROCE_EQ_INIT_PROD_IDX);

	/* set eq_max_cnt */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_MAX_CNT_M,
		       HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);

	/* set eq_period */
	roce_set_field(eqc->byte_12, HNS_ROCE_EQC_PERIOD_M,
		       HNS_ROCE_EQC_PERIOD_S, eq->eq_period);

	/* set eqe_report_timer */
	roce_set_field(eqc->eqe_report_timer, HNS_ROCE_EQC_REPORT_TIMER_M,
		       HNS_ROCE_EQC_REPORT_TIMER_S,
		       HNS_ROCE_EQ_INIT_REPORT_TIMER);

	/* set eqe_ba [34:3] */
	roce_set_field(eqc->eqe_ba0, HNS_ROCE_EQC_EQE_BA_L_M,
		       HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);

	/* set eqe_ba [63:35] */
	roce_set_field(eqc->eqe_ba1, HNS_ROCE_EQC_EQE_BA_H_M,
		       HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);

	/* set eq shift */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_SHIFT_M, HNS_ROCE_EQC_SHIFT_S,
		       eq->shift);

	/* set eq MSI_IDX */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_MSI_INDX_M,
		       HNS_ROCE_EQC_MSI_INDX_S, HNS_ROCE_EQ_INIT_MSI_IDX);

	/* set cur_eqe_ba [27:12] */
	roce_set_field(eqc->byte_28, HNS_ROCE_EQC_CUR_EQE_BA_L_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);

	/* set cur_eqe_ba [59:28] */
	roce_set_field(eqc->byte_32, HNS_ROCE_EQC_CUR_EQE_BA_M_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);

	/* set cur_eqe_ba [63:60] */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CUR_EQE_BA_H_M,
		       HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);

	/* set eq consumer idx */
	roce_set_field(eqc->byte_36, HNS_ROCE_EQC_CONS_INDX_M,
		       HNS_ROCE_EQC_CONS_INDX_S, HNS_ROCE_EQ_INIT_CONS_IDX);

	/* set nxt_eqe_ba [43:12] */
	roce_set_field(eqc->nxt_eqe_ba0, HNS_ROCE_EQC_NXT_EQE_BA_L_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);

	/* set nxt_eqe_ba [63:44] */
	roce_set_field(eqc->nxt_eqe_ba1, HNS_ROCE_EQC_NXT_EQE_BA_H_M,
		       HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
}

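/*
 * Map a multi-hop EQE buffer into the MTR (memory translate region). A
 * temporary list of block DMA addresses is built from the kernel buffer and
 * attached to the mtr; the temporary list is freed on every exit path.
 */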
static int map_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
		      u32 page_shift)
{
	struct hns_roce_buf_region region = {};
	dma_addr_t *buf_list = NULL;
	int ba_num;
	int ret;

	ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
			      1 << page_shift);
	hns_roce_init_buf_region(&region, hr_dev->caps.eqe_hop_num, 0, ba_num);

	/* alloc a tmp list for storing eq buf address */
	ret = hns_roce_alloc_buf_list(&region, &buf_list, 1);
	if (ret) {
		dev_err(hr_dev->dev, "alloc eq buf_list error\n");
		return ret;
	}

	ba_num = hns_roce_get_kmem_bufs(hr_dev, buf_list, region.count,
					region.offset, &eq->buf);
	if (ba_num != region.count) {
		dev_err(hr_dev->dev, "get eqe buf err, expect %d, ret %d.\n",
			region.count, ba_num);
		ret = -ENOBUFS;
		goto done;
	}

	hns_roce_mtr_init(&eq->mtr, PAGE_SHIFT + hr_dev->caps.eqe_ba_pg_sz,
			  page_shift);
	ret = hns_roce_mtr_attach(hr_dev, &eq->mtr, &buf_list, &region, 1);
	if (ret)
		dev_err(hr_dev->dev, "mtr attach error for eqe\n");

done:
	hns_roce_free_buf_list(&buf_list, 1);

	return ret;
}

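/*
 * Allocate the EQE buffer. With no hop the buffer is a single page-sized
 * chunk, with HNS_ROCE_HOP_NUM_0 it is one contiguous block, and in the true
 * multi-hop case the page-aligned buffer must also be mapped via map_eq_buf().
 */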
static int alloc_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_buf *buf = &eq->buf;
	bool is_mhop = false;
	u32 page_shift;
	u32 mhop_num;
	u32 max_size;
	int ret;

	page_shift = PAGE_SHIFT + hr_dev->caps.eqe_buf_pg_sz;
	mhop_num = hr_dev->caps.eqe_hop_num;
	if (!mhop_num) {
		max_size = 1 << page_shift;
		buf->size = max_size;
	} else if (mhop_num == HNS_ROCE_HOP_NUM_0) {
		max_size = eq->entries * eq->eqe_size;
		buf->size = max_size;
	} else {
		max_size = 1 << page_shift;
		buf->size = PAGE_ALIGN(eq->entries * eq->eqe_size);
		is_mhop = true;
	}

	ret = hns_roce_buf_alloc(hr_dev, buf->size, max_size, buf, page_shift);
	if (ret) {
		dev_err(hr_dev->dev, "alloc eq buf error\n");
		return ret;
	}

	if (is_mhop) {
		ret = map_eq_buf(hr_dev, eq, page_shift);
		if (ret) {
			dev_err(hr_dev->dev, "map roce buf error\n");
			goto err_alloc;
		}
	}

	return 0;

err_alloc:
	hns_roce_buf_free(hr_dev, buf->size, buf);
	return ret;
}

static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
				 struct hns_roce_eq *eq,
				 unsigned int eq_cmd)
{
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* Pass the allocator's error code through instead of masking it */
	ret = alloc_eq_buf(hr_dev, eq);
	if (ret)
		goto free_cmd_mbox;

	hns_roce_config_eqc(hr_dev, eq, mailbox->buf);

	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		dev_err(hr_dev->dev, "[mailbox cmd] create eqc failed.\n");
		goto err_cmd_mbox;
	}

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_cmd_mbox:
	free_eq_buf(hr_dev, eq);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

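/*
 * Request all MSI-X vectors: abnormal vectors first, then AEQs, then CEQs.
 * Note that eq_table->eq[] holds the CEQs before the AEQs, while irq_names[]
 * is ordered abn/aeq/ceq, hence the j + aeq_num and j - comp_num name-index
 * corrections below.
 */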
static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
				  int comp_num, int aeq_num, int other_num)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int i, j;
	int ret;

	for (i = 0; i < irq_num; i++) {
		hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
					       GFP_KERNEL);
		if (!hr_dev->irq_names[i]) {
			ret = -ENOMEM;
			goto err_kzalloc_failed;
		}
	}

	/* irq contains: abnormal + AEQ + CEQ */
	for (j = 0; j < other_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-abn-%d", j);

	for (j = other_num; j < (other_num + aeq_num); j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-aeq-%d", j - other_num);

	for (j = (other_num + aeq_num); j < irq_num; j++)
		snprintf((char *)hr_dev->irq_names[j], HNS_ROCE_INT_NAME_LEN,
			 "hns-ceq-%d", j - other_num - aeq_num);

	for (j = 0; j < irq_num; j++) {
		if (j < other_num)
			ret = request_irq(hr_dev->irq[j],
					  hns_roce_v2_msix_interrupt_abn,
					  0, hr_dev->irq_names[j], hr_dev);
		else if (j < (other_num + comp_num))
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j + aeq_num],
					  &eq_table->eq[j - other_num]);
		else
			ret = request_irq(eq_table->eq[j - other_num].irq,
					  hns_roce_v2_msix_interrupt_eq,
					  0, hr_dev->irq_names[j - comp_num],
					  &eq_table->eq[j - other_num]);
		if (ret) {
			dev_err(hr_dev->dev, "Request irq error!\n");
			goto err_request_failed;
		}
	}

	return 0;

err_request_failed:
	for (j -= 1; j >= 0; j--)
		if (j < other_num)
			free_irq(hr_dev->irq[j], hr_dev);
		else
			free_irq(eq_table->eq[j - other_num].irq,
				 &eq_table->eq[j - other_num]);

err_kzalloc_failed:
	for (i -= 1; i >= 0; i--)
		kfree(hr_dev->irq_names[i]);

	return ret;
}

static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
{
	int irq_num;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	irq_num = eq_num + hr_dev->caps.num_other_vectors;

	for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
		free_irq(hr_dev->irq[i], hr_dev);

	for (i = 0; i < eq_num; i++)
		free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);

	for (i = 0; i < irq_num; i++)
		kfree(hr_dev->irq_names[i]);
}

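/*
 * Create all event queues (CEQs first, then AEQs, matching the vector layout
 * above), unmask their interrupts, request the irq lines, and set up the
 * ordered workqueue used to handle events outside irq context. The error
 * paths unwind in strict reverse order.
 */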
static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = hr_dev->dev;
	struct hns_roce_eq *eq;
	unsigned int eq_cmd;
	int irq_num;
	int eq_num;
	int other_num;
	int comp_num;
	int aeq_num;
	int i;
	int ret;

	other_num = hr_dev->caps.num_other_vectors;
	comp_num = hr_dev->caps.num_comp_vectors;
	aeq_num = hr_dev->caps.num_aeq_vectors;

	eq_num = comp_num + aeq_num;
	irq_num = eq_num + other_num;

	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	/* create eq */
	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		if (i < comp_num) {
			/* CEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->entries = hr_dev->caps.ceqe_depth;
			eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[i + other_num + aeq_num];
			eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
		} else {
			/* AEQ */
			eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
			eq->irq = hr_dev->irq[i - comp_num + other_num];
			eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
			eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
		}

		ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
		if (ret) {
			dev_err(dev, "eq create failed.\n");
			goto err_create_eq_fail;
		}
	}

	/* enable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);

	ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
				     aeq_num, other_num);
	if (ret) {
		dev_err(dev, "Request irq failed.\n");
		goto err_request_irq_fail;
	}

	hr_dev->irq_workq = alloc_ordered_workqueue("hns_roce_irq_workq", 0);
	if (!hr_dev->irq_workq) {
		dev_err(dev, "Create irq workqueue failed!\n");
		ret = -ENOMEM;
		goto err_create_wq_fail;
	}

	return 0;

err_create_wq_fail:
	__hns_roce_free_irq(hr_dev);

err_request_irq_fail:
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

err_create_eq_fail:
	for (i -= 1; i >= 0; i--)
		free_eq_buf(hr_dev, &eq_table->eq[i]);
	kfree(eq_table->eq);

	return ret;
}

static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;

	/* Disable irq */
	hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);

	__hns_roce_free_irq(hr_dev);

	for (i = 0; i < eq_num; i++) {
		hns_roce_v2_destroy_eqc(hr_dev, i);

		free_eq_buf(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eq);

	flush_workqueue(hr_dev->irq_workq);
	destroy_workqueue(hr_dev->irq_workq);
}

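/*
 * Fill the SRQ context (SRQC) mailbox buffer. As with the EQ context, the WQE
 * and index-queue base addresses are split into low/high bit ranges, and
 * sizes are encoded as log2 values (SRQ_SHIFT, RQWS).
 */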
static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
				   u32 cqn, void *mb_buf, u64 *mtts_wqe,
				   u64 *mtts_idx, dma_addr_t dma_handle_wqe,
				   dma_addr_t dma_handle_idx)
{
	struct hns_roce_srq_context *srq_context;

	srq_context = mb_buf;
	memset(srq_context, 0, sizeof(*srq_context));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
		       SRQC_BYTE_4_SRQ_ST_S, 1);

	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
			hr_dev->caps.srqwqe_hop_num));
	roce_set_field(srq_context->byte_4_srqn_srqst,
		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
		       ilog2(srq->wqe_cnt));

	roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
		       SRQC_BYTE_4_SRQN_S, srq->srqn);

	roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
		       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

	roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
		       SRQC_BYTE_12_SRQ_XRCD_S, xrcd);

	srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));

	roce_set_field(srq_context->byte_24_wqe_bt_ba,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
		       SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
		       dma_handle_wqe >> 35);

	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
		       SRQC_BYTE_28_PD_S, pdn);
	roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
		       SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
		       fls(srq->max_gs - 1));

	srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
	roce_set_field(srq_context->rsv_idx_bt_ba,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
		       SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
		       dma_handle_idx >> 35);

	srq_context->idx_cur_blk_addr =
		cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
		       SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
		       mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
		       hr_dev->caps.idx_hop_num);

	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
		       hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
		       SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
		       hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);

	srq_context->idx_nxt_blk_addr =
		cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
	roce_set_field(srq_context->rsv_idxnxtblkaddr,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
		       SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
		       mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
		       cqn);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
		       hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(srq_context->byte_56_xrc_cqn,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
		       SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
		       hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);

	roce_set_bit(srq_context->db_record_addr_record_en,
		     SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
}

static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
				  struct ib_srq_attr *srq_attr,
				  enum ib_srq_attr_mask srq_attr_mask,
				  struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_srq_context *srqc_mask;
	struct hns_roce_cmd_mailbox *mailbox;
	int ret;

	if (srq_attr_mask & IB_SRQ_LIMIT) {
		if (srq_attr->srq_limit >= srq->wqe_cnt)
			return -EINVAL;

		mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
		if (IS_ERR(mailbox))
			return PTR_ERR(mailbox);

		srq_context = mailbox->buf;
		srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;

		memset(srqc_mask, 0xff, sizeof(*srqc_mask));

		roce_set_field(srq_context->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
		roce_set_field(srqc_mask->byte_8_limit_wl,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_M,
			       SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);

		ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
					HNS_ROCE_CMD_MODIFY_SRQC,
					HNS_ROCE_CMD_TIMEOUT_MSECS);
		hns_roce_free_cmd_mailbox(hr_dev, mailbox);
		if (ret) {
			ibdev_err(&hr_dev->ib_dev,
				  "failed to process cmd when modifying SRQ, ret = %d\n",
				  ret);
			return ret;
		}
	}

	return 0;
}

static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_srq_context *srq_context;
	struct hns_roce_cmd_mailbox *mailbox;
	int limit_wl;
	int ret;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* srq_context aliases mailbox->buf, so the context can be read
	 * directly once the query mailbox command completes.
	 */
	srq_context = mailbox->buf;
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
				HNS_ROCE_CMD_QUERY_SRQC,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret) {
		ibdev_err(&hr_dev->ib_dev,
			  "failed to process cmd when querying SRQ, ret = %d\n",
			  ret);
		goto out;
	}

	limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
				  SRQC_BYTE_8_SRQ_LIMIT_WL_M,
				  SRQC_BYTE_8_SRQ_LIMIT_WL_S);

	attr->srq_limit = limit_wl;
	attr->max_wr = srq->wqe_cnt - 1;
	attr->max_sge = srq->max_gs;

out:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
	return ret;
}

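/*
 * SRQ WQE slots are tracked with a bitmap: find_empty_entry() claims a free
 * slot, and fill_idx_queue() publishes its index to the hardware-visible
 * index queue at the current ring position.
 */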
static int find_empty_entry(struct hns_roce_idx_que *idx_que,
			    unsigned long size)
{
	int wqe_idx;

	if (unlikely(bitmap_full(idx_que->bitmap, size)))
		return -ENOSPC;

	wqe_idx = find_first_zero_bit(idx_que->bitmap, size);

	bitmap_set(idx_que->bitmap, wqe_idx, 1);

	return wqe_idx;
}

static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
			   int cur_idx, int wqe_idx)
{
	unsigned int *addr;

	addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
						   cur_idx * idx_que->entry_sz);
	*addr = wqe_idx;
}

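/*
 * Post receive WRs to an SRQ: claim an index-queue slot per WR, copy the
 * scatter list into the WQE (terminating with a zero-length segment using
 * lkey 0x100 when fewer than max_gs SGEs are supplied), then ring the SRQ
 * doorbell once, after the write memory barrier.
 */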
static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
				     const struct ib_recv_wr *wr,
				     const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_v2_db srq_db;
	unsigned long flags;
	int ret = 0;
	int wqe_idx;
	void *wqe;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&srq->lock, flags);

	ind = srq->head & (srq->wqe_cnt - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(wr->num_sge > srq->max_gs)) {
			ret = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely(srq->head == srq->tail)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
		if (wqe_idx < 0) {
			ret = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
		wqe = get_srq_wqe(srq, wqe_idx);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;

		for (i = 0; i < wr->num_sge; ++i) {
			dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
			dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
			dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
		}

		if (i < srq->max_gs) {
			dseg[i].len = 0;
			dseg[i].lkey = cpu_to_le32(0x100);
			dseg[i].addr = 0;
		}

		srq->wrid[wqe_idx] = wr->wr_id;
		ind = (ind + 1) & (srq->wqe_cnt - 1);
	}

	if (likely(nreq)) {
		srq->head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		srq_db.byte_4 =
			cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
				    (srq->srqn & V2_DB_BYTE_4_TAG_M));
		srq_db.parameter = cpu_to_le32(srq->head);

		hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
	}

	spin_unlock_irqrestore(&srq->lock, flags);

	return ret;
}

static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
	.query_cqc_info = hns_roce_v2_query_cqc_info,
};

static const struct ib_device_ops hns_roce_v2_dev_ops = {
	.destroy_qp = hns_roce_v2_destroy_qp,
	.modify_cq = hns_roce_v2_modify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.post_recv = hns_roce_v2_post_recv,
	.post_send = hns_roce_v2_post_send,
	.query_qp = hns_roce_v2_query_qp,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
};

static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.query_srq = hns_roce_v2_query_srq,
};

static const struct hns_roce_hw hns_roce_hw_v2 = {
	.cmq_init = hns_roce_v2_cmq_init,
	.cmq_exit = hns_roce_v2_cmq_exit,
	.hw_profile = hns_roce_v2_profile,
	.hw_init = hns_roce_v2_init,
	.hw_exit = hns_roce_v2_exit,
	.post_mbox = hns_roce_v2_post_mbox,
	.chk_mbox = hns_roce_v2_chk_mbox,
	.rst_prc_mbox = hns_roce_v2_rst_process_cmd,
	.set_gid = hns_roce_v2_set_gid,
	.set_mac = hns_roce_v2_set_mac,
	.write_mtpt = hns_roce_v2_write_mtpt,
	.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
	.frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
	.mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
	.write_cqc = hns_roce_v2_write_cqc,
	.set_hem = hns_roce_v2_set_hem,
	.clear_hem = hns_roce_v2_clear_hem,
	.modify_qp = hns_roce_v2_modify_qp,
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
	.modify_cq = hns_roce_v2_modify_cq,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
	.init_eq = hns_roce_v2_init_eq_table,
	.cleanup_eq = hns_roce_v2_cleanup_eq_table,
	.write_srqc = hns_roce_v2_write_srqc,
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
	.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
	.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
};

static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);

static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
				   struct hnae3_handle *handle)
{
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	int i;

	hr_dev->pci_dev = handle->pdev;
	hr_dev->dev = &handle->pdev->dev;
	hr_dev->hw = &hns_roce_hw_v2;
	hr_dev->dfx = &hns_roce_dfx_hw_v2;
	hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
	hr_dev->odb_offset = hr_dev->sdb_offset;

	/* Get info from NIC driver. */
	hr_dev->reg_base = handle->rinfo.roce_io_base;
	hr_dev->caps.num_ports = 1;
	hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
	hr_dev->iboe.phy_port[0] = 0;

	addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
			    hr_dev->iboe.netdevs[0]->dev_addr);

	for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
		hr_dev->irq[i] = pci_irq_vector(handle->pdev,
						i + handle->rinfo.base_vector);

	/* cmd issue mode: 0 is poll, 1 is event */
	hr_dev->cmd_mod = 1;
	hr_dev->loop_idc = 0;

	hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
	priv->handle = handle;
}

static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;
	int ret;

	hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
	if (!hr_dev)
		return -ENOMEM;

	hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
	if (!hr_dev->priv) {
		ret = -ENOMEM;
		goto error_failed_kzalloc;
	}

	hns_roce_hw_v2_get_cfg(hr_dev, handle);

	ret = hns_roce_init(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
		goto error_failed_get_cfg;
	}

	handle->priv = hr_dev;

	return 0;

error_failed_get_cfg:
	kfree(hr_dev->priv);

error_failed_kzalloc:
	ib_dealloc_device(&hr_dev->ib_dev);

	return ret;
}

static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					     bool reset)
{
	struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;

	if (!hr_dev)
		return;

	handle->priv = NULL;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_UNINIT;
	hns_roce_handle_device_err(hr_dev);

	hns_roce_exit(hr_dev);
	kfree(hr_dev->priv);
	ib_dealloc_device(&hr_dev->ib_dev);
}

static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
{
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	const struct pci_device_id *id;
	struct device *dev = &handle->pdev->dev;
	int ret;

	handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;

	if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		goto reset_chk_err;
	}

	id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
	if (!id)
		return 0;

	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
		dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
		if (ops->ae_dev_resetting(handle) ||
		    ops->get_hw_reset_stat(handle))
			goto reset_chk_err;
		else
			return ret;
	}

	handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;

	return 0;

reset_chk_err:
	dev_err(dev, "Device is busy resetting, please retry later.\n");

	return -EBUSY;
}

static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
					   bool reset)
{
	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
		return;

	handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;

	__hns_roce_hw_v2_uninit_instance(handle, reset);

	handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
}

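/*
 * hnae3 reset flow: DOWN_CLIENT marks the device unusable and disables
 * doorbells, UNINIT_CLIENT tears the instance down after a short delay, and
 * INIT_CLIENT rebuilds it. The HNS_ROCE_RST_DIRECT_RETURN bit lets a device
 * that was never initialized skip the whole sequence.
 */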
static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
{
	struct hns_roce_dev *hr_dev;

	if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
		set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
	clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);

	hr_dev = (struct hns_roce_dev *)handle->priv;
	if (!hr_dev)
		return 0;

	hr_dev->is_reset = true;
	hr_dev->active = false;
	hr_dev->dis_db = true;

	hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN;

	return 0;
}

static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
{
	struct device *dev = &handle->pdev->dev;
	int ret;

	if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
			       &handle->rinfo.state)) {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		return 0;
	}

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;

	dev_info(&handle->pdev->dev, "In reset process RoCE client reinit.\n");
	ret = __hns_roce_hw_v2_init_instance(handle);
	if (ret) {
		/* The RoCE engine is reinitialized in this HNAE3_INIT_CLIENT
		 * reset-notify callback. If the reinit fails, clear
		 * handle->priv to inform the NIC driver.
		 */
		handle->priv = NULL;
		dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
	} else {
		handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
		dev_info(dev, "Reset done, RoCE client reinit finished.\n");
	}

	return ret;
}

static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
{
	if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
		return 0;

	handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
	dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
	msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
	__hns_roce_hw_v2_uninit_instance(handle, false);

	return 0;
}

static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
				       enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_DOWN_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_down(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_init(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns_roce_hw_v2_reset_notify_uninit(handle);
		break;
	default:
		break;
	}

	return ret;
}

static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
	.init_instance = hns_roce_hw_v2_init_instance,
	.uninit_instance = hns_roce_hw_v2_uninit_instance,
	.reset_notify = hns_roce_hw_v2_reset_notify,
};

static struct hnae3_client hns_roce_hw_v2_client = {
	.name = "hns_roce_hw_v2",
	.type = HNAE3_CLIENT_ROCE,
	.ops = &hns_roce_hw_v2_ops,
};

static int __init hns_roce_hw_v2_init(void)
{
	return hnae3_register_client(&hns_roce_hw_v2_client);
}

static void __exit hns_roce_hw_v2_exit(void)
{
	hnae3_unregister_client(&hns_roce_hw_v2_client);
}

module_init(hns_roce_hw_v2_init);
module_exit(hns_roce_hw_v2_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");