drivers/infiniband/hw/hns/hns_roce_hw_v2.c

/*
 * Copyright (c) 2016-2017 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <net/addrconf.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>

#include "hnae3.h"
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v2.h"

static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
			    struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}

static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			 struct hns_roce_wqe_frmr_seg *fseg,
			 const struct ib_reg_wr *wr)
{
	struct hns_roce_mr *mr = to_hr_mr(wr->mr);

	/* use ib_access_flags */
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
		     wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
		     wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_RR_S,
		     wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_RW_S,
		     wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
	roce_set_bit(rc_sq_wqe->byte_4,
		     V2_RC_FRMR_WQE_BYTE_4_LW_S,
		     wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);

	/* Data structure reuse may lead to confusion */
	rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
	rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);

	rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
	rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
	rc_sq_wqe->rkey = cpu_to_le32(wr->key);
	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);

	fseg->pbl_size = cpu_to_le32(mr->pbl_size);
	roce_set_field(fseg->mode_buf_pg_sz,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
		       V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
		       mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_bit(fseg->mode_buf_pg_sz,
		     V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
}

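/*
 * Note on the "reuse" above: for an FRMR WQE the generic msg_len and
 * inv_key words carry the low/high halves of the PBL base address, and
 * byte_16/byte_20 carry the low/high halves of the MR length, rather
 * than their usual meanings in an RC send WQE.
 */
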
static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
			   const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
		aseg->cmp_data = cpu_to_le64(wr->compare_add);
	} else {
		aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
		aseg->cmp_data = 0;
	}
}

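/*
 * Atomic segment layout: compare-and-swap uses both operands (the swap
 * value and the compare value), while fetch-and-add only needs the
 * addend, which travels in the fetchadd_swap_data slot with cmp_data
 * left as zero.
 */
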
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
			   unsigned int *sge_ind)
{
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct ib_sge *sg;
	int num_in_wqe = 0;
	int extend_sge_num;
	int fi_sge_num;
	int se_sge_num;
	int shift;
	int i;

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
		num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
	extend_sge_num = wr->num_sge - num_in_wqe;
	sg = wr->sg_list + num_in_wqe;
	shift = qp->hr_buf.page_shift;

	/*
	 * Check whether wr->num_sge sges are in the same page. If not, we
	 * should calculate how many sges in the first page and the second
	 * page.
	 */
	dseg = get_send_extend_sge(qp, (*sge_ind) & (qp->sge.sge_cnt - 1));
	fi_sge_num = (round_up((uintptr_t)dseg, 1 << shift) -
		      (uintptr_t)dseg) /
		      sizeof(struct hns_roce_v2_wqe_data_seg);
	if (extend_sge_num > fi_sge_num) {
		se_sge_num = extend_sge_num - fi_sge_num;
		for (i = 0; i < fi_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
		dseg = get_send_extend_sge(qp,
					   (*sge_ind) & (qp->sge.sge_cnt - 1));
		for (i = 0; i < se_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + fi_sge_num + i);
			(*sge_ind)++;
		}
	} else {
		for (i = 0; i < extend_sge_num; i++) {
			set_data_seg_v2(dseg++, sg + i);
			(*sge_ind)++;
		}
	}
}

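/*
 * The extended SGE area lives in a paged buffer, so a run of SGEs can
 * straddle a page boundary: the split above first writes the fi_sge_num
 * entries that fit before the end of the current page and then the
 * remaining se_sge_num entries starting from the next page.
 */
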
static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
			     void *wqe, unsigned int *sge_ind,
			     const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_v2_wqe_data_seg *dseg = wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	int i;

	if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
		if (le32_to_cpu(rc_sq_wqe->msg_len) >
		    hr_dev->caps.max_sq_inline) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
				rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
			return -EINVAL;
		}

		if (wr->opcode == IB_WR_RDMA_READ) {
			*bad_wr = wr;
			dev_err(hr_dev->dev, "Not support inline data!\n");
			return -EINVAL;
		}

		for (i = 0; i < wr->num_sge; i++) {
			memcpy(wqe, ((void *)wr->sg_list[i].addr),
			       wr->sg_list[i].length);
			wqe += wr->sg_list[i].length;
		}

		roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
			     1);
	} else {
		if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
			for (i = 0; i < wr->num_sge; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}
		} else {
			roce_set_field(rc_sq_wqe->byte_20,
				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     (*sge_ind) & (qp->sge.sge_cnt - 1));

			for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
				if (likely(wr->sg_list[i].length)) {
					set_data_seg_v2(dseg, wr->sg_list + i);
					dseg++;
				}
			}

			set_extend_sge(qp, wr, sge_ind);
		}

		roce_set_field(rc_sq_wqe->byte_16,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
			       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
	}

	return 0;
}

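/*
 * Two encodings are handled above: payloads posted with IB_SEND_INLINE
 * are copied directly into the WQE, while scatter/gather lists use up to
 * HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE data segments inside the WQE itself
 * and spill any further entries into the extended SGE area via
 * set_extend_sge().
 */
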
static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
				 const struct ib_qp_attr *attr,
				 int attr_mask, enum ib_qp_state cur_state,
				 enum ib_qp_state new_state);

static int hns_roce_v2_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
	struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct hns_roce_wqe_frmr_seg *fseg;
	struct device *dev = hr_dev->dev;
	struct hns_roce_v2_db sq_db;
	struct ib_qp_attr attr;
	unsigned int sge_ind;
	unsigned int owner_bit;
	unsigned long flags;
	unsigned int ind;
	void *wqe = NULL;
	bool loopback;
	int attr_mask;
	u32 tmp_len;
	int ret = 0;
	u32 hr_op;
	u8 *smac;
	int nreq;
	int i;

	if (unlikely(ibqp->qp_type != IB_QPT_RC &&
		     ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_UD)) {
		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
		*bad_wr = wr;
		return -EOPNOTSUPP;
	}

	if (unlikely(qp->state == IB_QPS_RESET || qp->state == IB_QPS_INIT ||
		     qp->state == IB_QPS_RTR)) {
		dev_err(dev, "Post WQE fail, QP state %d err!\n", qp->state);
		*bad_wr = wr;
		return -EINVAL;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);
	ind = qp->sq_next_wqe;
	sge_ind = qp->next_sge;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
			wr->wr_id;

		owner_bit =
		       ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
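		/*
		 * The owner bit is taken from the number of times sq.head has
		 * wrapped the power-of-two WQE ring: it flips on every wrap,
		 * which (as commonly done for ring buffers) lets hardware tell
		 * freshly posted WQEs apart from stale ones left over from the
		 * previous pass around the queue.
		 */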
		tmp_len = 0;

		/* Build the WQE according to the QP type */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			memset(ud_sq_wqe, 0, sizeof(*ud_sq_wqe));

			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_0_M,
				       V2_UD_SEND_WQE_DMAC_0_S, ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_1_M,
				       V2_UD_SEND_WQE_DMAC_1_S, ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_2_M,
				       V2_UD_SEND_WQE_DMAC_2_S, ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac, V2_UD_SEND_WQE_DMAC_3_M,
				       V2_UD_SEND_WQE_DMAC_3_S, ah->av.mac[3]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_M,
				       V2_UD_SEND_WQE_BYTE_48_DMAC_5_S,
				       ah->av.mac[5]);

			/* MAC loopback */
			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_LBI_S, loopback);

			roce_set_field(ud_sq_wqe->byte_4,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
				       HNS_ROCE_V2_WQE_OP_SEND);

			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ud_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ud_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			default:
				ud_sq_wqe->immtdata = 0;
				break;
			}

			/* Set sig attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				   V2_UD_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			/* Set se attr */
			roce_set_bit(ud_sq_wqe->byte_4,
				  V2_UD_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(ud_sq_wqe->byte_4,
				     V2_UD_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_PD_M,
				       V2_UD_SEND_WQE_BYTE_16_PD_S,
				       to_hr_pd(ibqp->pd)->pdn);

			roce_set_field(ud_sq_wqe->byte_16,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
				       V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
				       wr->num_sge);

			roce_set_field(ud_sq_wqe->byte_20,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
				     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
				     sge_ind & (qp->sge.sge_cnt - 1));

			roce_set_field(ud_sq_wqe->byte_24,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
				       V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
			ud_sq_wqe->qkey =
			     cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
			     qp->qkey : ud_wr(wr)->remote_qkey);
			roce_set_field(ud_sq_wqe->byte_32,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_M,
				       V2_UD_SEND_WQE_BYTE_32_DQPN_S,
				       ud_wr(wr)->remote_qpn);

			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_M,
				       V2_UD_SEND_WQE_BYTE_36_VLAN_S,
				       ah->av.vlan_id);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
				       V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->byte_36,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_M,
				       V2_UD_SEND_WQE_BYTE_36_TCLASS_S,
				       ah->av.tclass);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_M,
				       V2_UD_SEND_WQE_BYTE_40_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_SL_M,
				       V2_UD_SEND_WQE_BYTE_40_SL_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->byte_40,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_M,
				       V2_UD_SEND_WQE_BYTE_40_PORTN_S,
				       qp->port);

			roce_set_bit(ud_sq_wqe->byte_40,
				     V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
				     ah->av.vlan_en ? 1 : 0);
			roce_set_field(ud_sq_wqe->byte_48,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
				       V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
			       GID_LEN_V2);

			set_extend_sge(qp, wr, &sge_ind);
			ind++;
		} else if (ibqp->qp_type == IB_QPT_RC) {
			rc_sq_wqe = wqe;
			memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			rc_sq_wqe->msg_len =
			 cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				rc_sq_wqe->immtdata =
				      cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
				break;
			case IB_WR_SEND_WITH_INV:
				rc_sq_wqe->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				rc_sq_wqe->immtdata = 0;
				break;
			}

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_FENCE_S,
				     (wr->send_flags & IB_SEND_FENCE) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				  V2_RC_SEND_WQE_BYTE_4_SE_S,
				  (wr->send_flags & IB_SEND_SOLICITED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				   V2_RC_SEND_WQE_BYTE_4_CQE_S,
				   (wr->send_flags & IB_SEND_SIGNALED) ? 1 : 0);

			roce_set_bit(rc_sq_wqe->byte_4,
				     V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);

			wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_RDMA_WRITE_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
				rc_sq_wqe->rkey =
					cpu_to_le32(rdma_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(rdma_wr(wr)->remote_addr);
				break;
			case IB_WR_SEND:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND;
				break;
			case IB_WR_SEND_WITH_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
				break;
			case IB_WR_SEND_WITH_IMM:
				hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
				break;
			case IB_WR_LOCAL_INV:
				hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
				roce_set_bit(rc_sq_wqe->byte_4,
					     V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
				rc_sq_wqe->inv_key =
					    cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			case IB_WR_REG_MR:
				hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
				fseg = wqe;
				set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
				rc_sq_wqe->rkey =
					cpu_to_le32(atomic_wr(wr)->rkey);
				rc_sq_wqe->va =
					cpu_to_le64(atomic_wr(wr)->remote_addr);
				break;
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				hr_op =
				       HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
				break;
			case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
				hr_op =
				      HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
				break;
			default:
				hr_op = HNS_ROCE_V2_WQE_OP_MASK;
				break;
			}

			roce_set_field(rc_sq_wqe->byte_4,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
				       V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);

			if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
			    wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
				struct hns_roce_v2_wqe_data_seg *dseg;

				dseg = wqe;
				set_data_seg_v2(dseg, wr->sg_list);
				wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
				set_atomic_seg(wqe, atomic_wr(wr));
				roce_set_field(rc_sq_wqe->byte_16,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
					       V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
					       wr->num_sge);
			} else if (wr->opcode != IB_WR_REG_MR) {
				ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
							wqe, &sge_ind, bad_wr);
				if (ret)
					goto out;
			}

			ind++;
		} else {
			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
			spin_unlock_irqrestore(&qp->sq.lock, flags);
			*bad_wr = wr;
			return -EOPNOTSUPP;
		}
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		sq_db.byte_4 = 0;
		sq_db.parameter = 0;

		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_TAG_M,
			       V2_DB_BYTE_4_TAG_S, qp->doorbell_qpn);
		roce_set_field(sq_db.byte_4, V2_DB_BYTE_4_CMD_M,
			       V2_DB_BYTE_4_CMD_S, HNS_ROCE_V2_SQ_DB);
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_IDX_M,
			       V2_DB_PARAMETER_IDX_S,
			       qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
		roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
			       V2_DB_PARAMETER_SL_S, qp->sl);
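		/*
		 * The doorbell producer index is masked with
		 * (wqe_cnt << 1) - 1 rather than wqe_cnt - 1; carrying one
		 * extra wrap bit in the index is a common way to let the
		 * consumer distinguish a completely full ring from an empty
		 * one.
		 */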
		hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);

		qp->sq_next_wqe = ind;
		qp->next_sge = sge_ind;

		if (qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&qp->ibqp, &attr, attr_mask,
						    qp->state, IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&qp->sq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}

static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_v2_wqe_data_seg *dseg;
	struct hns_roce_rinl_sge *sge_list;
	struct device *dev = hr_dev->dev;
	struct ib_qp_attr attr;
	unsigned long flags;
	void *wqe = NULL;
	int attr_mask;
	int ret = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);
	ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);

	if (hr_qp->state == IB_QPS_RESET) {
		spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
		*bad_wr = wr;
		return -EINVAL;
	}

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(hr_qp, ind);
		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
		for (i = 0; i < wr->num_sge; i++) {
			if (!wr->sg_list[i].length)
				continue;
			set_data_seg_v2(dseg, wr->sg_list + i);
			dseg++;
		}

		if (i < hr_qp->rq.max_gs) {
			dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
			dseg->addr = 0;
		}

		/* The RQ supports inline data */
		if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
			sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
			hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
				(u32)wr->num_sge;
			for (i = 0; i < wr->num_sge; i++) {
				sge_list[i].addr =
					       (void *)(u64)wr->sg_list[i].addr;
				sge_list[i].len = wr->sg_list[i].length;
			}
		}

		hr_qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		*hr_qp->rdb.db_record = hr_qp->rq.head & 0xffff;

		if (hr_qp->state == IB_QPS_ERR) {
			attr_mask = IB_QP_STATE;
			attr.qp_state = IB_QPS_ERR;

			ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr,
						    attr_mask, hr_qp->state,
						    IB_QPS_ERR);
			if (ret) {
				spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
				*bad_wr = wr;
				return ret;
			}
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}

static int hns_roce_v2_cmd_hw_reseted(struct hns_roce_dev *hr_dev,
				      unsigned long instance_stage,
				      unsigned long reset_stage)
{
	/* When hardware reset has been completed once or more, we should stop
	 * sending mailbox&cmq&doorbell to hardware. If now in .init_instance()
	 * function, we should exit with error. If now at HNAE3_INIT_CLIENT
	 * stage of soft reset process, we should exit with error, and then
	 * HNAE3_INIT_CLIENT related process can rollback the operation like
	 * notifying hardware to free resources, HNAE3_INIT_CLIENT related
	 * process will exit with error to notify NIC driver to reschedule soft
	 * reset process once again.
	 */
	hr_dev->is_reset = true;
	hr_dev->dis_db = true;

	if (reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
					unsigned long instance_stage,
					unsigned long reset_stage)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When hardware reset is detected, we should stop sending mailbox&cmq&
	 * doorbell to hardware. If now in .init_instance() function, we should
	 * exit with error. If now at HNAE3_INIT_CLIENT stage of soft reset
	 * process, we should exit with error, and then HNAE3_INIT_CLIENT
	 * related process can rollback the operation like notifying hardware
	 * to free resources, HNAE3_INIT_CLIENT related process will exit with
	 * error to notify NIC driver to reschedule soft reset process once
	 * again.
	 */
	hr_dev->dis_db = true;
	if (!ops->get_hw_reset_stat(handle))
		hr_dev->is_reset = true;

	if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
	    instance_stage == HNS_ROCE_STATE_INIT)
		return CMD_RST_PRC_EBUSY;

	return CMD_RST_PRC_SUCCESS;
}

static int hns_roce_v2_cmd_sw_resetting(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;

	/* When software reset is detected at .init_instance() function, we
	 * should stop sending mailbox&cmq&doorbell to hardware, and exit
	 * with error.
	 */
	hr_dev->dis_db = true;
	if (ops->ae_dev_reset_cnt(handle) != hr_dev->reset_cnt)
		hr_dev->is_reset = true;

	return CMD_RST_PRC_EBUSY;
}

static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;	/* the current instance stage */
	unsigned long reset_stage;	/* the current reset stage */
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	if (hr_dev->is_reset)
		return CMD_RST_PRC_SUCCESS;

	/* Get information about reset from NIC driver or RoCE driver itself,
	 * the meaning of the following variables from NIC driver are described
	 * as below:
	 * reset_cnt -- The count value of completed hardware reset.
	 * hw_resetting -- Whether hardware device is resetting now.
	 * sw_resetting -- Whether NIC's software reset process is running now.
	 */
	instance_stage = handle->rinfo.instance_state;
	reset_stage = handle->rinfo.reset_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt)
		return hns_roce_v2_cmd_hw_reseted(hr_dev, instance_stage,
						  reset_stage);
	else if (hw_resetting)
		return hns_roce_v2_cmd_hw_resetting(hr_dev, instance_stage,
						    reset_stage);
	else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT)
		return hns_roce_v2_cmd_sw_resetting(hr_dev);

	return 0;
}

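/*
 * Return-code convention for the reset helpers above, as used by
 * hns_roce_cmq_send() below: CMD_RST_PRC_SUCCESS means a reset has
 * already completed so the command is treated as done, CMD_RST_PRC_EBUSY
 * means a reset is still in flight and the caller should back off, and 0
 * means no reset activity was seen and the command can be sent normally.
 */
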
static int hns_roce_cmq_space(struct hns_roce_v2_cmq_ring *ring)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;
	int used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

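/*
 * Ring-occupancy arithmetic: with next_to_use (ntu) chasing
 * next_to_clean (ntc) around desc_num slots, (ntu - ntc + desc_num) %
 * desc_num descriptors are in flight; one slot is always kept unused so
 * that a full ring can be told apart from an empty one.
 */
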
static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
					     DMA_BIDIRECTIONAL);
	if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
				   struct hns_roce_v2_cmq_ring *ring)
{
	dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
			 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
			 DMA_BIDIRECTIONAL);

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
}

static int hns_roce_init_cmq_ring(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;

	ring->flag = ring_type;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;

	return hns_roce_alloc_cmq_desc(hr_dev, ring);
}

static void hns_roce_cmq_init_regs(struct hns_roce_dev *hr_dev, bool ring_type)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *ring = (ring_type == TYPE_CSQ) ?
					    &priv->cmq.csq : &priv->cmq.crq;
	dma_addr_t dma = ring->desc_dma_addr;

	if (ring_type == TYPE_CSQ) {
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_TX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_TX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_TX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, 0);
	} else {
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_L_REG, (u32)dma);
		roce_write(hr_dev, ROCEE_RX_CMQ_BASEADDR_H_REG,
			   upper_32_bits(dma));
		roce_write(hr_dev, ROCEE_RX_CMQ_DEPTH_REG,
			   ring->desc_num >> HNS_ROCE_CMQ_DESC_NUM_S);
		roce_write(hr_dev, ROCEE_RX_CMQ_HEAD_REG, 0);
		roce_write(hr_dev, ROCEE_RX_CMQ_TAIL_REG, 0);
	}
}

static int hns_roce_v2_cmq_init(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	int ret;

	/* Setup the queue entries for command queue */
	priv->cmq.csq.desc_num = CMD_CSQ_DESC_NUM;
	priv->cmq.crq.desc_num = CMD_CRQ_DESC_NUM;

	/* Setup the lock for command queue */
	spin_lock_init(&priv->cmq.csq.lock);
	spin_lock_init(&priv->cmq.crq.lock);

	/* Setup Tx write back timeout */
	priv->cmq.tx_timeout = HNS_ROCE_CMQ_TX_TIMEOUT;

	/* Init CSQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CSQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CSQ error, ret = %d.\n", ret);
		return ret;
	}

	/* Init CRQ */
	ret = hns_roce_init_cmq_ring(hr_dev, TYPE_CRQ);
	if (ret) {
		dev_err(hr_dev->dev, "Init CRQ error, ret = %d.\n", ret);
		goto err_crq;
	}

	/* Init CSQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CSQ);

	/* Init CRQ REG */
	hns_roce_cmq_init_regs(hr_dev, TYPE_CRQ);

	return 0;

err_crq:
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);

	return ret;
}

static void hns_roce_v2_cmq_exit(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;

	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.csq);
	hns_roce_free_cmq_desc(hr_dev, &priv->cmq.crq);
}

static void hns_roce_cmq_setup_basic_desc(struct hns_roce_cmq_desc *desc,
					  enum hns_roce_opcode_type opcode,
					  bool is_read)
{
	memset((void *)desc, 0, sizeof(struct hns_roce_cmq_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
}

static int hns_roce_cmq_csq_done(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	u32 head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);

	return head == priv->cmq.csq.next_to_use;
}

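/*
 * The CSQ is "done" when the hardware-maintained head pointer has caught
 * up with the driver's next_to_use position, i.e. every descriptor
 * submitted so far has been consumed by firmware.
 */
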
static int hns_roce_cmq_csq_clean(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc;
	u16 ntc = csq->next_to_clean;
	u32 head;
	int clean = 0;

	desc = &csq->desc[ntc];
	head = roce_read(hr_dev, ROCEE_TX_CMQ_HEAD_REG);
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			       struct hns_roce_cmq_desc *desc, int num)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
	struct hns_roce_cmq_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	u16 desc_ret;
	int ret = 0;
	int ntc;

	spin_lock_bh(&csq->lock);

	if (num > hns_roce_cmq_space(csq)) {
		spin_unlock_bh(&csq->lock);
		return -EBUSY;
	}

	/*
	 * Record the location of desc in the cmq for this time
	 * which will be used for hardware to write back
	 */
	ntc = csq->next_to_use;

	while (handle < num) {
		desc_to_use = &csq->desc[csq->next_to_use];
		*desc_to_use = desc[handle];
		dev_dbg(hr_dev->dev, "set cmq desc:\n");
		csq->next_to_use++;
		if (csq->next_to_use == csq->desc_num)
			csq->next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	roce_write(hr_dev, ROCEE_TX_CMQ_TAIL_REG, csq->next_to_use);

	/*
	 * If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are to be sent, use the first one to check.
	 */
	if (le16_to_cpu(desc->flag) & HNS_ROCE_CMD_FLAG_NO_INTR) {
		do {
			if (hns_roce_cmq_csq_done(hr_dev))
				break;
			udelay(1);
			timeout++;
		} while (timeout < priv->cmq.tx_timeout);
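		/*
		 * The loop above busy-waits in 1us steps for up to
		 * tx_timeout iterations: descriptors are always submitted
		 * with HNS_ROCE_CMD_FLAG_NO_INTR (see
		 * hns_roce_cmq_setup_basic_desc()), so completion is detected
		 * by polling the CSQ head register rather than by interrupt.
		 */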
	}

	if (hns_roce_cmq_csq_done(hr_dev)) {
		complete = true;
		handle = 0;
		while (handle < num) {
			/* get the result of the hardware write-back */
			desc_to_use = &csq->desc[ntc];
			desc[handle] = *desc_to_use;
			dev_dbg(hr_dev->dev, "Get cmq desc:\n");
			desc_ret = le16_to_cpu(desc[handle].retval);
			if (desc_ret == CMD_EXEC_SUCCESS)
				ret = 0;
			else
				ret = -EIO;
			priv->cmq.last_status = desc_ret;
			ntc++;
			handle++;
			if (ntc == csq->desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		ret = -EAGAIN;

	/* clean the command send queue */
	handle = hns_roce_cmq_csq_clean(hr_dev);
	if (handle != num)
		dev_warn(hr_dev->dev, "Cleaned %d, need to clean %d\n",
			 handle, num);

	spin_unlock_bh(&csq->lock);

	return ret;
}

static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
			     struct hns_roce_cmq_desc *desc, int num)
{
	int retval;
	int ret;

	ret = hns_roce_v2_rst_process_cmd(hr_dev);
	if (ret == CMD_RST_PRC_SUCCESS)
		return 0;
	if (ret == CMD_RST_PRC_EBUSY)
		return -EBUSY;

	ret = __hns_roce_cmq_send(hr_dev, desc, num);
	if (ret) {
		retval = hns_roce_v2_rst_process_cmd(hr_dev);
		if (retval == CMD_RST_PRC_SUCCESS)
			return 0;
		else if (retval == CMD_RST_PRC_EBUSY)
			return -EBUSY;
	}

	return ret;
}

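/*
 * hns_roce_cmq_send() wraps __hns_roce_cmq_send() with reset awareness:
 * if a reset completed before or during the exchange the command is
 * reported as successful (the hardware state it targeted is gone
 * anyway), and while a reset is still in progress the caller gets
 * -EBUSY so it can retry later.
 */
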
static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_version *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_HW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_version *)desc.data;
	hr_dev->hw_rev = le16_to_cpu(resp->rocee_hw_version);
	hr_dev->vendor_id = hr_dev->pci_dev->vendor;

	return 0;
}

static bool hns_roce_func_clr_chk_rst(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long reset_cnt;
	bool sw_resetting;
	bool hw_resetting;

	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt || hw_resetting || sw_resetting)
		return true;

	return false;
}

static void hns_roce_func_clr_rst_prc(struct hns_roce_dev *hr_dev, int retval,
				      int flag)
{
	struct hns_roce_v2_priv *priv = (struct hns_roce_v2_priv *)hr_dev->priv;
	struct hnae3_handle *handle = priv->handle;
	const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
	unsigned long instance_stage;
	unsigned long reset_cnt;
	unsigned long end;
	bool sw_resetting;
	bool hw_resetting;

	instance_stage = handle->rinfo.instance_state;
	reset_cnt = ops->ae_dev_reset_cnt(handle);
	hw_resetting = ops->get_hw_reset_stat(handle);
	sw_resetting = ops->ae_dev_resetting(handle);

	if (reset_cnt != hr_dev->reset_cnt) {
		hr_dev->dis_db = true;
		hr_dev->is_reset = true;
		dev_info(hr_dev->dev, "Func clear success after reset.\n");
	} else if (hw_resetting) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (!ops->get_hw_reset_stat(handle)) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after reset.\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	} else if (sw_resetting && instance_stage == HNS_ROCE_STATE_INIT) {
		hr_dev->dis_db = true;

		dev_warn(hr_dev->dev,
			 "Func clear is pending, device in resetting state.\n");
		end = HNS_ROCE_V2_HW_RST_TIMEOUT;
		while (end) {
			if (ops->ae_dev_reset_cnt(handle) !=
			    hr_dev->reset_cnt) {
				hr_dev->is_reset = true;
				dev_info(hr_dev->dev,
					 "Func clear success after sw reset\n");
				return;
			}
			msleep(HNS_ROCE_V2_HW_RST_COMPLETION_WAIT);
			end -= HNS_ROCE_V2_HW_RST_COMPLETION_WAIT;
		}

		dev_warn(hr_dev->dev,
			 "Func clear failed because of unfinished sw reset\n");
	} else {
		if (retval && !flag)
			dev_warn(hr_dev->dev,
				 "Func clear read failed, ret = %d.\n", retval);

		dev_warn(hr_dev->dev, "Func clear failed.\n");
	}
}

static void hns_roce_function_clear(struct hns_roce_dev *hr_dev)
{
	bool fclr_write_fail_flag = false;
	struct hns_roce_func_clear *resp;
	struct hns_roce_cmq_desc desc;
	unsigned long end;
	int ret = 0;

	if (hns_roce_func_clr_chk_rst(hr_dev))
		goto out;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR, false);
	resp = (struct hns_roce_func_clear *)desc.data;

	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret) {
		fclr_write_fail_flag = true;
		dev_err(hr_dev->dev, "Func clear write failed, ret = %d.\n",
			ret);
		goto out;
	}

	msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_INTERVAL);
	end = HNS_ROCE_V2_FUNC_CLEAR_TIMEOUT_MSECS;
	while (end) {
		if (hns_roce_func_clr_chk_rst(hr_dev))
			goto out;
		msleep(HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT);
		end -= HNS_ROCE_V2_READ_FUNC_CLEAR_FLAG_FAIL_WAIT;

		hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_FUNC_CLEAR,
					      true);

		ret = hns_roce_cmq_send(hr_dev, &desc, 1);
		if (ret)
			continue;

		if (roce_get_bit(resp->func_done, FUNC_CLEAR_RST_FUN_DONE_S)) {
			hr_dev->is_reset = true;
			return;
		}
	}

out:
	dev_err(hr_dev->dev, "Func clear fail.\n");
	hns_roce_func_clr_rst_prc(hr_dev, ret, fclr_write_fail_flag);
}

static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_query_fw_info *resp;
	struct hns_roce_cmq_desc desc;
	int ret;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	resp = (struct hns_roce_query_fw_info *)desc.data;
	hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));

	return 0;
}

static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cfg_global_param *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
				      false);

	req = (struct hns_roce_cfg_global_param *)desc.data;
	memset(req, 0, sizeof(*req));
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_TIME_1US_CFG_S, 0x3e8);
	roce_set_field(req->time_cfg_udp_port,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_M,
		       CFG_GLOBAL_PARAM_DATA_0_ROCEE_UDP_PORT_S, 0x12b7);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_query_pf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_pf_res_a *req_a;
	struct hns_roce_pf_res_b *req_b;
	int ret;
	int i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_RES, true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_res_a *)desc[0].data;
	req_b = (struct hns_roce_pf_res_b *)desc[1].data;

	hr_dev->caps.qpc_bt_num = roce_get_field(req_a->qpc_bt_idx_num,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_M,
						 PF_RES_DATA_1_PF_QPC_BT_NUM_S);
	hr_dev->caps.srqc_bt_num = roce_get_field(req_a->srqc_bt_idx_num,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_M,
						PF_RES_DATA_2_PF_SRQC_BT_NUM_S);
	hr_dev->caps.cqc_bt_num = roce_get_field(req_a->cqc_bt_idx_num,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_M,
						 PF_RES_DATA_3_PF_CQC_BT_NUM_S);
	hr_dev->caps.mpt_bt_num = roce_get_field(req_a->mpt_bt_idx_num,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_M,
						 PF_RES_DATA_4_PF_MPT_BT_NUM_S);

	hr_dev->caps.sl_num = roce_get_field(req_b->qid_idx_sl_num,
					     PF_RES_DATA_3_PF_SL_NUM_M,
					     PF_RES_DATA_3_PF_SL_NUM_S);
	hr_dev->caps.sccc_bt_num = roce_get_field(req_b->sccc_bt_idx_num,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_M,
					     PF_RES_DATA_4_PF_SCCC_BT_NUM_S);

	return 0;
}

static int hns_roce_query_pf_timer_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_pf_timer_res_a *req_a;
	struct hns_roce_cmq_desc desc[2];
	int ret, i;

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_QUERY_PF_TIMER_RES,
					      true);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
	}

	ret = hns_roce_cmq_send(hr_dev, desc, 2);
	if (ret)
		return ret;

	req_a = (struct hns_roce_pf_timer_res_a *)desc[0].data;

	hr_dev->caps.qpc_timer_bt_num =
			roce_get_field(req_a->qpc_timer_bt_idx_num,
				       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_M,
				       PF_RES_DATA_1_PF_QPC_TIMER_BT_NUM_S);
	hr_dev->caps.cqc_timer_bt_num =
			roce_get_field(req_a->cqc_timer_bt_idx_num,
				       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_M,
				       PF_RES_DATA_2_PF_CQC_TIMER_BT_NUM_S);

	return 0;
}

static int hns_roce_set_vf_switch_param(struct hns_roce_dev *hr_dev,
					int vf_id)
{
	struct hns_roce_cmq_desc desc;
	struct hns_roce_vf_switch *swt;
	int ret;

	swt = (struct hns_roce_vf_switch *)desc.data;
	hns_roce_cmq_setup_basic_desc(&desc, HNS_SWITCH_PARAMETER_CFG, true);
	swt->rocee_sel |= cpu_to_le32(HNS_ICL_SWITCH_CMD_ROCEE_SEL);
	roce_set_field(swt->fun_id,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_M,
		       VF_SWITCH_DATA_FUN_ID_VF_ID_S,
		       vf_id);
	ret = hns_roce_cmq_send(hr_dev, &desc, 1);
	if (ret)
		return ret;

	desc.flag =
		cpu_to_le16(HNS_ROCE_CMD_FLAG_NO_INTR | HNS_ROCE_CMD_FLAG_IN);
	desc.flag &= cpu_to_le16(~HNS_ROCE_CMD_FLAG_WR);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LPBK_S, 1);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_LCL_LPBK_S, 0);
	roce_set_bit(swt->cfg, VF_SWITCH_DATA_CFG_ALW_DST_OVRD_S, 1);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_alloc_vf_resource(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_vf_res_a *req_a;
	struct hns_roce_vf_res_b *req_b;
	int i;

	req_a = (struct hns_roce_vf_res_a *)desc[0].data;
	req_b = (struct hns_roce_vf_res_b *)desc[1].data;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));
	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i],
					      HNS_ROCE_OPC_ALLOC_VF_RES, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_qpc_bt_idx_num,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_M,
				       VF_RES_A_DATA_1_VF_QPC_BT_NUM_S,
				       HNS_ROCE_VF_QPC_BT_NUM);

			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_srqc_bt_idx_num,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_M,
				       VF_RES_A_DATA_2_VF_SRQC_BT_NUM_S,
				       HNS_ROCE_VF_SRQC_BT_NUM);

			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_IDX_S, 0);
			roce_set_field(req_a->vf_cqc_bt_idx_num,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_M,
				       VF_RES_A_DATA_3_VF_CQC_BT_NUM_S,
				       HNS_ROCE_VF_CQC_BT_NUM);

			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_IDX_S, 0);
			roce_set_field(req_a->vf_mpt_bt_idx_num,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_M,
				       VF_RES_A_DATA_4_VF_MPT_BT_NUM_S,
				       HNS_ROCE_VF_MPT_BT_NUM);

			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_IDX_M,
				       VF_RES_A_DATA_5_VF_EQC_IDX_S, 0);
			roce_set_field(req_a->vf_eqc_bt_idx_num,
				       VF_RES_A_DATA_5_VF_EQC_NUM_M,
				       VF_RES_A_DATA_5_VF_EQC_NUM_S,
				       HNS_ROCE_VF_EQC_NUM);
		} else {
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_M,
				       VF_RES_B_DATA_1_VF_SMAC_IDX_S, 0);
			roce_set_field(req_b->vf_smac_idx_num,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_M,
				       VF_RES_B_DATA_1_VF_SMAC_NUM_S,
				       HNS_ROCE_VF_SMAC_NUM);

			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_IDX_M,
				       VF_RES_B_DATA_2_VF_SGID_IDX_S, 0);
			roce_set_field(req_b->vf_sgid_idx_num,
				       VF_RES_B_DATA_2_VF_SGID_NUM_M,
				       VF_RES_B_DATA_2_VF_SGID_NUM_S,
				       HNS_ROCE_VF_SGID_NUM);

			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_QID_IDX_M,
				       VF_RES_B_DATA_3_VF_QID_IDX_S, 0);
			roce_set_field(req_b->vf_qid_idx_sl_num,
				       VF_RES_B_DATA_3_VF_SL_NUM_M,
				       VF_RES_B_DATA_3_VF_SL_NUM_S,
				       HNS_ROCE_VF_SL_NUM);

			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_IDX_S, 0);
			roce_set_field(req_b->vf_sccc_idx_num,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_M,
				       VF_RES_B_DATA_4_VF_SCCC_BT_NUM_S,
				       HNS_ROCE_VF_SCCC_BT_NUM);
		}
	}

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

static int hns_roce_v2_set_bt(struct hns_roce_dev *hr_dev)
{
	u8 srqc_hop_num = hr_dev->caps.srqc_hop_num;
	u8 qpc_hop_num = hr_dev->caps.qpc_hop_num;
	u8 cqc_hop_num = hr_dev->caps.cqc_hop_num;
	u8 mpt_hop_num = hr_dev->caps.mpt_hop_num;
	u8 sccc_hop_num = hr_dev->caps.sccc_hop_num;
	struct hns_roce_cfg_bt_attr *req;
	struct hns_roce_cmq_desc desc;

	hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_BT_ATTR, false);
	req = (struct hns_roce_cfg_bt_attr *)desc.data;
	memset(req, 0, sizeof(*req));

	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S,
		       hr_dev->caps.qpc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_BUF_PGSZ_S,
		       hr_dev->caps.qpc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_qpc_cfg, CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_0_VF_QPC_HOPNUM_S,
		       qpc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : qpc_hop_num);

	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BA_PGSZ_S,
		       hr_dev->caps.srqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_BUF_PGSZ_S,
		       hr_dev->caps.srqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_srqc_cfg, CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_1_VF_SRQC_HOPNUM_S,
		       srqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : srqc_hop_num);

	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BA_PGSZ_S,
		       hr_dev->caps.cqc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_BUF_PGSZ_S,
		       hr_dev->caps.cqc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_cqc_cfg, CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_2_VF_CQC_HOPNUM_S,
		       cqc_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : cqc_hop_num);

	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BA_PGSZ_S,
		       hr_dev->caps.mpt_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_BUF_PGSZ_S,
		       hr_dev->caps.mpt_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_mpt_cfg, CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M,
		       CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_S,
		       mpt_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 : mpt_hop_num);

	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BA_PGSZ_S,
		       hr_dev->caps.sccc_ba_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_BUF_PGSZ_S,
		       hr_dev->caps.sccc_buf_pg_sz + PG_SHIFT_OFFSET);
	roce_set_field(req->vf_sccc_cfg,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_M,
		       CFG_BT_ATTR_DATA_4_VF_SCCC_HOPNUM_S,
		       sccc_hop_num ==
			      HNS_ROCE_HOP_NUM_0 ? 0 : sccc_hop_num);

	return hns_roce_cmq_send(hr_dev, &desc, 1);
}

static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	int ret;

	ret = hns_roce_cmq_query_hw_info(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_query_fw_ver(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
			ret);
		return ret;
	}

	ret = hns_roce_config_global_param(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Configure global param fail, ret = %d.\n",
			ret);
		return ret;
	}

	/* Get the PF resources owned by each PF */
	ret = hns_roce_query_pf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Query pf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_query_pf_timer_resource(hr_dev);
		if (ret) {
			dev_err(hr_dev->dev,
				"Query pf timer resource fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	ret = hns_roce_alloc_vf_resource(hr_dev);
	if (ret) {
		dev_err(hr_dev->dev, "Allocate vf resource fail, ret = %d.\n",
			ret);
		return ret;
	}

	if (hr_dev->pci_dev->revision == 0x21) {
		ret = hns_roce_set_vf_switch_param(hr_dev, 0);
		if (ret) {
			dev_err(hr_dev->dev,
				"Set function switch param fail, ret = %d.\n",
				ret);
			return ret;
		}
	}

	hr_dev->vendor_part_id = hr_dev->pci_dev->device;
	hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);

	caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
	caps->num_srqs = HNS_ROCE_V2_MAX_SRQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
	caps->max_srqwqes = HNS_ROCE_V2_MAX_SRQWQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
	caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
	caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
	caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
	caps->max_srq_sg = HNS_ROCE_V2_MAX_SRQ_SGE_NUM;
	caps->num_uars = HNS_ROCE_V2_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V2_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V2_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V2_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V2_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V2_MAX_MTT_SEGS;
	caps->num_cqe_segs = HNS_ROCE_V2_MAX_CQE_SEGS;
	caps->num_srqwqe_segs = HNS_ROCE_V2_MAX_SRQWQE_SEGS;
	caps->num_idx_segs = HNS_ROCE_V2_MAX_IDX_SEGS;
	caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V2_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V2_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V2_MAX_RQ_DESC_SZ;
	caps->max_srq_desc_sz = HNS_ROCE_V2_MAX_SRQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V2_QPC_ENTRY_SZ;
	caps->irrl_entry_sz = HNS_ROCE_V2_IRRL_ENTRY_SZ;
	caps->trrl_entry_sz = HNS_ROCE_V2_TRRL_ENTRY_SZ;
	caps->cqc_entry_sz = HNS_ROCE_V2_CQC_ENTRY_SZ;
	caps->srqc_entry_sz = HNS_ROCE_V2_SRQC_ENTRY_SZ;
	caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ;
	caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
	caps->idx_entry_sz = 4;
	caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_srqs = 0;
	caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;

	caps->qpc_ba_pg_sz = 0;
	caps->qpc_buf_pg_sz = 0;
	caps->qpc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->srqc_ba_pg_sz = 0;
	caps->srqc_buf_pg_sz = 0;
	caps->srqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->cqc_ba_pg_sz = 0;
	caps->cqc_buf_pg_sz = 0;
	caps->cqc_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->mpt_ba_pg_sz = 0;
	caps->mpt_buf_pg_sz = 0;
	caps->mpt_hop_num = HNS_ROCE_CONTEXT_HOP_NUM;
	caps->pbl_ba_pg_sz = 2;
	caps->pbl_buf_pg_sz = 0;
	caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
	caps->mtt_ba_pg_sz = 0;
	caps->mtt_buf_pg_sz = 0;
	caps->mtt_hop_num = HNS_ROCE_MTT_HOP_NUM;
	caps->wqe_sq_hop_num = 2;
	caps->wqe_sge_hop_num = 1;
	caps->wqe_rq_hop_num = 2;
	caps->cqe_ba_pg_sz = 6;
	caps->cqe_buf_pg_sz = 0;
	caps->cqe_hop_num = HNS_ROCE_CQE_HOP_NUM;
	caps->srqwqe_ba_pg_sz = 0;
	caps->srqwqe_buf_pg_sz = 0;
	caps->srqwqe_hop_num = HNS_ROCE_SRQWQE_HOP_NUM;
	caps->idx_ba_pg_sz = 0;
	caps->idx_buf_pg_sz = 0;
	caps->idx_hop_num = HNS_ROCE_IDX_HOP_NUM;
	caps->eqe_ba_pg_sz = 0;
	caps->eqe_buf_pg_sz = 0;
	caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
	caps->tsq_buf_pg_sz = 0;
	caps->chunk_sz = HNS_ROCE_V2_TABLE_CHUNK_SIZE;

	caps->flags = HNS_ROCE_CAP_FLAG_REREG_MR |
		      HNS_ROCE_CAP_FLAG_ROCE_V1_V2 |
		      HNS_ROCE_CAP_FLAG_RQ_INLINE |
		      HNS_ROCE_CAP_FLAG_RECORD_DB |
		      HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;

	if (hr_dev->pci_dev->revision == 0x21)
		caps->flags |= HNS_ROCE_CAP_FLAG_MW |
			       HNS_ROCE_CAP_FLAG_FRMR;

	caps->pkey_table_len[0] = 1;
	caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
	caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V2_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = 0;
	caps->max_mtu = IB_MTU_4096;

	caps->max_srqs = HNS_ROCE_V2_MAX_SRQ;
	caps->max_srq_wrs = HNS_ROCE_V2_MAX_SRQ_WR;
	caps->max_srq_sges = HNS_ROCE_V2_MAX_SRQ_SGE;

	if (hr_dev->pci_dev->revision == 0x21) {
		caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC |
			       HNS_ROCE_CAP_FLAG_SRQ |
			       HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL;

		caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
		caps->qpc_timer_entry_sz = HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ;
		caps->qpc_timer_ba_pg_sz = 0;
		caps->qpc_timer_buf_pg_sz = 0;
		caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
		caps->num_cqc_timer = HNS_ROCE_V2_MAX_CQC_TIMER_NUM;
		caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
		caps->cqc_timer_ba_pg_sz = 0;
		caps->cqc_timer_buf_pg_sz = 0;
		caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;

		caps->sccc_entry_sz = HNS_ROCE_V2_SCCC_ENTRY_SZ;
		caps->sccc_ba_pg_sz = 0;
		caps->sccc_buf_pg_sz = 0;
		caps->sccc_hop_num = HNS_ROCE_SCCC_HOP_NUM;
	}

	ret = hns_roce_v2_set_bt(hr_dev);
	if (ret)
		dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
			ret);

	return ret;
}

static int hns_roce_config_link_table(struct hns_roce_dev *hr_dev,
				      enum hns_roce_link_table_type type)
{
	struct hns_roce_cmq_desc desc[2];
	struct hns_roce_cfg_llm_a *req_a =
				(struct hns_roce_cfg_llm_a *)desc[0].data;
	struct hns_roce_cfg_llm_b *req_b =
				(struct hns_roce_cfg_llm_b *)desc[1].data;
	struct hns_roce_v2_priv *priv = hr_dev->priv;
	struct hns_roce_link_table *link_tbl;
	struct hns_roce_link_table_entry *entry;
	enum hns_roce_opcode_type opcode;
	u32 page_num;
	int i;

	switch (type) {
	case TSQ_LINK_TABLE:
		link_tbl = &priv->tsq;
		opcode = HNS_ROCE_OPC_CFG_EXT_LLM;
		break;
	case TPQ_LINK_TABLE:
		link_tbl = &priv->tpq;
		opcode = HNS_ROCE_OPC_CFG_TMOUT_LLM;
		break;
	default:
		return -EINVAL;
	}

	page_num = link_tbl->npages;
	entry = link_tbl->table.buf;
	memset(req_a, 0, sizeof(*req_a));
	memset(req_b, 0, sizeof(*req_b));

	for (i = 0; i < 2; i++) {
		hns_roce_cmq_setup_basic_desc(&desc[i], opcode, false);

		if (i == 0)
			desc[i].flag |= cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HNS_ROCE_CMD_FLAG_NEXT);

		if (i == 0) {
			req_a->base_addr_l =
				cpu_to_le32(link_tbl->table.map & 0xffffffff);
			req_a->base_addr_h =
				cpu_to_le32(link_tbl->table.map >> 32);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_DEPTH_M,
				       CFG_LLM_QUE_DEPTH_S,
				       link_tbl->npages);
			roce_set_field(req_a->depth_pgsz_init_en,
				       CFG_LLM_QUE_PGSZ_M,
				       CFG_LLM_QUE_PGSZ_S,
				       link_tbl->pg_sz);
			req_a->head_ba_l = cpu_to_le32(entry[0].blk_ba0);
			req_a->head_ba_h_nxtptr =
				cpu_to_le32(entry[0].blk_ba1_nxt_ptr);
			roce_set_field(req_a->head_ptr,
				       CFG_LLM_HEAD_PTR_M,
				       CFG_LLM_HEAD_PTR_S, 0);
		} else {
			req_b->tail_ba_l =
				cpu_to_le32(entry[page_num - 1].blk_ba0);
			roce_set_field(req_b->tail_ba_h,
				       CFG_LLM_TAIL_BA_H_M,
				       CFG_LLM_TAIL_BA_H_S,
				       entry[page_num - 1].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_BA1_M);
			roce_set_field(req_b->tail_ptr,
				       CFG_LLM_TAIL_PTR_M,
				       CFG_LLM_TAIL_PTR_S,
				       (entry[page_num - 2].blk_ba1_nxt_ptr &
				       HNS_ROCE_LINK_TABLE_NXT_PTR_M) >>
				       HNS_ROCE_LINK_TABLE_NXT_PTR_S);
		}
	}

	roce_set_field(req_a->depth_pgsz_init_en,
		       CFG_LLM_INIT_EN_M, CFG_LLM_INIT_EN_S, 1);

	return hns_roce_cmq_send(hr_dev, desc, 2);
}

1856 static int hns_roce_init_link_table(struct hns_roce_dev *hr_dev,
1857 enum hns_roce_link_table_type type)
1859 struct hns_roce_v2_priv *priv = hr_dev->priv;
1860 struct hns_roce_link_table *link_tbl;
1861 struct hns_roce_link_table_entry *entry;
1862 struct device *dev = hr_dev->dev;
1863 u32 buf_chk_sz;
1864 dma_addr_t t;
1865 int func_num = 1;
1866 int pg_num_a;
1867 int pg_num_b;
1868 int pg_num;
1869 int size;
1870 int i;
1872 switch (type) {
1873 case TSQ_LINK_TABLE:
1874 link_tbl = &priv->tsq;
1875 buf_chk_sz = 1 << (hr_dev->caps.tsq_buf_pg_sz + PAGE_SHIFT);
1876 pg_num_a = hr_dev->caps.num_qps * 8 / buf_chk_sz;
1877 pg_num_b = hr_dev->caps.sl_num * 4 + 2;
1878 break;
1879 case TPQ_LINK_TABLE:
1880 link_tbl = &priv->tpq;
1881 buf_chk_sz = 1 << (hr_dev->caps.tpq_buf_pg_sz + PAGE_SHIFT);
1882 pg_num_a = hr_dev->caps.num_cqs * 4 / buf_chk_sz;
1883 pg_num_b = 2 * 4 * func_num + 2;
1884 break;
1885 default:
1886 return -EINVAL;
1887 }
1889 pg_num = max(pg_num_a, pg_num_b);
1890 size = pg_num * sizeof(struct hns_roce_link_table_entry);
1892 link_tbl->table.buf = dma_alloc_coherent(dev, size,
1893 &link_tbl->table.map,
1894 GFP_KERNEL);
1895 if (!link_tbl->table.buf)
1896 goto out;
1898 link_tbl->pg_list = kcalloc(pg_num, sizeof(*link_tbl->pg_list),
1899 GFP_KERNEL);
1900 if (!link_tbl->pg_list)
1901 goto err_kcalloc_failed;
1903 entry = link_tbl->table.buf;
1904 for (i = 0; i < pg_num; ++i) {
1905 link_tbl->pg_list[i].buf = dma_alloc_coherent(dev, buf_chk_sz,
1906 &t, GFP_KERNEL);
1907 if (!link_tbl->pg_list[i].buf)
1908 goto err_alloc_buf_failed;
1910 link_tbl->pg_list[i].map = t;
1912 entry[i].blk_ba0 = (u32)(t >> 12);
1913 entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44);
1915 if (i < (pg_num - 1))
1916 entry[i].blk_ba1_nxt_ptr |=
1917 (i + 1) << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
1918 }
1920 link_tbl->npages = pg_num;
1921 link_tbl->pg_sz = buf_chk_sz;
1923 return hns_roce_config_link_table(hr_dev, type);
1925 err_alloc_buf_failed:
1926 for (i -= 1; i >= 0; i--)
1927 dma_free_coherent(dev, buf_chk_sz,
1928 link_tbl->pg_list[i].buf,
1929 link_tbl->pg_list[i].map);
1930 kfree(link_tbl->pg_list);
1932 err_kcalloc_failed:
1933 dma_free_coherent(dev, size, link_tbl->table.buf,
1934 link_tbl->table.map);
1936 out:
1937 return -ENOMEM;
1938 }
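/*
 * Illustrative note, not part of the original driver: each link table
 * entry packs a page base address plus the index of the next page.
 * blk_ba0 holds bits [43:12] of the dma address and blk_ba1_nxt_ptr
 * holds bits [63:44] in its low bits, with the next index placed at
 * HNS_ROCE_LINK_TABLE_NXT_PTR_S. For a page at t = 0x456789a000 whose
 * successor is page 7, the packing in the loop above yields:
 *
 *	entry[i].blk_ba0         = (u32)(t >> 12);	(0x456789a)
 *	entry[i].blk_ba1_nxt_ptr = (u32)(t >> 44)	(0x0)
 *	                         | 7 << HNS_ROCE_LINK_TABLE_NXT_PTR_S;
 *
 * The last page leaves the next-pointer field clear, ending the chain
 * whose head and tail hns_roce_config_link_table() reports to firmware.
 */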
1940 static void hns_roce_free_link_table(struct hns_roce_dev *hr_dev,
1941 struct hns_roce_link_table *link_tbl)
1943 struct device *dev = hr_dev->dev;
1944 int size;
1945 int i;
1947 size = link_tbl->npages * sizeof(struct hns_roce_link_table_entry);
1949 for (i = 0; i < link_tbl->npages; ++i)
1950 if (link_tbl->pg_list[i].buf)
1951 dma_free_coherent(dev, link_tbl->pg_sz,
1952 link_tbl->pg_list[i].buf,
1953 link_tbl->pg_list[i].map);
1954 kfree(link_tbl->pg_list);
1956 dma_free_coherent(dev, size, link_tbl->table.buf,
1957 link_tbl->table.map);
1960 static int hns_roce_v2_init(struct hns_roce_dev *hr_dev)
1962 struct hns_roce_v2_priv *priv = hr_dev->priv;
1963 int qpc_count, cqc_count;
1964 int ret, i;
1966 /* TSQ includes SQ doorbell and ack doorbell */
1967 ret = hns_roce_init_link_table(hr_dev, TSQ_LINK_TABLE);
1968 if (ret) {
1969 dev_err(hr_dev->dev, "TSQ init failed, ret = %d.\n", ret);
1970 return ret;
1971 }
1973 ret = hns_roce_init_link_table(hr_dev, TPQ_LINK_TABLE);
1974 if (ret) {
1975 dev_err(hr_dev->dev, "TPQ init failed, ret = %d.\n", ret);
1976 goto err_tpq_init_failed;
1977 }
1979 /* Alloc memory for QPC Timer buffer space chunk */
1980 for (qpc_count = 0; qpc_count < hr_dev->caps.qpc_timer_bt_num;
1981 qpc_count++) {
1982 ret = hns_roce_table_get(hr_dev, &hr_dev->qpc_timer_table,
1983 qpc_count);
1984 if (ret) {
1985 dev_err(hr_dev->dev, "QPC Timer get failed\n");
1986 goto err_qpc_timer_failed;
1987 }
1988 }
1990 /* Alloc memory for CQC Timer buffer space chunk */
1991 for (cqc_count = 0; cqc_count < hr_dev->caps.cqc_timer_bt_num;
1992 cqc_count++) {
1993 ret = hns_roce_table_get(hr_dev, &hr_dev->cqc_timer_table,
1994 cqc_count);
1995 if (ret) {
1996 dev_err(hr_dev->dev, "CQC Timer get failed\n");
1997 goto err_cqc_timer_failed;
1998 }
1999 }
2001 return 0;
2003 err_cqc_timer_failed:
2004 for (i = 0; i < cqc_count; i++)
2005 hns_roce_table_put(hr_dev, &hr_dev->cqc_timer_table, i);
2007 err_qpc_timer_failed:
2008 for (i = 0; i < qpc_count; i++)
2009 hns_roce_table_put(hr_dev, &hr_dev->qpc_timer_table, i);
2011 hns_roce_free_link_table(hr_dev, &priv->tpq);
2013 err_tpq_init_failed:
2014 hns_roce_free_link_table(hr_dev, &priv->tsq);
2016 return ret;
2019 static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
2021 struct hns_roce_v2_priv *priv = hr_dev->priv;
2023 if (hr_dev->pci_dev->revision == 0x21)
2024 hns_roce_function_clear(hr_dev);
2026 hns_roce_free_link_table(hr_dev, &priv->tpq);
2027 hns_roce_free_link_table(hr_dev, &priv->tsq);
2030 static int hns_roce_query_mbox_status(struct hns_roce_dev *hr_dev)
2032 struct hns_roce_cmq_desc desc;
2033 struct hns_roce_mbox_status *mb_st =
2034 (struct hns_roce_mbox_status *)desc.data;
2035 enum hns_roce_cmd_return_status status;
2037 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_QUERY_MB_ST, true);
2039 status = hns_roce_cmq_send(hr_dev, &desc, 1);
2040 if (status)
2041 return status;
2043 return le32_to_cpu(mb_st->mb_status_hw_run);
2046 static int hns_roce_v2_cmd_pending(struct hns_roce_dev *hr_dev)
2048 u32 status = hns_roce_query_mbox_status(hr_dev);
2050 return status >> HNS_ROCE_HW_RUN_BIT_SHIFT;
2053 static int hns_roce_v2_cmd_complete(struct hns_roce_dev *hr_dev)
2055 u32 status = hns_roce_query_mbox_status(hr_dev);
2057 return status & HNS_ROCE_HW_MB_STATUS_MASK;
2058 }
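/*
 * Illustrative note, not part of the original driver: QUERY_MB_ST
 * returns one little-endian word combining a busy flag and the result
 * of the last mailbox command. Under that layout, a caller could decode
 * it as:
 *
 *	u32 st = hns_roce_query_mbox_status(hr_dev);
 *	bool busy = st >> HNS_ROCE_HW_RUN_BIT_SHIFT;
 *	u32 result = st & HNS_ROCE_HW_MB_STATUS_MASK;
 *
 * hns_roce_v2_chk_mbox() below treats result == 0x1 as success and
 * CMD_RST_PRC_EBUSY as a retryable condition.
 */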
2060 static int hns_roce_mbox_post(struct hns_roce_dev *hr_dev, u64 in_param,
2061 u64 out_param, u32 in_modifier, u8 op_modifier,
2062 u16 op, u16 token, int event)
2064 struct hns_roce_cmq_desc desc;
2065 struct hns_roce_post_mbox *mb = (struct hns_roce_post_mbox *)desc.data;
2067 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_POST_MB, false);
2069 mb->in_param_l = cpu_to_le32(in_param);
2070 mb->in_param_h = cpu_to_le32(in_param >> 32);
2071 mb->out_param_l = cpu_to_le32(out_param);
2072 mb->out_param_h = cpu_to_le32(out_param >> 32);
2073 mb->cmd_tag = cpu_to_le32(in_modifier << 8 | op);
2074 mb->token_event_en = cpu_to_le32(event << 16 | token);
2076 return hns_roce_cmq_send(hr_dev, &desc, 1);
2079 static int hns_roce_v2_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
2080 u64 out_param, u32 in_modifier, u8 op_modifier,
2081 u16 op, u16 token, int event)
2083 struct device *dev = hr_dev->dev;
2084 unsigned long end;
2085 int ret;
2087 end = msecs_to_jiffies(HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS) + jiffies;
2088 while (hns_roce_v2_cmd_pending(hr_dev)) {
2089 if (time_after(jiffies, end)) {
2090 dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
2091 (int)end);
2092 return -EAGAIN;
2093 }
2094 cond_resched();
2095 }
2097 ret = hns_roce_mbox_post(hr_dev, in_param, out_param, in_modifier,
2098 op_modifier, op, token, event);
2099 if (ret)
2100 dev_err(dev, "Post mailbox failed (%d)\n", ret);
2102 return ret;
2105 static int hns_roce_v2_chk_mbox(struct hns_roce_dev *hr_dev,
2106 unsigned long timeout)
2108 struct device *dev = hr_dev->dev;
2109 unsigned long end;
2110 u32 status;
2112 end = msecs_to_jiffies(timeout) + jiffies;
2113 while (hns_roce_v2_cmd_pending(hr_dev) && time_before(jiffies, end))
2114 cond_resched();
2116 if (hns_roce_v2_cmd_pending(hr_dev)) {
2117 dev_err(dev, "[cmd_poll] hw run cmd TIMEDOUT!\n");
2118 return -ETIMEDOUT;
2119 }
2121 status = hns_roce_v2_cmd_complete(hr_dev);
2122 if (status != 0x1) {
2123 if (status == CMD_RST_PRC_EBUSY)
2124 return status;
2126 dev_err(dev, "mailbox status 0x%x!\n", status);
2127 return -EBUSY;
2128 }
2130 return 0;
2133 static int hns_roce_config_sgid_table(struct hns_roce_dev *hr_dev,
2134 int gid_index, const union ib_gid *gid,
2135 enum hns_roce_sgid_type sgid_type)
2137 struct hns_roce_cmq_desc desc;
2138 struct hns_roce_cfg_sgid_tb *sgid_tb =
2139 (struct hns_roce_cfg_sgid_tb *)desc.data;
2140 u32 *p;
2142 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SGID_TB, false);
2144 roce_set_field(sgid_tb->table_idx_rsv,
2145 CFG_SGID_TB_TABLE_IDX_M,
2146 CFG_SGID_TB_TABLE_IDX_S, gid_index);
2147 roce_set_field(sgid_tb->vf_sgid_type_rsv,
2148 CFG_SGID_TB_VF_SGID_TYPE_M,
2149 CFG_SGID_TB_VF_SGID_TYPE_S, sgid_type);
2151 p = (u32 *)&gid->raw[0];
2152 sgid_tb->vf_sgid_l = cpu_to_le32(*p);
2154 p = (u32 *)&gid->raw[4];
2155 sgid_tb->vf_sgid_ml = cpu_to_le32(*p);
2157 p = (u32 *)&gid->raw[8];
2158 sgid_tb->vf_sgid_mh = cpu_to_le32(*p);
2160 p = (u32 *)&gid->raw[0xc];
2161 sgid_tb->vf_sgid_h = cpu_to_le32(*p);
2163 return hns_roce_cmq_send(hr_dev, &desc, 1);
2166 static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
2167 int gid_index, const union ib_gid *gid,
2168 const struct ib_gid_attr *attr)
2170 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
2171 int ret;
2173 if (!gid || !attr)
2174 return -EINVAL;
2176 if (attr->gid_type == IB_GID_TYPE_ROCE)
2177 sgid_type = GID_TYPE_FLAG_ROCE_V1;
2179 if (attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
2180 if (ipv6_addr_v4mapped((void *)gid))
2181 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV4;
2182 else
2183 sgid_type = GID_TYPE_FLAG_ROCE_V2_IPV6;
2184 }
2186 ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
2187 if (ret)
2188 dev_err(hr_dev->dev, "Configure sgid table failed (%d)!\n", ret);
2190 return ret;
2193 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
2194 u8 *addr)
2196 struct hns_roce_cmq_desc desc;
2197 struct hns_roce_cfg_smac_tb *smac_tb =
2198 (struct hns_roce_cfg_smac_tb *)desc.data;
2199 u16 reg_smac_h;
2200 u32 reg_smac_l;
2202 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_SMAC_TB, false);
2204 reg_smac_l = *(u32 *)(&addr[0]);
2205 reg_smac_h = *(u16 *)(&addr[4]);
2207 memset(smac_tb, 0, sizeof(*smac_tb));
2208 roce_set_field(smac_tb->tb_idx_rsv,
2209 CFG_SMAC_TB_IDX_M,
2210 CFG_SMAC_TB_IDX_S, phy_port);
2211 roce_set_field(smac_tb->vf_smac_h_rsv,
2212 CFG_SMAC_TB_VF_SMAC_H_M,
2213 CFG_SMAC_TB_VF_SMAC_H_S, reg_smac_h);
2214 smac_tb->vf_smac_l = cpu_to_le32(reg_smac_l);
2216 return hns_roce_cmq_send(hr_dev, &desc, 1);
2219 static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
2220 struct hns_roce_mr *mr)
2222 struct sg_dma_page_iter sg_iter;
2223 u64 page_addr;
2224 u64 *pages;
2225 int i;
2227 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2228 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2229 roce_set_field(mpt_entry->byte_48_mode_ba,
2230 V2_MPT_BYTE_48_PBL_BA_H_M, V2_MPT_BYTE_48_PBL_BA_H_S,
2231 upper_32_bits(mr->pbl_ba >> 3));
2233 pages = (u64 *)__get_free_page(GFP_KERNEL);
2234 if (!pages)
2235 return -ENOMEM;
2237 i = 0;
2238 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
2239 page_addr = sg_page_iter_dma_address(&sg_iter);
2240 pages[i] = page_addr >> 6;
2242 /* Record the first 2 entries directly in the MTPT table */
2243 if (i >= HNS_ROCE_V2_MAX_INNER_MTPT_NUM - 1)
2244 goto found;
2245 i++;
2246 }
2247 found:
2248 mpt_entry->pa0_l = cpu_to_le32(lower_32_bits(pages[0]));
2249 roce_set_field(mpt_entry->byte_56_pa0_h, V2_MPT_BYTE_56_PA0_H_M,
2250 V2_MPT_BYTE_56_PA0_H_S, upper_32_bits(pages[0]));
2252 mpt_entry->pa1_l = cpu_to_le32(lower_32_bits(pages[1]));
2253 roce_set_field(mpt_entry->byte_64_buf_pa1, V2_MPT_BYTE_64_PA1_H_M,
2254 V2_MPT_BYTE_64_PA1_H_S, upper_32_bits(pages[1]));
2255 roce_set_field(mpt_entry->byte_64_buf_pa1,
2256 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2257 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2258 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2260 free_page((unsigned long)pages);
2262 return 0;
2263 }
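/*
 * Illustrative note, not part of the original driver: only the first
 * HNS_ROCE_V2_MAX_INNER_MTPT_NUM page addresses are recorded inline in
 * the MTPT (pa0/pa1 above); the remaining pages are reached through the
 * PBL whose base address was written into byte_48_mode_ba. Page
 * addresses are stored shifted right by 6, so a page at dma address
 * 0x12345000 is recorded as 0x48d140 and split across pa0_l and the
 * V2_MPT_BYTE_56_PA0_H field.
 */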
2265 static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
2266 unsigned long mtpt_idx)
2268 struct hns_roce_v2_mpt_entry *mpt_entry;
2269 int ret;
2271 mpt_entry = mb_buf;
2272 memset(mpt_entry, 0, sizeof(*mpt_entry));
2274 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2275 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2276 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2277 V2_MPT_BYTE_4_PBL_HOP_NUM_S, mr->pbl_hop_num ==
2278 HNS_ROCE_HOP_NUM_0 ? 0 : mr->pbl_hop_num);
2279 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2280 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2281 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2282 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2283 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2284 V2_MPT_BYTE_4_PD_S, mr->pd);
2286 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
2287 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 0);
2288 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2289 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
2290 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
2291 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
2292 mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2293 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2294 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
2295 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2296 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
2297 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2298 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
2300 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S,
2301 mr->type == MR_TYPE_MR ? 0 : 1);
2302 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_INNER_PA_VLD_S,
2303 1);
2305 mpt_entry->len_l = cpu_to_le32(lower_32_bits(mr->size));
2306 mpt_entry->len_h = cpu_to_le32(upper_32_bits(mr->size));
2307 mpt_entry->lkey = cpu_to_le32(mr->key);
2308 mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
2309 mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
2311 if (mr->type == MR_TYPE_DMA)
2312 return 0;
2314 ret = set_mtpt_pbl(mpt_entry, mr);
2316 return ret;
2319 static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
2320 struct hns_roce_mr *mr, int flags,
2321 u32 pdn, int mr_access_flags, u64 iova,
2322 u64 size, void *mb_buf)
2324 struct hns_roce_v2_mpt_entry *mpt_entry = mb_buf;
2325 int ret = 0;
2327 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2328 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_VALID);
2330 if (flags & IB_MR_REREG_PD) {
2331 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2332 V2_MPT_BYTE_4_PD_S, pdn);
2333 mr->pd = pdn;
2334 }
2336 if (flags & IB_MR_REREG_ACCESS) {
2337 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2338 V2_MPT_BYTE_8_BIND_EN_S,
2339 (mr_access_flags & IB_ACCESS_MW_BIND ? 1 : 0));
2340 roce_set_bit(mpt_entry->byte_8_mw_cnt_en,
2341 V2_MPT_BYTE_8_ATOMIC_EN_S,
2342 mr_access_flags & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
2343 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
2344 mr_access_flags & IB_ACCESS_REMOTE_READ ? 1 : 0);
2345 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
2346 mr_access_flags & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
2347 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S,
2348 mr_access_flags & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
2349 }
2351 if (flags & IB_MR_REREG_TRANS) {
2352 mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
2353 mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
2354 mpt_entry->len_l = cpu_to_le32(lower_32_bits(size));
2355 mpt_entry->len_h = cpu_to_le32(upper_32_bits(size));
2357 mr->iova = iova;
2358 mr->size = size;
2360 ret = set_mtpt_pbl(mpt_entry, mr);
2361 }
2363 return ret;
2366 static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
2368 struct hns_roce_v2_mpt_entry *mpt_entry;
2370 mpt_entry = mb_buf;
2371 memset(mpt_entry, 0, sizeof(*mpt_entry));
2373 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2374 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2375 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2376 V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
2377 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2378 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2379 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2380 mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2381 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2382 V2_MPT_BYTE_4_PD_S, mr->pd);
2384 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
2385 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2386 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2388 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
2389 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2390 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
2391 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2393 mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
2395 mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
2396 roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
2397 V2_MPT_BYTE_48_PBL_BA_H_S,
2398 upper_32_bits(mr->pbl_ba >> 3));
2400 roce_set_field(mpt_entry->byte_64_buf_pa1,
2401 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2402 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2403 mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2405 return 0;
2408 static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
2410 struct hns_roce_v2_mpt_entry *mpt_entry;
2412 mpt_entry = mb_buf;
2413 memset(mpt_entry, 0, sizeof(*mpt_entry));
2415 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
2416 V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
2417 roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
2418 V2_MPT_BYTE_4_PD_S, mw->pdn);
2419 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2420 V2_MPT_BYTE_4_PBL_HOP_NUM_M,
2421 V2_MPT_BYTE_4_PBL_HOP_NUM_S,
2422 mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
2423 0 : mw->pbl_hop_num);
2424 roce_set_field(mpt_entry->byte_4_pd_hop_st,
2425 V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
2426 V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
2427 mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
2429 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
2430 roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2432 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
2433 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
2434 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
2435 roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
2436 mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
2438 roce_set_field(mpt_entry->byte_64_buf_pa1,
2439 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
2440 V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
2441 mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
2443 mpt_entry->lkey = cpu_to_le32(mw->rkey);
2445 return 0;
2448 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2450 return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V2_CQE_ENTRY_SIZE);
2453 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)
2455 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
2457 /* Get the cqe when the Owner bit is opposite to the MSB of cons_idx */
2458 return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
2459 !!(n & hr_cq->cq_depth)) ? cqe : NULL;
2460 }
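/*
 * Illustrative note, not part of the original driver: the owner bit
 * written by hardware toggles on every lap of the ring, and
 * !!(n & cq_depth) toggles on every lap of the consumer index, since
 * cq_depth is the power-of-two bit just above the index mask
 * (ib_cq.cqe). With cq_depth = 256 and cons_index = 260, the parity is
 * 1, so a stale cqe still carrying owner = 1 from the previous lap
 * yields 1 ^ 1 = 0 (NULL, nothing new), while a freshly written cqe
 * with owner = 0 yields 0 ^ 1 = 1 and is returned.
 */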
2462 static struct hns_roce_v2_cqe *next_cqe_sw_v2(struct hns_roce_cq *hr_cq)
2464 return get_sw_cqe_v2(hr_cq, hr_cq->cons_index);
2467 static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
2469 return hns_roce_buf_offset(&srq->buf, n << srq->wqe_shift);
2472 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
2474 /* always called with interrupts disabled. */
2475 spin_lock(&srq->lock);
2477 bitmap_clear(srq->idx_que.bitmap, wqe_index, 1);
2478 srq->tail++;
2480 spin_unlock(&srq->lock);
2483 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
2485 *hr_cq->set_ci_db = cons_index & 0xffffff;
2488 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2489 struct hns_roce_srq *srq)
2491 struct hns_roce_v2_cqe *cqe, *dest;
2492 u32 prod_index;
2493 int nfreed = 0;
2494 int wqe_index;
2495 u8 owner_bit;
2497 for (prod_index = hr_cq->cons_index; get_sw_cqe_v2(hr_cq, prod_index);
2498 ++prod_index) {
2499 if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
2500 break;
2501 }
2503 /*
2504 * Now backwards through the CQ, removing CQ entries
2505 * that match our QP by overwriting them with next entries.
2506 */
2507 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2508 cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2509 if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2510 V2_CQE_BYTE_16_LCL_QPN_S) &
2511 HNS_ROCE_V2_CQE_QPN_MASK) == qpn) {
2512 if (srq &&
2513 roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
2514 wqe_index = roce_get_field(cqe->byte_4,
2515 V2_CQE_BYTE_4_WQE_INDX_M,
2516 V2_CQE_BYTE_4_WQE_INDX_S);
2517 hns_roce_free_srq_wqe(srq, wqe_index);
2518 }
2519 ++nfreed;
2520 } else if (nfreed) {
2521 dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
2522 hr_cq->ib_cq.cqe);
2523 owner_bit = roce_get_bit(dest->byte_4,
2524 V2_CQE_BYTE_4_OWNER_S);
2525 memcpy(dest, cqe, sizeof(*cqe));
2526 roce_set_bit(dest->byte_4, V2_CQE_BYTE_4_OWNER_S,
2527 owner_bit);
2528 }
2529 }
2531 if (nfreed) {
2532 hr_cq->cons_index += nfreed;
2533 /*
2534 * Make sure update of buffer contents is done before
2535 * updating consumer index.
2536 */
2537 wmb();
2538 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2539 }
2540 }
2542 static void hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2543 struct hns_roce_srq *srq)
2545 spin_lock_irq(&hr_cq->lock);
2546 __hns_roce_v2_cq_clean(hr_cq, qpn, srq);
2547 spin_unlock_irq(&hr_cq->lock);
2550 static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
2551 struct hns_roce_cq *hr_cq, void *mb_buf,
2552 u64 *mtts, dma_addr_t dma_handle)
2554 struct hns_roce_v2_cq_context *cq_context;
2556 cq_context = mb_buf;
2557 memset(cq_context, 0, sizeof(*cq_context));
2559 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CQ_ST_M,
2560 V2_CQC_BYTE_4_CQ_ST_S, V2_CQ_STATE_VALID);
2561 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_ARM_ST_M,
2562 V2_CQC_BYTE_4_ARM_ST_S, REG_NXT_CEQE);
2563 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_SHIFT_M,
2564 V2_CQC_BYTE_4_SHIFT_S,
2565 ilog2(hr_cq->cq_depth));
2566 roce_set_field(cq_context->byte_4_pg_ceqn, V2_CQC_BYTE_4_CEQN_M,
2567 V2_CQC_BYTE_4_CEQN_S, hr_cq->vector);
2569 roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M,
2570 V2_CQC_BYTE_8_CQN_S, hr_cq->cqn);
2572 cq_context->cqe_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
2574 roce_set_field(cq_context->byte_16_hop_addr,
2575 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M,
2576 V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S,
2577 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
2578 roce_set_field(cq_context->byte_16_hop_addr,
2579 V2_CQC_BYTE_16_CQE_HOP_NUM_M,
2580 V2_CQC_BYTE_16_CQE_HOP_NUM_S, hr_dev->caps.cqe_hop_num ==
2581 HNS_ROCE_HOP_NUM_0 ? 0 : hr_dev->caps.cqe_hop_num);
2583 cq_context->cqe_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
2584 roce_set_field(cq_context->byte_24_pgsz_addr,
2585 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_M,
2586 V2_CQC_BYTE_24_CQE_NXT_BLK_ADDR_S,
2587 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
2588 roce_set_field(cq_context->byte_24_pgsz_addr,
2589 V2_CQC_BYTE_24_CQE_BA_PG_SZ_M,
2590 V2_CQC_BYTE_24_CQE_BA_PG_SZ_S,
2591 hr_dev->caps.cqe_ba_pg_sz + PG_SHIFT_OFFSET);
2592 roce_set_field(cq_context->byte_24_pgsz_addr,
2593 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_M,
2594 V2_CQC_BYTE_24_CQE_BUF_PG_SZ_S,
2595 hr_dev->caps.cqe_buf_pg_sz + PG_SHIFT_OFFSET);
2597 cq_context->cqe_ba = cpu_to_le32(dma_handle >> 3);
2599 roce_set_field(cq_context->byte_40_cqe_ba, V2_CQC_BYTE_40_CQE_BA_M,
2600 V2_CQC_BYTE_40_CQE_BA_S, (dma_handle >> (32 + 3)));
2602 if (hr_cq->db_en)
2603 roce_set_bit(cq_context->byte_44_db_record,
2604 V2_CQC_BYTE_44_DB_RECORD_EN_S, 1);
2606 roce_set_field(cq_context->byte_44_db_record,
2607 V2_CQC_BYTE_44_DB_RECORD_ADDR_M,
2608 V2_CQC_BYTE_44_DB_RECORD_ADDR_S,
2609 ((u32)hr_cq->db.dma) >> 1);
2610 cq_context->db_record_addr = cpu_to_le32(hr_cq->db.dma >> 32);
2612 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2613 V2_CQC_BYTE_56_CQ_MAX_CNT_M,
2614 V2_CQC_BYTE_56_CQ_MAX_CNT_S,
2615 HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM);
2616 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
2617 V2_CQC_BYTE_56_CQ_PERIOD_M,
2618 V2_CQC_BYTE_56_CQ_PERIOD_S,
2619 HNS_ROCE_V2_CQ_DEFAULT_INTERVAL);
2622 static int hns_roce_v2_req_notify_cq(struct ib_cq *ibcq,
2623 enum ib_cq_notify_flags flags)
2625 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
2626 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2627 u32 notification_flag;
2628 __le32 doorbell[2];
2630 doorbell[0] = 0;
2631 doorbell[1] = 0;
2633 notification_flag = (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
2634 V2_CQ_DB_REQ_NOT : V2_CQ_DB_REQ_NOT_SOL;
2635 /*
2636 * flags = 0; Notification Flag = 1, next
2637 * flags = 1; Notification Flag = 0, solicited
2638 */
2639 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_TAG_M, V2_DB_BYTE_4_TAG_S,
2640 hr_cq->cqn);
2641 roce_set_field(doorbell[0], V2_CQ_DB_BYTE_4_CMD_M, V2_DB_BYTE_4_CMD_S,
2642 HNS_ROCE_V2_CQ_DB_NTR);
2643 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CONS_IDX_M,
2644 V2_CQ_DB_PARAMETER_CONS_IDX_S,
2645 hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2646 roce_set_field(doorbell[1], V2_CQ_DB_PARAMETER_CMD_SN_M,
2647 V2_CQ_DB_PARAMETER_CMD_SN_S, hr_cq->arm_sn & 0x3);
2648 roce_set_bit(doorbell[1], V2_CQ_DB_PARAMETER_NOTIFY_S,
2649 notification_flag);
2651 hns_roce_write64(hr_dev, doorbell, hr_cq->cq_db_l);
2653 return 0;
2656 static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
2657 struct hns_roce_qp **cur_qp,
2658 struct ib_wc *wc)
2660 struct hns_roce_rinl_sge *sge_list;
2661 u32 wr_num, wr_cnt, sge_num;
2662 u32 sge_cnt, data_len, size;
2663 void *wqe_buf;
2665 wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
2666 V2_CQE_BYTE_4_WQE_INDX_S) & 0xffff;
2667 wr_cnt = wr_num & ((*cur_qp)->rq.wqe_cnt - 1);
2669 sge_list = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sg_list;
2670 sge_num = (*cur_qp)->rq_inl_buf.wqe_list[wr_cnt].sge_cnt;
2671 wqe_buf = get_recv_wqe(*cur_qp, wr_cnt);
2672 data_len = wc->byte_len;
2674 for (sge_cnt = 0; (sge_cnt < sge_num) && (data_len); sge_cnt++) {
2675 size = min(sge_list[sge_cnt].len, data_len);
2676 memcpy((void *)sge_list[sge_cnt].addr, wqe_buf, size);
2678 data_len -= size;
2679 wqe_buf += size;
2680 }
2682 if (data_len) {
2683 wc->status = IB_WC_LOC_LEN_ERR;
2684 return -EAGAIN;
2685 }
2687 return 0;
2688 }
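/*
 * Illustrative note, not part of the original driver: for RQ-inline
 * completions the payload lives in the receive wqe itself, so the
 * routine above scatters wc->byte_len bytes from the wqe across the
 * sg_list addresses recorded when the receive was posted. Any residual
 * data_len means the posted buffers were too small and the completion
 * is downgraded to IB_WC_LOC_LEN_ERR.
 */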
2690 static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
2691 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2693 struct hns_roce_srq *srq = NULL;
2694 struct hns_roce_dev *hr_dev;
2695 struct hns_roce_v2_cqe *cqe;
2696 struct hns_roce_qp *hr_qp;
2697 struct hns_roce_wq *wq;
2698 struct ib_qp_attr attr;
2699 int attr_mask;
2700 int is_send;
2701 u16 wqe_ctr;
2702 u32 opcode;
2703 u32 status;
2704 int qpn;
2705 int ret;
2707 /* Find cqe according to consumer index */
2708 cqe = next_cqe_sw_v2(hr_cq);
2709 if (!cqe)
2710 return -EAGAIN;
2712 ++hr_cq->cons_index;
2713 /* Memory barrier */
2714 rmb();
2716 /* 0->SQ, 1->RQ */
2717 is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
2719 qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
2720 V2_CQE_BYTE_16_LCL_QPN_S);
2722 if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2723 hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2724 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2725 if (unlikely(!hr_qp)) {
2726 dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n",
2727 hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK));
2728 return -EINVAL;
2729 }
2730 *cur_qp = hr_qp;
2731 }
2733 wc->qp = &(*cur_qp)->ibqp;
2734 wc->vendor_err = 0;
2736 if (is_send) {
2737 wq = &(*cur_qp)->sq;
2738 if ((*cur_qp)->sq_signal_bits) {
2739 /*
2740 * If sq_signal_bits is 1, the tail pointer is first
2741 * updated to the wqe that the current cqe
2742 * corresponds to.
2743 */
2744 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2745 V2_CQE_BYTE_4_WQE_INDX_M,
2746 V2_CQE_BYTE_4_WQE_INDX_S);
2747 wq->tail += (wqe_ctr - (u16)wq->tail) &
2748 (wq->wqe_cnt - 1);
2749 }
2751 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2752 ++wq->tail;
2753 } else if ((*cur_qp)->ibqp.srq) {
2754 srq = to_hr_srq((*cur_qp)->ibqp.srq);
2755 wqe_ctr = (u16)roce_get_field(cqe->byte_4,
2756 V2_CQE_BYTE_4_WQE_INDX_M,
2757 V2_CQE_BYTE_4_WQE_INDX_S);
2758 wc->wr_id = srq->wrid[wqe_ctr];
2759 hns_roce_free_srq_wqe(srq, wqe_ctr);
2760 } else {
2761 /* Update tail pointer, record wr_id */
2762 wq = &(*cur_qp)->rq;
2763 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2764 ++wq->tail;
2765 }
2767 status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
2768 V2_CQE_BYTE_4_STATUS_S);
2769 switch (status & HNS_ROCE_V2_CQE_STATUS_MASK) {
2770 case HNS_ROCE_CQE_V2_SUCCESS:
2771 wc->status = IB_WC_SUCCESS;
2772 break;
2773 case HNS_ROCE_CQE_V2_LOCAL_LENGTH_ERR:
2774 wc->status = IB_WC_LOC_LEN_ERR;
2775 break;
2776 case HNS_ROCE_CQE_V2_LOCAL_QP_OP_ERR:
2777 wc->status = IB_WC_LOC_QP_OP_ERR;
2778 break;
2779 case HNS_ROCE_CQE_V2_LOCAL_PROT_ERR:
2780 wc->status = IB_WC_LOC_PROT_ERR;
2781 break;
2782 case HNS_ROCE_CQE_V2_WR_FLUSH_ERR:
2783 wc->status = IB_WC_WR_FLUSH_ERR;
2784 break;
2785 case HNS_ROCE_CQE_V2_MW_BIND_ERR:
2786 wc->status = IB_WC_MW_BIND_ERR;
2787 break;
2788 case HNS_ROCE_CQE_V2_BAD_RESP_ERR:
2789 wc->status = IB_WC_BAD_RESP_ERR;
2790 break;
2791 case HNS_ROCE_CQE_V2_LOCAL_ACCESS_ERR:
2792 wc->status = IB_WC_LOC_ACCESS_ERR;
2793 break;
2794 case HNS_ROCE_CQE_V2_REMOTE_INVAL_REQ_ERR:
2795 wc->status = IB_WC_REM_INV_REQ_ERR;
2796 break;
2797 case HNS_ROCE_CQE_V2_REMOTE_ACCESS_ERR:
2798 wc->status = IB_WC_REM_ACCESS_ERR;
2799 break;
2800 case HNS_ROCE_CQE_V2_REMOTE_OP_ERR:
2801 wc->status = IB_WC_REM_OP_ERR;
2802 break;
2803 case HNS_ROCE_CQE_V2_TRANSPORT_RETRY_EXC_ERR:
2804 wc->status = IB_WC_RETRY_EXC_ERR;
2805 break;
2806 case HNS_ROCE_CQE_V2_RNR_RETRY_EXC_ERR:
2807 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2808 break;
2809 case HNS_ROCE_CQE_V2_REMOTE_ABORT_ERR:
2810 wc->status = IB_WC_REM_ABORT_ERR;
2811 break;
2812 default:
2813 wc->status = IB_WC_GENERAL_ERR;
2814 break;
2815 }
2817 /* flush cqe if wc status is error, excluding flush error */
2818 if ((wc->status != IB_WC_SUCCESS) &&
2819 (wc->status != IB_WC_WR_FLUSH_ERR)) {
2820 attr_mask = IB_QP_STATE;
2821 attr.qp_state = IB_QPS_ERR;
2822 return hns_roce_v2_modify_qp(&(*cur_qp)->ibqp,
2823 &attr, attr_mask,
2824 (*cur_qp)->state, IB_QPS_ERR);
2825 }
2827 if (wc->status == IB_WC_WR_FLUSH_ERR)
2828 return 0;
2830 if (is_send) {
2831 wc->wc_flags = 0;
2832 /* SQ corresponding to CQE */
2833 switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2834 V2_CQE_BYTE_4_OPCODE_S) & 0x1f) {
2835 case HNS_ROCE_SQ_OPCODE_SEND:
2836 wc->opcode = IB_WC_SEND;
2837 break;
2838 case HNS_ROCE_SQ_OPCODE_SEND_WITH_INV:
2839 wc->opcode = IB_WC_SEND;
2840 break;
2841 case HNS_ROCE_SQ_OPCODE_SEND_WITH_IMM:
2842 wc->opcode = IB_WC_SEND;
2843 wc->wc_flags |= IB_WC_WITH_IMM;
2844 break;
2845 case HNS_ROCE_SQ_OPCODE_RDMA_READ:
2846 wc->opcode = IB_WC_RDMA_READ;
2847 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2848 break;
2849 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE:
2850 wc->opcode = IB_WC_RDMA_WRITE;
2851 break;
2852 case HNS_ROCE_SQ_OPCODE_RDMA_WRITE_WITH_IMM:
2853 wc->opcode = IB_WC_RDMA_WRITE;
2854 wc->wc_flags |= IB_WC_WITH_IMM;
2855 break;
2856 case HNS_ROCE_SQ_OPCODE_LOCAL_INV:
2857 wc->opcode = IB_WC_LOCAL_INV;
2858 wc->wc_flags |= IB_WC_WITH_INVALIDATE;
2859 break;
2860 case HNS_ROCE_SQ_OPCODE_ATOMIC_COMP_AND_SWAP:
2861 wc->opcode = IB_WC_COMP_SWAP;
2862 wc->byte_len = 8;
2863 break;
2864 case HNS_ROCE_SQ_OPCODE_ATOMIC_FETCH_AND_ADD:
2865 wc->opcode = IB_WC_FETCH_ADD;
2866 wc->byte_len = 8;
2867 break;
2868 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_COMP_AND_SWAP:
2869 wc->opcode = IB_WC_MASKED_COMP_SWAP;
2870 wc->byte_len = 8;
2871 break;
2872 case HNS_ROCE_SQ_OPCODE_ATOMIC_MASK_FETCH_AND_ADD:
2873 wc->opcode = IB_WC_MASKED_FETCH_ADD;
2874 wc->byte_len = 8;
2875 break;
2876 case HNS_ROCE_SQ_OPCODE_FAST_REG_WR:
2877 wc->opcode = IB_WC_REG_MR;
2878 break;
2879 case HNS_ROCE_SQ_OPCODE_BIND_MW:
2880 wc->opcode = IB_WC_REG_MR;
2881 break;
2882 default:
2883 wc->status = IB_WC_GENERAL_ERR;
2884 break;
2885 }
2886 } else {
2887 /* RQ corresponds to CQE */
2888 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2890 opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
2891 V2_CQE_BYTE_4_OPCODE_S);
2892 switch (opcode & 0x1f) {
2893 case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
2894 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2895 wc->wc_flags = IB_WC_WITH_IMM;
2896 wc->ex.imm_data =
2897 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2898 break;
2899 case HNS_ROCE_V2_OPCODE_SEND:
2900 wc->opcode = IB_WC_RECV;
2901 wc->wc_flags = 0;
2902 break;
2903 case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
2904 wc->opcode = IB_WC_RECV;
2905 wc->wc_flags = IB_WC_WITH_IMM;
2906 wc->ex.imm_data =
2907 cpu_to_be32(le32_to_cpu(cqe->immtdata));
2908 break;
2909 case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
2910 wc->opcode = IB_WC_RECV;
2911 wc->wc_flags = IB_WC_WITH_INVALIDATE;
2912 wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
2913 break;
2914 default:
2915 wc->status = IB_WC_GENERAL_ERR;
2916 break;
2917 }
2919 if ((wc->qp->qp_type == IB_QPT_RC ||
2920 wc->qp->qp_type == IB_QPT_UC) &&
2921 (opcode == HNS_ROCE_V2_OPCODE_SEND ||
2922 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_IMM ||
2923 opcode == HNS_ROCE_V2_OPCODE_SEND_WITH_INV) &&
2924 (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
2925 ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
2926 if (ret)
2927 return -EAGAIN;
2928 }
2930 wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
2931 V2_CQE_BYTE_32_SL_S);
2932 wc->src_qp = (u8)roce_get_field(cqe->byte_32,
2933 V2_CQE_BYTE_32_RMT_QPN_M,
2934 V2_CQE_BYTE_32_RMT_QPN_S);
2935 wc->slid = 0;
2936 wc->wc_flags |= (roce_get_bit(cqe->byte_32,
2937 V2_CQE_BYTE_32_GRH_S) ?
2938 IB_WC_GRH : 0);
2939 wc->port_num = roce_get_field(cqe->byte_32,
2940 V2_CQE_BYTE_32_PORTN_M, V2_CQE_BYTE_32_PORTN_S);
2941 wc->pkey_index = 0;
2942 memcpy(wc->smac, cqe->smac, 4);
2943 wc->smac[4] = roce_get_field(cqe->byte_28,
2944 V2_CQE_BYTE_28_SMAC_4_M,
2945 V2_CQE_BYTE_28_SMAC_4_S);
2946 wc->smac[5] = roce_get_field(cqe->byte_28,
2947 V2_CQE_BYTE_28_SMAC_5_M,
2948 V2_CQE_BYTE_28_SMAC_5_S);
2949 wc->wc_flags |= IB_WC_WITH_SMAC;
2950 if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
2951 wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
2952 V2_CQE_BYTE_28_VID_M,
2953 V2_CQE_BYTE_28_VID_S);
2954 wc->wc_flags |= IB_WC_WITH_VLAN;
2955 } else {
2956 wc->vlan_id = 0xffff;
2957 }
2959 wc->network_hdr_type = roce_get_field(cqe->byte_28,
2960 V2_CQE_BYTE_28_PORT_TYPE_M,
2961 V2_CQE_BYTE_28_PORT_TYPE_S);
2962 }
2964 return 0;
2967 static int hns_roce_v2_poll_cq(struct ib_cq *ibcq, int num_entries,
2968 struct ib_wc *wc)
2970 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2971 struct hns_roce_qp *cur_qp = NULL;
2972 unsigned long flags;
2973 int npolled;
2975 spin_lock_irqsave(&hr_cq->lock, flags);
2977 for (npolled = 0; npolled < num_entries; ++npolled) {
2978 if (hns_roce_v2_poll_one(hr_cq, &cur_qp, wc + npolled))
2979 break;
2980 }
2982 if (npolled) {
2983 /* Memory barrier */
2984 wmb();
2985 hns_roce_v2_cq_set_ci(hr_cq, hr_cq->cons_index);
2986 }
2988 spin_unlock_irqrestore(&hr_cq->lock, flags);
2990 return npolled;
2991 }
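/*
 * Illustrative note, not part of the original driver: a ULP reaches
 * this routine through the standard ib_poll_cq() verb. A hypothetical
 * consumer draining the CQ in batches (handle_wc being a made-up
 * handler) might look like:
 *
 *	struct ib_wc wc[16];
 *	int n, i;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; i++)
 *			handle_wc(&wc[i]);
 *
 * By the time a wc is returned here, any error status other than
 * IB_WC_WR_FLUSH_ERR has already moved the QP to IB_QPS_ERR.
 */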
2993 static int get_op_for_set_hem(struct hns_roce_dev *hr_dev, u32 type,
2994 int step_idx)
2996 int op;
2998 if (type == HEM_TYPE_SCCC && step_idx)
2999 return -EINVAL;
3001 switch (type) {
3002 case HEM_TYPE_QPC:
3003 op = HNS_ROCE_CMD_WRITE_QPC_BT0;
3004 break;
3005 case HEM_TYPE_MTPT:
3006 op = HNS_ROCE_CMD_WRITE_MPT_BT0;
3007 break;
3008 case HEM_TYPE_CQC:
3009 op = HNS_ROCE_CMD_WRITE_CQC_BT0;
3010 break;
3011 case HEM_TYPE_SRQC:
3012 op = HNS_ROCE_CMD_WRITE_SRQC_BT0;
3013 break;
3014 case HEM_TYPE_SCCC:
3015 op = HNS_ROCE_CMD_WRITE_SCCC_BT0;
3016 break;
3017 case HEM_TYPE_QPC_TIMER:
3018 op = HNS_ROCE_CMD_WRITE_QPC_TIMER_BT0;
3019 break;
3020 case HEM_TYPE_CQC_TIMER:
3021 op = HNS_ROCE_CMD_WRITE_CQC_TIMER_BT0;
3022 break;
3023 default:
3024 dev_warn(hr_dev->dev,
3025 "Table %d not to be written by mailbox!\n", type);
3026 return -EINVAL;
3027 }
3029 return op + step_idx;
3030 }
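/*
 * Illustrative note, not part of the original driver: the mailbox
 * write opcodes for each table appear to be defined consecutively with
 * their BT1/BT2 variants, so "op + step_idx" picks the base-address
 * table level without a second switch; e.g. step_idx == 1 for QPC
 * resolves to HNS_ROCE_CMD_WRITE_QPC_BT0 + 1, the BT1 write.
 */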
3032 static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
3033 struct hns_roce_hem_table *table, int obj,
3034 int step_idx)
3036 struct hns_roce_cmd_mailbox *mailbox;
3037 struct hns_roce_hem_iter iter;
3038 struct hns_roce_hem_mhop mhop;
3039 struct hns_roce_hem *hem;
3040 unsigned long mhop_obj = obj;
3041 int i, j, k;
3042 int ret = 0;
3043 u64 hem_idx = 0;
3044 u64 l1_idx = 0;
3045 u64 bt_ba = 0;
3046 u32 chunk_ba_num;
3047 u32 hop_num;
3048 int op;
3050 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3051 return 0;
3053 hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
3054 i = mhop.l0_idx;
3055 j = mhop.l1_idx;
3056 k = mhop.l2_idx;
3057 hop_num = mhop.hop_num;
3058 chunk_ba_num = mhop.bt_chunk_size / 8;
3060 if (hop_num == 2) {
3061 hem_idx = i * chunk_ba_num * chunk_ba_num + j * chunk_ba_num +
3062 k;
3063 l1_idx = i * chunk_ba_num + j;
3064 } else if (hop_num == 1) {
3065 hem_idx = i * chunk_ba_num + j;
3066 } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
3067 hem_idx = i;
3068 }
3070 op = get_op_for_set_hem(hr_dev, table->type, step_idx);
3071 if (op == -EINVAL)
3072 return 0;
3074 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3075 if (IS_ERR(mailbox))
3076 return PTR_ERR(mailbox);
3078 if (table->type == HEM_TYPE_SCCC)
3079 obj = mhop.l0_idx;
3081 if (check_whether_last_step(hop_num, step_idx)) {
3082 hem = table->hem[hem_idx];
3083 for (hns_roce_hem_first(hem, &iter);
3084 !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
3085 bt_ba = hns_roce_hem_addr(&iter);
3087 /* configure the ba, tag, and op */
3088 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma,
3089 obj, 0, op,
3090 HNS_ROCE_CMD_TIMEOUT_MSECS);
3091 }
3092 } else {
3093 if (step_idx == 0)
3094 bt_ba = table->bt_l0_dma_addr[i];
3095 else if (step_idx == 1 && hop_num == 2)
3096 bt_ba = table->bt_l1_dma_addr[l1_idx];
3098 /* configure the ba, tag, and op */
3099 ret = hns_roce_cmd_mbox(hr_dev, bt_ba, mailbox->dma, obj,
3100 0, op, HNS_ROCE_CMD_TIMEOUT_MSECS);
3101 }
3103 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3104 return ret;
3107 static int hns_roce_v2_clear_hem(struct hns_roce_dev *hr_dev,
3108 struct hns_roce_hem_table *table, int obj,
3109 int step_idx)
3111 struct device *dev = hr_dev->dev;
3112 struct hns_roce_cmd_mailbox *mailbox;
3113 int ret;
3114 u16 op = 0xff;
3116 if (!hns_roce_check_whether_mhop(hr_dev, table->type))
3117 return 0;
3119 switch (table->type) {
3120 case HEM_TYPE_QPC:
3121 op = HNS_ROCE_CMD_DESTROY_QPC_BT0;
3122 break;
3123 case HEM_TYPE_MTPT:
3124 op = HNS_ROCE_CMD_DESTROY_MPT_BT0;
3125 break;
3126 case HEM_TYPE_CQC:
3127 op = HNS_ROCE_CMD_DESTROY_CQC_BT0;
3128 break;
3129 case HEM_TYPE_SCCC:
3130 case HEM_TYPE_QPC_TIMER:
3131 case HEM_TYPE_CQC_TIMER:
3132 break;
3133 case HEM_TYPE_SRQC:
3134 op = HNS_ROCE_CMD_DESTROY_SRQC_BT0;
3135 break;
3136 default:
3137 dev_warn(dev, "Table %d not to be destroyed by mailbox!\n",
3138 table->type);
3139 return 0;
3140 }
3142 if (table->type == HEM_TYPE_SCCC ||
3143 table->type == HEM_TYPE_QPC_TIMER ||
3144 table->type == HEM_TYPE_CQC_TIMER)
3145 return 0;
3147 op += step_idx;
3149 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3150 if (IS_ERR(mailbox))
3151 return PTR_ERR(mailbox);
3153 /* configure the tag and op */
3154 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, obj, 0, op,
3155 HNS_ROCE_CMD_TIMEOUT_MSECS);
3157 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3158 return ret;
3161 static int hns_roce_v2_qp_modify(struct hns_roce_dev *hr_dev,
3162 enum ib_qp_state cur_state,
3163 enum ib_qp_state new_state,
3164 struct hns_roce_v2_qp_context *context,
3165 struct hns_roce_qp *hr_qp)
3167 struct hns_roce_cmd_mailbox *mailbox;
3168 int ret;
3170 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3171 if (IS_ERR(mailbox))
3172 return PTR_ERR(mailbox);
3174 memcpy(mailbox->buf, context, sizeof(*context) * 2);
3176 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
3177 HNS_ROCE_CMD_MODIFY_QPC,
3178 HNS_ROCE_CMD_TIMEOUT_MSECS);
3180 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3182 return ret;
3185 static void set_access_flags(struct hns_roce_qp *hr_qp,
3186 struct hns_roce_v2_qp_context *context,
3187 struct hns_roce_v2_qp_context *qpc_mask,
3188 const struct ib_qp_attr *attr, int attr_mask)
3190 u8 dest_rd_atomic;
3191 u32 access_flags;
3193 dest_rd_atomic = (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) ?
3194 attr->max_dest_rd_atomic : hr_qp->resp_depth;
3196 access_flags = (attr_mask & IB_QP_ACCESS_FLAGS) ?
3197 attr->qp_access_flags : hr_qp->atomic_rd_en;
3199 if (!dest_rd_atomic)
3200 access_flags &= IB_ACCESS_REMOTE_WRITE;
3202 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3203 !!(access_flags & IB_ACCESS_REMOTE_READ));
3204 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S, 0);
3206 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3207 !!(access_flags & IB_ACCESS_REMOTE_WRITE));
3208 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S, 0);
3210 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3211 !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
3212 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S, 0);
3213 }
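/*
 * Illustrative note, not part of the original driver: when the
 * responder depth (max_dest_rd_atomic) is zero, the masking above
 * leaves at most IB_ACCESS_REMOTE_WRITE set, so remote read and atomic
 * rights are dropped for a QP that advertises no responder resources;
 * RRE and ATE are then written as 0 in the context while the matching
 * mask bits are cleared so the change takes effect.
 */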
3215 static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
3216 struct hns_roce_v2_qp_context *context,
3217 struct hns_roce_v2_qp_context *qpc_mask)
3219 if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
3220 roce_set_field(context->byte_4_sqpn_tst,
3221 V2_QPC_BYTE_4_SGE_SHIFT_M,
3222 V2_QPC_BYTE_4_SGE_SHIFT_S,
3223 ilog2((unsigned int)hr_qp->sge.sge_cnt));
3224 else
3225 roce_set_field(context->byte_4_sqpn_tst,
3226 V2_QPC_BYTE_4_SGE_SHIFT_M,
3227 V2_QPC_BYTE_4_SGE_SHIFT_S,
3228 hr_qp->sq.max_gs >
3229 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE ?
3230 ilog2((unsigned int)hr_qp->sge.sge_cnt) : 0);
3232 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SGE_SHIFT_M,
3233 V2_QPC_BYTE_4_SGE_SHIFT_S, 0);
3235 roce_set_field(context->byte_20_smac_sgid_idx,
3236 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S,
3237 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
3238 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3239 V2_QPC_BYTE_20_SQ_SHIFT_M, V2_QPC_BYTE_20_SQ_SHIFT_S, 0);
3241 roce_set_field(context->byte_20_smac_sgid_idx,
3242 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S,
3243 (hr_qp->ibqp.qp_type == IB_QPT_XRC_INI ||
3244 hr_qp->ibqp.qp_type == IB_QPT_XRC_TGT ||
3245 hr_qp->ibqp.srq) ? 0 :
3246 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
3248 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3249 V2_QPC_BYTE_20_RQ_SHIFT_M, V2_QPC_BYTE_20_RQ_SHIFT_S, 0);
3252 static void modify_qp_reset_to_init(struct ib_qp *ibqp,
3253 const struct ib_qp_attr *attr,
3254 int attr_mask,
3255 struct hns_roce_v2_qp_context *context,
3256 struct hns_roce_v2_qp_context *qpc_mask)
3258 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3259 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3261 /*
3262 * In the v2 engine, software passes the context and context mask to
3263 * hardware when modifying a qp. If software needs to modify some
3264 * fields of the context, it should clear all bits of those fields in
3265 * the context mask to 0; otherwise, the bits are left set to 0x1.
3266 */
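/*
 * Illustrative note, not part of the original driver: a minimal sketch
 * of the convention described above. To change only the PD field, write
 * the new value into the context and clear the same field in the mask;
 * any field whose mask bits remain 1 is preserved by hardware:
 *
 *	roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
 *		       V2_QPC_BYTE_16_PD_S, pdn);
 *	roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
 *		       V2_QPC_BYTE_16_PD_S, 0);
 */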
3267 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3268 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3269 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3270 V2_QPC_BYTE_4_TST_S, 0);
3272 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3273 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3274 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3275 V2_QPC_BYTE_4_SQPN_S, 0);
3277 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3278 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3279 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3280 V2_QPC_BYTE_16_PD_S, 0);
3282 roce_set_field(context->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3283 V2_QPC_BYTE_20_RQWS_S, ilog2(hr_qp->rq.max_gs));
3284 roce_set_field(qpc_mask->byte_20_smac_sgid_idx, V2_QPC_BYTE_20_RQWS_M,
3285 V2_QPC_BYTE_20_RQWS_S, 0);
3287 set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
3289 /* When there is no VLAN, set the VLAN id to 0xFFF */
3290 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3291 V2_QPC_BYTE_24_VLAN_ID_S, 0xfff);
3292 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
3293 V2_QPC_BYTE_24_VLAN_ID_S, 0);
3295 /*
3296 * Set some fields in context to zero. Because the default values
3297 * of all fields in context are zero, we need not set them again,
3298 * but we should clear the relevant fields in the context mask.
3299 */
3300 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_TX_ERR_S, 0);
3301 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_SQ_RX_ERR_S, 0);
3302 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
3303 roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
3305 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
3306 V2_QPC_BYTE_60_TEMPID_S, 0);
3308 roce_set_field(qpc_mask->byte_60_qpst_tempid,
3309 V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
3310 0);
3311 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3312 V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
3313 roce_set_bit(qpc_mask->byte_60_qpst_tempid,
3314 V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
3315 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
3316 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
3318 if (hr_qp->rdb_en) {
3319 roce_set_bit(context->byte_68_rq_db,
3320 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 1);
3321 roce_set_bit(qpc_mask->byte_68_rq_db,
3322 V2_QPC_BYTE_68_RQ_RECORD_EN_S, 0);
3323 }
3325 roce_set_field(context->byte_68_rq_db,
3326 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3327 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S,
3328 ((u32)hr_qp->rdb.dma) >> 1);
3329 roce_set_field(qpc_mask->byte_68_rq_db,
3330 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_M,
3331 V2_QPC_BYTE_68_RQ_DB_RECORD_ADDR_S, 0);
3332 context->rq_db_record_addr = cpu_to_le32(hr_qp->rdb.dma >> 32);
3333 qpc_mask->rq_db_record_addr = 0;
3335 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S,
3336 (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) ? 1 : 0);
3337 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RQIE_S, 0);
3339 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3340 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3341 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3342 V2_QPC_BYTE_80_RX_CQN_S, 0);
3343 if (ibqp->srq) {
3344 roce_set_field(context->byte_76_srqn_op_en,
3345 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3346 to_hr_srq(ibqp->srq)->srqn);
3347 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3348 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3349 roce_set_bit(context->byte_76_srqn_op_en,
3350 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3351 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3352 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3353 }
3355 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3356 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3357 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3358 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3359 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3360 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3362 roce_set_field(qpc_mask->byte_92_srq_info, V2_QPC_BYTE_92_SRQ_INFO_M,
3363 V2_QPC_BYTE_92_SRQ_INFO_S, 0);
3365 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3366 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3368 roce_set_field(qpc_mask->byte_104_rq_sge,
3369 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_M,
3370 V2_QPC_BYTE_104_RQ_CUR_WQE_SGE_NUM_S, 0);
3372 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3373 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3374 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3375 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3376 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3377 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3378 V2_QPC_BYTE_108_RX_REQ_RNR_S, 0);
3380 qpc_mask->rq_rnr_timer = 0;
3381 qpc_mask->rx_msg_len = 0;
3382 qpc_mask->rx_rkey_pkt_info = 0;
3383 qpc_mask->rx_va = 0;
3385 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3386 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3387 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3388 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3390 roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
3391 0);
3392 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
3393 V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
3394 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
3395 V2_QPC_BYTE_140_RAQ_TRRL_TAIL_S, 0);
3397 roce_set_field(qpc_mask->byte_144_raq,
3398 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
3399 V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
3400 roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
3401 V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
3402 roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
3404 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RQ_MSN_M,
3405 V2_QPC_BYTE_148_RQ_MSN_S, 0);
3406 roce_set_field(qpc_mask->byte_148_raq, V2_QPC_BYTE_148_RAQ_SYNDROME_M,
3407 V2_QPC_BYTE_148_RAQ_SYNDROME_S, 0);
3409 roce_set_field(qpc_mask->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
3410 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
3411 roce_set_field(qpc_mask->byte_152_raq,
3412 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_M,
3413 V2_QPC_BYTE_152_RAQ_TRRL_RTY_HEAD_S, 0);
3415 roce_set_field(qpc_mask->byte_156_raq, V2_QPC_BYTE_156_RAQ_USE_PKTN_M,
3416 V2_QPC_BYTE_156_RAQ_USE_PKTN_S, 0);
3418 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3419 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
3420 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
3421 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
3422 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
3423 V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
3425 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3426 V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
3427 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3428 V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
3429 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3430 V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
3431 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3432 V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
3433 roce_set_bit(qpc_mask->byte_168_irrl_idx,
3434 V2_QPC_BYTE_168_SQ_INVLD_FLG_S, 0);
3435 roce_set_field(qpc_mask->byte_168_irrl_idx,
3436 V2_QPC_BYTE_168_IRRL_IDX_LSB_M,
3437 V2_QPC_BYTE_168_IRRL_IDX_LSB_S, 0);
3439 roce_set_field(context->byte_172_sq_psn, V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3440 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 4);
3441 roce_set_field(qpc_mask->byte_172_sq_psn,
3442 V2_QPC_BYTE_172_ACK_REQ_FREQ_M,
3443 V2_QPC_BYTE_172_ACK_REQ_FREQ_S, 0);
3445 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
3446 0);
3448 roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
3449 roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
3451 roce_set_field(qpc_mask->byte_176_msg_pktn,
3452 V2_QPC_BYTE_176_MSG_USE_PKTN_M,
3453 V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
3454 roce_set_field(qpc_mask->byte_176_msg_pktn,
3455 V2_QPC_BYTE_176_IRRL_HEAD_PRE_M,
3456 V2_QPC_BYTE_176_IRRL_HEAD_PRE_S, 0);
3458 roce_set_field(qpc_mask->byte_184_irrl_idx,
3459 V2_QPC_BYTE_184_IRRL_IDX_MSB_M,
3460 V2_QPC_BYTE_184_IRRL_IDX_MSB_S, 0);
3462 qpc_mask->cur_sge_offset = 0;
3464 roce_set_field(qpc_mask->byte_192_ext_sge,
3465 V2_QPC_BYTE_192_CUR_SGE_IDX_M,
3466 V2_QPC_BYTE_192_CUR_SGE_IDX_S, 0);
3467 roce_set_field(qpc_mask->byte_192_ext_sge,
3468 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_M,
3469 V2_QPC_BYTE_192_EXT_SGE_NUM_LEFT_S, 0);
3471 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
3472 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
3474 roce_set_field(qpc_mask->byte_200_sq_max, V2_QPC_BYTE_200_SQ_MAX_IDX_M,
3475 V2_QPC_BYTE_200_SQ_MAX_IDX_S, 0);
3476 roce_set_field(qpc_mask->byte_200_sq_max,
3477 V2_QPC_BYTE_200_LCL_OPERATED_CNT_M,
3478 V2_QPC_BYTE_200_LCL_OPERATED_CNT_S, 0);
3480 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RNR_FLG_S, 0);
3481 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_PKT_RTY_FLG_S, 0);
3483 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
3484 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
3486 qpc_mask->sq_timer = 0;
3488 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
3489 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
3490 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
3491 roce_set_field(qpc_mask->byte_232_irrl_sge,
3492 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3493 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3495 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
3496 0);
3497 roce_set_bit(qpc_mask->byte_232_irrl_sge,
3498 V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
3499 roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
3500 0);
3502 qpc_mask->irrl_cur_sge_offset = 0;
3504 roce_set_field(qpc_mask->byte_240_irrl_tail,
3505 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
3506 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
3507 roce_set_field(qpc_mask->byte_240_irrl_tail,
3508 V2_QPC_BYTE_240_IRRL_TAIL_RD_M,
3509 V2_QPC_BYTE_240_IRRL_TAIL_RD_S, 0);
3510 roce_set_field(qpc_mask->byte_240_irrl_tail,
3511 V2_QPC_BYTE_240_RX_ACK_MSN_M,
3512 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
3514 roce_set_field(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_M,
3515 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
3516 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_ACK_PSN_ERR_S,
3517 0);
3518 roce_set_field(qpc_mask->byte_248_ack_psn,
3519 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
3520 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
3521 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_IRRL_PSN_VLD_S,
3522 0);
3523 roce_set_bit(qpc_mask->byte_248_ack_psn,
3524 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
3525 roce_set_bit(qpc_mask->byte_248_ack_psn, V2_QPC_BYTE_248_CQ_ERR_IND_S,
3526 0);
3528 hr_qp->access_flags = attr->qp_access_flags;
3529 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3530 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3531 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3532 V2_QPC_BYTE_252_TX_CQN_S, 0);
3534 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_ERR_TYPE_M,
3535 V2_QPC_BYTE_252_ERR_TYPE_S, 0);
3537 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3538 V2_QPC_BYTE_256_RQ_CQE_IDX_M,
3539 V2_QPC_BYTE_256_RQ_CQE_IDX_S, 0);
3540 roce_set_field(qpc_mask->byte_256_sqflush_rqcqe,
3541 V2_QPC_BYTE_256_SQ_FLUSH_IDX_M,
3542 V2_QPC_BYTE_256_SQ_FLUSH_IDX_S, 0);
3545 static void modify_qp_init_to_init(struct ib_qp *ibqp,
3546 const struct ib_qp_attr *attr, int attr_mask,
3547 struct hns_roce_v2_qp_context *context,
3548 struct hns_roce_v2_qp_context *qpc_mask)
3550 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3552 /*
3553 * In the v2 engine, software passes the context and context mask to
3554 * hardware when modifying a qp. If software needs to modify some
3555 * fields of the context, it should clear all bits of those fields in
3556 * the context mask to 0; otherwise, the bits are left set to 0x1.
3557 */
3558 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3559 V2_QPC_BYTE_4_TST_S, to_hr_qp_type(hr_qp->ibqp.qp_type));
3560 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_TST_M,
3561 V2_QPC_BYTE_4_TST_S, 0);
3563 if (attr_mask & IB_QP_ACCESS_FLAGS) {
3564 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3565 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
3566 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3567 0);
3569 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3570 !!(attr->qp_access_flags &
3571 IB_ACCESS_REMOTE_WRITE));
3572 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3573 0);
3575 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3576 !!(attr->qp_access_flags &
3577 IB_ACCESS_REMOTE_ATOMIC));
3578 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3579 0);
3580 } else {
3581 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3582 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_READ));
3583 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RRE_S,
3584 0);
3586 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3587 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_WRITE));
3588 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_RWE_S,
3589 0);
3591 roce_set_bit(context->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3592 !!(hr_qp->access_flags & IB_ACCESS_REMOTE_ATOMIC));
3593 roce_set_bit(qpc_mask->byte_76_srqn_op_en, V2_QPC_BYTE_76_ATE_S,
3594 0);
3595 }
3597 roce_set_field(context->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3598 V2_QPC_BYTE_16_PD_S, to_hr_pd(ibqp->pd)->pdn);
3599 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz, V2_QPC_BYTE_16_PD_M,
3600 V2_QPC_BYTE_16_PD_S, 0);
3602 roce_set_field(context->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3603 V2_QPC_BYTE_80_RX_CQN_S, to_hr_cq(ibqp->recv_cq)->cqn);
3604 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn, V2_QPC_BYTE_80_RX_CQN_M,
3605 V2_QPC_BYTE_80_RX_CQN_S, 0);
3607 roce_set_field(context->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3608 V2_QPC_BYTE_252_TX_CQN_S, to_hr_cq(ibqp->send_cq)->cqn);
3609 roce_set_field(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_TX_CQN_M,
3610 V2_QPC_BYTE_252_TX_CQN_S, 0);
3612 if (ibqp->srq) {
3613 roce_set_bit(context->byte_76_srqn_op_en,
3614 V2_QPC_BYTE_76_SRQ_EN_S, 1);
3615 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
3616 V2_QPC_BYTE_76_SRQ_EN_S, 0);
3617 roce_set_field(context->byte_76_srqn_op_en,
3618 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S,
3619 to_hr_srq(ibqp->srq)->srqn);
3620 roce_set_field(qpc_mask->byte_76_srqn_op_en,
3621 V2_QPC_BYTE_76_SRQN_M, V2_QPC_BYTE_76_SRQN_S, 0);
3624 roce_set_field(context->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3625 V2_QPC_BYTE_4_SQPN_S, hr_qp->qpn);
3626 roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
3627 V2_QPC_BYTE_4_SQPN_S, 0);
3629 if (attr_mask & IB_QP_DEST_QPN) {
3630 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3631 V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
3632 roce_set_field(qpc_mask->byte_56_dqpn_err,
3633 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3634 }
3635 }
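/*
 * Helper used by the INIT->RTR path below: validate that enough RQ WQE
 * buffer MTT entries were found. A QP without a receive queue passes
 * trivially; otherwise at least the current block address must exist,
 * plus a second one when the buffer extends past the first looked-up
 * page.
 */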
3637 static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
3638 struct hns_roce_qp *hr_qp, int mtt_cnt,
3639 u32 page_size)
3641 struct device *dev = hr_dev->dev;
3643 if (hr_qp->rq.wqe_cnt < 1)
3644 return true;
3646 if (mtt_cnt < 1) {
3647 dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n",
3648 hr_qp->qpn);
3649 return false;
3652 if (mtt_cnt < MTT_MIN_COUNT &&
3653 (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
3654 dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n",
3655 hr_qp->qpn);
3656 return false;
3657 }
3659 return true;
3660 }
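/*
 * INIT -> RTR programs the responder side of the connection: WQE/SGE
 * base addresses and hop numbers, IRRL/TRRL base addresses, loopback
 * detection, GID index, destination MAC/QPN and the path MTU.
 */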
3662 static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
3663 const struct ib_qp_attr *attr, int attr_mask,
3664 struct hns_roce_v2_qp_context *context,
3665 struct hns_roce_v2_qp_context *qpc_mask)
3667 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
3668 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3669 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3670 struct device *dev = hr_dev->dev;
3671 u64 mtts[MTT_MIN_COUNT] = { 0 };
3672 dma_addr_t dma_handle_3;
3673 dma_addr_t dma_handle_2;
3674 u64 wqe_sge_ba;
3675 u32 page_size;
3676 u8 port_num;
3677 u64 *mtts_3;
3678 u64 *mtts_2;
3679 int count;
3680 u8 *dmac;
3681 u8 *smac;
3682 int port;
3684 /* Search qp buf's mtts */
3685 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3686 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3687 hr_qp->rq.offset / page_size, mtts,
3688 MTT_MIN_COUNT, &wqe_sge_ba);
3689 if (!ibqp->srq)
3690 if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
3691 return -EINVAL;
3693 /* Search IRRL's mtts */
3694 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
3695 hr_qp->qpn, &dma_handle_2);
3696 if (!mtts_2) {
3697 dev_err(dev, "qp irrl_table find failed\n");
3698 return -EINVAL;
3701 /* Search TRRL's mtts */
3702 mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
3703 hr_qp->qpn, &dma_handle_3);
3704 if (!mtts_3) {
3705 dev_err(dev, "qp trrl_table find failed\n");
3706 return -EINVAL;
3709 if (attr_mask & IB_QP_ALT_PATH) {
3710 dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
3711 return -EINVAL;
3714 dmac = (u8 *)attr->ah_attr.roce.dmac;
3715 context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
3716 qpc_mask->wqe_sge_ba = 0;
3718 /*
3719 * In the v2 engine, software passes a context and a context mask to
3720 * hardware when modifying a QP. For each field to be modified, all
3721 * bits of that field in the context mask must be cleared to 0 at the
3722 * same time; otherwise they are left set to 1.
3723 */
3724 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3725 V2_QPC_BYTE_12_WQE_SGE_BA_S, wqe_sge_ba >> (32 + 3));
3726 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_WQE_SGE_BA_M,
3727 V2_QPC_BYTE_12_WQE_SGE_BA_S, 0);
3729 roce_set_field(context->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3730 V2_QPC_BYTE_12_SQ_HOP_NUM_S,
3731 hr_dev->caps.wqe_sq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3732 0 : hr_dev->caps.wqe_sq_hop_num);
3733 roce_set_field(qpc_mask->byte_12_sq_hop, V2_QPC_BYTE_12_SQ_HOP_NUM_M,
3734 V2_QPC_BYTE_12_SQ_HOP_NUM_S, 0);
3736 roce_set_field(context->byte_20_smac_sgid_idx,
3737 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3738 V2_QPC_BYTE_20_SGE_HOP_NUM_S,
3739 ((ibqp->qp_type == IB_QPT_GSI) ||
3740 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3741 hr_dev->caps.wqe_sge_hop_num : 0);
3742 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3743 V2_QPC_BYTE_20_SGE_HOP_NUM_M,
3744 V2_QPC_BYTE_20_SGE_HOP_NUM_S, 0);
3746 roce_set_field(context->byte_20_smac_sgid_idx,
3747 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3748 V2_QPC_BYTE_20_RQ_HOP_NUM_S,
3749 hr_dev->caps.wqe_rq_hop_num == HNS_ROCE_HOP_NUM_0 ?
3750 0 : hr_dev->caps.wqe_rq_hop_num);
3751 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3752 V2_QPC_BYTE_20_RQ_HOP_NUM_M,
3753 V2_QPC_BYTE_20_RQ_HOP_NUM_S, 0);
3755 roce_set_field(context->byte_16_buf_ba_pg_sz,
3756 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3757 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S,
3758 hr_qp->wqe_bt_pg_shift + PG_SHIFT_OFFSET);
3759 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3760 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_M,
3761 V2_QPC_BYTE_16_WQE_SGE_BA_PG_SZ_S, 0);
3763 roce_set_field(context->byte_16_buf_ba_pg_sz,
3764 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3765 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S,
3766 hr_dev->caps.mtt_buf_pg_sz + PG_SHIFT_OFFSET);
3767 roce_set_field(qpc_mask->byte_16_buf_ba_pg_sz,
3768 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_M,
3769 V2_QPC_BYTE_16_WQE_SGE_BUF_PG_SZ_S, 0);
3771 context->rq_cur_blk_addr = cpu_to_le32(mtts[0] >> PAGE_ADDR_SHIFT);
3772 qpc_mask->rq_cur_blk_addr = 0;
3774 roce_set_field(context->byte_92_srq_info,
3775 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3776 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S,
3777 mtts[0] >> (32 + PAGE_ADDR_SHIFT));
3778 roce_set_field(qpc_mask->byte_92_srq_info,
3779 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_M,
3780 V2_QPC_BYTE_92_RQ_CUR_BLK_ADDR_S, 0);
3782 context->rq_nxt_blk_addr = cpu_to_le32(mtts[1] >> PAGE_ADDR_SHIFT);
3783 qpc_mask->rq_nxt_blk_addr = 0;
3785 roce_set_field(context->byte_104_rq_sge,
3786 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3787 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S,
3788 mtts[1] >> (32 + PAGE_ADDR_SHIFT));
3789 roce_set_field(qpc_mask->byte_104_rq_sge,
3790 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_M,
3791 V2_QPC_BYTE_104_RQ_NXT_BLK_ADDR_S, 0);
3793 roce_set_field(context->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3794 V2_QPC_BYTE_132_TRRL_BA_S, dma_handle_3 >> 4);
3795 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_BA_M,
3796 V2_QPC_BYTE_132_TRRL_BA_S, 0);
3797 context->trrl_ba = cpu_to_le32(dma_handle_3 >> (16 + 4));
3798 qpc_mask->trrl_ba = 0;
3799 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3800 V2_QPC_BYTE_140_TRRL_BA_S,
3801 (u32)(dma_handle_3 >> (32 + 16 + 4)));
3802 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_TRRL_BA_M,
3803 V2_QPC_BYTE_140_TRRL_BA_S, 0);
3805 context->irrl_ba = cpu_to_le32(dma_handle_2 >> 6);
3806 qpc_mask->irrl_ba = 0;
3807 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3808 V2_QPC_BYTE_208_IRRL_BA_S,
3809 dma_handle_2 >> (32 + 6));
3810 roce_set_field(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_IRRL_BA_M,
3811 V2_QPC_BYTE_208_IRRL_BA_S, 0);
3813 roce_set_bit(context->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 1);
3814 roce_set_bit(qpc_mask->byte_208_irrl, V2_QPC_BYTE_208_RMT_E2E_S, 0);
3816 roce_set_bit(context->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3817 hr_qp->sq_signal_bits);
3818 roce_set_bit(qpc_mask->byte_252_err_txcqn, V2_QPC_BYTE_252_SIG_TYPE_S,
3819 0);
3821 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
3823 smac = (u8 *)hr_dev->dev_addr[port];
3824 /* when dmac equals smac or loop_idc is 1, the packet should loop back */
3825 if (ether_addr_equal_unaligned(dmac, smac) ||
3826 hr_dev->loop_idc == 0x1) {
3827 roce_set_bit(context->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 1);
3828 roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_LBI_S, 0);
3831 if (attr_mask & IB_QP_DEST_QPN) {
3832 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
3833 V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
3834 roce_set_field(qpc_mask->byte_56_dqpn_err,
3835 V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
3838 /* Configure GID index */
3839 port_num = rdma_ah_get_port_num(&attr->ah_attr);
3840 roce_set_field(context->byte_20_smac_sgid_idx,
3841 V2_QPC_BYTE_20_SGID_IDX_M,
3842 V2_QPC_BYTE_20_SGID_IDX_S,
3843 hns_get_gid_index(hr_dev, port_num - 1,
3844 grh->sgid_index));
3845 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
3846 V2_QPC_BYTE_20_SGID_IDX_M,
3847 V2_QPC_BYTE_20_SGID_IDX_S, 0);
3848 memcpy(&(context->dmac), dmac, sizeof(u32));
3849 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3850 V2_QPC_BYTE_52_DMAC_S, *((u16 *)(&dmac[4])));
3851 qpc_mask->dmac = 0;
3852 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
3853 V2_QPC_BYTE_52_DMAC_S, 0);
3855 /* mtu * (2 ^ LP_PKTN_INI) should not be bigger than the max message length, 64KB */
3856 roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3857 V2_QPC_BYTE_56_LP_PKTN_INI_S, 4);
3858 roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
3859 V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
3861 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
3862 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3863 V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
3864 else if (attr_mask & IB_QP_PATH_MTU)
3865 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3866 V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
3868 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
3869 V2_QPC_BYTE_24_MTU_S, 0);
3871 roce_set_field(context->byte_84_rq_ci_pi,
3872 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3873 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, hr_qp->rq.head);
3874 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3875 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
3876 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
3878 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
3879 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_M,
3880 V2_QPC_BYTE_84_RQ_CONSUMER_IDX_S, 0);
3881 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
3882 V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
3883 roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
3884 V2_QPC_BYTE_96_RX_REQ_MSN_S, 0);
3885 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
3886 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_M,
3887 V2_QPC_BYTE_108_RX_REQ_LAST_OPTYPE_S, 0);
3889 context->rq_rnr_timer = 0;
3890 qpc_mask->rq_rnr_timer = 0;
3892 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_HEAD_MAX_M,
3893 V2_QPC_BYTE_132_TRRL_HEAD_MAX_S, 0);
3894 roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
3895 V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
3897 /* ROCEE sends 2^lp_sgen_ini segments at a time */
3898 roce_set_field(context->byte_168_irrl_idx,
3899 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3900 V2_QPC_BYTE_168_LP_SGEN_INI_S, 3);
3901 roce_set_field(qpc_mask->byte_168_irrl_idx,
3902 V2_QPC_BYTE_168_LP_SGEN_INI_M,
3903 V2_QPC_BYTE_168_LP_SGEN_INI_S, 0);
3905 return 0;
3906 }
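/*
 * RTR -> RTS programs the requester side: the current SQ and SGE block
 * addresses, plus clearing of the retry/ACK bookkeeping fields so the
 * send engine starts from a clean state.
 */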
3908 static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
3909 const struct ib_qp_attr *attr, int attr_mask,
3910 struct hns_roce_v2_qp_context *context,
3911 struct hns_roce_v2_qp_context *qpc_mask)
3913 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3914 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3915 struct device *dev = hr_dev->dev;
3916 u64 sge_cur_blk = 0;
3917 u64 sq_cur_blk = 0;
3918 u32 page_size;
3919 int count;
3921 /* Search qp buf's mtts */
3922 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
3923 if (count < 1) {
3924 dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn);
3925 return -EINVAL;
3928 if (hr_qp->sge.offset) {
3929 page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
3930 count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
3931 hr_qp->sge.offset / page_size,
3932 &sge_cur_blk, 1, NULL);
3933 if (count < 1) {
3934 dev_err(dev, "qp(0x%lx) sge pa find failed\n",
3935 hr_qp->qpn);
3936 return -EINVAL;
3940 /* Alternate path and path migration are not supported */
3941 if ((attr_mask & IB_QP_ALT_PATH) ||
3942 (attr_mask & IB_QP_PATH_MIG_STATE)) {
3943 dev_err(dev, "RTR2RTS attr_mask (0x%x) error\n", attr_mask);
3944 return -EINVAL;
3945 }
3947 /*
3948 * In the v2 engine, software passes a context and a context mask to
3949 * hardware when modifying a QP. For each field to be modified, all
3950 * bits of that field in the context mask must be cleared to 0 at the
3951 * same time; otherwise they are left set to 1.
3952 */
3953 context->sq_cur_blk_addr = cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3954 roce_set_field(context->byte_168_irrl_idx,
3955 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3956 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S,
3957 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3958 qpc_mask->sq_cur_blk_addr = 0;
3959 roce_set_field(qpc_mask->byte_168_irrl_idx,
3960 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
3961 V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_S, 0);
3963 context->sq_cur_sge_blk_addr = ((ibqp->qp_type == IB_QPT_GSI) ||
3964 hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3965 cpu_to_le32(sge_cur_blk >>
3966 PAGE_ADDR_SHIFT) : 0;
3967 roce_set_field(context->byte_184_irrl_idx,
3968 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3969 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S,
3970 ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
3971 HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
3972 (sge_cur_blk >>
3973 (32 + PAGE_ADDR_SHIFT)) : 0);
3974 qpc_mask->sq_cur_sge_blk_addr = 0;
3975 roce_set_field(qpc_mask->byte_184_irrl_idx,
3976 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_M,
3977 V2_QPC_BYTE_184_SQ_CUR_SGE_BLK_ADDR_S, 0);
3979 context->rx_sq_cur_blk_addr =
3980 cpu_to_le32(sq_cur_blk >> PAGE_ADDR_SHIFT);
3981 roce_set_field(context->byte_232_irrl_sge,
3982 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3983 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S,
3984 sq_cur_blk >> (32 + PAGE_ADDR_SHIFT));
3985 qpc_mask->rx_sq_cur_blk_addr = 0;
3986 roce_set_field(qpc_mask->byte_232_irrl_sge,
3987 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_M,
3988 V2_QPC_BYTE_232_RX_SQ_CUR_BLK_ADDR_S, 0);
3990 /*
3991 * The default value of every context field is zero, so fields that
3992 * should become zero need not be written into the context again;
3993 * only the relevant fields of the context mask must be cleared to 0.
3994 */
3995 roce_set_field(qpc_mask->byte_232_irrl_sge,
3996 V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
3997 V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
3999 roce_set_field(qpc_mask->byte_240_irrl_tail,
4000 V2_QPC_BYTE_240_RX_ACK_MSN_M,
4001 V2_QPC_BYTE_240_RX_ACK_MSN_S, 0);
4003 roce_set_field(qpc_mask->byte_248_ack_psn,
4004 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_M,
4005 V2_QPC_BYTE_248_ACK_LAST_OPTYPE_S, 0);
4006 roce_set_bit(qpc_mask->byte_248_ack_psn,
4007 V2_QPC_BYTE_248_IRRL_PSN_VLD_S, 0);
4008 roce_set_field(qpc_mask->byte_248_ack_psn,
4009 V2_QPC_BYTE_248_IRRL_PSN_M,
4010 V2_QPC_BYTE_248_IRRL_PSN_S, 0);
4012 roce_set_field(qpc_mask->byte_240_irrl_tail,
4013 V2_QPC_BYTE_240_IRRL_TAIL_REAL_M,
4014 V2_QPC_BYTE_240_IRRL_TAIL_REAL_S, 0);
4016 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4017 V2_QPC_BYTE_220_RETRY_MSG_MSN_M,
4018 V2_QPC_BYTE_220_RETRY_MSG_MSN_S, 0);
4020 roce_set_bit(qpc_mask->byte_248_ack_psn,
4021 V2_QPC_BYTE_248_RNR_RETRY_FLAG_S, 0);
4023 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_CHECK_FLG_M,
4024 V2_QPC_BYTE_212_CHECK_FLG_S, 0);
4026 roce_set_field(context->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4027 V2_QPC_BYTE_212_LSN_S, 0x100);
4028 roce_set_field(qpc_mask->byte_212_lsn, V2_QPC_BYTE_212_LSN_M,
4029 V2_QPC_BYTE_212_LSN_S, 0);
4031 roce_set_field(qpc_mask->byte_196_sq_psn, V2_QPC_BYTE_196_IRRL_HEAD_M,
4032 V2_QPC_BYTE_196_IRRL_HEAD_S, 0);
4034 return 0;
4035 }
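/*
 * Transitions that need no extra context programming beyond the QP
 * state field itself: any state (except RESET) to ERR or RESET,
 * RTS/SQD to RTS/SQD, and SQE back to RTS.
 */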
4037 static inline bool hns_roce_v2_check_qp_stat(enum ib_qp_state cur_state,
4038 enum ib_qp_state new_state)
4041 if ((cur_state != IB_QPS_RESET &&
4042 (new_state == IB_QPS_ERR || new_state == IB_QPS_RESET)) ||
4043 ((cur_state == IB_QPS_RTS || cur_state == IB_QPS_SQD) &&
4044 (new_state == IB_QPS_RTS || new_state == IB_QPS_SQD)) ||
4045 (cur_state == IB_QPS_SQE && new_state == IB_QPS_RTS))
4046 return true;
4048 return false;
4049 }
4052 static int hns_roce_v2_set_path(struct ib_qp *ibqp,
4053 const struct ib_qp_attr *attr,
4054 int attr_mask,
4055 struct hns_roce_v2_qp_context *context,
4056 struct hns_roce_v2_qp_context *qpc_mask)
4058 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
4059 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4060 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4061 const struct ib_gid_attr *gid_attr = NULL;
4062 int is_roce_protocol;
4063 u16 vlan_id = 0xffff;
4064 bool is_udp = false;
4065 u8 ib_port;
4066 u8 hr_port;
4067 int ret;
4069 ib_port = (attr_mask & IB_QP_PORT) ? attr->port_num : hr_qp->port + 1;
4070 hr_port = ib_port - 1;
4071 is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
4072 rdma_ah_get_ah_flags(&attr->ah_attr) & IB_AH_GRH;
4074 if (is_roce_protocol) {
4075 gid_attr = attr->ah_attr.grh.sgid_attr;
4076 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
4077 if (ret)
4078 return ret;
4080 if (gid_attr)
4081 is_udp = (gid_attr->gid_type ==
4082 IB_GID_TYPE_ROCE_UDP_ENCAP);
4085 if (vlan_id < VLAN_N_VID) {
4086 roce_set_bit(context->byte_76_srqn_op_en,
4087 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
4088 roce_set_bit(qpc_mask->byte_76_srqn_op_en,
4089 V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
4090 roce_set_bit(context->byte_168_irrl_idx,
4091 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
4092 roce_set_bit(qpc_mask->byte_168_irrl_idx,
4093 V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
4096 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4097 V2_QPC_BYTE_24_VLAN_ID_S, vlan_id);
4098 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_VLAN_ID_M,
4099 V2_QPC_BYTE_24_VLAN_ID_S, 0);
4101 if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
4102 dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n",
4103 grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
4104 return -EINVAL;
4107 if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
4108 dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n");
4109 return -EINVAL;
4112 roce_set_field(context->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4113 V2_QPC_BYTE_52_UDPSPN_S,
4114 is_udp ? 0x12b7 : 0);
4116 roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_UDPSPN_M,
4117 V2_QPC_BYTE_52_UDPSPN_S, 0);
4119 roce_set_field(context->byte_20_smac_sgid_idx,
4120 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S,
4121 grh->sgid_index);
4123 roce_set_field(qpc_mask->byte_20_smac_sgid_idx,
4124 V2_QPC_BYTE_20_SGID_IDX_M, V2_QPC_BYTE_20_SGID_IDX_S, 0);
4126 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4127 V2_QPC_BYTE_24_HOP_LIMIT_S, grh->hop_limit);
4128 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
4129 V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
4131 if (hr_dev->pci_dev->revision == 0x21 && is_udp)
4132 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4133 V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
4134 else
4135 roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4136 V2_QPC_BYTE_24_TC_S, grh->traffic_class);
4137 roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
4138 V2_QPC_BYTE_24_TC_S, 0);
4139 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4140 V2_QPC_BYTE_28_FL_S, grh->flow_label);
4141 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
4142 V2_QPC_BYTE_28_FL_S, 0);
4143 memcpy(context->dgid, grh->dgid.raw, sizeof(grh->dgid.raw));
4144 memset(qpc_mask->dgid, 0, sizeof(grh->dgid.raw));
4145 roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4146 V2_QPC_BYTE_28_SL_S, rdma_ah_get_sl(&attr->ah_attr));
4147 roce_set_field(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_SL_M,
4148 V2_QPC_BYTE_28_SL_S, 0);
4149 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
4151 return 0;
4152 }
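/*
 * Program the fields that are mandatory for the given state
 * transition; unsupported transitions are rejected with -EINVAL.
 */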
4154 static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
4155 const struct ib_qp_attr *attr,
4156 int attr_mask,
4157 enum ib_qp_state cur_state,
4158 enum ib_qp_state new_state,
4159 struct hns_roce_v2_qp_context *context,
4160 struct hns_roce_v2_qp_context *qpc_mask)
4162 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4163 int ret = 0;
4165 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
4166 memset(qpc_mask, 0, sizeof(*qpc_mask));
4167 modify_qp_reset_to_init(ibqp, attr, attr_mask, context,
4168 qpc_mask);
4169 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
4170 modify_qp_init_to_init(ibqp, attr, attr_mask, context,
4171 qpc_mask);
4172 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
4173 ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
4174 qpc_mask);
4175 if (ret)
4176 goto out;
4177 } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
4178 ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
4179 qpc_mask);
4180 if (ret)
4181 goto out;
4182 } else if (hns_roce_v2_check_qp_stat(cur_state, new_state)) {
4183 /* Nothing */
4185 } else {
4186 dev_err(hr_dev->dev, "Illegal state for QP!\n");
4187 ret = -EINVAL;
4188 goto out;
4191 out:
4192 return ret;
4193 }
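/*
 * Apply the optional attributes selected by attr_mask (path, timeout,
 * retry/RNR counts, PSNs, RD atomic depths, RNR timer, qkey); these
 * are independent of which state transition is being performed.
 */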
4195 static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
4196 const struct ib_qp_attr *attr,
4197 int attr_mask,
4198 struct hns_roce_v2_qp_context *context,
4199 struct hns_roce_v2_qp_context *qpc_mask)
4201 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4202 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4203 int ret = 0;
4205 if (attr_mask & IB_QP_AV) {
4206 ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context,
4207 qpc_mask);
4208 if (ret)
4209 return ret;
4212 if (attr_mask & IB_QP_TIMEOUT) {
4213 if (attr->timeout < 31) {
4214 roce_set_field(context->byte_28_at_fl,
4215 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4216 attr->timeout);
4217 roce_set_field(qpc_mask->byte_28_at_fl,
4218 V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
4219 0);
4220 } else {
4221 dev_warn(hr_dev->dev,
4222 "Local ACK timeout shall be 0 to 30.\n");
4226 if (attr_mask & IB_QP_RETRY_CNT) {
4227 roce_set_field(context->byte_212_lsn,
4228 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4229 V2_QPC_BYTE_212_RETRY_NUM_INIT_S,
4230 attr->retry_cnt);
4231 roce_set_field(qpc_mask->byte_212_lsn,
4232 V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4233 V2_QPC_BYTE_212_RETRY_NUM_INIT_S, 0);
4235 roce_set_field(context->byte_212_lsn,
4236 V2_QPC_BYTE_212_RETRY_CNT_M,
4237 V2_QPC_BYTE_212_RETRY_CNT_S,
4238 attr->retry_cnt);
4239 roce_set_field(qpc_mask->byte_212_lsn,
4240 V2_QPC_BYTE_212_RETRY_CNT_M,
4241 V2_QPC_BYTE_212_RETRY_CNT_S, 0);
4244 if (attr_mask & IB_QP_RNR_RETRY) {
4245 roce_set_field(context->byte_244_rnr_rxack,
4246 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4247 V2_QPC_BYTE_244_RNR_NUM_INIT_S, attr->rnr_retry);
4248 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4249 V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4250 V2_QPC_BYTE_244_RNR_NUM_INIT_S, 0);
4252 roce_set_field(context->byte_244_rnr_rxack,
4253 V2_QPC_BYTE_244_RNR_CNT_M,
4254 V2_QPC_BYTE_244_RNR_CNT_S, attr->rnr_retry);
4255 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4256 V2_QPC_BYTE_244_RNR_CNT_M,
4257 V2_QPC_BYTE_244_RNR_CNT_S, 0);
4260 /* RC&UC&UD required attr */
4261 if (attr_mask & IB_QP_SQ_PSN) {
4262 roce_set_field(context->byte_172_sq_psn,
4263 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4264 V2_QPC_BYTE_172_SQ_CUR_PSN_S, attr->sq_psn);
4265 roce_set_field(qpc_mask->byte_172_sq_psn,
4266 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4267 V2_QPC_BYTE_172_SQ_CUR_PSN_S, 0);
4269 roce_set_field(context->byte_196_sq_psn,
4270 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4271 V2_QPC_BYTE_196_SQ_MAX_PSN_S, attr->sq_psn);
4272 roce_set_field(qpc_mask->byte_196_sq_psn,
4273 V2_QPC_BYTE_196_SQ_MAX_PSN_M,
4274 V2_QPC_BYTE_196_SQ_MAX_PSN_S, 0);
4276 roce_set_field(context->byte_220_retry_psn_msn,
4277 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4278 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, attr->sq_psn);
4279 roce_set_field(qpc_mask->byte_220_retry_psn_msn,
4280 V2_QPC_BYTE_220_RETRY_MSG_PSN_M,
4281 V2_QPC_BYTE_220_RETRY_MSG_PSN_S, 0);
4283 roce_set_field(context->byte_224_retry_msg,
4284 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4285 V2_QPC_BYTE_224_RETRY_MSG_PSN_S,
4286 attr->sq_psn >> V2_QPC_BYTE_220_RETRY_MSG_PSN_S);
4287 roce_set_field(qpc_mask->byte_224_retry_msg,
4288 V2_QPC_BYTE_224_RETRY_MSG_PSN_M,
4289 V2_QPC_BYTE_224_RETRY_MSG_PSN_S, 0);
4291 roce_set_field(context->byte_224_retry_msg,
4292 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4293 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S,
4294 attr->sq_psn);
4295 roce_set_field(qpc_mask->byte_224_retry_msg,
4296 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_M,
4297 V2_QPC_BYTE_224_RETRY_MSG_FPKT_PSN_S, 0);
4299 roce_set_field(context->byte_244_rnr_rxack,
4300 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4301 V2_QPC_BYTE_244_RX_ACK_EPSN_S, attr->sq_psn);
4302 roce_set_field(qpc_mask->byte_244_rnr_rxack,
4303 V2_QPC_BYTE_244_RX_ACK_EPSN_M,
4304 V2_QPC_BYTE_244_RX_ACK_EPSN_S, 0);
4307 if ((attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) &&
4308 attr->max_dest_rd_atomic) {
4309 roce_set_field(context->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4310 V2_QPC_BYTE_140_RR_MAX_S,
4311 fls(attr->max_dest_rd_atomic - 1));
4312 roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RR_MAX_M,
4313 V2_QPC_BYTE_140_RR_MAX_S, 0);
4316 if ((attr_mask & IB_QP_MAX_QP_RD_ATOMIC) && attr->max_rd_atomic) {
4317 roce_set_field(context->byte_208_irrl, V2_QPC_BYTE_208_SR_MAX_M,
4318 V2_QPC_BYTE_208_SR_MAX_S,
4319 fls(attr->max_rd_atomic - 1));
4320 roce_set_field(qpc_mask->byte_208_irrl,
4321 V2_QPC_BYTE_208_SR_MAX_M,
4322 V2_QPC_BYTE_208_SR_MAX_S, 0);
4325 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
4326 set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
4328 if (attr_mask & IB_QP_MIN_RNR_TIMER) {
4329 roce_set_field(context->byte_80_rnr_rx_cqn,
4330 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4331 V2_QPC_BYTE_80_MIN_RNR_TIME_S,
4332 attr->min_rnr_timer);
4333 roce_set_field(qpc_mask->byte_80_rnr_rx_cqn,
4334 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4335 V2_QPC_BYTE_80_MIN_RNR_TIME_S, 0);
4338 /* RC&UC required attr */
4339 if (attr_mask & IB_QP_RQ_PSN) {
4340 roce_set_field(context->byte_108_rx_reqepsn,
4341 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4342 V2_QPC_BYTE_108_RX_REQ_EPSN_S, attr->rq_psn);
4343 roce_set_field(qpc_mask->byte_108_rx_reqepsn,
4344 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4345 V2_QPC_BYTE_108_RX_REQ_EPSN_S, 0);
4347 roce_set_field(context->byte_152_raq, V2_QPC_BYTE_152_RAQ_PSN_M,
4348 V2_QPC_BYTE_152_RAQ_PSN_S, attr->rq_psn - 1);
4349 roce_set_field(qpc_mask->byte_152_raq,
4350 V2_QPC_BYTE_152_RAQ_PSN_M,
4351 V2_QPC_BYTE_152_RAQ_PSN_S, 0);
4354 if (attr_mask & IB_QP_QKEY) {
4355 context->qkey_xrcd = cpu_to_le32(attr->qkey);
4356 qpc_mask->qkey_xrcd = 0;
4357 hr_qp->qkey = attr->qkey;
4360 return ret;
4363 static void hns_roce_v2_record_opt_fields(struct ib_qp *ibqp,
4364 const struct ib_qp_attr *attr,
4365 int attr_mask)
4367 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4368 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4370 if (attr_mask & IB_QP_ACCESS_FLAGS)
4371 hr_qp->atomic_rd_en = attr->qp_access_flags;
4373 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
4374 hr_qp->resp_depth = attr->max_dest_rd_atomic;
4375 if (attr_mask & IB_QP_PORT) {
4376 hr_qp->port = attr->port_num - 1;
4377 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
4381 static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
4382 const struct ib_qp_attr *attr,
4383 int attr_mask, enum ib_qp_state cur_state,
4384 enum ib_qp_state new_state)
4386 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4387 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4388 struct hns_roce_v2_qp_context ctx[2];
4389 struct hns_roce_v2_qp_context *context = ctx;
4390 struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
4391 struct device *dev = hr_dev->dev;
4392 int ret;
4395 * In v2 engine, software pass context and context mask to hardware
4396 * when modifying qp. If software need modify some fields in context,
4397 * we should set all bits of the relevant fields in context mask to
4398 * 0 at the same time, else set them to 0x1.
4400 memset(context, 0, sizeof(*context));
4401 memset(qpc_mask, 0xff, sizeof(*qpc_mask));
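/*
 * The memsets above start from "modify nothing": a zeroed context and
 * an all-ones mask, until individual fields are filled in below.
 */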
4402 ret = hns_roce_v2_set_abs_fields(ibqp, attr, attr_mask, cur_state,
4403 new_state, context, qpc_mask);
4404 if (ret)
4405 goto out;
4407 /* When QP state is err, SQ and RQ WQE should be flushed */
4408 if (new_state == IB_QPS_ERR) {
4409 roce_set_field(context->byte_160_sq_ci_pi,
4410 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4411 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S,
4412 hr_qp->sq.head);
4413 roce_set_field(qpc_mask->byte_160_sq_ci_pi,
4414 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_M,
4415 V2_QPC_BYTE_160_SQ_PRODUCER_IDX_S, 0);
4417 if (!ibqp->srq) {
4418 roce_set_field(context->byte_84_rq_ci_pi,
4419 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4420 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S,
4421 hr_qp->rq.head);
4422 roce_set_field(qpc_mask->byte_84_rq_ci_pi,
4423 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_M,
4424 V2_QPC_BYTE_84_RQ_PRODUCER_IDX_S, 0);
4428 /* Configure the optional fields */
4429 ret = hns_roce_v2_set_opt_fields(ibqp, attr, attr_mask, context,
4430 qpc_mask);
4431 if (ret)
4432 goto out;
4434 roce_set_bit(context->byte_108_rx_reqepsn, V2_QPC_BYTE_108_INV_CREDIT_S,
4435 ibqp->srq ? 1 : 0);
4436 roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
4437 V2_QPC_BYTE_108_INV_CREDIT_S, 0);
4439 /* Every state transition must update the QP state field */
4440 roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4441 V2_QPC_BYTE_60_QP_ST_S, new_state);
4442 roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
4443 V2_QPC_BYTE_60_QP_ST_S, 0);
4445 /* SW passes the context to HW */
4446 ret = hns_roce_v2_qp_modify(hr_dev, cur_state, new_state, ctx, hr_qp);
4447 if (ret) {
4448 dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret);
4449 goto out;
4452 hr_qp->state = new_state;
4454 hns_roce_v2_record_opt_fields(ibqp, attr, attr_mask);
4456 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
4457 hns_roce_v2_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
4458 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
4459 if (ibqp->send_cq != ibqp->recv_cq)
4460 hns_roce_v2_cq_clean(to_hr_cq(ibqp->send_cq),
4461 hr_qp->qpn, NULL);
4463 hr_qp->rq.head = 0;
4464 hr_qp->rq.tail = 0;
4465 hr_qp->sq.head = 0;
4466 hr_qp->sq.tail = 0;
4467 hr_qp->sq_next_wqe = 0;
4468 hr_qp->next_sge = 0;
4469 if (hr_qp->rq.wqe_cnt)
4470 *hr_qp->rdb.db_record = 0;
4473 out:
4474 return ret;
4477 static inline enum ib_qp_state to_ib_qp_st(enum hns_roce_v2_qp_state state)
4479 switch (state) {
4480 case HNS_ROCE_QP_ST_RST: return IB_QPS_RESET;
4481 case HNS_ROCE_QP_ST_INIT: return IB_QPS_INIT;
4482 case HNS_ROCE_QP_ST_RTR: return IB_QPS_RTR;
4483 case HNS_ROCE_QP_ST_RTS: return IB_QPS_RTS;
4484 case HNS_ROCE_QP_ST_SQ_DRAINING:
4485 case HNS_ROCE_QP_ST_SQD: return IB_QPS_SQD;
4486 case HNS_ROCE_QP_ST_SQER: return IB_QPS_SQE;
4487 case HNS_ROCE_QP_ST_ERR: return IB_QPS_ERR;
4488 default: return -1;
4492 static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
4493 struct hns_roce_qp *hr_qp,
4494 struct hns_roce_v2_qp_context *hr_context)
4496 struct hns_roce_cmd_mailbox *mailbox;
4497 int ret;
4499 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4500 if (IS_ERR(mailbox))
4501 return PTR_ERR(mailbox);
4503 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
4504 HNS_ROCE_CMD_QUERY_QPC,
4505 HNS_ROCE_CMD_TIMEOUT_MSECS);
4506 if (ret) {
4507 dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
4508 goto out;
4511 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
4513 out:
4514 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4515 return ret;
4516 }
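/*
 * Reconstruct ib_qp_attr/ib_qp_init_attr from the queried QPC. A QP
 * in RESET is answered from the software state without a mailbox
 * round trip.
 */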
4518 static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
4519 int qp_attr_mask,
4520 struct ib_qp_init_attr *qp_init_attr)
4522 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4523 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4524 struct hns_roce_v2_qp_context context = {};
4525 struct device *dev = hr_dev->dev;
4526 int tmp_qp_state;
4527 int state;
4528 int ret;
4530 memset(qp_attr, 0, sizeof(*qp_attr));
4531 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
4533 mutex_lock(&hr_qp->mutex);
4535 if (hr_qp->state == IB_QPS_RESET) {
4536 qp_attr->qp_state = IB_QPS_RESET;
4537 ret = 0;
4538 goto done;
4541 ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
4542 if (ret) {
4543 dev_err(dev, "query qpc error\n");
4544 ret = -EINVAL;
4545 goto out;
4548 state = roce_get_field(context.byte_60_qpst_tempid,
4549 V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
4550 tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
4551 if (tmp_qp_state == -1) {
4552 dev_err(dev, "Illegal ib_qp_state\n");
4553 ret = -EINVAL;
4554 goto out;
4556 hr_qp->state = (u8)tmp_qp_state;
4557 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
4558 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context.byte_24_mtu_tc,
4559 V2_QPC_BYTE_24_MTU_M,
4560 V2_QPC_BYTE_24_MTU_S);
4561 qp_attr->path_mig_state = IB_MIG_ARMED;
4562 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
4563 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
4564 qp_attr->qkey = V2_QKEY_VAL;
4566 qp_attr->rq_psn = roce_get_field(context.byte_108_rx_reqepsn,
4567 V2_QPC_BYTE_108_RX_REQ_EPSN_M,
4568 V2_QPC_BYTE_108_RX_REQ_EPSN_S);
4569 qp_attr->sq_psn = (u32)roce_get_field(context.byte_172_sq_psn,
4570 V2_QPC_BYTE_172_SQ_CUR_PSN_M,
4571 V2_QPC_BYTE_172_SQ_CUR_PSN_S);
4572 qp_attr->dest_qp_num = (u32)roce_get_field(context.byte_56_dqpn_err,
4573 V2_QPC_BYTE_56_DQPN_M,
4574 V2_QPC_BYTE_56_DQPN_S);
4575 qp_attr->qp_access_flags = ((roce_get_bit(context.byte_76_srqn_op_en,
4576 V2_QPC_BYTE_76_RRE_S)) << V2_QP_RRE_S) |
4577 ((roce_get_bit(context.byte_76_srqn_op_en,
4578 V2_QPC_BYTE_76_RWE_S)) << V2_QP_RWE_S) |
4579 ((roce_get_bit(context.byte_76_srqn_op_en,
4580 V2_QPC_BYTE_76_ATE_S)) << V2_QP_ATE_S);
4582 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
4583 hr_qp->ibqp.qp_type == IB_QPT_UC) {
4584 struct ib_global_route *grh =
4585 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
4587 rdma_ah_set_sl(&qp_attr->ah_attr,
4588 roce_get_field(context.byte_28_at_fl,
4589 V2_QPC_BYTE_28_SL_M,
4590 V2_QPC_BYTE_28_SL_S));
4591 grh->flow_label = roce_get_field(context.byte_28_at_fl,
4592 V2_QPC_BYTE_28_FL_M,
4593 V2_QPC_BYTE_28_FL_S);
4594 grh->sgid_index = roce_get_field(context.byte_20_smac_sgid_idx,
4595 V2_QPC_BYTE_20_SGID_IDX_M,
4596 V2_QPC_BYTE_20_SGID_IDX_S);
4597 grh->hop_limit = roce_get_field(context.byte_24_mtu_tc,
4598 V2_QPC_BYTE_24_HOP_LIMIT_M,
4599 V2_QPC_BYTE_24_HOP_LIMIT_S);
4600 grh->traffic_class = roce_get_field(context.byte_24_mtu_tc,
4601 V2_QPC_BYTE_24_TC_M,
4602 V2_QPC_BYTE_24_TC_S);
4604 memcpy(grh->dgid.raw, context.dgid, sizeof(grh->dgid.raw));
4607 qp_attr->port_num = hr_qp->port + 1;
4608 qp_attr->sq_draining = 0;
4609 qp_attr->max_rd_atomic = 1 << roce_get_field(context.byte_208_irrl,
4610 V2_QPC_BYTE_208_SR_MAX_M,
4611 V2_QPC_BYTE_208_SR_MAX_S);
4612 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context.byte_140_raq,
4613 V2_QPC_BYTE_140_RR_MAX_M,
4614 V2_QPC_BYTE_140_RR_MAX_S);
4615 qp_attr->min_rnr_timer = (u8)roce_get_field(context.byte_80_rnr_rx_cqn,
4616 V2_QPC_BYTE_80_MIN_RNR_TIME_M,
4617 V2_QPC_BYTE_80_MIN_RNR_TIME_S);
4618 qp_attr->timeout = (u8)roce_get_field(context.byte_28_at_fl,
4619 V2_QPC_BYTE_28_AT_M,
4620 V2_QPC_BYTE_28_AT_S);
4621 qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4622 V2_QPC_BYTE_212_RETRY_CNT_M,
4623 V2_QPC_BYTE_212_RETRY_CNT_S);
4624 qp_attr->rnr_retry = le32_to_cpu(context.rq_rnr_timer);
4626 done:
4627 qp_attr->cur_qp_state = qp_attr->qp_state;
4628 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
4629 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
4631 if (!ibqp->uobject) {
4632 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
4633 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
4634 } else {
4635 qp_attr->cap.max_send_wr = 0;
4636 qp_attr->cap.max_send_sge = 0;
4639 qp_init_attr->cap = qp_attr->cap;
4641 out:
4642 mutex_unlock(&hr_qp->mutex);
4643 return ret;
4646 static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
4647 struct hns_roce_qp *hr_qp,
4648 struct ib_udata *udata)
4650 struct hns_roce_cq *send_cq, *recv_cq;
4651 struct ib_device *ibdev = &hr_dev->ib_dev;
4652 int ret = 0;
4654 if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
4655 /* Modify qp to reset before destroying qp */
4656 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
4657 hr_qp->state, IB_QPS_RESET);
4658 if (ret)
4659 ibdev_err(ibdev, "modify QP to Reset failed.\n");
4662 send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
4663 recv_cq = to_hr_cq(hr_qp->ibqp.recv_cq);
4665 hns_roce_lock_cqs(send_cq, recv_cq);
4667 if (!udata) {
4668 __hns_roce_v2_cq_clean(recv_cq, hr_qp->qpn, hr_qp->ibqp.srq ?
4669 to_hr_srq(hr_qp->ibqp.srq) : NULL);
4670 if (send_cq != recv_cq)
4671 __hns_roce_v2_cq_clean(send_cq, hr_qp->qpn, NULL);
4674 hns_roce_qp_remove(hr_dev, hr_qp);
4676 hns_roce_unlock_cqs(send_cq, recv_cq);
4678 hns_roce_qp_free(hr_dev, hr_qp);
4680 /* If it is not a special QP, free its QPN */
4681 if ((hr_qp->ibqp.qp_type == IB_QPT_RC) ||
4682 (hr_qp->ibqp.qp_type == IB_QPT_UC) ||
4683 (hr_qp->ibqp.qp_type == IB_QPT_UD))
4684 hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
4686 hns_roce_mtr_cleanup(hr_dev, &hr_qp->mtr);
4688 if (udata) {
4689 struct hns_roce_ucontext *context =
4690 rdma_udata_to_drv_context(
4691 udata,
4692 struct hns_roce_ucontext,
4693 ibucontext);
4695 if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
4696 hns_roce_db_unmap_user(context, &hr_qp->sdb);
4698 if (hr_qp->rq.wqe_cnt && (hr_qp->rdb_en == 1))
4699 hns_roce_db_unmap_user(context, &hr_qp->rdb);
4700 } else {
4701 kfree(hr_qp->sq.wrid);
4702 kfree(hr_qp->rq.wrid);
4703 hns_roce_buf_free(hr_dev, hr_qp->buff_size, &hr_qp->hr_buf);
4704 if (hr_qp->rq.wqe_cnt)
4705 hns_roce_free_db(hr_dev, &hr_qp->rdb);
4707 ib_umem_release(hr_qp->umem);
4709 if ((hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) &&
4710 hr_qp->rq.wqe_cnt) {
4711 kfree(hr_qp->rq_inl_buf.wqe_list[0].sg_list);
4712 kfree(hr_qp->rq_inl_buf.wqe_list);
4715 return ret;
4718 static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
4720 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
4721 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
4722 int ret;
4724 ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
4725 if (ret)
4726 ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
4727 hr_qp->qpn, ret);
4729 kfree(hr_qp);
4731 return 0;
4732 }
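/*
 * SCC (congestion) context init for a QP: set the "clear done" flag,
 * issue the clear for this QPN, then poll up to
 * HNS_ROCE_CMQ_SCC_CLR_DONE_CNT times (20ms apart) until hardware
 * reports the clear has completed.
 */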
4734 static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
4735 struct hns_roce_qp *hr_qp)
4737 struct hns_roce_sccc_clr_done *resp;
4738 struct hns_roce_sccc_clr *clr;
4739 struct hns_roce_cmq_desc desc;
4740 int ret, i;
4742 mutex_lock(&hr_dev->qp_table.scc_mutex);
4744 /* set scc ctx clear done flag */
4745 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
4746 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4747 if (ret) {
4748 dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret);
4749 goto out;
4752 /* clear scc context */
4753 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CLR_SCCC, false);
4754 clr = (struct hns_roce_sccc_clr *)desc.data;
4755 clr->qpn = cpu_to_le32(hr_qp->qpn);
4756 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4757 if (ret) {
4758 dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret);
4759 goto out;
4762 /* query whether the scc context clear is done */
4763 resp = (struct hns_roce_sccc_clr_done *)desc.data;
4764 for (i = 0; i <= HNS_ROCE_CMQ_SCC_CLR_DONE_CNT; i++) {
4765 hns_roce_cmq_setup_basic_desc(&desc,
4766 HNS_ROCE_OPC_QUERY_SCCC, true);
4767 ret = hns_roce_cmq_send(hr_dev, &desc, 1);
4768 if (ret) {
4769 dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret);
4770 goto out;
4773 if (resp->clr_done)
4774 goto out;
4776 msleep(20);
4779 dev_err(hr_dev->dev, "Query SCC clr done flag timed out.\n");
4780 ret = -ETIMEDOUT;
4782 out:
4783 mutex_unlock(&hr_dev->qp_table.scc_mutex);
4784 return ret;
4787 static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
4789 struct hns_roce_dev *hr_dev = to_hr_dev(cq->device);
4790 struct hns_roce_v2_cq_context *cq_context;
4791 struct hns_roce_cq *hr_cq = to_hr_cq(cq);
4792 struct hns_roce_v2_cq_context *cqc_mask;
4793 struct hns_roce_cmd_mailbox *mailbox;
4794 int ret;
4796 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
4797 if (IS_ERR(mailbox))
4798 return PTR_ERR(mailbox);
4800 cq_context = mailbox->buf;
4801 cqc_mask = (struct hns_roce_v2_cq_context *)mailbox->buf + 1;
4803 memset(cqc_mask, 0xff, sizeof(*cqc_mask));
4805 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4806 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4807 cq_count);
4808 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4809 V2_CQC_BYTE_56_CQ_MAX_CNT_M, V2_CQC_BYTE_56_CQ_MAX_CNT_S,
4810 0);
4811 roce_set_field(cq_context->byte_56_cqe_period_maxcnt,
4812 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4813 cq_period);
4814 roce_set_field(cqc_mask->byte_56_cqe_period_maxcnt,
4815 V2_CQC_BYTE_56_CQ_PERIOD_M, V2_CQC_BYTE_56_CQ_PERIOD_S,
4816 0);
4818 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 1,
4819 HNS_ROCE_CMD_MODIFY_CQC,
4820 HNS_ROCE_CMD_TIMEOUT_MSECS);
4821 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
4822 if (ret)
4823 dev_err(hr_dev->dev, "MODIFY CQ failed to send cmd mailbox.\n");
4825 return ret;
4828 static void hns_roce_set_qps_to_err(struct hns_roce_dev *hr_dev, u32 qpn)
4830 struct hns_roce_qp *hr_qp;
4831 struct ib_qp_attr attr;
4832 int attr_mask;
4833 int ret;
4835 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
4836 if (!hr_qp) {
4837 dev_warn(hr_dev->dev, "no hr_qp can be found!\n");
4838 return;
4841 if (hr_qp->ibqp.uobject) {
4842 if (hr_qp->sdb_en == 1) {
4843 hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
4844 if (hr_qp->rdb_en == 1)
4845 hr_qp->rq.head = *(int *)(hr_qp->rdb.virt_addr);
4846 } else {
4847 dev_warn(hr_dev->dev, "flush cqe is unsupported in userspace!\n");
4848 return;
4852 attr_mask = IB_QP_STATE;
4853 attr.qp_state = IB_QPS_ERR;
4854 ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, &attr, attr_mask,
4855 hr_qp->state, IB_QPS_ERR);
4856 if (ret)
4857 dev_err(hr_dev->dev, "failed to modify qp %d to err state.\n",
4858 qpn);
4861 static void hns_roce_irq_work_handle(struct work_struct *work)
4863 struct hns_roce_work *irq_work =
4864 container_of(work, struct hns_roce_work, work);
4865 struct device *dev = irq_work->hr_dev->dev;
4866 u32 qpn = irq_work->qpn;
4867 u32 cqn = irq_work->cqn;
4869 switch (irq_work->event_type) {
4870 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
4871 dev_info(dev, "Path migration succeeded.\n");
4872 break;
4873 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
4874 dev_warn(dev, "Path migration failed.\n");
4875 break;
4876 case HNS_ROCE_EVENT_TYPE_COMM_EST:
4877 break;
4878 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
4879 dev_warn(dev, "Send queue drained.\n");
4880 break;
4881 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
4882 dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n",
4883 qpn, irq_work->sub_type);
4884 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4885 break;
4886 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
4887 dev_err(dev, "Invalid request local work queue 0x%x error.\n",
4888 qpn);
4889 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4890 break;
4891 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
4892 dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n",
4893 qpn, irq_work->sub_type);
4894 hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
4895 break;
4896 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
4897 dev_warn(dev, "SRQ limit reached.\n");
4898 break;
4899 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
4900 dev_warn(dev, "SRQ last wqe reached.\n");
4901 break;
4902 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
4903 dev_err(dev, "SRQ catas error.\n");
4904 break;
4905 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
4906 dev_err(dev, "CQ 0x%x access err.\n", cqn);
4907 break;
4908 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
4909 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
4910 break;
4911 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
4912 dev_warn(dev, "DB overflow.\n");
4913 break;
4914 case HNS_ROCE_EVENT_TYPE_FLR:
4915 dev_warn(dev, "Function level reset.\n");
4916 break;
4917 default:
4918 break;
4921 kfree(irq_work);
4924 static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
4925 struct hns_roce_eq *eq,
4926 u32 qpn, u32 cqn)
4928 struct hns_roce_work *irq_work;
4930 irq_work = kzalloc(sizeof(struct hns_roce_work), GFP_ATOMIC);
4931 if (!irq_work)
4932 return;
4934 INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
4935 irq_work->hr_dev = hr_dev;
4936 irq_work->qpn = qpn;
4937 irq_work->cqn = cqn;
4938 irq_work->event_type = eq->event_type;
4939 irq_work->sub_type = eq->sub_type;
4940 queue_work(hr_dev->irq_workq, &(irq_work->work));
4941 }
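/*
 * EQ doorbell layout: word 0 carries the doorbell command (and the EQN
 * tag for completion EQs), word 1 the consumer index; the arm state
 * picks the armed or non-armed variant of the command.
 */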
4943 static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
4945 struct hns_roce_dev *hr_dev = eq->hr_dev;
4946 __le32 doorbell[2] = {};
4948 if (eq->type_flag == HNS_ROCE_AEQ) {
4949 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4950 HNS_ROCE_V2_EQ_DB_CMD_S,
4951 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4952 HNS_ROCE_EQ_DB_CMD_AEQ :
4953 HNS_ROCE_EQ_DB_CMD_AEQ_ARMED);
4954 } else {
4955 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_TAG_M,
4956 HNS_ROCE_V2_EQ_DB_TAG_S, eq->eqn);
4958 roce_set_field(doorbell[0], HNS_ROCE_V2_EQ_DB_CMD_M,
4959 HNS_ROCE_V2_EQ_DB_CMD_S,
4960 eq->arm_st == HNS_ROCE_V2_EQ_ALWAYS_ARMED ?
4961 HNS_ROCE_EQ_DB_CMD_CEQ :
4962 HNS_ROCE_EQ_DB_CMD_CEQ_ARMED);
4965 roce_set_field(doorbell[1], HNS_ROCE_V2_EQ_DB_PARA_M,
4966 HNS_ROCE_V2_EQ_DB_PARA_S,
4967 (eq->cons_index & HNS_ROCE_V2_CONS_IDX_M));
4969 hns_roce_write64(hr_dev, doorbell, eq->doorbell);
4972 static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
4974 u32 buf_chk_sz;
4975 unsigned long off;
4977 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4978 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4980 return (struct hns_roce_aeqe *)((char *)(eq->buf_list->buf) +
4981 off % buf_chk_sz);
4984 static struct hns_roce_aeqe *mhop_get_aeqe(struct hns_roce_eq *eq, u32 entry)
4986 u32 buf_chk_sz;
4987 unsigned long off;
4989 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
4991 off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQ_ENTRY_SIZE;
4993 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
4994 return (struct hns_roce_aeqe *)((u8 *)(eq->bt_l0) +
4995 off % buf_chk_sz);
4996 else
4997 return (struct hns_roce_aeqe *)((u8 *)
4998 (eq->buf[off / buf_chk_sz]) + off % buf_chk_sz);
4999 }
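/*
 * Owner-bit scheme: the consumer index counts over 2 * entries, so its
 * "entries" bit flips on every wrap; an EQE is valid only while its
 * owner bit differs from that phase bit.
 */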
5001 static struct hns_roce_aeqe *next_aeqe_sw_v2(struct hns_roce_eq *eq)
5003 struct hns_roce_aeqe *aeqe;
5005 if (!eq->hop_num)
5006 aeqe = get_aeqe_v2(eq, eq->cons_index);
5007 else
5008 aeqe = mhop_get_aeqe(eq, eq->cons_index);
5010 return (roce_get_bit(aeqe->asyn, HNS_ROCE_V2_AEQ_AEQE_OWNER_S) ^
5011 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
5014 static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
5015 struct hns_roce_eq *eq)
5017 struct device *dev = hr_dev->dev;
5018 struct hns_roce_aeqe *aeqe = next_aeqe_sw_v2(eq);
5019 int aeqe_found = 0;
5020 int event_type;
5021 int sub_type;
5022 u32 srqn;
5023 u32 qpn;
5024 u32 cqn;
5026 while (aeqe) {
5027 /* Make sure we read AEQ entry after we have checked the
5028 * ownership bit.
5029 */
5030 dma_rmb();
5032 event_type = roce_get_field(aeqe->asyn,
5033 HNS_ROCE_V2_AEQE_EVENT_TYPE_M,
5034 HNS_ROCE_V2_AEQE_EVENT_TYPE_S);
5035 sub_type = roce_get_field(aeqe->asyn,
5036 HNS_ROCE_V2_AEQE_SUB_TYPE_M,
5037 HNS_ROCE_V2_AEQE_SUB_TYPE_S);
5038 qpn = roce_get_field(aeqe->event.qp_event.qp,
5039 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5040 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5041 cqn = roce_get_field(aeqe->event.cq_event.cq,
5042 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5043 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5044 srqn = roce_get_field(aeqe->event.srq_event.srq,
5045 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M,
5046 HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S);
5048 switch (event_type) {
5049 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
5050 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
5051 case HNS_ROCE_EVENT_TYPE_COMM_EST:
5052 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
5053 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
5054 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
5055 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
5056 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
5057 hns_roce_qp_event(hr_dev, qpn, event_type);
5058 break;
5059 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
5060 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
5061 hns_roce_srq_event(hr_dev, srqn, event_type);
5062 break;
5063 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
5064 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
5065 hns_roce_cq_event(hr_dev, cqn, event_type);
5066 break;
5067 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
5068 break;
5069 case HNS_ROCE_EVENT_TYPE_MB:
5070 hns_roce_cmd_event(hr_dev,
5071 le16_to_cpu(aeqe->event.cmd.token),
5072 aeqe->event.cmd.status,
5073 le64_to_cpu(aeqe->event.cmd.out_param));
5074 break;
5075 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
5076 break;
5077 case HNS_ROCE_EVENT_TYPE_FLR:
5078 break;
5079 default:
5080 dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
5081 event_type, eq->eqn, eq->cons_index);
5082 break;
5085 eq->event_type = event_type;
5086 eq->sub_type = sub_type;
5087 ++eq->cons_index;
5088 aeqe_found = 1;
5090 if (eq->cons_index > (2 * eq->entries - 1))
5091 eq->cons_index = 0;
5093 hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
5095 aeqe = next_aeqe_sw_v2(eq);
5098 set_eq_cons_index_v2(eq);
5099 return aeqe_found;
5102 static struct hns_roce_ceqe *get_ceqe_v2(struct hns_roce_eq *eq, u32 entry)
5104 u32 buf_chk_sz;
5105 unsigned long off;
5107 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5108 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5110 return (struct hns_roce_ceqe *)((char *)(eq->buf_list->buf) +
5111 off % buf_chk_sz);
5114 static struct hns_roce_ceqe *mhop_get_ceqe(struct hns_roce_eq *eq, u32 entry)
5116 u32 buf_chk_sz;
5117 unsigned long off;
5119 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5121 off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQ_ENTRY_SIZE;
5123 if (eq->hop_num == HNS_ROCE_HOP_NUM_0)
5124 return (struct hns_roce_ceqe *)((u8 *)(eq->bt_l0) +
5125 off % buf_chk_sz);
5126 else
5127 return (struct hns_roce_ceqe *)((u8 *)(eq->buf[off /
5128 buf_chk_sz]) + off % buf_chk_sz);
5131 static struct hns_roce_ceqe *next_ceqe_sw_v2(struct hns_roce_eq *eq)
5133 struct hns_roce_ceqe *ceqe;
5135 if (!eq->hop_num)
5136 ceqe = get_ceqe_v2(eq, eq->cons_index);
5137 else
5138 ceqe = mhop_get_ceqe(eq, eq->cons_index);
5140 return (!!(roce_get_bit(ceqe->comp, HNS_ROCE_V2_CEQ_CEQE_OWNER_S))) ^
5141 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
5144 static int hns_roce_v2_ceq_int(struct hns_roce_dev *hr_dev,
5145 struct hns_roce_eq *eq)
5147 struct device *dev = hr_dev->dev;
5148 struct hns_roce_ceqe *ceqe = next_ceqe_sw_v2(eq);
5149 int ceqe_found = 0;
5150 u32 cqn;
5152 while (ceqe) {
5153 /* Make sure we read CEQ entry after we have checked the
5154 * ownership bit.
5155 */
5156 dma_rmb();
5158 cqn = roce_get_field(ceqe->comp,
5159 HNS_ROCE_V2_CEQE_COMP_CQN_M,
5160 HNS_ROCE_V2_CEQE_COMP_CQN_S);
5162 hns_roce_cq_completion(hr_dev, cqn);
5164 ++eq->cons_index;
5165 ceqe_found = 1;
5167 if (eq->cons_index > (EQ_DEPTH_COEFF * eq->entries - 1)) {
5168 dev_warn(dev, "cons_index overflow, set back to 0.\n");
5169 eq->cons_index = 0;
5172 ceqe = next_ceqe_sw_v2(eq);
5175 set_eq_cons_index_v2(eq);
5177 return ceqe_found;
5180 static irqreturn_t hns_roce_v2_msix_interrupt_eq(int irq, void *eq_ptr)
5182 struct hns_roce_eq *eq = eq_ptr;
5183 struct hns_roce_dev *hr_dev = eq->hr_dev;
5184 int int_work = 0;
5186 if (eq->type_flag == HNS_ROCE_CEQ)
5187 /* Completion event interrupt */
5188 int_work = hns_roce_v2_ceq_int(hr_dev, eq);
5189 else
5190 /* Asynchronous event interrupt */
5191 int_work = hns_roce_v2_aeq_int(hr_dev, eq);
5193 return IRQ_RETVAL(int_work);
5196 static irqreturn_t hns_roce_v2_msix_interrupt_abn(int irq, void *dev_id)
5198 struct hns_roce_dev *hr_dev = dev_id;
5199 struct device *dev = hr_dev->dev;
5200 int int_work = 0;
5201 u32 int_st;
5202 u32 int_en;
5204 /* Abnormal interrupt */
5205 int_st = roce_read(hr_dev, ROCEE_VF_ABN_INT_ST_REG);
5206 int_en = roce_read(hr_dev, ROCEE_VF_ABN_INT_EN_REG);
5208 if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S)) {
5209 struct pci_dev *pdev = hr_dev->pci_dev;
5210 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
5211 const struct hnae3_ae_ops *ops = ae_dev->ops;
5213 dev_err(dev, "AEQ overflow!\n");
5215 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_AEQ_OVERFLOW_S;
5216 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5218 /* Set reset level for reset_event() */
5219 if (ops->set_default_reset_request)
5220 ops->set_default_reset_request(ae_dev,
5221 HNAE3_FUNC_RESET);
5222 if (ops->reset_event)
5223 ops->reset_event(pdev, NULL);
5225 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5226 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5228 int_work = 1;
5229 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S)) {
5230 dev_err(dev, "BUS ERR!\n");
5232 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_BUS_ERR_S;
5233 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5235 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5236 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5238 int_work = 1;
5239 } else if (int_st & BIT(HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S)) {
5240 dev_err(dev, "OTHER ERR!\n");
5242 int_st |= 1 << HNS_ROCE_V2_VF_INT_ST_OTHER_ERR_S;
5243 roce_write(hr_dev, ROCEE_VF_ABN_INT_ST_REG, int_st);
5245 int_en |= 1 << HNS_ROCE_V2_VF_ABN_INT_EN_S;
5246 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG, int_en);
5248 int_work = 1;
5249 } else
5250 dev_err(dev, "There is no abnormal irq found!\n");
5252 return IRQ_RETVAL(int_work);
5255 static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
5256 int eq_num, int enable_flag)
5258 int i;
5260 if (enable_flag == EQ_ENABLE) {
5261 for (i = 0; i < eq_num; i++)
5262 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5263 i * EQ_REG_OFFSET,
5264 HNS_ROCE_V2_VF_EVENT_INT_EN_M);
5266 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5267 HNS_ROCE_V2_VF_ABN_INT_EN_M);
5268 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5269 HNS_ROCE_V2_VF_ABN_INT_CFG_M);
5270 } else {
5271 for (i = 0; i < eq_num; i++)
5272 roce_write(hr_dev, ROCEE_VF_EVENT_INT_EN_REG +
5273 i * EQ_REG_OFFSET,
5274 HNS_ROCE_V2_VF_EVENT_INT_EN_M & 0x0);
5276 roce_write(hr_dev, ROCEE_VF_ABN_INT_EN_REG,
5277 HNS_ROCE_V2_VF_ABN_INT_EN_M & 0x0);
5278 roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG,
5279 HNS_ROCE_V2_VF_ABN_INT_CFG_M & 0x0);
5283 static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
5285 struct device *dev = hr_dev->dev;
5286 int ret;
5288 if (eqn < hr_dev->caps.num_comp_vectors)
5289 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5290 0, HNS_ROCE_CMD_DESTROY_CEQC,
5291 HNS_ROCE_CMD_TIMEOUT_MSECS);
5292 else
5293 ret = hns_roce_cmd_mbox(hr_dev, 0, 0, eqn & HNS_ROCE_V2_EQN_M,
5294 0, HNS_ROCE_CMD_DESTROY_AEQC,
5295 HNS_ROCE_CMD_TIMEOUT_MSECS);
5296 if (ret)
5297 dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
5300 static void hns_roce_mhop_free_eq(struct hns_roce_dev *hr_dev,
5301 struct hns_roce_eq *eq)
5303 struct device *dev = hr_dev->dev;
5304 u64 idx;
5305 u64 size;
5306 u32 buf_chk_sz;
5307 u32 bt_chk_sz;
5308 u32 mhop_num;
5309 int eqe_alloc;
5310 int i = 0;
5311 int j = 0;
5313 mhop_num = hr_dev->caps.eqe_hop_num;
5314 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5315 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5317 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5318 dma_free_coherent(dev, (unsigned int)(eq->entries *
5319 eq->eqe_size), eq->bt_l0, eq->l0_dma);
5320 return;
5323 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5324 if (mhop_num == 1) {
5325 for (i = 0; i < eq->l0_last_num; i++) {
5326 if (i == eq->l0_last_num - 1) {
5327 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5328 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5329 dma_free_coherent(dev, size, eq->buf[i],
5330 eq->buf_dma[i]);
5331 break;
5333 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5334 eq->buf_dma[i]);
5336 } else if (mhop_num == 2) {
5337 for (i = 0; i < eq->l0_last_num; i++) {
5338 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5339 eq->l1_dma[i]);
5341 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5342 idx = i * (bt_chk_sz / BA_BYTE_LEN) + j;
5343 if ((i == eq->l0_last_num - 1)
5344 && j == eq->l1_last_num - 1) {
5345 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5346 * idx;
5347 size = (eq->entries - eqe_alloc)
5348 * eq->eqe_size;
5349 dma_free_coherent(dev, size,
5350 eq->buf[idx],
5351 eq->buf_dma[idx]);
5352 break;
5354 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5355 eq->buf_dma[idx]);
5359 kfree(eq->buf_dma);
5360 kfree(eq->buf);
5361 kfree(eq->l1_dma);
5362 kfree(eq->bt_l1);
5363 eq->buf_dma = NULL;
5364 eq->buf = NULL;
5365 eq->l1_dma = NULL;
5366 eq->bt_l1 = NULL;
5369 static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
5370 struct hns_roce_eq *eq)
5372 u32 buf_chk_sz;
5374 buf_chk_sz = 1 << (eq->eqe_buf_pg_sz + PAGE_SHIFT);
5376 if (hr_dev->caps.eqe_hop_num) {
5377 hns_roce_mhop_free_eq(hr_dev, eq);
5378 return;
5381 dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
5382 eq->buf_list->map);
5383 kfree(eq->buf_list);
5386 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
5387 struct hns_roce_eq *eq,
5388 void *mb_buf)
5390 struct hns_roce_eq_context *eqc;
5392 eqc = mb_buf;
5393 memset(eqc, 0, sizeof(struct hns_roce_eq_context));
5395 /* init eqc */
5396 eq->doorbell = hr_dev->reg_base + ROCEE_VF_EQ_DB_CFG0_REG;
5397 eq->hop_num = hr_dev->caps.eqe_hop_num;
5398 eq->cons_index = 0;
5399 eq->over_ignore = HNS_ROCE_V2_EQ_OVER_IGNORE_0;
5400 eq->coalesce = HNS_ROCE_V2_EQ_COALESCE_0;
5401 eq->arm_st = HNS_ROCE_V2_EQ_ALWAYS_ARMED;
5402 eq->eqe_ba_pg_sz = hr_dev->caps.eqe_ba_pg_sz;
5403 eq->eqe_buf_pg_sz = hr_dev->caps.eqe_buf_pg_sz;
5404 eq->shift = ilog2((unsigned int)eq->entries);
5406 if (!eq->hop_num)
5407 eq->eqe_ba = eq->buf_list->map;
5408 else
5409 eq->eqe_ba = eq->l0_dma;
5411 /* set eqc state */
5412 roce_set_field(eqc->byte_4,
5413 HNS_ROCE_EQC_EQ_ST_M,
5414 HNS_ROCE_EQC_EQ_ST_S,
5415 HNS_ROCE_V2_EQ_STATE_VALID);
5417 /* set eqe hop num */
5418 roce_set_field(eqc->byte_4,
5419 HNS_ROCE_EQC_HOP_NUM_M,
5420 HNS_ROCE_EQC_HOP_NUM_S, eq->hop_num);
5422 /* set eqc over_ignore */
5423 roce_set_field(eqc->byte_4,
5424 HNS_ROCE_EQC_OVER_IGNORE_M,
5425 HNS_ROCE_EQC_OVER_IGNORE_S, eq->over_ignore);
5427 /* set eqc coalesce */
5428 roce_set_field(eqc->byte_4,
5429 HNS_ROCE_EQC_COALESCE_M,
5430 HNS_ROCE_EQC_COALESCE_S, eq->coalesce);
5432 /* set eqc arm_state */
5433 roce_set_field(eqc->byte_4,
5434 HNS_ROCE_EQC_ARM_ST_M,
5435 HNS_ROCE_EQC_ARM_ST_S, eq->arm_st);
5437 /* set eqn */
5438 roce_set_field(eqc->byte_4,
5439 HNS_ROCE_EQC_EQN_M,
5440 HNS_ROCE_EQC_EQN_S, eq->eqn);
5442 /* set eqe_cnt */
5443 roce_set_field(eqc->byte_4,
5444 HNS_ROCE_EQC_EQE_CNT_M,
5445 HNS_ROCE_EQC_EQE_CNT_S,
5446 HNS_ROCE_EQ_INIT_EQE_CNT);
5448 /* set eqe_ba_pg_sz */
5449 roce_set_field(eqc->byte_8,
5450 HNS_ROCE_EQC_BA_PG_SZ_M,
5451 HNS_ROCE_EQC_BA_PG_SZ_S,
5452 eq->eqe_ba_pg_sz + PG_SHIFT_OFFSET);
5454 /* set eqe_buf_pg_sz */
5455 roce_set_field(eqc->byte_8,
5456 HNS_ROCE_EQC_BUF_PG_SZ_M,
5457 HNS_ROCE_EQC_BUF_PG_SZ_S,
5458 eq->eqe_buf_pg_sz + PG_SHIFT_OFFSET);
5460 /* set eq_producer_idx */
5461 roce_set_field(eqc->byte_8,
5462 HNS_ROCE_EQC_PROD_INDX_M,
5463 HNS_ROCE_EQC_PROD_INDX_S,
5464 HNS_ROCE_EQ_INIT_PROD_IDX);
5466 /* set eq_max_cnt */
5467 roce_set_field(eqc->byte_12,
5468 HNS_ROCE_EQC_MAX_CNT_M,
5469 HNS_ROCE_EQC_MAX_CNT_S, eq->eq_max_cnt);
5471 /* set eq_period */
5472 roce_set_field(eqc->byte_12,
5473 HNS_ROCE_EQC_PERIOD_M,
5474 HNS_ROCE_EQC_PERIOD_S, eq->eq_period);
5476 /* set eqe_report_timer */
5477 roce_set_field(eqc->eqe_report_timer,
5478 HNS_ROCE_EQC_REPORT_TIMER_M,
5479 HNS_ROCE_EQC_REPORT_TIMER_S,
5480 HNS_ROCE_EQ_INIT_REPORT_TIMER);
5482 /* set eqe_ba [34:3] */
5483 roce_set_field(eqc->eqe_ba0,
5484 HNS_ROCE_EQC_EQE_BA_L_M,
5485 HNS_ROCE_EQC_EQE_BA_L_S, eq->eqe_ba >> 3);
5487         /* set eqe_ba [63:35] */
5488 roce_set_field(eqc->eqe_ba1,
5489 HNS_ROCE_EQC_EQE_BA_H_M,
5490 HNS_ROCE_EQC_EQE_BA_H_S, eq->eqe_ba >> 35);
5492 /* set eq shift */
5493 roce_set_field(eqc->byte_28,
5494 HNS_ROCE_EQC_SHIFT_M,
5495 HNS_ROCE_EQC_SHIFT_S, eq->shift);
5497 /* set eq MSI_IDX */
5498 roce_set_field(eqc->byte_28,
5499 HNS_ROCE_EQC_MSI_INDX_M,
5500 HNS_ROCE_EQC_MSI_INDX_S,
5501 HNS_ROCE_EQ_INIT_MSI_IDX);
5503 /* set cur_eqe_ba [27:12] */
5504 roce_set_field(eqc->byte_28,
5505 HNS_ROCE_EQC_CUR_EQE_BA_L_M,
5506 HNS_ROCE_EQC_CUR_EQE_BA_L_S, eq->cur_eqe_ba >> 12);
5508 /* set cur_eqe_ba [59:28] */
5509 roce_set_field(eqc->byte_32,
5510 HNS_ROCE_EQC_CUR_EQE_BA_M_M,
5511 HNS_ROCE_EQC_CUR_EQE_BA_M_S, eq->cur_eqe_ba >> 28);
5513 /* set cur_eqe_ba [63:60] */
5514 roce_set_field(eqc->byte_36,
5515 HNS_ROCE_EQC_CUR_EQE_BA_H_M,
5516 HNS_ROCE_EQC_CUR_EQE_BA_H_S, eq->cur_eqe_ba >> 60);
5518 /* set eq consumer idx */
5519 roce_set_field(eqc->byte_36,
5520 HNS_ROCE_EQC_CONS_INDX_M,
5521 HNS_ROCE_EQC_CONS_INDX_S,
5522 HNS_ROCE_EQ_INIT_CONS_IDX);
5524         /* set nxt_eqe_ba[43:12] */
5525 roce_set_field(eqc->nxt_eqe_ba0,
5526 HNS_ROCE_EQC_NXT_EQE_BA_L_M,
5527 HNS_ROCE_EQC_NXT_EQE_BA_L_S, eq->nxt_eqe_ba >> 12);
5529         /* set nxt_eqe_ba[63:44] */
5530 roce_set_field(eqc->nxt_eqe_ba1,
5531 HNS_ROCE_EQC_NXT_EQE_BA_H_M,
5532 HNS_ROCE_EQC_NXT_EQE_BA_H_S, eq->nxt_eqe_ba >> 44);
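/*
 * Editorial sketch (hypothetical helper, not used by the driver): every
 * roce_set_field() call above is the same read-modify-write mask/shift
 * pattern on a little-endian context word.  Assuming the macro
 * definition in hns_roce_common.h, it behaves like:
 */
static inline void eqc_set_field_sketch(__le32 *word, u32 mask, u32 shift,
					u32 val)
{
	*word &= ~cpu_to_le32(mask);			/* clear the field */
	*word |= cpu_to_le32((val << shift) & mask);	/* insert the value */
}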
5535 static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
5536 struct hns_roce_eq *eq)
5538 struct device *dev = hr_dev->dev;
5539 int eq_alloc_done = 0;
5540 int eq_buf_cnt = 0;
5541 int eqe_alloc;
5542 u32 buf_chk_sz;
5543 u32 bt_chk_sz;
5544 u32 mhop_num;
5545 u64 size;
5546 u64 idx;
5547 int ba_num;
5548 int bt_num;
5549 int record_i;
5550 int record_j;
5551 int i = 0;
5552 int j = 0;
5554 mhop_num = hr_dev->caps.eqe_hop_num;
5555 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5556 bt_chk_sz = 1 << (hr_dev->caps.eqe_ba_pg_sz + PAGE_SHIFT);
5558 ba_num = DIV_ROUND_UP(PAGE_ALIGN(eq->entries * eq->eqe_size),
5559 buf_chk_sz);
5560 bt_num = DIV_ROUND_UP(ba_num, bt_chk_sz / BA_BYTE_LEN);
5562 if (mhop_num == HNS_ROCE_HOP_NUM_0) {
5563 if (eq->entries > buf_chk_sz / eq->eqe_size) {
5564                         dev_err(dev, "eq entries %d is larger than buf_pg_sz!\n",
5565 eq->entries);
5566 return -EINVAL;
5568 eq->bt_l0 = dma_alloc_coherent(dev, eq->entries * eq->eqe_size,
5569 &(eq->l0_dma), GFP_KERNEL);
5570 if (!eq->bt_l0)
5571 return -ENOMEM;
5573 eq->cur_eqe_ba = eq->l0_dma;
5574 eq->nxt_eqe_ba = 0;
5576 return 0;
5579 eq->buf_dma = kcalloc(ba_num, sizeof(*eq->buf_dma), GFP_KERNEL);
5580 if (!eq->buf_dma)
5581 return -ENOMEM;
5582 eq->buf = kcalloc(ba_num, sizeof(*eq->buf), GFP_KERNEL);
5583 if (!eq->buf)
5584 goto err_kcalloc_buf;
5586 if (mhop_num == 2) {
5587 eq->l1_dma = kcalloc(bt_num, sizeof(*eq->l1_dma), GFP_KERNEL);
5588 if (!eq->l1_dma)
5589 goto err_kcalloc_l1_dma;
5591 eq->bt_l1 = kcalloc(bt_num, sizeof(*eq->bt_l1), GFP_KERNEL);
5592 if (!eq->bt_l1)
5593 goto err_kcalloc_bt_l1;
5596 /* alloc L0 BT */
5597 eq->bt_l0 = dma_alloc_coherent(dev, bt_chk_sz, &eq->l0_dma, GFP_KERNEL);
5598 if (!eq->bt_l0)
5599 goto err_dma_alloc_l0;
5601 if (mhop_num == 1) {
5602 if (ba_num > (bt_chk_sz / BA_BYTE_LEN))
5603 dev_err(dev, "ba_num %d is too large for 1 hop\n",
5604 ba_num);
5606 /* alloc buf */
5607 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5608 if (eq_buf_cnt + 1 < ba_num) {
5609 size = buf_chk_sz;
5610 } else {
5611 eqe_alloc = i * (buf_chk_sz / eq->eqe_size);
5612 size = (eq->entries - eqe_alloc) * eq->eqe_size;
5614 eq->buf[i] = dma_alloc_coherent(dev, size,
5615 &(eq->buf_dma[i]),
5616 GFP_KERNEL);
5617 if (!eq->buf[i])
5618 goto err_dma_alloc_buf;
5620 *(eq->bt_l0 + i) = eq->buf_dma[i];
5622 eq_buf_cnt++;
5623 if (eq_buf_cnt >= ba_num)
5624 break;
5626 eq->cur_eqe_ba = eq->buf_dma[0];
5627 if (ba_num > 1)
5628 eq->nxt_eqe_ba = eq->buf_dma[1];
5630 } else if (mhop_num == 2) {
5631 /* alloc L1 BT and buf */
5632 for (i = 0; i < bt_chk_sz / BA_BYTE_LEN; i++) {
5633 eq->bt_l1[i] = dma_alloc_coherent(dev, bt_chk_sz,
5634 &(eq->l1_dma[i]),
5635 GFP_KERNEL);
5636 if (!eq->bt_l1[i])
5637 goto err_dma_alloc_l1;
5638 *(eq->bt_l0 + i) = eq->l1_dma[i];
5640 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5641 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5642 if (eq_buf_cnt + 1 < ba_num) {
5643 size = buf_chk_sz;
5644 } else {
5645 eqe_alloc = (buf_chk_sz / eq->eqe_size)
5646 * idx;
5647 size = (eq->entries - eqe_alloc)
5648 * eq->eqe_size;
5650 eq->buf[idx] = dma_alloc_coherent(dev, size,
5651 &(eq->buf_dma[idx]),
5652 GFP_KERNEL);
5653 if (!eq->buf[idx])
5654 goto err_dma_alloc_buf;
5656 *(eq->bt_l1[i] + j) = eq->buf_dma[idx];
5658 eq_buf_cnt++;
5659 if (eq_buf_cnt >= ba_num) {
5660 eq_alloc_done = 1;
5661 break;
5665 if (eq_alloc_done)
5666 break;
5668 eq->cur_eqe_ba = eq->buf_dma[0];
5669 if (ba_num > 1)
5670 eq->nxt_eqe_ba = eq->buf_dma[1];
5673 eq->l0_last_num = i + 1;
5674 if (mhop_num == 2)
5675 eq->l1_last_num = j + 1;
5677 return 0;
5679 err_dma_alloc_l1:
5680 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5681 eq->bt_l0 = NULL;
5682 eq->l0_dma = 0;
5683 for (i -= 1; i >= 0; i--) {
5684 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5685 eq->l1_dma[i]);
5687 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5688 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5689 dma_free_coherent(dev, buf_chk_sz, eq->buf[idx],
5690 eq->buf_dma[idx]);
5693 goto err_dma_alloc_l0;
5695 err_dma_alloc_buf:
5696 dma_free_coherent(dev, bt_chk_sz, eq->bt_l0, eq->l0_dma);
5697 eq->bt_l0 = NULL;
5698 eq->l0_dma = 0;
5700 if (mhop_num == 1)
5701 for (i -= 1; i >= 0; i--)
5702 dma_free_coherent(dev, buf_chk_sz, eq->buf[i],
5703 eq->buf_dma[i]);
5704 else if (mhop_num == 2) {
5705 record_i = i;
5706 record_j = j;
5707 for (; i >= 0; i--) {
5708 dma_free_coherent(dev, bt_chk_sz, eq->bt_l1[i],
5709 eq->l1_dma[i]);
5711 for (j = 0; j < bt_chk_sz / BA_BYTE_LEN; j++) {
5712 if (i == record_i && j >= record_j)
5713 break;
5715 idx = i * bt_chk_sz / BA_BYTE_LEN + j;
5716 dma_free_coherent(dev, buf_chk_sz,
5717 eq->buf[idx],
5718 eq->buf_dma[idx]);
5723 err_dma_alloc_l0:
5724 kfree(eq->bt_l1);
5725 eq->bt_l1 = NULL;
5727 err_kcalloc_bt_l1:
5728 kfree(eq->l1_dma);
5729 eq->l1_dma = NULL;
5731 err_kcalloc_l1_dma:
5732 kfree(eq->buf);
5733 eq->buf = NULL;
5735 err_kcalloc_buf:
5736 kfree(eq->buf_dma);
5737 eq->buf_dma = NULL;
5739 return -ENOMEM;
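/*
 * Editorial sketch of the sizing math above, assuming 4 KB chunks and
 * BA_BYTE_LEN == 8: one hop is limited to bt_chk_sz / BA_BYTE_LEN ==
 * 512 buffer chunks addressed from the single L0 table (hence the
 * ba_num check), while two hops add up to 512 L1 tables for as many as
 * 512 * 512 == 262144 chunks.  For instance, 4096 entries of 16 bytes
 * give ba_num == DIV_ROUND_UP(65536, 4096) == 16 and bt_num ==
 * DIV_ROUND_UP(16, 512) == 1.
 */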
5742 static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
5743 struct hns_roce_eq *eq,
5744 unsigned int eq_cmd)
5746 struct device *dev = hr_dev->dev;
5747 struct hns_roce_cmd_mailbox *mailbox;
5748 u32 buf_chk_sz = 0;
5749 int ret;
5751 /* Allocate mailbox memory */
5752 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
5753 if (IS_ERR(mailbox))
5754 return PTR_ERR(mailbox);
5756 if (!hr_dev->caps.eqe_hop_num) {
5757 buf_chk_sz = 1 << (hr_dev->caps.eqe_buf_pg_sz + PAGE_SHIFT);
5759 eq->buf_list = kzalloc(sizeof(struct hns_roce_buf_list),
5760 GFP_KERNEL);
5761 if (!eq->buf_list) {
5762 ret = -ENOMEM;
5763 goto free_cmd_mbox;
5766 eq->buf_list->buf = dma_alloc_coherent(dev, buf_chk_sz,
5767 &(eq->buf_list->map),
5768 GFP_KERNEL);
5769 if (!eq->buf_list->buf) {
5770 ret = -ENOMEM;
5771 goto err_alloc_buf;
5774 } else {
5775 ret = hns_roce_mhop_alloc_eq(hr_dev, eq);
5776 if (ret) {
5778 goto free_cmd_mbox;
5782 hns_roce_config_eqc(hr_dev, eq, mailbox->buf);
5784 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
5785 eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
5786 if (ret) {
5787 dev_err(dev, "[mailbox cmd] create eqc failed.\n");
5788 goto err_cmd_mbox;
5791 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5793 return 0;
5795 err_cmd_mbox:
5796 if (!hr_dev->caps.eqe_hop_num)
5797 dma_free_coherent(dev, buf_chk_sz, eq->buf_list->buf,
5798 eq->buf_list->map);
5799 else {
5800 hns_roce_mhop_free_eq(hr_dev, eq);
5801 goto free_cmd_mbox;
5804 err_alloc_buf:
5805 kfree(eq->buf_list);
5807 free_cmd_mbox:
5808 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
5810 return ret;
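/*
 * Editorial note: EQC creation above follows the standard hip08 mailbox
 * flow -- allocate a mailbox, build the context image in mailbox->buf
 * via hns_roce_config_eqc(), then post CREATE_CEQC/CREATE_AEQC with the
 * mailbox DMA address so the firmware installs the context for eq->eqn.
 */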
5813 static int __hns_roce_request_irq(struct hns_roce_dev *hr_dev, int irq_num,
5814 int comp_num, int aeq_num, int other_num)
5816 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5817 int i, j;
5818 int ret;
5820 for (i = 0; i < irq_num; i++) {
5821 hr_dev->irq_names[i] = kzalloc(HNS_ROCE_INT_NAME_LEN,
5822 GFP_KERNEL);
5823 if (!hr_dev->irq_names[i]) {
5824 ret = -ENOMEM;
5825 goto err_kzalloc_failed;
5829 /* irq contains: abnormal + AEQ + CEQ */
5830 for (j = 0; j < other_num; j++)
5831 snprintf((char *)hr_dev->irq_names[j],
5832 HNS_ROCE_INT_NAME_LEN, "hns-abn-%d", j);
5834 for (j = other_num; j < (other_num + aeq_num); j++)
5835 snprintf((char *)hr_dev->irq_names[j],
5836 HNS_ROCE_INT_NAME_LEN, "hns-aeq-%d",
5837 j - other_num);
5839 for (j = (other_num + aeq_num); j < irq_num; j++)
5840 snprintf((char *)hr_dev->irq_names[j],
5841 HNS_ROCE_INT_NAME_LEN, "hns-ceq-%d",
5842 j - other_num - aeq_num);
5844 for (j = 0; j < irq_num; j++) {
5845 if (j < other_num)
5846 ret = request_irq(hr_dev->irq[j],
5847 hns_roce_v2_msix_interrupt_abn,
5848 0, hr_dev->irq_names[j], hr_dev);
5850 else if (j < (other_num + comp_num))
5851 ret = request_irq(eq_table->eq[j - other_num].irq,
5852 hns_roce_v2_msix_interrupt_eq,
5853 0, hr_dev->irq_names[j + aeq_num],
5854 &eq_table->eq[j - other_num]);
5855 else
5856 ret = request_irq(eq_table->eq[j - other_num].irq,
5857 hns_roce_v2_msix_interrupt_eq,
5858 0, hr_dev->irq_names[j - comp_num],
5859 &eq_table->eq[j - other_num]);
5860 if (ret) {
5861 dev_err(hr_dev->dev, "Request irq error!\n");
5862 goto err_request_failed;
5866 return 0;
5868 err_request_failed:
5869 for (j -= 1; j >= 0; j--)
5870 if (j < other_num)
5871 free_irq(hr_dev->irq[j], hr_dev);
5872 else
5873 free_irq(eq_table->eq[j - other_num].irq,
5874 &eq_table->eq[j - other_num]);
5876 err_kzalloc_failed:
5877 for (i -= 1; i >= 0; i--)
5878 kfree(hr_dev->irq_names[i]);
5880 return ret;
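/*
 * Editorial note on the index juggling above: irq_names[] is laid out
 * abn, aeq, ceq while eq_table->eq[] is laid out ceq-first, so CEQ j
 * takes name slot j + aeq_num and AEQ j takes slot j - comp_num.  With
 * other_num == 1, aeq_num == 1 and comp_num == 2 the names are
 * "hns-abn-0", "hns-aeq-0", "hns-ceq-0", "hns-ceq-1"; eq[0] and eq[1]
 * (the CEQs) get slots 2 and 3, and eq[2] (the AEQ) gets slot 1.
 */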
5883 static void __hns_roce_free_irq(struct hns_roce_dev *hr_dev)
5885 int irq_num;
5886 int eq_num;
5887 int i;
5889 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
5890 irq_num = eq_num + hr_dev->caps.num_other_vectors;
5892 for (i = 0; i < hr_dev->caps.num_other_vectors; i++)
5893 free_irq(hr_dev->irq[i], hr_dev);
5895 for (i = 0; i < eq_num; i++)
5896 free_irq(hr_dev->eq_table.eq[i].irq, &hr_dev->eq_table.eq[i]);
5898 for (i = 0; i < irq_num; i++)
5899 kfree(hr_dev->irq_names[i]);
5902 static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
5904 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5905 struct device *dev = hr_dev->dev;
5906 struct hns_roce_eq *eq;
5907 unsigned int eq_cmd;
5908 int irq_num;
5909 int eq_num;
5910 int other_num;
5911 int comp_num;
5912 int aeq_num;
5913 int i;
5914 int ret;
5916 other_num = hr_dev->caps.num_other_vectors;
5917 comp_num = hr_dev->caps.num_comp_vectors;
5918 aeq_num = hr_dev->caps.num_aeq_vectors;
5920 eq_num = comp_num + aeq_num;
5921 irq_num = eq_num + other_num;
5923 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
5924 if (!eq_table->eq)
5925 return -ENOMEM;
5927 /* create eq */
5928 for (i = 0; i < eq_num; i++) {
5929 eq = &eq_table->eq[i];
5930 eq->hr_dev = hr_dev;
5931 eq->eqn = i;
5932 if (i < comp_num) {
5933 /* CEQ */
5934 eq_cmd = HNS_ROCE_CMD_CREATE_CEQC;
5935 eq->type_flag = HNS_ROCE_CEQ;
5936 eq->entries = hr_dev->caps.ceqe_depth;
5937 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
5938 eq->irq = hr_dev->irq[i + other_num + aeq_num];
5939 eq->eq_max_cnt = HNS_ROCE_CEQ_DEFAULT_BURST_NUM;
5940 eq->eq_period = HNS_ROCE_CEQ_DEFAULT_INTERVAL;
5941 } else {
5942 /* AEQ */
5943 eq_cmd = HNS_ROCE_CMD_CREATE_AEQC;
5944 eq->type_flag = HNS_ROCE_AEQ;
5945 eq->entries = hr_dev->caps.aeqe_depth;
5946 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
5947 eq->irq = hr_dev->irq[i - comp_num + other_num];
5948 eq->eq_max_cnt = HNS_ROCE_AEQ_DEFAULT_BURST_NUM;
5949 eq->eq_period = HNS_ROCE_AEQ_DEFAULT_INTERVAL;
5952 ret = hns_roce_v2_create_eq(hr_dev, eq, eq_cmd);
5953 if (ret) {
5954 dev_err(dev, "eq create failed.\n");
5955 goto err_create_eq_fail;
5959 /* enable irq */
5960 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_ENABLE);
5962 ret = __hns_roce_request_irq(hr_dev, irq_num, comp_num,
5963 aeq_num, other_num);
5964 if (ret) {
5965 dev_err(dev, "Request irq failed.\n");
5966 goto err_request_irq_fail;
5969 hr_dev->irq_workq =
5970 create_singlethread_workqueue("hns_roce_irq_workqueue");
5971 if (!hr_dev->irq_workq) {
5972 dev_err(dev, "Create irq workqueue failed!\n");
5973 ret = -ENOMEM;
5974 goto err_create_wq_fail;
5977 return 0;
5979 err_create_wq_fail:
5980 __hns_roce_free_irq(hr_dev);
5982 err_request_irq_fail:
5983 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
5985 err_create_eq_fail:
5986 for (i -= 1; i >= 0; i--)
5987 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
5988 kfree(eq_table->eq);
5990 return ret;
5993 static void hns_roce_v2_cleanup_eq_table(struct hns_roce_dev *hr_dev)
5995 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
5996 int eq_num;
5997 int i;
5999 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
6001 /* Disable irq */
6002 hns_roce_v2_int_mask_enable(hr_dev, eq_num, EQ_DISABLE);
6004 __hns_roce_free_irq(hr_dev);
6006 for (i = 0; i < eq_num; i++) {
6007 hns_roce_v2_destroy_eqc(hr_dev, i);
6009 hns_roce_v2_free_eq(hr_dev, &eq_table->eq[i]);
6012 kfree(eq_table->eq);
6014 flush_workqueue(hr_dev->irq_workq);
6015 destroy_workqueue(hr_dev->irq_workq);
6018 static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
6019 struct hns_roce_srq *srq, u32 pdn, u16 xrcd,
6020 u32 cqn, void *mb_buf, u64 *mtts_wqe,
6021 u64 *mtts_idx, dma_addr_t dma_handle_wqe,
6022 dma_addr_t dma_handle_idx)
6024 struct hns_roce_srq_context *srq_context;
6026 srq_context = mb_buf;
6027 memset(srq_context, 0, sizeof(*srq_context));
6029 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQ_ST_M,
6030 SRQC_BYTE_4_SRQ_ST_S, 1);
6032 roce_set_field(srq_context->byte_4_srqn_srqst,
6033 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
6034 SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
6035 (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6036 hr_dev->caps.srqwqe_hop_num));
6037 roce_set_field(srq_context->byte_4_srqn_srqst,
6038 SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
6039 ilog2(srq->wqe_cnt));
6041 roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
6042 SRQC_BYTE_4_SRQN_S, srq->srqn);
6044 roce_set_field(srq_context->byte_8_limit_wl, SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6045 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6047 roce_set_field(srq_context->byte_12_xrcd, SRQC_BYTE_12_SRQ_XRCD_M,
6048 SRQC_BYTE_12_SRQ_XRCD_S, xrcd);
6050 srq_context->wqe_bt_ba = cpu_to_le32((u32)(dma_handle_wqe >> 3));
6052 roce_set_field(srq_context->byte_24_wqe_bt_ba,
6053 SRQC_BYTE_24_SRQ_WQE_BT_BA_M,
6054 SRQC_BYTE_24_SRQ_WQE_BT_BA_S,
6055 dma_handle_wqe >> 35);
6057 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_PD_M,
6058 SRQC_BYTE_28_PD_S, pdn);
6059 roce_set_field(srq_context->byte_28_rqws_pd, SRQC_BYTE_28_RQWS_M,
6060 SRQC_BYTE_28_RQWS_S, srq->max_gs <= 0 ? 0 :
6061 fls(srq->max_gs - 1));
6063 srq_context->idx_bt_ba = cpu_to_le32(dma_handle_idx >> 3);
6064 roce_set_field(srq_context->rsv_idx_bt_ba,
6065 SRQC_BYTE_36_SRQ_IDX_BT_BA_M,
6066 SRQC_BYTE_36_SRQ_IDX_BT_BA_S,
6067 dma_handle_idx >> 35);
6069 srq_context->idx_cur_blk_addr =
6070 cpu_to_le32(mtts_idx[0] >> PAGE_ADDR_SHIFT);
6071 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6072 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_M,
6073 SRQC_BYTE_44_SRQ_IDX_CUR_BLK_ADDR_S,
6074 mtts_idx[0] >> (32 + PAGE_ADDR_SHIFT));
6075 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6076 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
6077 SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
6078 hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
6079 hr_dev->caps.idx_hop_num);
6081 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6082 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
6083 SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
6084 hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
6085 roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
6086 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
6087 SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
6088 hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
6090 srq_context->idx_nxt_blk_addr =
6091 cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
6092 roce_set_field(srq_context->rsv_idxnxtblkaddr,
6093 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_M,
6094 SRQC_BYTE_52_SRQ_IDX_NXT_BLK_ADDR_S,
6095 mtts_idx[1] >> (32 + PAGE_ADDR_SHIFT));
6096 roce_set_field(srq_context->byte_56_xrc_cqn,
6097 SRQC_BYTE_56_SRQ_XRC_CQN_M, SRQC_BYTE_56_SRQ_XRC_CQN_S,
6098 cqn);
6099 roce_set_field(srq_context->byte_56_xrc_cqn,
6100 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_M,
6101 SRQC_BYTE_56_SRQ_WQE_BA_PG_SZ_S,
6102 hr_dev->caps.srqwqe_ba_pg_sz + PG_SHIFT_OFFSET);
6103 roce_set_field(srq_context->byte_56_xrc_cqn,
6104 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_M,
6105 SRQC_BYTE_56_SRQ_WQE_BUF_PG_SZ_S,
6106 hr_dev->caps.srqwqe_buf_pg_sz + PG_SHIFT_OFFSET);
6108 roce_set_bit(srq_context->db_record_addr_record_en,
6109 SRQC_BYTE_60_SRQ_RECORD_EN_S, 0);
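/*
 * Editorial sketch of the base-address splitting above: a DMA address
 * is stored in 8-byte units across two fields, bits [34:3] in the
 * 32-bit word and bits [63:35] in the adjacent bit-field, hence the
 * ">> 3" / ">> 35" pair.  A minimal illustration (hypothetical names):
 */
static inline void srqc_split_ba_sketch(dma_addr_t ba, u32 *lo, u32 *hi)
{
	*lo = (u32)(ba >> 3);	/* bits [34:3], cf. wqe_bt_ba/idx_bt_ba */
	*hi = (u32)(ba >> 35);	/* bits [63:35], packed via roce_set_field */
}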
6112 static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
6113 struct ib_srq_attr *srq_attr,
6114 enum ib_srq_attr_mask srq_attr_mask,
6115 struct ib_udata *udata)
6117 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6118 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6119 struct hns_roce_srq_context *srq_context;
6120 struct hns_roce_srq_context *srqc_mask;
6121 struct hns_roce_cmd_mailbox *mailbox;
6122 int ret;
6124 if (srq_attr_mask & IB_SRQ_LIMIT) {
6125 if (srq_attr->srq_limit >= srq->wqe_cnt)
6126 return -EINVAL;
6128 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6129 if (IS_ERR(mailbox))
6130 return PTR_ERR(mailbox);
6132 srq_context = mailbox->buf;
6133 srqc_mask = (struct hns_roce_srq_context *)mailbox->buf + 1;
6135 memset(srqc_mask, 0xff, sizeof(*srqc_mask));
6137 roce_set_field(srq_context->byte_8_limit_wl,
6138 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6139 SRQC_BYTE_8_SRQ_LIMIT_WL_S, srq_attr->srq_limit);
6140 roce_set_field(srqc_mask->byte_8_limit_wl,
6141 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6142 SRQC_BYTE_8_SRQ_LIMIT_WL_S, 0);
6144 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, srq->srqn, 0,
6145 HNS_ROCE_CMD_MODIFY_SRQC,
6146 HNS_ROCE_CMD_TIMEOUT_MSECS);
6147 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6148 if (ret) {
6149 dev_err(hr_dev->dev,
6150 "MODIFY SRQ Failed to cmd mailbox.\n");
6151 return ret;
6155 return 0;
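/*
 * Editorial note: the MODIFY_SRQC mailbox buffer holds two contexts
 * back to back -- the new values followed by a mask.  The mask is
 * preset to all ones (memset 0xff) and only the bits of the field
 * being changed are cleared, which, assuming the usual hip08 mask
 * convention, tells the hardware to update SRQ_LIMIT_WL alone and
 * leave every other SRQC field untouched.
 */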
6158 static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
6160 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6161 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6162 struct hns_roce_srq_context *srq_context;
6163 struct hns_roce_cmd_mailbox *mailbox;
6164 int limit_wl;
6165 int ret;
6167 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
6168 if (IS_ERR(mailbox))
6169 return PTR_ERR(mailbox);
6171 srq_context = mailbox->buf;
6172 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, srq->srqn, 0,
6173 HNS_ROCE_CMD_QUERY_SRQC,
6174 HNS_ROCE_CMD_TIMEOUT_MSECS);
6175 if (ret) {
6176                 dev_err(hr_dev->dev, "QUERY SRQ mailbox cmd processing failed.\n");
6177 goto out;
6180 limit_wl = roce_get_field(srq_context->byte_8_limit_wl,
6181 SRQC_BYTE_8_SRQ_LIMIT_WL_M,
6182 SRQC_BYTE_8_SRQ_LIMIT_WL_S);
6184 attr->srq_limit = limit_wl;
6185 attr->max_wr = srq->wqe_cnt - 1;
6186 attr->max_sge = srq->max_gs;
6190 out:
6191 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
6192 return ret;
6195 static int find_empty_entry(struct hns_roce_idx_que *idx_que,
6196 unsigned long size)
6198 int wqe_idx;
6200 if (unlikely(bitmap_full(idx_que->bitmap, size)))
6201 return -ENOSPC;
6203 wqe_idx = find_first_zero_bit(idx_que->bitmap, size);
6205 bitmap_set(idx_que->bitmap, wqe_idx, 1);
6207 return wqe_idx;
6210 static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
6211 int cur_idx, int wqe_idx)
6213 unsigned int *addr;
6215 addr = (unsigned int *)hns_roce_buf_offset(&idx_que->idx_buf,
6216 cur_idx * idx_que->entry_sz);
6217 *addr = wqe_idx;
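/*
 * Editorial sketch: find_empty_entry()/fill_idx_queue() form a simple
 * bitmap allocator over the SRQ index queue.  Typical usage, as in
 * hns_roce_v2_post_srq_recv() below (locking and bad_wr handling
 * elided):
 *
 *	wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
 *	if (wqe_idx < 0)
 *		return -ENOMEM;
 *	fill_idx_queue(&srq->idx_que, ind, wqe_idx);
 */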
6220 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
6221 const struct ib_recv_wr *wr,
6222 const struct ib_recv_wr **bad_wr)
6224 struct hns_roce_dev *hr_dev = to_hr_dev(ibsrq->device);
6225 struct hns_roce_srq *srq = to_hr_srq(ibsrq);
6226 struct hns_roce_v2_wqe_data_seg *dseg;
6227 struct hns_roce_v2_db srq_db;
6228 unsigned long flags;
6229 int ret = 0;
6230 int wqe_idx;
6231 void *wqe;
6232 int nreq;
6233 int ind;
6234 int i;
6236 spin_lock_irqsave(&srq->lock, flags);
6238 ind = srq->head & (srq->wqe_cnt - 1);
6240 for (nreq = 0; wr; ++nreq, wr = wr->next) {
6241 if (unlikely(wr->num_sge > srq->max_gs)) {
6242 ret = -EINVAL;
6243 *bad_wr = wr;
6244 break;
6247 if (unlikely(srq->head == srq->tail)) {
6248 ret = -ENOMEM;
6249 *bad_wr = wr;
6250 break;
6253 wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
6254 if (wqe_idx < 0) {
6255 ret = -ENOMEM;
6256 *bad_wr = wr;
6257 break;
6260 fill_idx_queue(&srq->idx_que, ind, wqe_idx);
6261 wqe = get_srq_wqe(srq, wqe_idx);
6262 dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
6264 for (i = 0; i < wr->num_sge; ++i) {
6265 dseg[i].len = cpu_to_le32(wr->sg_list[i].length);
6266 dseg[i].lkey = cpu_to_le32(wr->sg_list[i].lkey);
6267 dseg[i].addr = cpu_to_le64(wr->sg_list[i].addr);
6270 if (i < srq->max_gs) {
6271 dseg[i].len = 0;
6272 dseg[i].lkey = cpu_to_le32(0x100);
6273 dseg[i].addr = 0;
6276 srq->wrid[wqe_idx] = wr->wr_id;
6277 ind = (ind + 1) & (srq->wqe_cnt - 1);
6280 if (likely(nreq)) {
6281 srq->head += nreq;
6284 * Make sure that descriptors are written before
6285 * doorbell record.
6287 wmb();
6289 srq_db.byte_4 =
6290 cpu_to_le32(HNS_ROCE_V2_SRQ_DB << V2_DB_BYTE_4_CMD_S |
6291 (srq->srqn & V2_DB_BYTE_4_TAG_M));
6292 srq_db.parameter = cpu_to_le32(srq->head);
6294 hns_roce_write64(hr_dev, (__le32 *)&srq_db, srq->db_reg_l);
6298 spin_unlock_irqrestore(&srq->lock, flags);
6300 return ret;
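/*
 * Editorial note on the doorbell above: byte_4 packs the doorbell
 * command (HNS_ROCE_V2_SRQ_DB) above the SRQN tag and parameter carries
 * the new producer head; the wmb() ensures the WQE and index-queue
 * stores are visible to the device before the 64-bit doorbell write
 * reaches it.
 */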
6303 static const struct hns_roce_dfx_hw hns_roce_dfx_hw_v2 = {
6304 .query_cqc_info = hns_roce_v2_query_cqc_info,
6307 static const struct ib_device_ops hns_roce_v2_dev_ops = {
6308 .destroy_qp = hns_roce_v2_destroy_qp,
6309 .modify_cq = hns_roce_v2_modify_cq,
6310 .poll_cq = hns_roce_v2_poll_cq,
6311 .post_recv = hns_roce_v2_post_recv,
6312 .post_send = hns_roce_v2_post_send,
6313 .query_qp = hns_roce_v2_query_qp,
6314 .req_notify_cq = hns_roce_v2_req_notify_cq,
6317 static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
6318 .modify_srq = hns_roce_v2_modify_srq,
6319 .post_srq_recv = hns_roce_v2_post_srq_recv,
6320 .query_srq = hns_roce_v2_query_srq,
6323 static const struct hns_roce_hw hns_roce_hw_v2 = {
6324 .cmq_init = hns_roce_v2_cmq_init,
6325 .cmq_exit = hns_roce_v2_cmq_exit,
6326 .hw_profile = hns_roce_v2_profile,
6327 .hw_init = hns_roce_v2_init,
6328 .hw_exit = hns_roce_v2_exit,
6329 .post_mbox = hns_roce_v2_post_mbox,
6330 .chk_mbox = hns_roce_v2_chk_mbox,
6331 .rst_prc_mbox = hns_roce_v2_rst_process_cmd,
6332 .set_gid = hns_roce_v2_set_gid,
6333 .set_mac = hns_roce_v2_set_mac,
6334 .write_mtpt = hns_roce_v2_write_mtpt,
6335 .rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
6336 .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
6337 .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
6338 .write_cqc = hns_roce_v2_write_cqc,
6339 .set_hem = hns_roce_v2_set_hem,
6340 .clear_hem = hns_roce_v2_clear_hem,
6341 .modify_qp = hns_roce_v2_modify_qp,
6342 .query_qp = hns_roce_v2_query_qp,
6343 .destroy_qp = hns_roce_v2_destroy_qp,
6344 .qp_flow_control_init = hns_roce_v2_qp_flow_control_init,
6345 .modify_cq = hns_roce_v2_modify_cq,
6346 .post_send = hns_roce_v2_post_send,
6347 .post_recv = hns_roce_v2_post_recv,
6348 .req_notify_cq = hns_roce_v2_req_notify_cq,
6349 .poll_cq = hns_roce_v2_poll_cq,
6350 .init_eq = hns_roce_v2_init_eq_table,
6351 .cleanup_eq = hns_roce_v2_cleanup_eq_table,
6352 .write_srqc = hns_roce_v2_write_srqc,
6353 .modify_srq = hns_roce_v2_modify_srq,
6354 .query_srq = hns_roce_v2_query_srq,
6355 .post_srq_recv = hns_roce_v2_post_srq_recv,
6356 .hns_roce_dev_ops = &hns_roce_v2_dev_ops,
6357 .hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
6360 static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
6361 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
6362 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
6363 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
6364 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
6365 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
6366 /* required last entry */
6367 {0, }
6370 MODULE_DEVICE_TABLE(pci, hns_roce_hw_v2_pci_tbl);
6372 static void hns_roce_hw_v2_get_cfg(struct hns_roce_dev *hr_dev,
6373 struct hnae3_handle *handle)
6375 struct hns_roce_v2_priv *priv = hr_dev->priv;
6376 int i;
6378 hr_dev->pci_dev = handle->pdev;
6379 hr_dev->dev = &handle->pdev->dev;
6380 hr_dev->hw = &hns_roce_hw_v2;
6381 hr_dev->dfx = &hns_roce_dfx_hw_v2;
6382 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
6383 hr_dev->odb_offset = hr_dev->sdb_offset;
6385 /* Get info from NIC driver. */
6386 hr_dev->reg_base = handle->rinfo.roce_io_base;
6387 hr_dev->caps.num_ports = 1;
6388 hr_dev->iboe.netdevs[0] = handle->rinfo.netdev;
6389 hr_dev->iboe.phy_port[0] = 0;
6391 addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
6392 hr_dev->iboe.netdevs[0]->dev_addr);
6394 for (i = 0; i < HNS_ROCE_V2_MAX_IRQ_NUM; i++)
6395 hr_dev->irq[i] = pci_irq_vector(handle->pdev,
6396 i + handle->rinfo.base_vector);
6398 /* cmd issue mode: 0 is poll, 1 is event */
6399 hr_dev->cmd_mod = 1;
6400 hr_dev->loop_idc = 0;
6402 hr_dev->reset_cnt = handle->ae_algo->ops->ae_dev_reset_cnt(handle);
6403 priv->handle = handle;
6406 static int __hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6408 struct hns_roce_dev *hr_dev;
6409 int ret;
6411 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
6412 if (!hr_dev)
6413 return -ENOMEM;
6415 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v2_priv), GFP_KERNEL);
6416 if (!hr_dev->priv) {
6417 ret = -ENOMEM;
6418 goto error_failed_kzalloc;
6421 hns_roce_hw_v2_get_cfg(hr_dev, handle);
6423 ret = hns_roce_init(hr_dev);
6424 if (ret) {
6425 dev_err(hr_dev->dev, "RoCE Engine init failed!\n");
6426 goto error_failed_get_cfg;
6429 handle->priv = hr_dev;
6431 return 0;
6433 error_failed_get_cfg:
6434 kfree(hr_dev->priv);
6436 error_failed_kzalloc:
6437 ib_dealloc_device(&hr_dev->ib_dev);
6439 return ret;
6442 static void __hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6443 bool reset)
6445 struct hns_roce_dev *hr_dev = (struct hns_roce_dev *)handle->priv;
6447 if (!hr_dev)
6448 return;
6450 handle->priv = NULL;
6451 hns_roce_exit(hr_dev);
6452 kfree(hr_dev->priv);
6453 ib_dealloc_device(&hr_dev->ib_dev);
6456 static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
6458 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
6459 const struct pci_device_id *id;
6460 struct device *dev = &handle->pdev->dev;
6461 int ret;
6463 handle->rinfo.instance_state = HNS_ROCE_STATE_INIT;
6465 if (ops->ae_dev_resetting(handle) || ops->get_hw_reset_stat(handle)) {
6466 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6467 goto reset_chk_err;
6470 id = pci_match_id(hns_roce_hw_v2_pci_tbl, handle->pdev);
6471 if (!id)
6472 return 0;
6474 ret = __hns_roce_hw_v2_init_instance(handle);
6475 if (ret) {
6476 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6477 dev_err(dev, "RoCE instance init failed! ret = %d\n", ret);
6478 if (ops->ae_dev_resetting(handle) ||
6479 ops->get_hw_reset_stat(handle))
6480 goto reset_chk_err;
6481 else
6482 return ret;
6485 handle->rinfo.instance_state = HNS_ROCE_STATE_INITED;
6488 return 0;
6490 reset_chk_err:
6491         dev_err(dev, "Device is busy resetting.\n"
6492                 "Please retry later.\n");
6494 return -EBUSY;
6497 static void hns_roce_hw_v2_uninit_instance(struct hnae3_handle *handle,
6498 bool reset)
6500 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED)
6501 return;
6503 handle->rinfo.instance_state = HNS_ROCE_STATE_UNINIT;
6505 __hns_roce_hw_v2_uninit_instance(handle, reset);
6507 handle->rinfo.instance_state = HNS_ROCE_STATE_NON_INIT;
6509 static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle)
6511 struct hns_roce_dev *hr_dev;
6512 struct ib_event event;
6514 if (handle->rinfo.instance_state != HNS_ROCE_STATE_INITED) {
6515 set_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6516 return 0;
6519 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_DOWN;
6520 clear_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state);
6522 hr_dev = (struct hns_roce_dev *)handle->priv;
6523 if (!hr_dev)
6524 return 0;
6526 hr_dev->is_reset = true;
6527 hr_dev->active = false;
6528 hr_dev->dis_db = true;
6530 event.event = IB_EVENT_DEVICE_FATAL;
6531 event.device = &hr_dev->ib_dev;
6532 event.element.port_num = 1;
6533 ib_dispatch_event(&event);
6535 return 0;
6538 static int hns_roce_hw_v2_reset_notify_init(struct hnae3_handle *handle)
6540 struct device *dev = &handle->pdev->dev;
6541 int ret;
6543 if (test_and_clear_bit(HNS_ROCE_RST_DIRECT_RETURN,
6544 &handle->rinfo.state)) {
6545 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6546 return 0;
6549 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INIT;
6551         dev_info(dev, "In reset process RoCE client reinit.\n");
6552 ret = __hns_roce_hw_v2_init_instance(handle);
6553 if (ret) {
6554                 /* When the reset notify type is HNAE3_INIT_CLIENT, the RoCE
6555                  * engine is reinitialized in this callback. If the reinit
6556                  * fails, clear handle->priv to inform the NIC driver.
6558 handle->priv = NULL;
6559 dev_err(dev, "In reset process RoCE reinit failed %d.\n", ret);
6560 } else {
6561 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_INITED;
6562 dev_info(dev, "Reset done, RoCE client reinit finished.\n");
6565 return ret;
6568 static int hns_roce_hw_v2_reset_notify_uninit(struct hnae3_handle *handle)
6570 if (test_bit(HNS_ROCE_RST_DIRECT_RETURN, &handle->rinfo.state))
6571 return 0;
6573 handle->rinfo.reset_state = HNS_ROCE_STATE_RST_UNINIT;
6574 dev_info(&handle->pdev->dev, "In reset process RoCE client uninit.\n");
6575 msleep(HNS_ROCE_V2_HW_RST_UNINT_DELAY);
6576 __hns_roce_hw_v2_uninit_instance(handle, false);
6578 return 0;
6581 static int hns_roce_hw_v2_reset_notify(struct hnae3_handle *handle,
6582 enum hnae3_reset_notify_type type)
6584 int ret = 0;
6586 switch (type) {
6587 case HNAE3_DOWN_CLIENT:
6588 ret = hns_roce_hw_v2_reset_notify_down(handle);
6589 break;
6590 case HNAE3_INIT_CLIENT:
6591 ret = hns_roce_hw_v2_reset_notify_init(handle);
6592 break;
6593 case HNAE3_UNINIT_CLIENT:
6594 ret = hns_roce_hw_v2_reset_notify_uninit(handle);
6595 break;
6596 default:
6597 break;
6600 return ret;
6603 static const struct hnae3_client_ops hns_roce_hw_v2_ops = {
6604 .init_instance = hns_roce_hw_v2_init_instance,
6605 .uninit_instance = hns_roce_hw_v2_uninit_instance,
6606 .reset_notify = hns_roce_hw_v2_reset_notify,
6609 static struct hnae3_client hns_roce_hw_v2_client = {
6610 .name = "hns_roce_hw_v2",
6611 .type = HNAE3_CLIENT_ROCE,
6612 .ops = &hns_roce_hw_v2_ops,
6615 static int __init hns_roce_hw_v2_init(void)
6617 return hnae3_register_client(&hns_roce_hw_v2_client);
6620 static void __exit hns_roce_hw_v2_exit(void)
6622 hnae3_unregister_client(&hns_roce_hw_v2_client);
6625 module_init(hns_roce_hw_v2_init);
6626 module_exit(hns_roce_hw_v2_exit);
6628 MODULE_LICENSE("Dual BSD/GPL");
6629 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
6630 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
6631 MODULE_AUTHOR("Shaobo Xu <xushaobo2@huawei.com>");
6632 MODULE_DESCRIPTION("Hisilicon Hip08 Family RoCE Driver");