/* drivers/infiniband/hw/hns/hns_roce_hw_v1.c */
/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/platform_device.h>
#include <linux/acpi.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <rdma/ib_umem.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"
#include "hns_roce_hw_v1.h"
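
/*
 * WQE segment helpers: a data segment describes one SGE (lkey, address,
 * length) and a remote-address segment carries the RDMA target (raddr,
 * rkey). All fields are stored little-endian, as the hardware expects.
 */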
static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->lkey = cpu_to_le32(sg->lkey);
	dseg->addr = cpu_to_le64(sg->addr);
	dseg->len = cpu_to_le32(sg->length);
}
static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
			  u32 rkey)
{
	rseg->raddr = cpu_to_le64(remote_addr);
	rseg->rkey = cpu_to_le32(rkey);
	rseg->len = 0;
}
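
/*
 * Post a chain of send work requests. For GSI QPs a UD WQE is built with
 * the destination MAC and GID taken from the address handle; for RC QPs a
 * ctrl segment, an optional remote-address segment and either inline data
 * or scatter/gather entries are written. The SQ doorbell is rung once at
 * the end with the new head pointer.
 */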
static int hns_roce_v1_post_send(struct ib_qp *ibqp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
	struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
	struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
	struct hns_roce_wqe_data_seg *dseg = NULL;
	struct hns_roce_qp *qp = to_hr_qp(ibqp);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_sq_db sq_db = {};
	int ps_opcode = 0, i = 0;
	unsigned long flags = 0;
	void *wqe = NULL;
	__le32 doorbell[2];
	u32 wqe_idx = 0;
	int nreq = 0;
	int ret = 0;
	u8 *smac;
	int loopback;

	if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
		     ibqp->qp_type != IB_QPT_RC)) {
		dev_err(dev, "unsupported QP type\n");
		*bad_wr = NULL;
		return -EOPNOTSUPP;
	}

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > qp->sq.max_gs)) {
			dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
				wr->num_sge, qp->sq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		wqe = hns_roce_get_send_wqe(qp, wqe_idx);
		qp->sq.wrid[wqe_idx] = wr->wr_id;

		/* Process UD (GSI) and RC type WQEs separately */
		if (ibqp->qp_type == IB_QPT_GSI) {
			ud_sq_wqe = wqe;
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_0_M,
				       UD_SEND_WQE_U32_4_DMAC_0_S,
				       ah->av.mac[0]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_1_M,
				       UD_SEND_WQE_U32_4_DMAC_1_S,
				       ah->av.mac[1]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_2_M,
				       UD_SEND_WQE_U32_4_DMAC_2_S,
				       ah->av.mac[2]);
			roce_set_field(ud_sq_wqe->dmac_h,
				       UD_SEND_WQE_U32_4_DMAC_3_M,
				       UD_SEND_WQE_U32_4_DMAC_3_S,
				       ah->av.mac[3]);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_4_M,
				       UD_SEND_WQE_U32_8_DMAC_4_S,
				       ah->av.mac[4]);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_DMAC_5_M,
				       UD_SEND_WQE_U32_8_DMAC_5_S,
				       ah->av.mac[5]);

			smac = (u8 *)hr_dev->dev_addr[qp->port];
			loopback = ether_addr_equal_unaligned(ah->av.mac,
							      smac) ? 1 : 0;
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
				     loopback);

			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
				       UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
				       HNS_ROCE_WQE_OPCODE_SEND);
			roce_set_field(ud_sq_wqe->u32_8,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
				       UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
				       2);
			roce_set_bit(ud_sq_wqe->u32_8,
				     UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
				     1);

			ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
				cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				(wr->send_flags & IB_SEND_SOLICITED ?
				cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				((wr->opcode == IB_WR_SEND_WITH_IMM) ?
				cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);

			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_DEST_QP_M,
				       UD_SEND_WQE_U32_16_DEST_QP_S,
				       ud_wr(wr)->remote_qpn);
			roce_set_field(ud_sq_wqe->u32_16,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
				       UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
				       ah->av.stat_rate);

			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_M,
				       UD_SEND_WQE_U32_36_FLOW_LABEL_S,
				       ah->av.flowlabel);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_PRIORITY_M,
				       UD_SEND_WQE_U32_36_PRIORITY_S,
				       ah->av.sl);
			roce_set_field(ud_sq_wqe->u32_36,
				       UD_SEND_WQE_U32_36_SGID_INDEX_M,
				       UD_SEND_WQE_U32_36_SGID_INDEX_S,
				       hns_get_gid_index(hr_dev, qp->phy_port,
							 ah->av.gid_index));

			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_M,
				       UD_SEND_WQE_U32_40_HOP_LIMIT_S,
				       ah->av.hop_limit);
			roce_set_field(ud_sq_wqe->u32_40,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
				       UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
				       ah->av.tclass);

			memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);

			ud_sq_wqe->va0_l =
				       cpu_to_le32((u32)wr->sg_list[0].addr);
			ud_sq_wqe->va0_h =
				       cpu_to_le32((wr->sg_list[0].addr) >> 32);
			ud_sq_wqe->l_key0 =
				       cpu_to_le32(wr->sg_list[0].lkey);

			ud_sq_wqe->va1_l =
				       cpu_to_le32((u32)wr->sg_list[1].addr);
			ud_sq_wqe->va1_h =
				       cpu_to_le32((wr->sg_list[1].addr) >> 32);
			ud_sq_wqe->l_key1 =
				       cpu_to_le32(wr->sg_list[1].lkey);
		} else if (ibqp->qp_type == IB_QPT_RC) {
			u32 tmp_len = 0;

			ctrl = wqe;
			memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
			for (i = 0; i < wr->num_sge; i++)
				tmp_len += wr->sg_list[i].length;

			ctrl->msg_length =
			  cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);

			ctrl->sgl_pa_h = 0;
			ctrl->flag = 0;

			switch (wr->opcode) {
			case IB_WR_SEND_WITH_IMM:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ctrl->imm_data = wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				ctrl->inv_key =
					cpu_to_le32(wr->ex.invalidate_rkey);
				break;
			default:
				ctrl->imm_data = 0;
				break;
			}

			/* Ctrl field, ctrl set type: sig, solic, imm, fence */
			/* SO wait for conforming application scenarios */
			ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
				      cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
				      (wr->send_flags & IB_SEND_SOLICITED ?
				      cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
				      ((wr->opcode == IB_WR_SEND_WITH_IMM ||
				      wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
				      cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
				      (wr->send_flags & IB_SEND_FENCE ?
				      (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);

			wqe += sizeof(struct hns_roce_wqe_ctrl_seg);

			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				break;
			case IB_WR_SEND:
			case IB_WR_SEND_WITH_INV:
			case IB_WR_SEND_WITH_IMM:
				ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
				break;
			case IB_WR_LOCAL_INV:
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_LSO:
			default:
				ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
				break;
			}
			ctrl->flag |= cpu_to_le32(ps_opcode);
			wqe += sizeof(struct hns_roce_wqe_raddr_seg);

			dseg = wqe;
			if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
				if (le32_to_cpu(ctrl->msg_length) >
				    hr_dev->caps.max_sq_inline) {
					ret = -EINVAL;
					*bad_wr = wr;
					dev_err(dev, "inline len(1-%d)=%d, illegal",
						hr_dev->caps.max_sq_inline,
						le32_to_cpu(ctrl->msg_length));
					goto out;
				}
				for (i = 0; i < wr->num_sge; i++) {
					memcpy(wqe, ((void *) (uintptr_t)
					       wr->sg_list[i].addr),
					       wr->sg_list[i].length);
					wqe += wr->sg_list[i].length;
				}
				ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
			} else {
				/* use SGEs rather than inline data */
				for (i = 0; i < wr->num_sge; i++)
					set_data_seg(dseg + i, wr->sg_list + i);

				ctrl->flag |= cpu_to_le32(wr->num_sge <<
					      HNS_ROCE_WQE_SGE_NUM_BIT);
			}
		}
	}

out:
	/* Set DB return */
	if (likely(nreq)) {
		qp->sq.head += nreq;
		/* Memory barrier */
		wmb();

		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
			       SQ_DOORBELL_U32_4_SQ_HEAD_S,
			       (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
			       SQ_DOORBELL_U32_4_SL_S, qp->sl);
		roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
			       SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
		roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
			       SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
		roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);

		doorbell[0] = sq_db.u32_4;
		doorbell[1] = sq_db.u32_8;

		hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return ret;
}
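
/*
 * Post receive work requests. Each RQ WQE holds a ctrl segment with the
 * SGE count followed by the data segments. For GSI QPs the new RQ head is
 * written into the QP1C config register; all other QPs ring the RQ
 * doorbell instead.
 */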
static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
				 const struct ib_recv_wr *wr,
				 const struct ib_recv_wr **bad_wr)
{
	struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
	struct hns_roce_wqe_data_seg *scat = NULL;
	struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
	struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_rq_db rq_db = {};
	__le32 doorbell[2] = {0};
	unsigned long flags = 0;
	unsigned int wqe_idx;
	int ret = 0;
	int nreq = 0;
	int i = 0;
	u32 reg_val;

	spin_lock_irqsave(&hr_qp->rq.lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
					 hr_qp->ibqp.recv_cq)) {
			ret = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);

		if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
			dev_err(dev, "rq:num_sge=%d > hr_qp->rq.max_gs=%d\n",
				wr->num_sge, hr_qp->rq.max_gs);
			ret = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);

		roce_set_field(ctrl->rwqe_byte_12,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
			       RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
			       wr->num_sge);

		scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);

		for (i = 0; i < wr->num_sge; i++)
			set_data_seg(scat + i, wr->sg_list + i);

		hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
	}

out:
	if (likely(nreq)) {
		hr_qp->rq.head += nreq;
		/* Memory barrier */
		wmb();

		if (ibqp->qp_type == IB_QPT_GSI) {
			__le32 tmp;

			/* SW update GSI rq header */
			reg_val = roce_read(to_hr_dev(ibqp->device),
					    ROCEE_QP1C_CFG3_0_REG +
					    QP1C_CFGN_OFFSET * hr_qp->phy_port);
			tmp = cpu_to_le32(reg_val);
			roce_set_field(tmp,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
				       ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
				       hr_qp->rq.head);
			reg_val = le32_to_cpu(tmp);
			roce_write(to_hr_dev(ibqp->device),
				   ROCEE_QP1C_CFG3_0_REG +
				   QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
		} else {
			roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
				       RQ_DOORBELL_U32_4_RQ_HEAD_S,
				       hr_qp->rq.head);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
				       RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
			roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
				       RQ_DOORBELL_U32_8_CMD_S, 1);
			roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
				     1);

			doorbell[0] = rq_db.u32_4;
			doorbell[1] = rq_db.u32_8;

			hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
		}
	}
	spin_unlock_irqrestore(&hr_qp->rq.lock, flags);

	return ret;
}
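
/*
 * Doorbell configuration helpers: select event vs. poll mode and normal
 * vs. extended doorbells, and program the almost-empty/almost-full
 * watermarks for the send and "others" doorbell areas.
 */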
static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
				       int sdb_mode, int odb_mode)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
				     u32 odb_mode)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB/ODB extend mode */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
	roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
}
static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
			     u32 sdb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure SDB */
	val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
	roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
		       ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
}
static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
			     u32 odb_alful)
{
	__le32 tmp;
	u32 val;

	/* Configure ODB */
	val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
	roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
		       ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
}
static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
				 u32 ext_sdb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend SDB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);

	/* Configure extend SDB base addr */
	sdb_dma_addr = db->ext_db->sdb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));

	/* Configure extend SDB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
		       db->ext_db->esdb_dep);
	/*
	 * 44 = 32 + 12. When writing the address to hardware, shift by 12
	 * because 4K pages are used, and by another 32 because this field
	 * holds the high 32 bits of the address.
	 */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);

	dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
	dev_dbg(dev, "ext SDB threshold: empty: 0x%x, full: 0x%x\n",
		ext_sdb_alept, ext_sdb_alful);
}
static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
				 u32 ext_odb_alful)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t odb_dma_addr;
	__le32 tmp;
	u32 val;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	/* Configure extend ODB threshold */
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
	roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);

	/* Configure extend ODB base addr */
	odb_dma_addr = db->ext_db->odb_buf_list->map;
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));

	/* Configure extend ODB depth */
	val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
		       ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
		       db->ext_db->eodb_dep);
	/* high 32 bits of the 4K-aligned ODB base, mirroring the SDB path */
	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
		       odb_dma_addr >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);

	dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
	dev_dbg(dev, "ext ODB threshold: empty: 0x%x, full: 0x%x\n",
		ext_odb_alept, ext_odb_alful);
}
static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
				u32 odb_ext_mod)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	dma_addr_t sdb_dma_addr;
	dma_addr_t odb_dma_addr;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
	if (!db->ext_db)
		return -ENOMEM;

	if (sdb_ext_mod) {
		db->ext_db->sdb_buf_list = kmalloc(
				sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list) {
			ret = -ENOMEM;
			goto ext_sdb_buf_fail_out;
		}

		db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_SDB_SIZE,
						&sdb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->sdb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_sq_db_buf_fail;
		}
		db->ext_db->sdb_buf_list->map = sdb_dma_addr;

		db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
		hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
				     HNS_ROCE_V1_EXT_SDB_ALFUL);
	} else
		hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
				 HNS_ROCE_V1_SDB_ALFUL);

	if (odb_ext_mod) {
		db->ext_db->odb_buf_list = kmalloc(
				sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
		if (!db->ext_db->odb_buf_list) {
			ret = -ENOMEM;
			goto ext_odb_buf_fail_out;
		}

		db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
						HNS_ROCE_V1_EXT_ODB_SIZE,
						&odb_dma_addr, GFP_KERNEL);
		if (!db->ext_db->odb_buf_list->buf) {
			ret = -ENOMEM;
			goto alloc_otr_db_buf_fail;
		}
		db->ext_db->odb_buf_list->map = odb_dma_addr;

		db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
		hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
				     HNS_ROCE_V1_EXT_ODB_ALFUL);
	} else
		hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
				 HNS_ROCE_V1_ODB_ALFUL);

	hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);

	return 0;

alloc_otr_db_buf_fail:
	kfree(db->ext_db->odb_buf_list);

ext_odb_buf_fail_out:
	if (sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
	}

alloc_sq_db_buf_fail:
	if (sdb_ext_mod)
		kfree(db->ext_db->sdb_buf_list);

ext_sdb_buf_fail_out:
	kfree(db->ext_db);
	return ret;
}
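
/*
 * MR-free workaround: freeing an MR is fenced by posting a zero-length
 * RDMA write on a reserved loopback QP for every active port and waiting
 * for the CQEs (see hns_roce_v1_dereg_mr), so the hardware has finished
 * any outstanding access to the registered memory before it is released.
 */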
static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
						    struct ib_pd *pd)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_qp_init_attr init_attr;
	struct ib_qp *qp;

	memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
	init_attr.qp_type = IB_QPT_RC;
	init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
	init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;

	qp = hns_roce_create_qp(pd, &init_attr, NULL);
	if (IS_ERR(qp)) {
		dev_err(dev, "Create loop qp for mr free failed!");
		return NULL;
	}

	return to_hr_qp(qp);
}
static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_caps *caps = &hr_dev->caps;
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_cq_init_attr cq_init_attr;
	struct hns_roce_free_mr *free_mr;
	struct ib_qp_attr attr = { 0 };
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	struct ib_device *ibdev;
	struct ib_cq *cq;
	struct ib_pd *pd;
	union ib_gid dgid;
	__be64 subnet_prefix;
	int attr_mask = 0;
	int ret;
	int i, j;
	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
	u8 phy_port;
	u8 port = 0;
	u8 sl;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	/* Reserved cq for loop qp */
	cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
	cq_init_attr.comp_vector = 0;

	ibdev = &hr_dev->ib_dev;
	cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
	if (!cq)
		return -ENOMEM;

	ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
	if (ret) {
		dev_err(dev, "Create cq for reserved loop qp failed!");
		goto alloc_cq_failed;
	}
	free_mr->mr_free_cq = to_hr_cq(cq);
	free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
	free_mr->mr_free_cq->ib_cq.uobject = NULL;
	free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
	free_mr->mr_free_cq->ib_cq.event_handler = NULL;
	free_mr->mr_free_cq->ib_cq.cq_context = NULL;
	atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd) {
		ret = -ENOMEM;
		goto alloc_mem_failed;
	}

	pd->device = ibdev;
	ret = hns_roce_alloc_pd(pd, NULL);
	if (ret)
		goto alloc_pd_failed;

	free_mr->mr_free_pd = to_hr_pd(pd);
	free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
	free_mr->mr_free_pd->ibpd.uobject = NULL;
	free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
	atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);

	attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
	attr.pkey_index = 0;
	attr.min_rnr_timer = 0;
	/* Disable read ability */
	attr.max_dest_rd_atomic = 0;
	attr.max_rd_atomic = 0;
	/* Use arbitrary values as rq_psn and sq_psn */
	attr.rq_psn = 0x0808;
	attr.sq_psn = 0x0808;
	attr.retry_cnt = 7;
	attr.rnr_retry = 7;
	attr.timeout = 0x12;
	attr.path_mtu = IB_MTU_256;
	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
	rdma_ah_set_static_rate(&attr.ah_attr, 3);

	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
				(i % HNS_ROCE_MAX_PORTS);
		sl = i / HNS_ROCE_MAX_PORTS;

		for (j = 0; j < caps->num_ports; j++) {
			if (hr_dev->iboe.phy_port[j] == phy_port) {
				queue_en[i] = 1;
				port = j;
				break;
			}
		}

		if (!queue_en[i])
			continue;

		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
		if (!free_mr->mr_free_qp[i]) {
			dev_err(dev, "Create loop qp failed!\n");
			ret = -ENOMEM;
			goto create_lp_qp_failed;
		}
		hr_qp = free_mr->mr_free_qp[i];

		hr_qp->port = port;
		hr_qp->phy_port = phy_port;
		hr_qp->ibqp.qp_type = IB_QPT_RC;
		hr_qp->ibqp.device = &hr_dev->ib_dev;
		hr_qp->ibqp.uobject = NULL;
		atomic_set(&hr_qp->ibqp.usecnt, 0);
		hr_qp->ibqp.pd = pd;
		hr_qp->ibqp.recv_cq = cq;
		hr_qp->ibqp.send_cq = cq;

		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
		rdma_ah_set_sl(&attr.ah_attr, sl);
		attr.port_num = port + 1;

		attr.dest_qp_num = hr_qp->qpn;
		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
		       hr_dev->dev_addr[port],
		       ETH_ALEN);

		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
		dgid.raw[11] = 0xff;
		dgid.raw[12] = 0xfe;
		dgid.raw[8] ^= 2;
		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RESET, IB_QPS_INIT);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
					    IB_QPS_INIT, IB_QPS_RTR);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}

		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
					    IB_QPS_RTR, IB_QPS_RTS);
		if (ret) {
			dev_err(dev, "modify qp failed(%d)!\n", ret);
			goto create_lp_qp_failed;
		}
	}

	return 0;

create_lp_qp_failed:
	for (i -= 1; i >= 0; i--) {
		hr_qp = free_mr->mr_free_qp[i];
		if (hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL))
			dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
	}

	hns_roce_dealloc_pd(pd, NULL);

alloc_pd_failed:
	kfree(pd);

alloc_mem_failed:
	hns_roce_destroy_cq(cq, NULL);
alloc_cq_failed:
	kfree(cq);
	return ret;
}
static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_qp *hr_qp;
	int ret;
	int i;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;

		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp, NULL);
		if (ret)
			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
				i, ret);
	}

	hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
	kfree(&free_mr->mr_free_cq->ib_cq);
	hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
	kfree(&free_mr->mr_free_pd->ibpd);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;
	u32 sdb_ext_mod;
	u32 odb_ext_mod;
	u32 sdb_evt_mod;
	u32 odb_evt_mod;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	memset(db, 0, sizeof(*db));

	/* Default DB mode */
	sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
	odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
	sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
	odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;

	db->sdb_ext_mod = sdb_ext_mod;
	db->odb_ext_mod = odb_ext_mod;

	/* Init extend DB */
	ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
	if (ret) {
		dev_err(dev, "Failed in extend DB configuration.\n");
		return ret;
	}

	hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);

	return 0;
}
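
/*
 * The loopback QPs are released and recreated from a workqueue so the
 * caller can bound its wait; comp_flag tells the worker whether anyone
 * still waits on the completion, which lives on the caller's stack.
 */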
static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
{
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_dev *hr_dev;

	lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
				  work);
	hr_dev = to_hr_dev(lp_qp_work->ib_dev);

	hns_roce_v1_release_lp_qp(hr_dev);

	if (hns_roce_v1_rsv_lp_qp(hr_dev))
		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");

	if (lp_qp_work->comp_flag)
		complete(lp_qp_work->comp);

	kfree(lp_qp_work);
}
static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_recreate_lp_qp_work *lp_qp_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
			     GFP_KERNEL);
	if (!lp_qp_work)
		return -ENOMEM;

	INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);

	lp_qp_work->ib_dev = &(hr_dev->ib_dev);
	lp_qp_work->comp = &comp;
	lp_qp_work->comp_flag = 1;

	init_completion(lp_qp_work->comp);

	queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			return 0;
		msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
		end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
	}

	lp_qp_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		return 0;

	dev_warn(dev, "recreate lp qp timed out (20s), returning failure!\n");
	return -ETIMEDOUT;
}
static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
	struct device *dev = &hr_dev->pdev->dev;
	struct ib_send_wr send_wr;
	const struct ib_send_wr *bad_wr;
	int ret;

	memset(&send_wr, 0, sizeof(send_wr));
	send_wr.next = NULL;
	send_wr.num_sge = 0;
	send_wr.send_flags = 0;
	send_wr.sg_list = NULL;
	send_wr.wr_id = (unsigned long long)&send_wr;
	send_wr.opcode = IB_WR_RDMA_WRITE;

	ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
	if (ret) {
		dev_err(dev, "Post write wqe for mr free failed(%d)!", ret);
		return ret;
	}

	return 0;
}
static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
{
	struct hns_roce_mr_free_work *mr_work;
	struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_cq *mr_free_cq;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_dev *hr_dev;
	struct hns_roce_mr *hr_mr;
	struct hns_roce_qp *hr_qp;
	struct device *dev;
	unsigned long end =
		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
	int i;
	int ret;
	int ne = 0;

	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
	hr_mr = (struct hns_roce_mr *)mr_work->mr;
	hr_dev = to_hr_dev(mr_work->ib_dev);
	dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;
	mr_free_cq = free_mr->mr_free_cq;

	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
		hr_qp = free_mr->mr_free_qp[i];
		if (!hr_qp)
			continue;
		ne++;

		ret = hns_roce_v1_send_lp_wqe(hr_qp);
		if (ret) {
			dev_err(dev,
				"Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
				hr_qp->qpn, ret);
			goto free_work;
		}
	}

	if (!ne) {
		dev_err(dev, "Reserved loop qp is absent!\n");
		goto free_work;
	}

	do {
		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
		if (ret < 0 && hr_qp) {
			dev_err(dev,
				"(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n",
				hr_qp->qpn, ret, hr_mr->key, ne);
			goto free_work;
		}
		ne -= ret;
		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
	} while (ne && time_before_eq(jiffies, end));

	if (ne != 0)
		dev_err(dev,
			"Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
			hr_mr->key, ne);

free_work:
	if (mr_work->comp_flag)
		complete(mr_work->comp);
	kfree(mr_work);
}
static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, struct ib_udata *udata)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_mr_free_work *mr_work;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	struct completion comp;
	long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
	unsigned long start = jiffies;
	int npages;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	if (mr->enabled) {
		if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
					    key_to_hw_index(mr->key) &
					    (hr_dev->caps.num_mtpts - 1)))
			dev_warn(dev, "DESTROY_MPT failed!\n");
	}

	mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
	if (!mr_work) {
		ret = -ENOMEM;
		goto free_mr;
	}

	INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);

	mr_work->ib_dev = &(hr_dev->ib_dev);
	mr_work->comp = &comp;
	mr_work->comp_flag = 1;
	mr_work->mr = (void *)mr;
	init_completion(mr_work->comp);

	queue_work(free_mr->free_mr_wq, &(mr_work->work));

	while (end > 0) {
		if (try_wait_for_completion(&comp))
			goto free_mr;
		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
		end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
	}

	mr_work->comp_flag = 0;
	if (try_wait_for_completion(&comp))
		goto free_mr;

	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
	ret = -ETIMEDOUT;

free_mr:
	dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
		mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);
		dma_free_coherent(dev, npages * 8, mr->pbl_buf,
				  mr->pbl_dma_addr);
	}

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), 0);

	ib_umem_release(mr->umem);

	kfree(mr);

	return ret;
}
static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_db_table *db;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	db = &priv->db_table;

	if (db->sdb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
				  db->ext_db->sdb_buf_list->buf,
				  db->ext_db->sdb_buf_list->map);
		kfree(db->ext_db->sdb_buf_list);
	}

	if (db->odb_ext_mod) {
		dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
				  db->ext_db->odb_buf_list->buf,
				  db->ext_db->odb_buf_list->map);
		kfree(db->ext_db->odb_buf_list);
	}

	kfree(db->ext_db);
}
static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	int raq_shift = 0;
	dma_addr_t addr;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;
	struct device *dev = &hr_dev->pdev->dev;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
	if (!raq->e_raq_buf)
		return -ENOMEM;

	raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
						 &addr, GFP_KERNEL);
	if (!raq->e_raq_buf->buf) {
		ret = -ENOMEM;
		goto err_dma_alloc_raq;
	}
	raq->e_raq_buf->map = addr;

	/* Configure raq extended address. 48bit 4K align */
	roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);

	/* Configure raq_shift */
	raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
	val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
	/*
	 * 44 = 32 + 12. When writing the address to hardware, shift by 12
	 * because 4K pages are used, and by another 32 because this field
	 * holds the high 32 bits of the address.
	 */
	roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
		       ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
		       raq->e_raq_buf->map >> 44);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
	dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);

	/* Configure raq threshold */
	val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
		       ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
		       HNS_ROCE_V1_EXT_RAQ_WF);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
	dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);

	/* Enable extend raq */
	val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
		       POL_TIME_INTERVAL_VAL);
	roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
	roce_set_field(tmp,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
		       ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
		       2); /* timeout-check config; literal lost in source, value assumed */
	roce_set_bit(tmp,
		     ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
	dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);

	/* Enable raq drop */
	val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
	tmp = cpu_to_le32(val);
	roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);

	return 0;

err_dma_alloc_raq:
	kfree(raq->e_raq_buf);
	return ret;
}
static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	struct hns_roce_raq_table *raq;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	raq = &priv->raq_table;

	dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
			  raq->e_raq_buf->map);
	kfree(raq->e_raq_buf);
}
static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
{
	__le32 tmp;
	u32 val;

	if (enable_flag) {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Open all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
			       ALL_PORT_VAL_OPEN);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	} else {
		val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
		/* Close all ports */
		tmp = cpu_to_le32(val);
		roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
			       ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
		val = le32_to_cpu(tmp);
		roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
	}
}
static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;
	int ret;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.qpc_buf.buf)
		return -ENOMEM;

	priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.mtpt_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_mtpt_buf;
	}

	priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
		HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
		GFP_KERNEL);
	if (!priv->bt_table.cqc_buf.buf) {
		ret = -ENOMEM;
		goto err_failed_alloc_cqc_buf;
	}

	return 0;

err_failed_alloc_cqc_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

err_failed_alloc_mtpt_buf:
	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);

	return ret;
}
static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);

	dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
		priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
}
static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	/*
	 * This buffer will be used for CQ's tptr(tail pointer), also
	 * named ci(consumer index). Every CQ will use 2 bytes to save
	 * cqe ci in hip06. Hardware will read this area to get new ci
	 * when the queue is almost full.
	 */
	tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
					   &tptr_buf->map, GFP_KERNEL);
	if (!tptr_buf->buf)
		return -ENOMEM;

	hr_dev->tptr_dma_addr = tptr_buf->map;
	hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;

	return 0;
}
static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_buf_list *tptr_buf;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	tptr_buf = &priv->tptr_table.tptr_buf;

	dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
			  tptr_buf->buf, tptr_buf->map);
}
static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;
	int ret = 0;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
	if (!free_mr->free_mr_wq) {
		dev_err(dev, "Create free mr workqueue failed!\n");
		return -ENOMEM;
	}

	ret = hns_roce_v1_rsv_lp_qp(hr_dev);
	if (ret) {
		dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
		flush_workqueue(free_mr->free_mr_wq);
		destroy_workqueue(free_mr->free_mr_wq);
	}

	return ret;
}
static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_free_mr *free_mr;
	struct hns_roce_v1_priv *priv;

	priv = (struct hns_roce_v1_priv *)hr_dev->priv;
	free_mr = &priv->free_mr;

	flush_workqueue(free_mr->free_mr_wq);
	destroy_workqueue(free_mr->free_mr_wq);

	hns_roce_v1_release_lp_qp(hr_dev);
}
/**
 * hns_roce_v1_reset - reset RoCE
 * @hr_dev: RoCE device struct pointer
 * @dereset: true -- de-assert reset, false -- assert reset
 * Return: 0 on success, negative on failure
 */
static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
{
	struct device_node *dsaf_node;
	struct device *dev = &hr_dev->pdev->dev;
	struct device_node *np = dev->of_node;
	struct fwnode_handle *fwnode;
	int ret;

	/* check if this is DT/ACPI case */
	if (dev_of_node(dev)) {
		dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
		if (!dsaf_node) {
			dev_err(dev, "could not find dsaf-handle\n");
			return -EINVAL;
		}
		fwnode = &dsaf_node->fwnode;
	} else if (is_acpi_device_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		ret = acpi_node_get_property_reference(dev->fwnode,
						       "dsaf-handle", 0, &args);
		if (ret) {
			dev_err(dev, "could not find dsaf-handle\n");
			return ret;
		}
		fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read data from DT or ACPI\n");
		return -ENXIO;
	}

	ret = hns_dsaf_roce_reset(fwnode, false);
	if (ret)
		return ret;

	if (dereset) {
		msleep(SLEEP_TIME_INTERVAL);
		ret = hns_dsaf_roce_reset(fwnode, true);
	}

	return ret;
}
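
/*
 * Fill in the device capabilities. Most hip06 limits are fixed constants;
 * the vendor/GUID registers and the ACK delay are read back from hardware,
 * and the 16 GIDs are spread across the ports.
 */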
static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	struct hns_roce_caps *caps = &hr_dev->caps;

	hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
	hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
	hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
				((u64)roce_read(hr_dev,
					    ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
	hr_dev->hw_rev = HNS_ROCE_HW_VER1;

	caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
	caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
	caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
	caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
	caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
	caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
	caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
	caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
	caps->num_uars = HNS_ROCE_V1_UAR_NUM;
	caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
	caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
	caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
	caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
	caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
	caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
	caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
	caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
	caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
	caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
	caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
	caps->qpc_entry_sz = HNS_ROCE_V1_QPC_ENTRY_SIZE;
	caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
	caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
	caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
	caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
	caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE;
	caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
	caps->reserved_lkey = 0;
	caps->reserved_pds = 0;
	caps->reserved_mrws = 1;
	caps->reserved_uars = 0;
	caps->reserved_cqs = 0;
	caps->reserved_qps = 12; /* 2 SQPs per port, six ports, 12 in total */
	caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;

	for (i = 0; i < caps->num_ports; i++)
		caps->pkey_table_len[i] = 1;

	for (i = 0; i < caps->num_ports; i++) {
		/* Six ports share 16 GIDs in the v1 engine */
		if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports;
		else
			caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
						 caps->num_ports + 1;
	}

	caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
	caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
	caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
	caps->max_mtu = IB_MTU_2048;

	return 0;
}
static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
{
	int ret;
	u32 val;
	__le32 tmp;
	struct device *dev = &hr_dev->pdev->dev;

	/* DMAE user config */
	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
		       ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
		       1 << PAGES_SHIFT_16);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);

	val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
	roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
		       ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
		       1 << PAGES_SHIFT_16);

	ret = hns_roce_db_init(hr_dev);
	if (ret) {
		dev_err(dev, "doorbell init failed!\n");
		return ret;
	}

	ret = hns_roce_raq_init(hr_dev);
	if (ret) {
		dev_err(dev, "raq init failed!\n");
		goto error_failed_raq_init;
	}

	ret = hns_roce_bt_init(hr_dev);
	if (ret) {
		dev_err(dev, "bt init failed!\n");
		goto error_failed_bt_init;
	}

	ret = hns_roce_tptr_init(hr_dev);
	if (ret) {
		dev_err(dev, "tptr init failed!\n");
		goto error_failed_tptr_init;
	}

	ret = hns_roce_free_mr_init(hr_dev);
	if (ret) {
		dev_err(dev, "free mr init failed!\n");
		goto error_failed_free_mr_init;
	}

	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);

	return 0;

error_failed_free_mr_init:
	hns_roce_tptr_free(hr_dev);

error_failed_tptr_init:
	hns_roce_bt_free(hr_dev);

error_failed_bt_init:
	hns_roce_raq_free(hr_dev);

error_failed_raq_init:
	hns_roce_db_free(hr_dev);
	return ret;
}
static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
{
	hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
	hns_roce_free_mr_free(hr_dev);
	hns_roce_tptr_free(hr_dev);
	hns_roce_bt_free(hr_dev);
	hns_roce_raq_free(hr_dev);
	hns_roce_db_free(hr_dev);
}
static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
{
	u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);

	return (!!(status & (1 << HCR_GO_BIT)));
}
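
/*
 * Mailbox commands: the GO bit in ROCEE_MB6 stays set while the hardware
 * owns the mailbox. post_mbox polls it clear, writes the parameters and
 * then the command word with HW_RUN set; chk_mbox polls the GO bit and
 * the status byte to decide whether the command succeeded.
 */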
static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
				 u64 out_param, u32 in_modifier, u8 op_modifier,
				 u16 op, u16 token, int event)
{
	u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
	unsigned long end;
	u32 val = 0;
	__le32 tmp;

	end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev)) {
		if (time_after(jiffies, end)) {
			dev_err(hr_dev->dev, "jiffies=%d end=%d\n",
				(int)jiffies, (int)end);
			return -EAGAIN;
		}
		cond_resched();
	}

	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
		       op);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
		       ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
	roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
	roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
		       ROCEE_MB6_ROCEE_MB_TOKEN_S, token);

	val = le32_to_cpu(tmp);
	writeq(in_param, hcr + 0);
	writeq(out_param, hcr + 2);
	writel(in_modifier, hcr + 4);
	/* Memory barrier */
	wmb();

	writel(val, hcr + 5);

	return 0;
}
static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
				unsigned long timeout)
{
	u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
	unsigned long end = 0;
	u32 status = 0;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
		cond_resched();

	if (hns_roce_v1_cmd_pending(hr_dev)) {
		dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
		return -ETIMEDOUT;
	}

	status = le32_to_cpu((__force __le32)
			      __raw_readl(hcr + HCR_STATUS_OFFSET));
	if ((status & STATUS_MASK) != 0x1) {
		dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
		return -EBUSY;
	}

	return 0;
}
static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u8 port,
			       int gid_index, const union ib_gid *gid,
			       const struct ib_gid_attr *attr)
{
	unsigned long flags;
	u32 *p = NULL;
	u8 gid_idx = 0;

	gid_idx = hns_get_gid_index(hr_dev, port, gid_index);

	spin_lock_irqsave(&hr_dev->iboe.lock, flags);

	p = (u32 *)&gid->raw[0];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[4];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[8];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	p = (u32 *)&gid->raw[0xc];
	roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
		       (HNS_ROCE_V1_GID_NUM * gid_idx));

	spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);

	return 0;
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
			       u8 *addr)
{
	u32 reg_smac_l;
	u16 reg_smac_h;
	__le32 tmp;
	u16 *p_h;
	u32 *p;
	u32 val;

	/*
	 * When the MAC changes, loopback may fail because the smac no
	 * longer equals the dmac. We need to release and create the
	 * reserved qp again.
	 */
	if (hr_dev->hw->dereg_mr) {
		int ret;

		ret = hns_roce_v1_recreate_lp_qp(hr_dev);
		if (ret && ret != -ETIMEDOUT)
			return ret;
	}

	p = (u32 *)(&addr[0]);
	reg_smac_l = *p;
	roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
		       PHY_PORT_OFFSET * phy_port);

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	p_h = (u16 *)(&addr[4]);
	reg_smac_h = *p_h;
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
		       ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);

	return 0;
}
static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
				enum ib_mtu mtu)
{
	__le32 tmp;
	u32 val;

	val = roce_read(hr_dev,
			ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
	tmp = cpu_to_le32(val);
	roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
		       ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
	val = le32_to_cpu(tmp);
	roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
		   val);
}
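
/*
 * Build an MPT entry in the mailbox buffer. For non-DMA MRs the first
 * seven page addresses are packed directly into the entry; the remaining
 * pages are reached through the PBL, whose DMA address is written last.
 */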
1826 static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
1827 unsigned long mtpt_idx)
1829 struct hns_roce_v1_mpt_entry *mpt_entry;
1830 struct sg_dma_page_iter sg_iter;
1831 u64 *pages;
1832 int i;
1834 /* MPT filled into mailbox buf */
1835 mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
1836 memset(mpt_entry, 0, sizeof(*mpt_entry));
1838 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
1839 MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
1840 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
1841 MPT_BYTE_4_KEY_S, mr->key);
1842 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
1843 MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
1844 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
1845 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
1846 (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
1847 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
1848 roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
1849 MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
1850 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
1851 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
1852 (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
1853 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
1854 (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
1855 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
1856 (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
1857 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
1859 roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
1861 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1862 MPT_BYTE_12_PBL_ADDR_H_S, 0);
1863 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
1864 MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
1866 mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
1867 mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
1868 mpt_entry->length = cpu_to_le32((u32)mr->size);
1870 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
1871 MPT_BYTE_28_PD_S, mr->pd);
1872 roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
1873 MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
1874 roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
1875 MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
1877 /* DMA memory register */
1878 if (mr->type == MR_TYPE_DMA)
1879 return 0;
1881 pages = (u64 *) __get_free_page(GFP_KERNEL);
1882 if (!pages)
1883 return -ENOMEM;
1885 i = 0;
1886 for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
1887 pages[i] = ((u64)sg_page_iter_dma_address(&sg_iter)) >> 12;
1889 /* Directly record to MTPT table firstly 7 entry */
1890 if (i >= HNS_ROCE_MAX_INNER_MTPT_NUM)
1891 break;
1892 i++;
1895 /* Register user mr */
1896 for (i = 0; i < HNS_ROCE_MAX_INNER_MTPT_NUM; i++) {
1897 switch (i) {
1898 case 0:
1899 mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
1900 roce_set_field(mpt_entry->mpt_byte_36,
1901 MPT_BYTE_36_PA0_H_M,
1902 MPT_BYTE_36_PA0_H_S,
1903 (u32)(pages[i] >> PAGES_SHIFT_32));
1904 break;
1905 case 1:
1906 roce_set_field(mpt_entry->mpt_byte_36,
1907 MPT_BYTE_36_PA1_L_M,
1908 MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
1909 roce_set_field(mpt_entry->mpt_byte_40,
1910 MPT_BYTE_40_PA1_H_M,
1911 MPT_BYTE_40_PA1_H_S,
1912 (u32)(pages[i] >> PAGES_SHIFT_24));
1913 break;
1914 case 2:
1915 roce_set_field(mpt_entry->mpt_byte_40,
1916 MPT_BYTE_40_PA2_L_M,
1917 MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
1918 roce_set_field(mpt_entry->mpt_byte_44,
1919 MPT_BYTE_44_PA2_H_M,
1920 MPT_BYTE_44_PA2_H_S,
1921 (u32)(pages[i] >> PAGES_SHIFT_16));
1922 break;
1923 case 3:
1924 roce_set_field(mpt_entry->mpt_byte_44,
1925 MPT_BYTE_44_PA3_L_M,
1926 MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
1927 roce_set_field(mpt_entry->mpt_byte_48,
1928 MPT_BYTE_48_PA3_H_M,
1929 MPT_BYTE_48_PA3_H_S,
1930 (u32)(pages[i] >> PAGES_SHIFT_8));
1931 break;
1932 case 4:
1933 mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
1934 roce_set_field(mpt_entry->mpt_byte_56,
1935 MPT_BYTE_56_PA4_H_M,
1936 MPT_BYTE_56_PA4_H_S,
1937 (u32)(pages[i] >> PAGES_SHIFT_32));
1938 break;
1939 case 5:
1940 roce_set_field(mpt_entry->mpt_byte_56,
1941 MPT_BYTE_56_PA5_L_M,
1942 MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
1943 roce_set_field(mpt_entry->mpt_byte_60,
1944 MPT_BYTE_60_PA5_H_M,
1945 MPT_BYTE_60_PA5_H_S,
1946 (u32)(pages[i] >> PAGES_SHIFT_24));
1947 break;
1948 case 6:
1949 roce_set_field(mpt_entry->mpt_byte_60,
1950 MPT_BYTE_60_PA6_L_M,
1951 MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
1952 roce_set_field(mpt_entry->mpt_byte_64,
1953 MPT_BYTE_64_PA6_H_M,
1954 MPT_BYTE_64_PA6_H_S,
1955 (u32)(pages[i] >> PAGES_SHIFT_16));
1956 break;
1957 default:
1958 break;
1962 free_page((unsigned long) pages);
1964 mpt_entry->pbl_addr_l = cpu_to_le32((u32)(mr->pbl_dma_addr));
1966 roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
1967 MPT_BYTE_12_PBL_ADDR_H_S,
1968 ((u32)(mr->pbl_dma_addr >> 32)));
1970 return 0;
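/*
 * The switch above packs each page frame number (pages[i], i.e. the DMA
 * address shifted right by 12 for 4K pages) into split low/high fields,
 * with the split point given by PAGES_SHIFT_32/24/16/8 depending on how
 * the field straddles a 32-bit word. A minimal userspace sketch of that
 * split (pfn_split() is a hypothetical helper, not a driver API):
 *
 *	#include <stdint.h>
 *
 *	static void pfn_split(uint64_t dma_addr, unsigned int shift,
 *			      uint32_t *lo, uint32_t *hi)
 *	{
 *		uint64_t pfn = dma_addr >> 12;	// 4K page frame number
 *
 *		*lo = (uint32_t)pfn;		// low bits of the PA field
 *		*hi = (uint32_t)(pfn >> shift);	// remaining high bits
 *	}
 */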
1973 static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
1975 return hns_roce_buf_offset(&hr_cq->buf, n * HNS_ROCE_V1_CQE_ENTRY_SIZE);
1978 static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
1980 struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
1982 /* The CQE is ready for SW when its owner bit differs from the wrap bit (MSB) of cons_idx */
1983 return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
1984 !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
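/*
 * Ownership test in isolation (userspace sketch, not driver code):
 * cq_depth is a power of two, so bit log2(cq_depth) of the consumer
 * index flips on every wrap of the ring, while hardware toggles the
 * CQE owner bit each time it refills a slot. A slot is software-owned
 * exactly when the two parities differ:
 *
 *	#include <stdbool.h>
 *	#include <stdint.h>
 *
 *	static bool cqe_is_sw_owned(unsigned int owner_bit,
 *				    uint32_t cons_index, uint32_t cq_depth)
 *	{
 *		return (owner_bit ^ !!(cons_index & cq_depth)) != 0;
 *	}
 */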
1987 static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
1989 return get_sw_cqe(hr_cq, hr_cq->cons_index);
1992 static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1994 __le32 doorbell[2];
1996 doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
1997 doorbell[1] = 0;
1998 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1999 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2000 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2001 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2002 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
2003 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2004 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
2006 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
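/*
 * The doorbell written above is two 32-bit words: word 0 carries the
 * consumer index masked to twice the CQ depth (so the wrap bit is
 * preserved), word 1 carries the command fields and the CQN.
 * roce_set_field() amounts to a masked read-modify-write; a minimal
 * sketch of that idiom (set_field() is illustrative, not the driver
 * helper itself):
 *
 *	#include <stdint.h>
 *
 *	static void set_field(uint32_t *word, uint32_t mask,
 *			      unsigned int shift, uint32_t val)
 *	{
 *		*word = (*word & ~mask) | ((val << shift) & mask);
 *	}
 */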
2009 static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2010 struct hns_roce_srq *srq)
2012 struct hns_roce_cqe *cqe, *dest;
2013 u32 prod_index;
2014 int nfreed = 0;
2015 u8 owner_bit;
2017 for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
2018 ++prod_index) {
2019 if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
2020 break;
2024 * Now walk backwards through the CQ, removing CQ entries
2025 * that match our QP by overwriting them with later entries.
2027 while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
2028 cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
2029 if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2030 CQE_BYTE_16_LOCAL_QPN_S) &
2031 HNS_ROCE_CQE_QPN_MASK) == qpn) {
2032 /* The v1 engine does not support SRQ */
2033 ++nfreed;
2034 } else if (nfreed) {
2035 dest = get_cqe(hr_cq, (prod_index + nfreed) &
2036 hr_cq->ib_cq.cqe);
2037 owner_bit = roce_get_bit(dest->cqe_byte_4,
2038 CQE_BYTE_4_OWNER_S);
2039 memcpy(dest, cqe, sizeof(*cqe));
2040 roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
2041 owner_bit);
2045 if (nfreed) {
2046 hr_cq->cons_index += nfreed;
2048 * Make sure update of buffer contents is done before
2049 * updating consumer index.
2051 wmb();
2053 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
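/*
 * Sketch of the compaction idiom above (userspace, illustrative only):
 * walking backwards from the producer side, entries of the dying QP are
 * dropped and every survivor slides forward by the number dropped so
 * far; the real loop additionally preserves each slot's owner bit.
 *
 *	static int ring_clean(int *ring, int head, int tail, int size,
 *			      int dying_qpn)
 *	{
 *		int i, nfreed = 0;
 *
 *		for (i = tail - 1; i >= head; i--) {
 *			if (ring[i & (size - 1)] == dying_qpn)
 *				nfreed++;
 *			else if (nfreed)
 *				ring[(i + nfreed) & (size - 1)] =
 *					ring[i & (size - 1)];
 *		}
 *
 *		return nfreed;	// advance the consumer index by this much
 *	}
 */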
2057 static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
2058 struct hns_roce_srq *srq)
2060 spin_lock_irq(&hr_cq->lock);
2061 __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
2062 spin_unlock_irq(&hr_cq->lock);
2065 static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
2066 struct hns_roce_cq *hr_cq, void *mb_buf,
2067 u64 *mtts, dma_addr_t dma_handle)
2069 struct hns_roce_cq_context *cq_context = NULL;
2070 struct hns_roce_buf_list *tptr_buf;
2071 struct hns_roce_v1_priv *priv;
2072 dma_addr_t tptr_dma_addr;
2073 int offset;
2075 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2076 tptr_buf = &priv->tptr_table.tptr_buf;
2078 cq_context = mb_buf;
2079 memset(cq_context, 0, sizeof(*cq_context));
2081 /* Get the tptr for this CQ. */
2082 offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
2083 tptr_dma_addr = tptr_buf->map + offset;
2084 hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
2086 /* Register cq_context members */
2087 roce_set_field(cq_context->cqc_byte_4,
2088 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
2089 CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
2090 roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
2091 CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
2093 cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
2095 roce_set_field(cq_context->cqc_byte_12,
2096 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
2097 CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
2098 ((u64)dma_handle >> 32));
2099 roce_set_field(cq_context->cqc_byte_12,
2100 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
2101 CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
2102 ilog2(hr_cq->cq_depth));
2103 roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
2104 CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
2106 cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
2108 roce_set_field(cq_context->cqc_byte_20,
2109 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
2110 CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
2111 /* Dedicated hardware, so set it to 0 directly */
2112 roce_set_field(cq_context->cqc_byte_20,
2113 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
2114 CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
2116 * 44 = 32 + 12. When passing the addr to hardware, shift right by
2117 * 12 because 4K pages are used, and by another 32 to obtain
2118 * the high 32-bit value handed to hardware.
2120 roce_set_field(cq_context->cqc_byte_20,
2121 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
2122 CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
2123 tptr_dma_addr >> 44);
2125 cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
2127 roce_set_field(cq_context->cqc_byte_32,
2128 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
2129 CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
2130 roce_set_bit(cq_context->cqc_byte_32,
2131 CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
2132 roce_set_bit(cq_context->cqc_byte_32,
2133 CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
2134 roce_set_bit(cq_context->cqc_byte_32,
2135 CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
2136 roce_set_bit(cq_context->cqc_byte_32,
2137 CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
2139 /* The initial value of cq's ci is 0 */
2140 roce_set_field(cq_context->cqc_byte_32,
2141 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
2142 CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
2145 static int hns_roce_v1_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2147 return -EOPNOTSUPP;
2150 static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
2151 enum ib_cq_notify_flags flags)
2153 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2154 u32 notification_flag;
2155 __le32 doorbell[2] = {};
2157 notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
2158 IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
2160 * flags = 0: notification flag = 1, next completion
2161 * flags = 1: notification flag = 0, solicited only
2163 doorbell[0] =
2164 cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
2165 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
2166 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
2167 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
2168 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
2169 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
2170 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
2171 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
2172 hr_cq->cqn | notification_flag);
2174 hns_roce_write64_k(doorbell, hr_cq->cq_db_l);
2176 return 0;
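/*
 * Consumer-side usage sketch: the two notification modes above map onto
 * the solicited_only argument of ibv_req_notify_cq() in userspace
 * libibverbs (assuming a valid struct ibv_cq *cq):
 *
 *	#include <infiniband/verbs.h>
 *
 *	ibv_req_notify_cq(cq, 0);	// notify on any completion
 *	ibv_req_notify_cq(cq, 1);	// notify on solicited completions only
 */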
2179 static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
2180 struct hns_roce_qp **cur_qp, struct ib_wc *wc)
2182 int qpn;
2183 int is_send;
2184 u16 wqe_ctr;
2185 u32 status;
2186 u32 opcode;
2187 struct hns_roce_cqe *cqe;
2188 struct hns_roce_qp *hr_qp;
2189 struct hns_roce_wq *wq;
2190 struct hns_roce_wqe_ctrl_seg *sq_wqe;
2191 struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
2192 struct device *dev = &hr_dev->pdev->dev;
2194 /* Find the CQE at the current consumer index */
2195 cqe = next_cqe_sw(hr_cq);
2196 if (!cqe)
2197 return -EAGAIN;
2199 ++hr_cq->cons_index;
2200 /* Memory barrier */
2201 rmb();
2202 /* 0->SQ, 1->RQ */
2203 is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
2205 /* local_qpn in a UD CQE is always 1, so the real QPN must be computed */
2206 if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2207 CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
2208 qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
2209 CQE_BYTE_20_PORT_NUM_S) +
2210 roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2211 CQE_BYTE_16_LOCAL_QPN_S) *
2212 HNS_ROCE_MAX_PORTS;
2213 } else {
2214 qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
2215 CQE_BYTE_16_LOCAL_QPN_S);
2218 if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
2219 hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
2220 if (unlikely(!hr_qp)) {
2221 dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
2222 hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
2223 return -EINVAL;
2226 *cur_qp = hr_qp;
2229 wc->qp = &(*cur_qp)->ibqp;
2230 wc->vendor_err = 0;
2232 status = roce_get_field(cqe->cqe_byte_4,
2233 CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
2234 CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
2235 HNS_ROCE_CQE_STATUS_MASK;
2236 switch (status) {
2237 case HNS_ROCE_CQE_SUCCESS:
2238 wc->status = IB_WC_SUCCESS;
2239 break;
2240 case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
2241 wc->status = IB_WC_LOC_LEN_ERR;
2242 break;
2243 case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
2244 wc->status = IB_WC_LOC_QP_OP_ERR;
2245 break;
2246 case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
2247 wc->status = IB_WC_LOC_PROT_ERR;
2248 break;
2249 case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
2250 wc->status = IB_WC_WR_FLUSH_ERR;
2251 break;
2252 case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
2253 wc->status = IB_WC_MW_BIND_ERR;
2254 break;
2255 case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
2256 wc->status = IB_WC_BAD_RESP_ERR;
2257 break;
2258 case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
2259 wc->status = IB_WC_LOC_ACCESS_ERR;
2260 break;
2261 case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
2262 wc->status = IB_WC_REM_INV_REQ_ERR;
2263 break;
2264 case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
2265 wc->status = IB_WC_REM_ACCESS_ERR;
2266 break;
2267 case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
2268 wc->status = IB_WC_REM_OP_ERR;
2269 break;
2270 case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
2271 wc->status = IB_WC_RETRY_EXC_ERR;
2272 break;
2273 case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
2274 wc->status = IB_WC_RNR_RETRY_EXC_ERR;
2275 break;
2276 default:
2277 wc->status = IB_WC_GENERAL_ERR;
2278 break;
2281 /* CQE status error, directly return */
2282 if (wc->status != IB_WC_SUCCESS)
2283 return 0;
2285 if (is_send) {
2286 /* The CQE corresponds to the SQ */
2287 sq_wqe = hns_roce_get_send_wqe(*cur_qp,
2288 roce_get_field(cqe->cqe_byte_4,
2289 CQE_BYTE_4_WQE_INDEX_M,
2290 CQE_BYTE_4_WQE_INDEX_S) &
2291 ((*cur_qp)->sq.wqe_cnt-1));
2292 switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
2293 case HNS_ROCE_WQE_OPCODE_SEND:
2294 wc->opcode = IB_WC_SEND;
2295 break;
2296 case HNS_ROCE_WQE_OPCODE_RDMA_READ:
2297 wc->opcode = IB_WC_RDMA_READ;
2298 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2299 break;
2300 case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
2301 wc->opcode = IB_WC_RDMA_WRITE;
2302 break;
2303 case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
2304 wc->opcode = IB_WC_LOCAL_INV;
2305 break;
2306 case HNS_ROCE_WQE_OPCODE_UD_SEND:
2307 wc->opcode = IB_WC_SEND;
2308 break;
2309 default:
2310 wc->status = IB_WC_GENERAL_ERR;
2311 break;
2313 wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
2314 IB_WC_WITH_IMM : 0);
2316 wq = &(*cur_qp)->sq;
2317 if ((*cur_qp)->sq_signal_bits) {
2319 * If sq_signal_bits is set,
2320 * first update the tail pointer to the WQE
2321 * that the current CQE corresponds to
2323 wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
2324 CQE_BYTE_4_WQE_INDEX_M,
2325 CQE_BYTE_4_WQE_INDEX_S);
2326 wq->tail += (wqe_ctr - (u16)wq->tail) &
2327 (wq->wqe_cnt - 1);
2329 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2330 ++wq->tail;
2331 } else {
2332 /* The CQE corresponds to the RQ */
2333 wc->byte_len = le32_to_cpu(cqe->byte_cnt);
2334 opcode = roce_get_field(cqe->cqe_byte_4,
2335 CQE_BYTE_4_OPERATION_TYPE_M,
2336 CQE_BYTE_4_OPERATION_TYPE_S) &
2337 HNS_ROCE_CQE_OPCODE_MASK;
2338 switch (opcode) {
2339 case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
2340 wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2341 wc->wc_flags = IB_WC_WITH_IMM;
2342 wc->ex.imm_data =
2343 cpu_to_be32(le32_to_cpu(cqe->immediate_data));
2344 break;
2345 case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
2346 if (roce_get_bit(cqe->cqe_byte_4,
2347 CQE_BYTE_4_IMM_INDICATOR_S)) {
2348 wc->opcode = IB_WC_RECV;
2349 wc->wc_flags = IB_WC_WITH_IMM;
2350 wc->ex.imm_data = cpu_to_be32(
2351 le32_to_cpu(cqe->immediate_data));
2352 } else {
2353 wc->opcode = IB_WC_RECV;
2354 wc->wc_flags = 0;
2356 break;
2357 default:
2358 wc->status = IB_WC_GENERAL_ERR;
2359 break;
2362 /* Update tail pointer, record wr_id */
2363 wq = &(*cur_qp)->rq;
2364 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
2365 ++wq->tail;
2366 wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
2367 CQE_BYTE_20_SL_S);
2368 wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
2369 CQE_BYTE_20_REMOTE_QPN_M,
2370 CQE_BYTE_20_REMOTE_QPN_S);
2371 wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
2372 CQE_BYTE_20_GRH_PRESENT_S) ?
2373 IB_WC_GRH : 0);
2374 wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
2375 CQE_BYTE_28_P_KEY_IDX_M,
2376 CQE_BYTE_28_P_KEY_IDX_S);
2379 return 0;
2382 int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
2384 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
2385 struct hns_roce_qp *cur_qp = NULL;
2386 unsigned long flags;
2387 int npolled;
2388 int ret = 0;
2390 spin_lock_irqsave(&hr_cq->lock, flags);
2392 for (npolled = 0; npolled < num_entries; ++npolled) {
2393 ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
2394 if (ret)
2395 break;
2398 if (npolled) {
2399 *hr_cq->tptr_addr = hr_cq->cons_index &
2400 ((hr_cq->cq_depth << 1) - 1);
2402 /* Memory barrier */
2403 wmb();
2404 hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
2407 spin_unlock_irqrestore(&hr_cq->lock, flags);
2409 if (ret == 0 || ret == -EAGAIN)
2410 return npolled;
2411 else
2412 return ret;
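/*
 * Caller-side sketch: -EAGAIN from hns_roce_v1_poll_one() only means
 * "ring empty", so the function above still reports how many entries it
 * reaped. A typical kernel consumer drains the CQ through the verbs
 * layer like this (assuming a valid struct ib_cq *cq;
 * handle_completion() is a hypothetical callback):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0) {
 *		for (i = 0; i < n; i++)
 *			handle_completion(&wc[i]);
 *	}
 */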
2415 static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
2416 struct hns_roce_hem_table *table, int obj,
2417 int step_idx)
2419 struct device *dev = &hr_dev->pdev->dev;
2420 struct hns_roce_v1_priv *priv;
2421 unsigned long flags = 0;
2422 long end = HW_SYNC_TIMEOUT_MSECS;
2423 __le32 bt_cmd_val[2] = {0};
2424 void __iomem *bt_cmd;
2425 u64 bt_ba = 0;
2427 priv = (struct hns_roce_v1_priv *)hr_dev->priv;
2429 switch (table->type) {
2430 case HEM_TYPE_QPC:
2431 bt_ba = priv->bt_table.qpc_buf.map >> 12;
2432 break;
2433 case HEM_TYPE_MTPT:
2434 bt_ba = priv->bt_table.mtpt_buf.map >> 12;
2435 break;
2436 case HEM_TYPE_CQC:
2437 bt_ba = priv->bt_table.cqc_buf.map >> 12;
2438 break;
2439 case HEM_TYPE_SRQC:
2440 dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
2441 return -EINVAL;
2442 default:
2443 return 0;
2445 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
2446 ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
2447 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
2448 ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
2449 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
2450 roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
2452 spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
2454 bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
2456 while (1) {
2457 if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
2458 if (!end) {
2459 dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
2460 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
2461 flags);
2462 return -EBUSY;
2464 } else {
2465 break;
2467 mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
2468 end -= HW_SYNC_SLEEP_TIME_INTERVAL;
2471 bt_cmd_val[0] = cpu_to_le32(bt_ba);
2472 roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
2473 ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
2474 hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
2476 spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
2478 return 0;
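/*
 * The wait above is a bounded busy-poll: spin while the HW_SYNC bit
 * stays set and give up after HW_SYNC_TIMEOUT_MSECS. The same pattern
 * in isolation (poll_busy() and sleep_ms() stand in for the readl()
 * test and mdelay(); both are hypothetical helpers):
 *
 *	static int wait_for_idle(long timeout_ms, long step_ms)
 *	{
 *		while (poll_busy()) {
 *			if (timeout_ms <= 0)
 *				return -1;	// -EBUSY in the driver
 *			sleep_ms(step_ms);
 *			timeout_ms -= step_ms;
 *		}
 *
 *		return 0;
 *	}
 */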
2481 static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
2482 struct hns_roce_mtt *mtt,
2483 enum hns_roce_qp_state cur_state,
2484 enum hns_roce_qp_state new_state,
2485 struct hns_roce_qp_context *context,
2486 struct hns_roce_qp *hr_qp)
2488 static const u16
2489 op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
2490 [HNS_ROCE_QP_STATE_RST] = {
2491 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2492 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2493 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2495 [HNS_ROCE_QP_STATE_INIT] = {
2496 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2497 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2498 /* Note: In the v1 engine, HW doesn't support INIT2INIT.
2499 * We use the RST2INIT cmd instead of INIT2INIT.
2501 [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
2502 [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
2504 [HNS_ROCE_QP_STATE_RTR] = {
2505 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2506 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2507 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
2509 [HNS_ROCE_QP_STATE_RTS] = {
2510 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2511 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2512 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
2513 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
2515 [HNS_ROCE_QP_STATE_SQD] = {
2516 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2517 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2518 [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
2519 [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
2521 [HNS_ROCE_QP_STATE_ERR] = {
2522 [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
2523 [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
2527 struct hns_roce_cmd_mailbox *mailbox;
2528 struct device *dev = &hr_dev->pdev->dev;
2529 int ret = 0;
2531 if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
2532 new_state >= HNS_ROCE_QP_NUM_STATE ||
2533 !op[cur_state][new_state]) {
2534 dev_err(dev, "[modify_qp] unsupported transition from state %d to %d\n",
2535 cur_state, new_state);
2536 return -EINVAL;
2539 if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
2540 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2541 HNS_ROCE_CMD_2RST_QP,
2542 HNS_ROCE_CMD_TIMEOUT_MSECS);
2544 if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
2545 return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
2546 HNS_ROCE_CMD_2ERR_QP,
2547 HNS_ROCE_CMD_TIMEOUT_MSECS);
2549 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
2550 if (IS_ERR(mailbox))
2551 return PTR_ERR(mailbox);
2553 memcpy(mailbox->buf, context, sizeof(*context));
2555 ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
2556 op[cur_state][new_state],
2557 HNS_ROCE_CMD_TIMEOUT_MSECS);
2559 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
2560 return ret;
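/*
 * The op[][] table above is a dense (cur_state, new_state) -> mailbox
 * command map where a zero entry marks an unsupported transition; the
 * same dispatch idiom in miniature (states and command value are made
 * up for illustration):
 *
 *	enum state { S_A, S_B, NUM_STATE };
 *
 *	static const u16 cmd[NUM_STATE][NUM_STATE] = {
 *		[S_A] = { [S_B] = 0x11 },	// only A -> B is legal
 *	};
 *
 *	static int lookup_cmd(enum state cur, enum state next)
 *	{
 *		if (cur >= NUM_STATE || next >= NUM_STATE ||
 *		    !cmd[cur][next])
 *			return -EINVAL;
 *		return cmd[cur][next];
 *	}
 */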
2563 static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2564 int attr_mask, enum ib_qp_state cur_state,
2565 enum ib_qp_state new_state)
2567 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2568 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2569 struct hns_roce_sqp_context *context;
2570 struct device *dev = &hr_dev->pdev->dev;
2571 dma_addr_t dma_handle = 0;
2572 u32 __iomem *addr;
2573 int rq_pa_start;
2574 __le32 tmp;
2575 u32 reg_val;
2576 u64 *mtts;
2578 context = kzalloc(sizeof(*context), GFP_KERNEL);
2579 if (!context)
2580 return -ENOMEM;
2582 /* Search QP buf's MTTs */
2583 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2584 hr_qp->mtt.first_seg, &dma_handle);
2585 if (!mtts) {
2586 dev_err(dev, "failed to find the QP buffer's PA\n");
2587 goto out;
2590 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2591 roce_set_field(context->qp1c_bytes_4,
2592 QP1C_BYTES_4_SQ_WQE_SHIFT_M,
2593 QP1C_BYTES_4_SQ_WQE_SHIFT_S,
2594 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2595 roce_set_field(context->qp1c_bytes_4,
2596 QP1C_BYTES_4_RQ_WQE_SHIFT_M,
2597 QP1C_BYTES_4_RQ_WQE_SHIFT_S,
2598 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2599 roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
2600 QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
2602 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2603 roce_set_field(context->qp1c_bytes_12,
2604 QP1C_BYTES_12_SQ_RQ_BT_H_M,
2605 QP1C_BYTES_12_SQ_RQ_BT_H_S,
2606 ((u32)(dma_handle >> 32)));
2608 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
2609 QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
2610 roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
2611 QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
2612 roce_set_bit(context->qp1c_bytes_16,
2613 QP1C_BYTES_16_SIGNALING_TYPE_S,
2614 hr_qp->sq_signal_bits);
2615 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
2617 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
2619 roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
2622 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
2623 QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
2624 roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
2625 QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
2627 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2628 context->cur_rq_wqe_ba_l =
2629 cpu_to_le32((u32)(mtts[rq_pa_start]));
2631 roce_set_field(context->qp1c_bytes_28,
2632 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
2633 QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
2634 (mtts[rq_pa_start]) >> 32);
2635 roce_set_field(context->qp1c_bytes_28,
2636 QP1C_BYTES_28_RQ_CUR_IDX_M,
2637 QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
2639 roce_set_field(context->qp1c_bytes_32,
2640 QP1C_BYTES_32_RX_CQ_NUM_M,
2641 QP1C_BYTES_32_RX_CQ_NUM_S,
2642 to_hr_cq(ibqp->recv_cq)->cqn);
2643 roce_set_field(context->qp1c_bytes_32,
2644 QP1C_BYTES_32_TX_CQ_NUM_M,
2645 QP1C_BYTES_32_TX_CQ_NUM_S,
2646 to_hr_cq(ibqp->send_cq)->cqn);
2648 context->cur_sq_wqe_ba_l = cpu_to_le32((u32)mtts[0]);
2650 roce_set_field(context->qp1c_bytes_40,
2651 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
2652 QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
2653 (mtts[0]) >> 32);
2654 roce_set_field(context->qp1c_bytes_40,
2655 QP1C_BYTES_40_SQ_CUR_IDX_M,
2656 QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
2658 /* Copy context to QP1C register */
2659 addr = (u32 __iomem *)(hr_dev->reg_base +
2660 ROCEE_QP1C_CFG0_0_REG +
2661 hr_qp->phy_port * sizeof(*context));
2663 writel(le32_to_cpu(context->qp1c_bytes_4), addr);
2664 writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
2665 writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
2666 writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
2667 writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
2668 writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
2669 writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
2670 writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
2671 writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
2672 writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
2675 /* Modify QP1C status */
2676 reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2677 hr_qp->phy_port * sizeof(*context));
2678 tmp = cpu_to_le32(reg_val);
2679 roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
2680 ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
2681 reg_val = le32_to_cpu(tmp);
2682 roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
2683 hr_qp->phy_port * sizeof(*context), reg_val);
2685 hr_qp->state = new_state;
2686 if (new_state == IB_QPS_RESET) {
2687 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
2688 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
2689 if (ibqp->send_cq != ibqp->recv_cq)
2690 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
2691 hr_qp->qpn, NULL);
2693 hr_qp->rq.head = 0;
2694 hr_qp->rq.tail = 0;
2695 hr_qp->sq.head = 0;
2696 hr_qp->sq.tail = 0;
2699 kfree(context);
2700 return 0;
2702 out:
2703 kfree(context);
2704 return -EINVAL;
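/*
 * Sketch of the word-by-word MMIO copy used above: the QP1C context is
 * pushed to adjacent 32-bit registers, one writel() per le32 field. In
 * isolation (illustrative only; dst models an ioremapped region):
 *
 *	#include <stdint.h>
 *
 *	static void copy_ctx_words(volatile uint32_t *dst,
 *				   const uint32_t *src, unsigned int n)
 *	{
 *		unsigned int i;
 *
 *		for (i = 0; i < n; i++)
 *			dst[i] = src[i];	// writel(src[i], addr + i)
 *	}
 */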
2707 static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
2708 int attr_mask, enum ib_qp_state cur_state,
2709 enum ib_qp_state new_state)
2711 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
2712 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
2713 struct device *dev = &hr_dev->pdev->dev;
2714 struct hns_roce_qp_context *context;
2715 const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
2716 dma_addr_t dma_handle_2 = 0;
2717 dma_addr_t dma_handle = 0;
2718 __le32 doorbell[2] = {0};
2719 int rq_pa_start = 0;
2720 u64 *mtts_2 = NULL;
2721 int ret = -EINVAL;
2722 u64 *mtts = NULL;
2723 int port;
2724 u8 port_num;
2725 u8 *dmac;
2726 u8 *smac;
2728 context = kzalloc(sizeof(*context), GFP_KERNEL);
2729 if (!context)
2730 return -ENOMEM;
2732 /* Search qp buf's mtts */
2733 mtts = hns_roce_table_find(hr_dev, &hr_dev->mr_table.mtt_table,
2734 hr_qp->mtt.first_seg, &dma_handle);
2735 if (mtts == NULL) {
2736 dev_err(dev, "failed to find the QP buffer's PA\n");
2737 goto out;
2740 /* Search IRRL's mtts */
2741 mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
2742 hr_qp->qpn, &dma_handle_2);
2743 if (mtts_2 == NULL) {
2744 dev_err(dev, "failed to find the QP's IRRL table\n");
2745 goto out;
2749 * Reset to init
2750 * Mandatory param:
2751 * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
2752 * Optional param: NA
2754 if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
2755 roce_set_field(context->qpc_bytes_4,
2756 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2757 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2758 to_hr_qp_type(hr_qp->ibqp.qp_type));
2760 roce_set_bit(context->qpc_bytes_4,
2761 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2762 roce_set_bit(context->qpc_bytes_4,
2763 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2764 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
2765 roce_set_bit(context->qpc_bytes_4,
2766 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2767 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
2769 roce_set_bit(context->qpc_bytes_4,
2770 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
2771 !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
2773 roce_set_bit(context->qpc_bytes_4,
2774 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2775 roce_set_field(context->qpc_bytes_4,
2776 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2777 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2778 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2779 roce_set_field(context->qpc_bytes_4,
2780 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2781 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2782 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2783 roce_set_field(context->qpc_bytes_4,
2784 QP_CONTEXT_QPC_BYTES_4_PD_M,
2785 QP_CONTEXT_QPC_BYTES_4_PD_S,
2786 to_hr_pd(ibqp->pd)->pdn);
2787 hr_qp->access_flags = attr->qp_access_flags;
2788 roce_set_field(context->qpc_bytes_8,
2789 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2790 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2791 to_hr_cq(ibqp->send_cq)->cqn);
2792 roce_set_field(context->qpc_bytes_8,
2793 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2794 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2795 to_hr_cq(ibqp->recv_cq)->cqn);
2797 if (ibqp->srq)
2798 roce_set_field(context->qpc_bytes_12,
2799 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2800 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2801 to_hr_srq(ibqp->srq)->srqn);
2803 roce_set_field(context->qpc_bytes_12,
2804 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2805 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2806 attr->pkey_index);
2807 hr_qp->pkey_index = attr->pkey_index;
2808 roce_set_field(context->qpc_bytes_16,
2809 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2810 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2812 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
2813 roce_set_field(context->qpc_bytes_4,
2814 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
2815 QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
2816 to_hr_qp_type(hr_qp->ibqp.qp_type));
2817 roce_set_bit(context->qpc_bytes_4,
2818 QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
2819 if (attr_mask & IB_QP_ACCESS_FLAGS) {
2820 roce_set_bit(context->qpc_bytes_4,
2821 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2822 !!(attr->qp_access_flags &
2823 IB_ACCESS_REMOTE_READ));
2824 roce_set_bit(context->qpc_bytes_4,
2825 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2826 !!(attr->qp_access_flags &
2827 IB_ACCESS_REMOTE_WRITE));
2828 } else {
2829 roce_set_bit(context->qpc_bytes_4,
2830 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
2831 !!(hr_qp->access_flags &
2832 IB_ACCESS_REMOTE_READ));
2833 roce_set_bit(context->qpc_bytes_4,
2834 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
2835 !!(hr_qp->access_flags &
2836 IB_ACCESS_REMOTE_WRITE));
2839 roce_set_bit(context->qpc_bytes_4,
2840 QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
2841 roce_set_field(context->qpc_bytes_4,
2842 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
2843 QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
2844 ilog2((unsigned int)hr_qp->sq.wqe_cnt));
2845 roce_set_field(context->qpc_bytes_4,
2846 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
2847 QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
2848 ilog2((unsigned int)hr_qp->rq.wqe_cnt));
2849 roce_set_field(context->qpc_bytes_4,
2850 QP_CONTEXT_QPC_BYTES_4_PD_M,
2851 QP_CONTEXT_QPC_BYTES_4_PD_S,
2852 to_hr_pd(ibqp->pd)->pdn);
2854 roce_set_field(context->qpc_bytes_8,
2855 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
2856 QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
2857 to_hr_cq(ibqp->send_cq)->cqn);
2858 roce_set_field(context->qpc_bytes_8,
2859 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
2860 QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
2861 to_hr_cq(ibqp->recv_cq)->cqn);
2863 if (ibqp->srq)
2864 roce_set_field(context->qpc_bytes_12,
2865 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
2866 QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
2867 to_hr_srq(ibqp->srq)->srqn);
2868 if (attr_mask & IB_QP_PKEY_INDEX)
2869 roce_set_field(context->qpc_bytes_12,
2870 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2871 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2872 attr->pkey_index);
2873 else
2874 roce_set_field(context->qpc_bytes_12,
2875 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
2876 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
2877 hr_qp->pkey_index);
2879 roce_set_field(context->qpc_bytes_16,
2880 QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
2881 QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
2882 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
2883 if ((attr_mask & IB_QP_ALT_PATH) ||
2884 (attr_mask & IB_QP_ACCESS_FLAGS) ||
2885 (attr_mask & IB_QP_PKEY_INDEX) ||
2886 (attr_mask & IB_QP_QKEY)) {
2887 dev_err(dev, "INIT2RTR attr_mask error\n");
2888 goto out;
2891 dmac = (u8 *)attr->ah_attr.roce.dmac;
2893 context->sq_rq_bt_l = cpu_to_le32((u32)(dma_handle));
2894 roce_set_field(context->qpc_bytes_24,
2895 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
2896 QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
2897 ((u32)(dma_handle >> 32)));
2898 roce_set_bit(context->qpc_bytes_24,
2899 QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
2901 roce_set_field(context->qpc_bytes_24,
2902 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
2903 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
2904 attr->min_rnr_timer);
2905 context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
2906 roce_set_field(context->qpc_bytes_32,
2907 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
2908 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
2909 ((u32)(dma_handle_2 >> 32)) &
2910 QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
2911 roce_set_field(context->qpc_bytes_32,
2912 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
2913 QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
2914 roce_set_bit(context->qpc_bytes_32,
2915 QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
2917 roce_set_bit(context->qpc_bytes_32,
2918 QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
2919 hr_qp->sq_signal_bits);
2921 port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
2922 hr_qp->port;
2923 smac = (u8 *)hr_dev->dev_addr[port];
2924 /* when dmac equals smac or loop_idc is 1, traffic should loop back */
2925 if (ether_addr_equal_unaligned(dmac, smac) ||
2926 hr_dev->loop_idc == 0x1)
2927 roce_set_bit(context->qpc_bytes_32,
2928 QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
2930 roce_set_bit(context->qpc_bytes_32,
2931 QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
2932 rdma_ah_get_ah_flags(&attr->ah_attr));
2933 roce_set_field(context->qpc_bytes_32,
2934 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
2935 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
2936 ilog2((unsigned int)attr->max_dest_rd_atomic));
2938 if (attr_mask & IB_QP_DEST_QPN)
2939 roce_set_field(context->qpc_bytes_36,
2940 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
2941 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
2942 attr->dest_qp_num);
2944 /* Configure GID index */
2945 port_num = rdma_ah_get_port_num(&attr->ah_attr);
2946 roce_set_field(context->qpc_bytes_36,
2947 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
2948 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
2949 hns_get_gid_index(hr_dev,
2950 port_num - 1,
2951 grh->sgid_index));
2953 memcpy(&(context->dmac_l), dmac, 4);
2955 roce_set_field(context->qpc_bytes_44,
2956 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2957 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
2958 *((u16 *)(&dmac[4])));
2959 roce_set_field(context->qpc_bytes_44,
2960 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
2961 QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
2962 rdma_ah_get_static_rate(&attr->ah_attr));
2963 roce_set_field(context->qpc_bytes_44,
2964 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
2965 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
2966 grh->hop_limit);
2968 roce_set_field(context->qpc_bytes_48,
2969 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
2970 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
2971 grh->flow_label);
2972 roce_set_field(context->qpc_bytes_48,
2973 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
2974 QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
2975 grh->traffic_class);
2976 roce_set_field(context->qpc_bytes_48,
2977 QP_CONTEXT_QPC_BYTES_48_MTU_M,
2978 QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
2980 memcpy(context->dgid, grh->dgid.raw,
2981 sizeof(grh->dgid.raw));
2983 dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l,
2984 roce_get_field(context->qpc_bytes_44,
2985 QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
2986 QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
2988 roce_set_field(context->qpc_bytes_68,
2989 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
2990 QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
2991 hr_qp->rq.head);
2992 roce_set_field(context->qpc_bytes_68,
2993 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
2994 QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
2996 rq_pa_start = (u32)hr_qp->rq.offset / PAGE_SIZE;
2997 context->cur_rq_wqe_ba_l =
2998 cpu_to_le32((u32)(mtts[rq_pa_start]));
3000 roce_set_field(context->qpc_bytes_76,
3001 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
3002 QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
3003 mtts[rq_pa_start] >> 32);
3004 roce_set_field(context->qpc_bytes_76,
3005 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
3006 QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
3008 context->rx_rnr_time = 0;
3010 roce_set_field(context->qpc_bytes_84,
3011 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
3012 QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
3013 attr->rq_psn - 1);
3014 roce_set_field(context->qpc_bytes_84,
3015 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
3016 QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
3018 roce_set_field(context->qpc_bytes_88,
3019 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3020 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
3021 attr->rq_psn);
3022 roce_set_bit(context->qpc_bytes_88,
3023 QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
3024 roce_set_bit(context->qpc_bytes_88,
3025 QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
3026 roce_set_field(context->qpc_bytes_88,
3027 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
3028 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
3030 roce_set_field(context->qpc_bytes_88,
3031 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
3032 QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
3035 context->dma_length = 0;
3036 context->r_key = 0;
3037 context->va_l = 0;
3038 context->va_h = 0;
3040 roce_set_field(context->qpc_bytes_108,
3041 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
3042 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
3043 roce_set_bit(context->qpc_bytes_108,
3044 QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
3045 roce_set_bit(context->qpc_bytes_108,
3046 QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
3048 roce_set_field(context->qpc_bytes_112,
3049 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
3050 QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
3051 roce_set_field(context->qpc_bytes_112,
3052 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
3053 QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
3055 /* For chip resp ack */
3056 roce_set_field(context->qpc_bytes_156,
3057 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3058 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3059 hr_qp->phy_port);
3060 roce_set_field(context->qpc_bytes_156,
3061 QP_CONTEXT_QPC_BYTES_156_SL_M,
3062 QP_CONTEXT_QPC_BYTES_156_SL_S,
3063 rdma_ah_get_sl(&attr->ah_attr));
3064 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3065 } else if (cur_state == IB_QPS_RTR &&
3066 new_state == IB_QPS_RTS) {
3067 /* If any optional param is present, return an error */
3068 if ((attr_mask & IB_QP_ALT_PATH) ||
3069 (attr_mask & IB_QP_ACCESS_FLAGS) ||
3070 (attr_mask & IB_QP_QKEY) ||
3071 (attr_mask & IB_QP_PATH_MIG_STATE) ||
3072 (attr_mask & IB_QP_CUR_STATE) ||
3073 (attr_mask & IB_QP_MIN_RNR_TIMER)) {
3074 dev_err(dev, "RTR2RTS attr_mask error\n");
3075 goto out;
3078 context->rx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3080 roce_set_field(context->qpc_bytes_120,
3081 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
3082 QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
3083 (mtts[0]) >> 32);
3085 roce_set_field(context->qpc_bytes_124,
3086 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
3087 QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
3088 roce_set_field(context->qpc_bytes_124,
3089 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
3090 QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
3092 roce_set_field(context->qpc_bytes_128,
3093 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
3094 QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
3095 attr->sq_psn);
3096 roce_set_bit(context->qpc_bytes_128,
3097 QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
3098 roce_set_field(context->qpc_bytes_128,
3099 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
3100 QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
3102 roce_set_bit(context->qpc_bytes_128,
3103 QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
3105 roce_set_field(context->qpc_bytes_132,
3106 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
3107 QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
3108 roce_set_field(context->qpc_bytes_132,
3109 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
3110 QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
3112 roce_set_field(context->qpc_bytes_136,
3113 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
3114 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
3115 attr->sq_psn);
3116 roce_set_field(context->qpc_bytes_136,
3117 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
3118 QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
3119 attr->sq_psn);
3121 roce_set_field(context->qpc_bytes_140,
3122 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
3123 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
3124 (attr->sq_psn >> SQ_PSN_SHIFT));
3125 roce_set_field(context->qpc_bytes_140,
3126 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
3127 QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
3128 roce_set_bit(context->qpc_bytes_140,
3129 QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
3131 roce_set_field(context->qpc_bytes_148,
3132 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
3133 QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
3134 roce_set_field(context->qpc_bytes_148,
3135 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3136 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
3137 attr->retry_cnt);
3138 roce_set_field(context->qpc_bytes_148,
3139 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
3140 QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
3141 attr->rnr_retry);
3142 roce_set_field(context->qpc_bytes_148,
3143 QP_CONTEXT_QPC_BYTES_148_LSN_M,
3144 QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
3146 context->rnr_retry = 0;
3148 roce_set_field(context->qpc_bytes_156,
3149 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
3150 QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
3151 attr->retry_cnt);
3152 if (attr->timeout < 0x12) {
3153 dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n",
3154 attr->timeout);
3155 roce_set_field(context->qpc_bytes_156,
3156 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3157 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3158 0x12);
3159 } else {
3160 roce_set_field(context->qpc_bytes_156,
3161 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3162 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
3163 attr->timeout);
3165 roce_set_field(context->qpc_bytes_156,
3166 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
3167 QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
3168 attr->rnr_retry);
3169 roce_set_field(context->qpc_bytes_156,
3170 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
3171 QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
3172 hr_qp->phy_port);
3173 roce_set_field(context->qpc_bytes_156,
3174 QP_CONTEXT_QPC_BYTES_156_SL_M,
3175 QP_CONTEXT_QPC_BYTES_156_SL_S,
3176 rdma_ah_get_sl(&attr->ah_attr));
3177 hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
3178 roce_set_field(context->qpc_bytes_156,
3179 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3180 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
3181 ilog2((unsigned int)attr->max_rd_atomic));
3182 roce_set_field(context->qpc_bytes_156,
3183 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
3184 QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
3185 context->pkt_use_len = 0;
3187 roce_set_field(context->qpc_bytes_164,
3188 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3189 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
3190 roce_set_field(context->qpc_bytes_164,
3191 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
3192 QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
3194 roce_set_field(context->qpc_bytes_168,
3195 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
3196 QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
3197 attr->sq_psn);
3198 roce_set_field(context->qpc_bytes_168,
3199 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
3200 QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
3201 roce_set_field(context->qpc_bytes_168,
3202 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
3203 QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
3204 roce_set_bit(context->qpc_bytes_168,
3205 QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
3206 roce_set_bit(context->qpc_bytes_168,
3207 QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
3208 roce_set_bit(context->qpc_bytes_168,
3209 QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
3210 context->sge_use_len = 0;
3212 roce_set_field(context->qpc_bytes_176,
3213 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
3214 QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
3215 roce_set_field(context->qpc_bytes_176,
3216 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
3217 QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
3219 roce_set_field(context->qpc_bytes_180,
3220 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
3221 QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
3222 roce_set_field(context->qpc_bytes_180,
3223 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
3224 QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
3226 context->tx_cur_sq_wqe_ba_l = cpu_to_le32((u32)(mtts[0]));
3228 roce_set_field(context->qpc_bytes_188,
3229 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
3230 QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
3231 (mtts[0]) >> 32);
3232 roce_set_bit(context->qpc_bytes_188,
3233 QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
3234 roce_set_field(context->qpc_bytes_188,
3235 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
3236 QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
3238 } else if (!((cur_state == IB_QPS_INIT && new_state == IB_QPS_RESET) ||
3239 (cur_state == IB_QPS_INIT && new_state == IB_QPS_ERR) ||
3240 (cur_state == IB_QPS_RTR && new_state == IB_QPS_RESET) ||
3241 (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
3242 (cur_state == IB_QPS_RTS && new_state == IB_QPS_RESET) ||
3243 (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
3244 (cur_state == IB_QPS_ERR && new_state == IB_QPS_RESET) ||
3245 (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR))) {
3246 dev_err(dev, "unsupported QP state migration\n");
3247 goto out;
3250 /* Every state migration must set the new QP state */
3251 roce_set_field(context->qpc_bytes_144,
3252 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3253 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
3255 /* SW passes the context to HW */
3256 ret = hns_roce_v1_qp_modify(hr_dev, &hr_qp->mtt,
3257 to_hns_roce_state(cur_state),
3258 to_hns_roce_state(new_state), context,
3259 hr_qp);
3260 if (ret) {
3261 dev_err(dev, "hns_roce_qp_modify failed\n");
3262 goto out;
3266 * The driver uses rst2init instead of init2init,
3267 * so HW needs to flush the RQ head via the doorbell again
3269 if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
3270 /* Memory barrier */
3271 wmb();
3273 roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
3274 RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
3275 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
3276 RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
3277 roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
3278 RQ_DOORBELL_U32_8_CMD_S, 1);
3279 roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
3281 if (ibqp->uobject) {
3282 hr_qp->rq.db_reg_l = hr_dev->reg_base +
3283 hr_dev->odb_offset +
3284 DB_REG_OFFSET * hr_dev->priv_uar.index;
3287 hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
3290 hr_qp->state = new_state;
3292 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
3293 hr_qp->resp_depth = attr->max_dest_rd_atomic;
3294 if (attr_mask & IB_QP_PORT) {
3295 hr_qp->port = attr->port_num - 1;
3296 hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
3299 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
3300 hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
3301 ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
3302 if (ibqp->send_cq != ibqp->recv_cq)
3303 hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
3304 hr_qp->qpn, NULL);
3306 hr_qp->rq.head = 0;
3307 hr_qp->rq.tail = 0;
3308 hr_qp->sq.head = 0;
3309 hr_qp->sq.tail = 0;
3311 out:
3312 kfree(context);
3313 return ret;
3316 static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
3317 const struct ib_qp_attr *attr, int attr_mask,
3318 enum ib_qp_state cur_state,
3319 enum ib_qp_state new_state)
3322 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
3323 return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
3324 new_state);
3325 else
3326 return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
3327 new_state);
3330 static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
3332 switch (state) {
3333 case HNS_ROCE_QP_STATE_RST:
3334 return IB_QPS_RESET;
3335 case HNS_ROCE_QP_STATE_INIT:
3336 return IB_QPS_INIT;
3337 case HNS_ROCE_QP_STATE_RTR:
3338 return IB_QPS_RTR;
3339 case HNS_ROCE_QP_STATE_RTS:
3340 return IB_QPS_RTS;
3341 case HNS_ROCE_QP_STATE_SQD:
3342 return IB_QPS_SQD;
3343 case HNS_ROCE_QP_STATE_ERR:
3344 return IB_QPS_ERR;
3345 default:
3346 return IB_QPS_ERR;
3350 static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
3351 struct hns_roce_qp *hr_qp,
3352 struct hns_roce_qp_context *hr_context)
3354 struct hns_roce_cmd_mailbox *mailbox;
3355 int ret;
3357 mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
3358 if (IS_ERR(mailbox))
3359 return PTR_ERR(mailbox);
3361 ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
3362 HNS_ROCE_CMD_QUERY_QP,
3363 HNS_ROCE_CMD_TIMEOUT_MSECS);
3364 if (!ret)
3365 memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
3366 else
3367 dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
3369 hns_roce_free_cmd_mailbox(hr_dev, mailbox);
3371 return ret;
3374 static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3375 int qp_attr_mask,
3376 struct ib_qp_init_attr *qp_init_attr)
3378 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3379 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3380 struct hns_roce_sqp_context context;
3381 u32 addr;
3383 mutex_lock(&hr_qp->mutex);
3385 if (hr_qp->state == IB_QPS_RESET) {
3386 qp_attr->qp_state = IB_QPS_RESET;
3387 goto done;
3390 addr = ROCEE_QP1C_CFG0_0_REG +
3391 hr_qp->port * sizeof(struct hns_roce_sqp_context);
3392 context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
3393 context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
3394 context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
3395 context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
3396 context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
3397 context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
3398 context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
3399 context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
3400 context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
3401 context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
3403 hr_qp->state = roce_get_field(context.qp1c_bytes_4,
3404 QP1C_BYTES_4_QP_STATE_M,
3405 QP1C_BYTES_4_QP_STATE_S);
3406 qp_attr->qp_state = hr_qp->state;
3407 qp_attr->path_mtu = IB_MTU_256;
3408 qp_attr->path_mig_state = IB_MIG_ARMED;
3409 qp_attr->qkey = QKEY_VAL;
3410 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3411 qp_attr->rq_psn = 0;
3412 qp_attr->sq_psn = 0;
3413 qp_attr->dest_qp_num = 1;
3414 qp_attr->qp_access_flags = 6;
3416 qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
3417 QP1C_BYTES_20_PKEY_IDX_M,
3418 QP1C_BYTES_20_PKEY_IDX_S);
3419 qp_attr->port_num = hr_qp->port + 1;
3420 qp_attr->sq_draining = 0;
3421 qp_attr->max_rd_atomic = 0;
3422 qp_attr->max_dest_rd_atomic = 0;
3423 qp_attr->min_rnr_timer = 0;
3424 qp_attr->timeout = 0;
3425 qp_attr->retry_cnt = 0;
3426 qp_attr->rnr_retry = 0;
3427 qp_attr->alt_timeout = 0;
3429 done:
3430 qp_attr->cur_qp_state = qp_attr->qp_state;
3431 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3432 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3433 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3434 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3435 qp_attr->cap.max_inline_data = 0;
3436 qp_init_attr->cap = qp_attr->cap;
3437 qp_init_attr->create_flags = 0;
3439 mutex_unlock(&hr_qp->mutex);
3441 return 0;
3444 static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3445 int qp_attr_mask,
3446 struct ib_qp_init_attr *qp_init_attr)
3448 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3449 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3450 struct device *dev = &hr_dev->pdev->dev;
3451 struct hns_roce_qp_context *context;
3452 int tmp_qp_state = 0;
3453 int ret = 0;
3454 int state;
3456 context = kzalloc(sizeof(*context), GFP_KERNEL);
3457 if (!context)
3458 return -ENOMEM;
3460 memset(qp_attr, 0, sizeof(*qp_attr));
3461 memset(qp_init_attr, 0, sizeof(*qp_init_attr));
3463 mutex_lock(&hr_qp->mutex);
3465 if (hr_qp->state == IB_QPS_RESET) {
3466 qp_attr->qp_state = IB_QPS_RESET;
3467 goto done;
3470 ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
3471 if (ret) {
3472 dev_err(dev, "query qpc error\n");
3473 ret = -EINVAL;
3474 goto out;
3477 state = roce_get_field(context->qpc_bytes_144,
3478 QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
3479 QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
3480 tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
3481 if (tmp_qp_state == -1) {
3482 dev_err(dev, "to_ib_qp_state error\n");
3483 ret = -EINVAL;
3484 goto out;
3486 hr_qp->state = (u8)tmp_qp_state;
3487 qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
3488 qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
3489 QP_CONTEXT_QPC_BYTES_48_MTU_M,
3490 QP_CONTEXT_QPC_BYTES_48_MTU_S);
3491 qp_attr->path_mig_state = IB_MIG_ARMED;
3492 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
3493 if (hr_qp->ibqp.qp_type == IB_QPT_UD)
3494 qp_attr->qkey = QKEY_VAL;
3496 qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
3497 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
3498 QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
3499 qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
3500 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
3501 QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
3502 qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
3503 QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
3504 QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
3505 qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
3506 QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
3507 ((roce_get_bit(context->qpc_bytes_4,
3508 QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
3509 ((roce_get_bit(context->qpc_bytes_4,
3510 QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
3512 if (hr_qp->ibqp.qp_type == IB_QPT_RC ||
3513 hr_qp->ibqp.qp_type == IB_QPT_UC) {
3514 struct ib_global_route *grh =
3515 rdma_ah_retrieve_grh(&qp_attr->ah_attr);
3517 rdma_ah_set_sl(&qp_attr->ah_attr,
3518 roce_get_field(context->qpc_bytes_156,
3519 QP_CONTEXT_QPC_BYTES_156_SL_M,
3520 QP_CONTEXT_QPC_BYTES_156_SL_S));
3521 rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
3522 grh->flow_label =
3523 roce_get_field(context->qpc_bytes_48,
3524 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
3525 QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
3526 grh->sgid_index =
3527 roce_get_field(context->qpc_bytes_36,
3528 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
3529 QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
3530 grh->hop_limit =
3531 roce_get_field(context->qpc_bytes_44,
3532 QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
3533 QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
3534 grh->traffic_class =
3535 roce_get_field(context->qpc_bytes_48,
3536 QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
3537 QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
3539 memcpy(grh->dgid.raw, context->dgid,
3540 sizeof(grh->dgid.raw));
3543 qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
3544 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
3545 QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
3546 qp_attr->port_num = hr_qp->port + 1;
3547 qp_attr->sq_draining = 0;
3548 qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
3549 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
3550 QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
3551 qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
3552 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
3553 QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
3554 qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
3555 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
3556 QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
3557 qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
3558 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
3559 QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
3560 qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
3561 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
3562 QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
3563 qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
3565 done:
3566 qp_attr->cur_qp_state = qp_attr->qp_state;
3567 qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
3568 qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
3570 if (!ibqp->uobject) {
3571 qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
3572 qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
3573 } else {
3574 qp_attr->cap.max_send_wr = 0;
3575 qp_attr->cap.max_send_sge = 0;
3578 qp_init_attr->cap = qp_attr->cap;
3580 out:
3581 mutex_unlock(&hr_qp->mutex);
3582 kfree(context);
3583 return ret;
3586 static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
3587 int qp_attr_mask,
3588 struct ib_qp_init_attr *qp_init_attr)
3590 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3592 return hr_qp->doorbell_qpn <= 1 ?
3593 hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
3594 hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
3597 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
3599 struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
3600 struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
3601 struct hns_roce_cq *send_cq, *recv_cq;
3602 int ret;
3604 ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
3605 if (ret)
3606 return ret;
3608 send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
3609 recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
3611 hns_roce_lock_cqs(send_cq, recv_cq);
3612 if (!udata) {
3613 if (recv_cq)
3614 __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
3615 (hr_qp->ibqp.srq ?
3616 to_hr_srq(hr_qp->ibqp.srq) :
3617 NULL));
3619 if (send_cq && send_cq != recv_cq)
3620 __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
3622 hns_roce_qp_remove(hr_dev, hr_qp);
3623 hns_roce_unlock_cqs(send_cq, recv_cq);
3625 hns_roce_qp_destroy(hr_dev, hr_qp, udata);
3627 return 0;
3628 }
3630 static void hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
3631 {
3632 struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
3633 struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
3634 struct device *dev = &hr_dev->pdev->dev;
3635 u32 cqe_cnt_ori;
3636 u32 cqe_cnt_cur;
3637 int wait_time = 0;
3639 hns_roce_free_cqc(hr_dev, hr_cq);
3641 /*
3642 * Before freeing the cq buffer, we need to ensure that all outstanding
3643 * CQEs have been written back, by checking the CQE counter.
3644 */
3645 cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3646 while (1) {
3647 if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
3648 HNS_ROCE_CQE_WCMD_EMPTY_BIT)
3649 break;
3651 cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
3652 if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
3653 break;
3655 msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
3656 if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
3657 dev_warn(dev, "Destroy cq 0x%lx timeout!\n",
3658 hr_cq->cqn);
3659 break;
3660 }
3661 wait_time++;
3662 }
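/*
 * The wait above is bounded: at most HNS_ROCE_MAX_FREE_CQ_WAIT_CNT
 * iterations of HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS each. On timeout only
 * a warning is printed and the CQ resources are freed regardless.
 */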
3664 hns_roce_mtt_cleanup(hr_dev, &hr_cq->mtt);
3666 ib_umem_release(hr_cq->umem);
3667 if (!udata) {
3668 /* Free the buff of stored cq */
3669 hns_roce_buf_free(hr_dev, hr_cq->buf.size, &hr_cq->buf);
3670 }
3671 }
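/*
 * Ring the EQ doorbell: the low bits carry the consumer index, and
 * (judging by the req_not parameter) the bit at position
 * eq->log_entries requests the next event notification.
 */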
3673 static void set_eq_cons_index_v1(struct hns_roce_eq *eq, int req_not)
3674 {
3675 roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
3676 (req_not << eq->log_entries), eq->doorbell);
3677 }
3679 static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
3680 struct hns_roce_aeqe *aeqe, int qpn)
3681 {
3682 struct device *dev = &hr_dev->pdev->dev;
3684 dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
3685 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3686 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3687 case HNS_ROCE_LWQCE_QPC_ERROR:
3688 dev_warn(dev, "QP %d, QPC error.\n", qpn);
3689 break;
3690 case HNS_ROCE_LWQCE_MTU_ERROR:
3691 dev_warn(dev, "QP %d, MTU error.\n", qpn);
3692 break;
3693 case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
3694 dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
3695 break;
3696 case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
3697 dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
3698 break;
3699 case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
3700 dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
3701 break;
3702 case HNS_ROCE_LWQCE_SL_ERROR:
3703 dev_warn(dev, "QP %d, SL error.\n", qpn);
3704 break;
3705 case HNS_ROCE_LWQCE_PORT_ERROR:
3706 dev_warn(dev, "QP %d, port error.\n", qpn);
3707 break;
3708 default:
3709 break;
3710 }
3711 }
3713 static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
3714 struct hns_roce_aeqe *aeqe,
3715 int qpn)
3716 {
3717 struct device *dev = &hr_dev->pdev->dev;
3719 dev_warn(dev, "Local Access Violation Work Queue Error.\n");
3720 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3721 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3722 case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
3723 dev_warn(dev, "QP %d, R_key violation.\n", qpn);
3724 break;
3725 case HNS_ROCE_LAVWQE_LENGTH_ERROR:
3726 dev_warn(dev, "QP %d, length error.\n", qpn);
3727 break;
3728 case HNS_ROCE_LAVWQE_VA_ERROR:
3729 dev_warn(dev, "QP %d, VA error.\n", qpn);
3730 break;
3731 case HNS_ROCE_LAVWQE_PD_ERROR:
3732 dev_err(dev, "QP %d, PD error.\n", qpn);
3733 break;
3734 case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
3735 dev_warn(dev, "QP %d, rw acc error.\n", qpn);
3736 break;
3737 case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
3738 dev_warn(dev, "QP %d, key state error.\n", qpn);
3739 break;
3740 case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
3741 dev_warn(dev, "QP %d, MR operation error.\n", qpn);
3742 break;
3743 default:
3744 break;
3745 }
3746 }
3748 static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
3749 struct hns_roce_aeqe *aeqe,
3750 int event_type)
3751 {
3752 struct device *dev = &hr_dev->pdev->dev;
3753 int phy_port;
3754 int qpn;
3756 qpn = roce_get_field(aeqe->event.qp_event.qp,
3757 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
3758 HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
3759 phy_port = roce_get_field(aeqe->event.qp_event.qp,
3760 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
3761 HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
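/* For the special QPs the hardware reports a per-port QPN; fold in
 * the physical port to recover the software QPN.
 */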
3762 if (qpn <= 1)
3763 qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
3765 switch (event_type) {
3766 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3767 dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
3768 "QP %d, phy_port %d.\n", qpn, phy_port);
3769 break;
3770 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3771 hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
3772 break;
3773 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3774 hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
3775 break;
3776 default:
3777 break;
3778 }
3780 hns_roce_qp_event(hr_dev, qpn, event_type);
3781 }
3783 static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
3784 struct hns_roce_aeqe *aeqe,
3785 int event_type)
3786 {
3787 struct device *dev = &hr_dev->pdev->dev;
3788 u32 cqn;
3790 cqn = roce_get_field(aeqe->event.cq_event.cq,
3791 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
3792 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
3794 switch (event_type) {
3795 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3796 dev_warn(dev, "CQ 0x%x access err.\n", cqn);
3797 break;
3798 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3799 dev_warn(dev, "CQ 0x%x overflow\n", cqn);
3800 break;
3801 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3802 dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
3803 break;
3804 default:
3805 break;
3806 }
3808 hns_roce_cq_event(hr_dev, cqn, event_type);
3809 }
3811 static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
3812 struct hns_roce_aeqe *aeqe)
3813 {
3814 struct device *dev = &hr_dev->pdev->dev;
3816 switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
3817 HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
3818 case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
3819 dev_warn(dev, "SDB overflow.\n");
3820 break;
3821 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
3822 dev_warn(dev, "SDB almost overflow.\n");
3823 break;
3824 case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
3825 dev_warn(dev, "SDB almost empty.\n");
3826 break;
3827 case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
3828 dev_warn(dev, "ODB overflow.\n");
3829 break;
3830 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
3831 dev_warn(dev, "ODB almost overflow.\n");
3832 break;
3833 case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
3834 dev_warn(dev, "ODB almost empty.\n");
3835 break;
3836 default:
3837 break;
3838 }
3839 }
3841 static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
3842 {
3843 unsigned long off = (entry & (eq->entries - 1)) *
3844 HNS_ROCE_AEQ_ENTRY_SIZE;
3846 return (struct hns_roce_aeqe *)((u8 *)
3847 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3848 off % HNS_ROCE_BA_SIZE);
3849 }
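/*
 * Ownership check: eq->entries is a power of two, so
 * (cons_index & entries) flips each time the consumer index wraps the
 * ring; an AEQE belongs to software when its owner bit differs from
 * this wrap parity.
 */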
3851 static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
3852 {
3853 struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
3855 return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
3856 !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
3857 }
3859 static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
3860 struct hns_roce_eq *eq)
3861 {
3862 struct device *dev = &hr_dev->pdev->dev;
3863 struct hns_roce_aeqe *aeqe;
3864 int aeqes_found = 0;
3865 int event_type;
3867 while ((aeqe = next_aeqe_sw_v1(eq))) {
3869 /* Make sure we read the AEQ entry after we have checked the
3870 * ownership bit
3871 */
3872 dma_rmb();
3874 dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
3875 aeqe,
3876 roce_get_field(aeqe->asyn,
3877 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3878 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
3879 event_type = roce_get_field(aeqe->asyn,
3880 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
3881 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
3882 switch (event_type) {
3883 case HNS_ROCE_EVENT_TYPE_PATH_MIG:
3884 dev_warn(dev, "PATH MIG not supported\n");
3885 break;
3886 case HNS_ROCE_EVENT_TYPE_COMM_EST:
3887 dev_warn(dev, "COMMUNICATION established\n");
3888 break;
3889 case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
3890 dev_warn(dev, "SQ DRAINED not supported\n");
3891 break;
3892 case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
3893 dev_warn(dev, "PATH MIG failed\n");
3894 break;
3895 case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
3896 case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
3897 case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
3898 hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
3899 break;
3900 case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
3901 case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
3902 case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
3903 dev_warn(dev, "SRQ not supported!\n");
3904 break;
3905 case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
3906 case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
3907 case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
3908 hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
3909 break;
3910 case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
3911 dev_warn(dev, "port change.\n");
3912 break;
3913 case HNS_ROCE_EVENT_TYPE_MB:
3914 hns_roce_cmd_event(hr_dev,
3915 le16_to_cpu(aeqe->event.cmd.token),
3916 aeqe->event.cmd.status,
3917 le64_to_cpu(aeqe->event.cmd.out_param
3918 ));
3919 break;
3920 case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
3921 hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
3922 break;
3923 case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
3924 dev_warn(dev, "CEQ 0x%lx overflow.\n",
3925 roce_get_field(aeqe->event.ce_event.ceqe,
3926 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
3927 HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
3928 break;
3929 default:
3930 dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
3931 event_type, eq->eqn, eq->cons_index);
3932 break;
3933 }
3935 eq->cons_index++;
3936 aeqes_found = 1;
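/* The consumer index counts modulo twice the queue depth so that
 * (cons_index & entries) in next_aeqe_sw_v1() keeps tracking the
 * wrap parity used by the ownership check.
 */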
3938 if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
3939 eq->cons_index = 0;
3940 }
3942 set_eq_cons_index_v1(eq, 0);
3944 return aeqes_found;
3945 }
3947 static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
3948 {
3949 unsigned long off = (entry & (eq->entries - 1)) *
3950 HNS_ROCE_CEQ_ENTRY_SIZE;
3952 return (struct hns_roce_ceqe *)((u8 *)
3953 (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
3954 off % HNS_ROCE_BA_SIZE);
3955 }
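/* Same ownership scheme as the AEQ: a CEQE is valid for software when
 * its owner bit differs from the (cons_index & entries) wrap parity.
 */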
3957 static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
3958 {
3959 struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
3961 return (!!(roce_get_bit(ceqe->comp,
3962 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
3963 (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
3964 }
3966 static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
3967 struct hns_roce_eq *eq)
3968 {
3969 struct hns_roce_ceqe *ceqe;
3970 int ceqes_found = 0;
3971 u32 cqn;
3973 while ((ceqe = next_ceqe_sw_v1(eq))) {
3975 /* Make sure we read CEQ entry after we have checked the
3976 * ownership bit
3977 */
3978 dma_rmb();
3980 cqn = roce_get_field(ceqe->comp,
3981 HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
3982 HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
3983 hns_roce_cq_completion(hr_dev, cqn);
3985 ++eq->cons_index;
3986 ceqes_found = 1;
3988 if (eq->cons_index >
3989 EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
3990 eq->cons_index = 0;
3991 }
3993 set_eq_cons_index_v1(eq, 0);
3995 return ceqes_found;
3996 }
3998 static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
3999 {
4000 struct hns_roce_eq *eq = eq_ptr;
4001 struct hns_roce_dev *hr_dev = eq->hr_dev;
4002 int int_work = 0;
4004 if (eq->type_flag == HNS_ROCE_CEQ)
4005 /* CEQ irq routine; the CEQ is a pulse irq and needs no clearing */
4006 int_work = hns_roce_v1_ceq_int(hr_dev, eq);
4007 else
4008 /* AEQ irq routine; the AEQ is a pulse irq and needs no clearing */
4009 int_work = hns_roce_v1_aeq_int(hr_dev, eq);
4011 return IRQ_RETVAL(int_work);
4012 }
4014 static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
4015 {
4016 struct hns_roce_dev *hr_dev = dev_id;
4017 struct device *dev = &hr_dev->pdev->dev;
4018 int int_work = 0;
4019 u32 caepaemask_val;
4020 u32 cealmovf_val;
4021 u32 caepaest_val;
4022 u32 aeshift_val;
4023 u32 ceshift_val;
4024 u32 cemask_val;
4025 __le32 tmp;
4026 int i;
4028 /*
4029 * Abnormal interrupt:
4030 * AEQ overflow, ECC multi-bit and CEQ overflow errors must clear the
4031 * interrupt state: mask the irq, clear it, then cancel the mask.
4032 */
4033 aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
4034 tmp = cpu_to_le32(aeshift_val);
4036 /* AEQE overflow */
4037 if (roce_get_bit(tmp,
4038 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
4039 dev_warn(dev, "AEQ overflow!\n");
4041 /* Set mask */
4042 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4043 tmp = cpu_to_le32(caepaemask_val);
4044 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4045 HNS_ROCE_INT_MASK_ENABLE);
4046 caepaemask_val = le32_to_cpu(tmp);
4047 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4049 /* Clear int state (INT_WC: write 1 to clear) */
4050 caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
4051 tmp = cpu_to_le32(caepaest_val);
4052 roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
4053 caepaest_val = le32_to_cpu(tmp);
4054 roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
4056 /* Clear mask */
4057 caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4058 tmp = cpu_to_le32(caepaemask_val);
4059 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4060 HNS_ROCE_INT_MASK_DISABLE);
4061 caepaemask_val = le32_to_cpu(tmp);
4062 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
4063 }
4065 /* CEQ almost overflow */
4066 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4067 ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
4068 i * CEQ_REG_OFFSET);
4069 tmp = cpu_to_le32(ceshift_val);
4071 if (roce_get_bit(tmp,
4072 ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
4073 dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
4074 int_work++;
4076 /* Set mask */
4077 cemask_val = roce_read(hr_dev,
4078 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4079 i * CEQ_REG_OFFSET);
4080 tmp = cpu_to_le32(cemask_val);
4081 roce_set_bit(tmp,
4082 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4083 HNS_ROCE_INT_MASK_ENABLE);
4084 cemask_val = le32_to_cpu(tmp);
4085 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4086 i * CEQ_REG_OFFSET, cemask_val);
4088 /* Clear int state (INT_WC: write 1 to clear) */
4089 cealmovf_val = roce_read(hr_dev,
4090 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4091 i * CEQ_REG_OFFSET);
4092 tmp = cpu_to_le32(cealmovf_val);
4093 roce_set_bit(tmp,
4094 ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
4095 1);
4096 cealmovf_val = le32_to_cpu(tmp);
4097 roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
4098 i * CEQ_REG_OFFSET, cealmovf_val);
4100 /* Clear mask */
4101 cemask_val = roce_read(hr_dev,
4102 ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4103 i * CEQ_REG_OFFSET);
4104 tmp = cpu_to_le32(cemask_val);
4105 roce_set_bit(tmp,
4106 ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
4107 HNS_ROCE_INT_MASK_DISABLE);
4108 cemask_val = le32_to_cpu(tmp);
4109 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4110 i * CEQ_REG_OFFSET, cemask_val);
4111 }
4112 }
4114 /* ECC multi-bit error alarm */
4115 dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
4116 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
4117 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
4118 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
4120 dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
4121 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
4122 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
4123 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
4125 return IRQ_RETVAL(int_work);
4126 }
4128 static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
4129 {
4130 u32 aemask_val;
4131 int masken = 0;
4132 __le32 tmp;
4133 int i;
4135 /* AEQ INT */
4136 aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
4137 tmp = cpu_to_le32(aemask_val);
4138 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
4139 masken);
4140 roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
4141 aemask_val = le32_to_cpu(tmp);
4142 roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
4144 /* CEQ INT */
4145 for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
4146 /* IRQ mask */
4147 roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
4148 i * CEQ_REG_OFFSET, masken);
4149 }
4150 }
4152 static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
4153 struct hns_roce_eq *eq)
4154 {
4155 int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
4156 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4157 int i;
4159 if (!eq->buf_list)
4160 return;
4162 for (i = 0; i < npages; ++i)
4163 dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
4164 eq->buf_list[i].buf, eq->buf_list[i].map);
4166 kfree(eq->buf_list);
4167 }
4169 static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
4170 int enable_flag)
4171 {
4172 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
4173 __le32 tmp;
4174 u32 val;
4176 val = readl(eqc);
4177 tmp = cpu_to_le32(val);
4179 if (enable_flag)
4180 roce_set_field(tmp,
4181 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4182 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4183 HNS_ROCE_EQ_STAT_VALID);
4184 else
4185 roce_set_field(tmp,
4186 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4187 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4188 HNS_ROCE_EQ_STAT_INVALID);
4190 val = le32_to_cpu(tmp);
4191 writel(val, eqc);
4192 }
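/*
 * Note: although num_bas is computed below, the size check rejects any
 * EQ larger than one HNS_ROCE_BA_SIZE buffer, so in practice a single
 * base-address entry backs the whole event queue.
 */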
4194 static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
4195 struct hns_roce_eq *eq)
4196 {
4197 void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
4198 struct device *dev = &hr_dev->pdev->dev;
4199 dma_addr_t tmp_dma_addr;
4200 u32 eqconsindx_val = 0;
4201 u32 eqcuridx_val = 0;
4202 u32 eqshift_val = 0;
4203 __le32 tmp2 = 0;
4204 __le32 tmp1 = 0;
4205 __le32 tmp = 0;
4206 int num_bas;
4207 int ret;
4208 int i;
4210 num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
4211 HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
4213 if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
4214 dev_err(dev, "eq buf size %d is larger than ba size (%d), needs %d bas\n",
4215 (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
4216 num_bas);
4217 return -EINVAL;
4218 }
4220 eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
4221 if (!eq->buf_list)
4222 return -ENOMEM;
4224 for (i = 0; i < num_bas; ++i) {
4225 eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
4226 &tmp_dma_addr,
4227 GFP_KERNEL);
4228 if (!eq->buf_list[i].buf) {
4229 ret = -ENOMEM;
4230 goto err_out_free_pages;
4231 }
4233 eq->buf_list[i].map = tmp_dma_addr;
4234 }
4235 eq->cons_index = 0;
4236 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
4237 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
4238 HNS_ROCE_EQ_STAT_INVALID);
4239 roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
4240 ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
4241 eq->log_entries);
4242 eqshift_val = le32_to_cpu(tmp);
4243 writel(eqshift_val, eqc);
4245 /* Configure eq extended address 12~44bit */
4246 writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
4248 /*
4249 * Configure eq extended address 45~49 bit.
4250 * 44 = 32 + 12: the address is shifted right by 12 because the
4251 * hardware takes 4K-page-aligned addresses, and by a further 32 when
4252 * calculating the high 32-bit value programmed into the hardware.
4253 */
4254 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
4255 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
4256 eq->buf_list[0].map >> 44);
4257 roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
4258 ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
4259 eqcuridx_val = le32_to_cpu(tmp1);
4260 writel(eqcuridx_val, eqc + 8);
4262 /* Configure eq consumer index */
4263 roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
4264 ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
4265 eqconsindx_val = le32_to_cpu(tmp2);
4266 writel(eqconsindx_val, eqc + 0xc);
4268 return 0;
4270 err_out_free_pages:
4271 for (i -= 1; i >= 0; i--)
4272 dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
4273 eq->buf_list[i].map);
4275 kfree(eq->buf_list);
4276 return ret;
4277 }
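/*
 * EQ/irq layout: vectors [0, num_comp_vectors) are CEQs, the next
 * num_aeq_vectors are AEQs, and the remaining interrupt lines are
 * routed to the abnormal-event handler.
 */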
4279 static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
4280 {
4281 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4282 struct device *dev = &hr_dev->pdev->dev;
4283 struct hns_roce_eq *eq;
4284 int irq_num;
4285 int eq_num;
4286 int ret;
4287 int i, j;
4289 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4290 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4292 eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
4293 if (!eq_table->eq)
4294 return -ENOMEM;
4296 eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
4297 GFP_KERNEL);
4298 if (!eq_table->eqc_base) {
4299 ret = -ENOMEM;
4300 goto err_eqc_base_alloc_fail;
4301 }
4303 for (i = 0; i < eq_num; i++) {
4304 eq = &eq_table->eq[i];
4305 eq->hr_dev = hr_dev;
4306 eq->eqn = i;
4307 eq->irq = hr_dev->irq[i];
4308 eq->log_page_size = PAGE_SHIFT;
4310 if (i < hr_dev->caps.num_comp_vectors) {
4311 /* CEQ */
4312 eq_table->eqc_base[i] = hr_dev->reg_base +
4313 ROCEE_CAEP_CEQC_SHIFT_0_REG +
4314 CEQ_REG_OFFSET * i;
4315 eq->type_flag = HNS_ROCE_CEQ;
4316 eq->doorbell = hr_dev->reg_base +
4317 ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
4318 CEQ_REG_OFFSET * i;
4319 eq->entries = hr_dev->caps.ceqe_depth;
4320 eq->log_entries = ilog2(eq->entries);
4321 eq->eqe_size = HNS_ROCE_CEQ_ENTRY_SIZE;
4322 } else {
4323 /* AEQ */
4324 eq_table->eqc_base[i] = hr_dev->reg_base +
4325 ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
4326 eq->type_flag = HNS_ROCE_AEQ;
4327 eq->doorbell = hr_dev->reg_base +
4328 ROCEE_CAEP_AEQE_CONS_IDX_REG;
4329 eq->entries = hr_dev->caps.aeqe_depth;
4330 eq->log_entries = ilog2(eq->entries);
4331 eq->eqe_size = HNS_ROCE_AEQ_ENTRY_SIZE;
4332 }
4333 }
4335 /* Disable irq */
4336 hns_roce_v1_int_mask_enable(hr_dev);
4338 /* Configure ce int interval */
4339 roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
4340 HNS_ROCE_CEQ_DEFAULT_INTERVAL);
4342 /* Configure ce int burst num */
4343 roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
4344 HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
4346 for (i = 0; i < eq_num; i++) {
4347 ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
4348 if (ret) {
4349 dev_err(dev, "eq create failed\n");
4350 goto err_create_eq_fail;
4351 }
4352 }
4354 for (j = 0; j < irq_num; j++) {
4355 if (j < eq_num)
4356 ret = request_irq(hr_dev->irq[j],
4357 hns_roce_v1_msix_interrupt_eq, 0,
4358 hr_dev->irq_names[j],
4359 &eq_table->eq[j]);
4360 else
4361 ret = request_irq(hr_dev->irq[j],
4362 hns_roce_v1_msix_interrupt_abn, 0,
4363 hr_dev->irq_names[j], hr_dev);
4365 if (ret) {
4366 dev_err(dev, "request irq error!\n");
4367 goto err_request_irq_fail;
4368 }
4369 }
4371 for (i = 0; i < eq_num; i++)
4372 hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
4374 return 0;
4376 err_request_irq_fail:
4377 for (j -= 1; j >= 0; j--)
4378 free_irq(hr_dev->irq[j], &eq_table->eq[j]);
4380 err_create_eq_fail:
4381 for (i -= 1; i >= 0; i--)
4382 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4384 kfree(eq_table->eqc_base);
4386 err_eqc_base_alloc_fail:
4387 kfree(eq_table->eq);
4389 return ret;
4390 }
4392 static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
4393 {
4394 struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
4395 int irq_num;
4396 int eq_num;
4397 int i;
4399 eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
4400 irq_num = eq_num + hr_dev->caps.num_other_vectors;
4401 for (i = 0; i < eq_num; i++) {
4402 /* Disable EQ */
4403 hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
4405 free_irq(hr_dev->irq[i], &eq_table->eq[i]);
4407 hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
4408 }
4409 for (i = eq_num; i < irq_num; i++)
4410 free_irq(hr_dev->irq[i], hr_dev);
4412 kfree(eq_table->eqc_base);
4413 kfree(eq_table->eq);
4414 }
4416 static const struct ib_device_ops hns_roce_v1_dev_ops = {
4417 .destroy_qp = hns_roce_v1_destroy_qp,
4418 .modify_cq = hns_roce_v1_modify_cq,
4419 .poll_cq = hns_roce_v1_poll_cq,
4420 .post_recv = hns_roce_v1_post_recv,
4421 .post_send = hns_roce_v1_post_send,
4422 .query_qp = hns_roce_v1_query_qp,
4423 .req_notify_cq = hns_roce_v1_req_notify_cq,
4424 };
4426 static const struct hns_roce_hw hns_roce_hw_v1 = {
4427 .reset = hns_roce_v1_reset,
4428 .hw_profile = hns_roce_v1_profile,
4429 .hw_init = hns_roce_v1_init,
4430 .hw_exit = hns_roce_v1_exit,
4431 .post_mbox = hns_roce_v1_post_mbox,
4432 .chk_mbox = hns_roce_v1_chk_mbox,
4433 .set_gid = hns_roce_v1_set_gid,
4434 .set_mac = hns_roce_v1_set_mac,
4435 .set_mtu = hns_roce_v1_set_mtu,
4436 .write_mtpt = hns_roce_v1_write_mtpt,
4437 .write_cqc = hns_roce_v1_write_cqc,
4438 .modify_cq = hns_roce_v1_modify_cq,
4439 .clear_hem = hns_roce_v1_clear_hem,
4440 .modify_qp = hns_roce_v1_modify_qp,
4441 .query_qp = hns_roce_v1_query_qp,
4442 .destroy_qp = hns_roce_v1_destroy_qp,
4443 .post_send = hns_roce_v1_post_send,
4444 .post_recv = hns_roce_v1_post_recv,
4445 .req_notify_cq = hns_roce_v1_req_notify_cq,
4446 .poll_cq = hns_roce_v1_poll_cq,
4447 .dereg_mr = hns_roce_v1_dereg_mr,
4448 .destroy_cq = hns_roce_v1_destroy_cq,
4449 .init_eq = hns_roce_v1_init_eq_table,
4450 .cleanup_eq = hns_roce_v1_cleanup_eq_table,
4451 .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
4452 };
4454 static const struct of_device_id hns_roce_of_match[] = {
4455 { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
4456 {},
4457 };
4458 MODULE_DEVICE_TABLE(of, hns_roce_of_match);
4460 static const struct acpi_device_id hns_roce_acpi_match[] = {
4461 { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
4462 {},
4463 };
4464 MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
4466 static struct
4467 platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
4468 {
4469 struct device *dev;
4471 /* get the 'device' corresponding to the matching 'fwnode' */
4472 dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
4473 /* get the platform device */
4474 return dev ? to_platform_device(dev) : NULL;
4475 }
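/*
 * Gather the platform configuration from DT or ACPI: the hw ops table,
 * register base, node GUID, the RoCE-capable netdevs with their
 * physical ports, doorbell offsets, and the interrupt lines.
 */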
4477 static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
4478 {
4479 struct device *dev = &hr_dev->pdev->dev;
4480 struct platform_device *pdev = NULL;
4481 struct net_device *netdev = NULL;
4482 struct device_node *net_node;
4483 int port_cnt = 0;
4484 u8 phy_port;
4485 int ret;
4486 int i;
4488 /* check if we are compatible with the underlying SoC */
4489 if (dev_of_node(dev)) {
4490 const struct of_device_id *of_id;
4492 of_id = of_match_node(hns_roce_of_match, dev->of_node);
4493 if (!of_id) {
4494 dev_err(dev, "device is not compatible!\n");
4495 return -ENXIO;
4496 }
4497 hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
4498 if (!hr_dev->hw) {
4499 dev_err(dev, "couldn't get H/W specific DT data!\n");
4500 return -ENXIO;
4501 }
4502 } else if (is_acpi_device_node(dev->fwnode)) {
4503 const struct acpi_device_id *acpi_id;
4505 acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
4506 if (!acpi_id) {
4507 dev_err(dev, "device is not compatible!\n");
4508 return -ENXIO;
4509 }
4510 hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
4511 if (!hr_dev->hw) {
4512 dev_err(dev, "couldn't get H/W specific ACPI data!\n");
4513 return -ENXIO;
4514 }
4515 } else {
4516 dev_err(dev, "can't read compatibility data from DT or ACPI\n");
4517 return -ENXIO;
4518 }
4520 /* get the mapped register base address */
4521 hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
4522 if (IS_ERR(hr_dev->reg_base))
4523 return PTR_ERR(hr_dev->reg_base);
4525 /* read the node_guid of IB device from the DT or ACPI */
4526 ret = device_property_read_u8_array(dev, "node-guid",
4527 (u8 *)&hr_dev->ib_dev.node_guid,
4528 GUID_LEN);
4529 if (ret) {
4530 dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
4531 return ret;
4532 }
4534 /* get the RoCE associated ethernet ports or netdevices */
4535 for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
4536 if (dev_of_node(dev)) {
4537 net_node = of_parse_phandle(dev->of_node, "eth-handle",
4538 i);
4539 if (!net_node)
4540 continue;
4541 pdev = of_find_device_by_node(net_node);
4542 } else if (is_acpi_device_node(dev->fwnode)) {
4543 struct fwnode_reference_args args;
4545 ret = acpi_node_get_property_reference(dev->fwnode,
4546 "eth-handle",
4547 i, &args);
4548 if (ret)
4549 continue;
4550 pdev = hns_roce_find_pdev(args.fwnode);
4551 } else {
4552 dev_err(dev, "cannot read data from DT or ACPI\n");
4553 return -ENXIO;
4554 }
4556 if (pdev) {
4557 netdev = platform_get_drvdata(pdev);
4558 phy_port = (u8)i;
4559 if (netdev) {
4560 hr_dev->iboe.netdevs[port_cnt] = netdev;
4561 hr_dev->iboe.phy_port[port_cnt] = phy_port;
4562 } else {
4563 dev_err(dev, "no netdev found with pdev %s\n",
4564 pdev->name);
4565 return -ENODEV;
4566 }
4567 port_cnt++;
4568 }
4569 }
4571 if (port_cnt == 0) {
4572 dev_err(dev, "unable to get eth-handle for available ports!\n");
4573 return -EINVAL;
4574 }
4576 hr_dev->caps.num_ports = port_cnt;
4578 /* cmd issue mode: 0 is poll, 1 is event */
4579 hr_dev->cmd_mod = 1;
4580 hr_dev->loop_idc = 0;
4581 hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
4582 hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
4584 /* read the interrupt names from the DT or ACPI */
4585 ret = device_property_read_string_array(dev, "interrupt-names",
4586 hr_dev->irq_names,
4587 HNS_ROCE_V1_MAX_IRQ_NUM);
4588 if (ret < 0) {
4589 dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
4590 return ret;
4591 }
4593 /* fetch the interrupt numbers */
4594 for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
4595 hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
4596 if (hr_dev->irq[i] <= 0)
4597 return -EINVAL;
4598 }
4600 return 0;
4601 }
4603 /**
4604 * hns_roce_probe - RoCE driver entry point
4605 * @pdev: pointer to platform device
4606 * Return: 0 on success, otherwise a negative error code
4607 */
4609 static int hns_roce_probe(struct platform_device *pdev)
4610 {
4611 int ret;
4612 struct hns_roce_dev *hr_dev;
4613 struct device *dev = &pdev->dev;
4615 hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
4616 if (!hr_dev)
4617 return -ENOMEM;
4619 hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
4620 if (!hr_dev->priv) {
4621 ret = -ENOMEM;
4622 goto error_failed_kzalloc;
4623 }
4625 hr_dev->pdev = pdev;
4626 hr_dev->dev = dev;
4627 platform_set_drvdata(pdev, hr_dev);
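/* Prefer 64-bit DMA and fall back to 32-bit before giving up. */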
4629 if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
4630 dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
4631 dev_err(dev, "No usable DMA addressing mode\n");
4632 ret = -EIO;
4633 goto error_failed_get_cfg;
4634 }
4636 ret = hns_roce_get_cfg(hr_dev);
4637 if (ret) {
4638 dev_err(dev, "Get Configuration failed!\n");
4639 goto error_failed_get_cfg;
4640 }
4642 ret = hns_roce_init(hr_dev);
4643 if (ret) {
4644 dev_err(dev, "RoCE engine init failed!\n");
4645 goto error_failed_get_cfg;
4646 }
4648 return 0;
4650 error_failed_get_cfg:
4651 kfree(hr_dev->priv);
4653 error_failed_kzalloc:
4654 ib_dealloc_device(&hr_dev->ib_dev);
4656 return ret;
4657 }
4659 /**
4660 * hns_roce_remove - remove RoCE device
4661 * @pdev: pointer to platform device
4662 */
4663 static int hns_roce_remove(struct platform_device *pdev)
4664 {
4665 struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
4667 hns_roce_exit(hr_dev);
4668 kfree(hr_dev->priv);
4669 ib_dealloc_device(&hr_dev->ib_dev);
4671 return 0;
4672 }
4674 static struct platform_driver hns_roce_driver = {
4675 .probe = hns_roce_probe,
4676 .remove = hns_roce_remove,
4677 .driver = {
4678 .name = DRV_NAME,
4679 .of_match_table = hns_roce_of_match,
4680 .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
4681 },
4682 };
4684 module_platform_driver(hns_roce_driver);
4686 MODULE_LICENSE("Dual BSD/GPL");
4687 MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
4688 MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
4689 MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
4690 MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");