/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"
#include "user.h"

static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
        struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

        ibcq->comp_handler(ibcq, ibcq->cq_context);
}

static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
        struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct ib_cq *ibcq = &cq->ibcq;
        struct ib_event event;

        if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
                mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
                             type, mcq->cqn);
                return;
        }

        if (ibcq->event_handler) {
                event.device     = &dev->ib_dev;
                event.event      = IB_EVENT_CQ_ERR;
                event.element.cq = ibcq;
                ibcq->event_handler(&event, ibcq->cq_context);
        }
}

static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
        return mlx5_buf_offset(&buf->buf, n * size);
}

static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
        return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}

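/*
 * A CQE belongs to software when its ownership bit matches the parity of
 * the consumer index relative to the ring size (the bit flips each time
 * the driver wraps around the CQ).  For 128-byte CQEs the 64-byte
 * hardware CQE lives in the second half of the entry, hence "cqe + 64".
 */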
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
        void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
        struct mlx5_cqe64 *cqe64;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
        return ((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^
                !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}

static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
        return get_sw_cqe(cq, cq->mcq.cons_index);
}

static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
        switch (wq->wr_data[idx]) {
        case MLX5_IB_WR_UMR:
                return 0;

        case IB_WR_LOCAL_INV:
                return IB_WC_LOCAL_INV;

        case IB_WR_FAST_REG_MR:
                return IB_WC_FAST_REG_MR;

        default:
                pr_warn("unknown completion status\n");
                return 0;
        }
}

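/*
 * Requester completions: the send opcode is carried in the most
 * significant byte of sop_drop_qpn.  The *_IMM cases intentionally fall
 * through to their non-immediate counterparts so that they share the
 * wc->opcode assignment.
 */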
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                            struct mlx5_ib_wq *wq, int idx)
{
        wc->wc_flags = 0;
        switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
        case MLX5_OPCODE_RDMA_WRITE_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
        case MLX5_OPCODE_RDMA_WRITE:
                wc->opcode    = IB_WC_RDMA_WRITE;
                break;
        case MLX5_OPCODE_SEND_IMM:
                wc->wc_flags |= IB_WC_WITH_IMM;
        case MLX5_OPCODE_SEND:
        case MLX5_OPCODE_SEND_INVAL:
                wc->opcode    = IB_WC_SEND;
                break;
        case MLX5_OPCODE_RDMA_READ:
                wc->opcode    = IB_WC_RDMA_READ;
                wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
                break;
        case MLX5_OPCODE_ATOMIC_CS:
                wc->opcode    = IB_WC_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_FA:
                wc->opcode    = IB_WC_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_CS:
                wc->opcode    = IB_WC_MASKED_COMP_SWAP;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_ATOMIC_MASKED_FA:
                wc->opcode    = IB_WC_MASKED_FETCH_ADD;
                wc->byte_len  = 8;
                break;
        case MLX5_OPCODE_BIND_MW:
                wc->opcode    = IB_WC_BIND_MW;
                break;
        case MLX5_OPCODE_UMR:
                wc->opcode = get_umr_comp(wq, idx);
                break;
        }
}

enum {
        MLX5_GRH_IN_BUFFER = 1,
        MLX5_GRH_IN_CQE    = 2,
};

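/*
 * Responder completions: when the QP is attached to an SRQ (or is an XRC
 * target QP, whose SRQ is resolved from the CQE), the wr_id is taken from
 * the SRQ wrid table indexed by the CQE's wqe_counter and the SRQ WQE is
 * released; otherwise it is taken from the receive queue ring.  The GRH
 * flag comes from bits 28-29 of flags_rqpn.
 */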
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                             struct mlx5_ib_qp *qp)
{
        struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
        struct mlx5_ib_srq *srq;
        struct mlx5_ib_wq *wq;
        u16 wqe_ctr;
        u8 g;

        if (qp->ibqp.srq || qp->ibqp.xrcd) {
                struct mlx5_core_srq *msrq = NULL;

                if (qp->ibqp.xrcd) {
                        msrq = mlx5_core_get_srq(&dev->mdev,
                                                 be32_to_cpu(cqe->srqn));
                        srq = to_mibsrq(msrq);
                } else {
                        srq = to_msrq(qp->ibqp.srq);
                }
                if (srq) {
                        wqe_ctr = be16_to_cpu(cqe->wqe_counter);
                        wc->wr_id = srq->wrid[wqe_ctr];
                        mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        if (msrq && atomic_dec_and_test(&msrq->refcount))
                                complete(&msrq->free);
                }
        } else {
                wq        = &qp->rq;
                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                ++wq->tail;
        }
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);

        switch (cqe->op_own >> 4) {
        case MLX5_CQE_RESP_WR_IMM:
                wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                wc->wc_flags    = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND:
                wc->opcode   = IB_WC_RECV;
                wc->wc_flags = 0;
                break;
        case MLX5_CQE_RESP_SEND_IMM:
                wc->opcode      = IB_WC_RECV;
                wc->wc_flags    = IB_WC_WITH_IMM;
                wc->ex.imm_data = cqe->imm_inval_pkey;
                break;
        case MLX5_CQE_RESP_SEND_INV:
                wc->opcode             = IB_WC_RECV;
                wc->wc_flags           = IB_WC_WITH_INVALIDATE;
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
                break;
        }
        wc->slid           = be16_to_cpu(cqe->slid);
        wc->sl             = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
        wc->src_qp         = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
        wc->dlid_path_bits = cqe->ml_path;
        g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
        wc->wc_flags |= g ? IB_WC_GRH : 0;
        wc->pkey_index     = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
}

static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
        __be32 *p = (__be32 *)cqe;
        int i;

        mlx5_ib_warn(dev, "dump error cqe\n");
        for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
                pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
                        be32_to_cpu(p[1]), be32_to_cpu(p[2]),
                        be32_to_cpu(p[3]));
}

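/*
 * Translate a hardware error syndrome into an IB work completion status.
 * Flush and retry-exceeded syndromes are expected during teardown and
 * error recovery, so they do not trigger a CQE dump.
 */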
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
                                  struct mlx5_err_cqe *cqe,
                                  struct ib_wc *wc)
{
        int dump = 1;

        switch (cqe->syndrome) {
        case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
                wc->status = IB_WC_LOC_LEN_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
                wc->status = IB_WC_LOC_QP_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
                wc->status = IB_WC_LOC_PROT_ERR;
                break;
        case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
                dump = 0;
                wc->status = IB_WC_WR_FLUSH_ERR;
                break;
        case MLX5_CQE_SYNDROME_MW_BIND_ERR:
                wc->status = IB_WC_MW_BIND_ERR;
                break;
        case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
                wc->status = IB_WC_BAD_RESP_ERR;
                break;
        case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
                wc->status = IB_WC_LOC_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
                wc->status = IB_WC_REM_INV_REQ_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
                wc->status = IB_WC_REM_ACCESS_ERR;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
                wc->status = IB_WC_REM_OP_ERR;
                break;
        case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
                wc->status = IB_WC_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
                wc->status = IB_WC_RNR_RETRY_EXC_ERR;
                dump = 0;
                break;
        case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
                wc->status = IB_WC_REM_ABORT_ERR;
                break;
        default:
                wc->status = IB_WC_GENERAL_ERR;
                break;
        }

        wc->vendor_err = cqe->vendor_err_synd;
        if (dump)
                dump_cqe(dev, cqe);
}

static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
        /* TBD: waiting decision
        */
        return 0;
}

static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
        struct mlx5_wqe_data_seg *dpseg;
        void *addr;

        dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
                sizeof(struct mlx5_wqe_raddr_seg) +
                sizeof(struct mlx5_wqe_atomic_seg);
        addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);

        return addr;
}

static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                          u16 idx)
{
        void *addr;
        int byte_count;
        int i;

        if (!is_atomic_response(qp, idx))
                return;

        byte_count = be32_to_cpu(cqe64->byte_cnt);
        addr = mlx5_get_atomic_laddr(qp, idx);

        if (byte_count == 4) {
                *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
        } else {
                for (i = 0; i < byte_count; i += 8) {
                        *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
                        addr += 8;
                }
        }
}

static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                           u16 tail, u16 head)
{
        int idx;

        do {
                idx = tail & (qp->sq.wqe_cnt - 1);
                handle_atomic(qp, cqe64, idx);
                if (idx == head)
                        break;

                tail = qp->sq.w_list[idx].next;
        } while (1);
        tail = qp->sq.w_list[idx].next;
        qp->sq.last_poll = tail;
}

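/*
 * Consume a single CQE.  Returns -EAGAIN when no software-owned CQE is
 * available.  The QP that generated the CQE is looked up by the QPN in
 * the CQE and cached in *cur_qp, so consecutive completions from the
 * same QP skip the lookup.  Requester, responder and error completions
 * are dispatched on the CQE opcode.
 */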
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
                         struct mlx5_ib_qp **cur_qp,
                         struct ib_wc *wc)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
        struct mlx5_err_cqe *err_cqe;
        struct mlx5_cqe64 *cqe64;
        struct mlx5_core_qp *mqp;
        struct mlx5_ib_wq *wq;
        uint8_t opcode;
        uint32_t qpn;
        u16 wqe_ctr;
        void *cqe;
        int idx;

        cqe = next_cqe_sw(cq);
        if (!cqe)
                return -EAGAIN;

        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

        ++cq->mcq.cons_index;

        /* Make sure we read CQ entry contents after we've checked the
         * ownership bit.
         */
        rmb();

        qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
        if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
                /* We do not have to take the QP table lock here,
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
                mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
                if (unlikely(!mqp)) {
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                                     cq->mcq.cqn, qpn);
                        return -EINVAL;
                }

                *cur_qp = to_mibqp(mqp);
        }

        wc->qp = &(*cur_qp)->ibqp;
        opcode = cqe64->op_own >> 4;
        switch (opcode) {
        case MLX5_CQE_REQ:
                wq = &(*cur_qp)->sq;
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
                handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESP_WR_IMM:
        case MLX5_CQE_RESP_SEND:
        case MLX5_CQE_RESP_SEND_IMM:
        case MLX5_CQE_RESP_SEND_INV:
                handle_responder(wc, cqe64, *cur_qp);
                wc->status = IB_WC_SUCCESS;
                break;
        case MLX5_CQE_RESIZE_CQ:
                break;
        case MLX5_CQE_REQ_ERR:
        case MLX5_CQE_RESP_ERR:
                err_cqe = (struct mlx5_err_cqe *)cqe64;
                mlx5_handle_error_cqe(dev, err_cqe, wc);
                mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
                            opcode == MLX5_CQE_REQ_ERR ?
                            "Requestor" : "Responder", cq->mcq.cqn);
                mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
                            err_cqe->syndrome, err_cqe->vendor_err_synd);
                if (opcode == MLX5_CQE_REQ_ERR) {
                        wq = &(*cur_qp)->sq;
                        wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                        idx = wqe_ctr & (wq->wqe_cnt - 1);
                        wc->wr_id = wq->wrid[idx];
                        wq->tail = wq->wqe_head[idx] + 1;
                } else {
                        struct mlx5_ib_srq *srq;

                        if ((*cur_qp)->ibqp.srq) {
                                srq = to_msrq((*cur_qp)->ibqp.srq);
                                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                                wc->wr_id = srq->wrid[wqe_ctr];
                                mlx5_ib_free_srq_wqe(srq, wqe_ctr);
                        } else {
                                wq = &(*cur_qp)->rq;
                                wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                                ++wq->tail;
                        }
                }
                break;
        }

        return 0;
}

int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct mlx5_ib_cq *cq = to_mcq(ibcq);
        struct mlx5_ib_qp *cur_qp = NULL;
        unsigned long flags;
        int npolled;
        int err = 0;

        spin_lock_irqsave(&cq->lock, flags);

        for (npolled = 0; npolled < num_entries; npolled++) {
                err = mlx5_poll_one(cq, &cur_qp, wc + npolled);
                if (err)
                        break;
        }

        if (npolled)
                mlx5_cq_set_ci(&cq->mcq);

        spin_unlock_irqrestore(&cq->lock, flags);

        if (err == 0 || err == -EAGAIN)
                return npolled;
        else
                return err;
}

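/*
 * Ring the CQ arm doorbell so the HCA generates a completion event for
 * the next CQE, or for the next solicited CQE when IB_CQ_SOLICITED is
 * requested.
 */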
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
                    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
                    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));

        return 0;
}

static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
                        int nent, int cqe_size)
{
        int err;

        err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
                             PAGE_SIZE * 2, &buf->buf);
        if (err)
                return err;

        buf->cqe_size = cqe_size;

        return 0;
}

static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
        mlx5_buf_free(&dev->mdev, &buf->buf);
}

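/*
 * User-space CQ creation: the CQE buffer lives in user memory, so it is
 * pinned with ib_umem_get(), the user doorbell record is mapped, and the
 * pinned pages are translated into the PAS array of the create-CQ
 * mailbox.
 */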
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                          struct ib_ucontext *context, struct mlx5_ib_cq *cq,
                          int entries, struct mlx5_create_cq_mbox_in **cqb,
                          int *cqe_size, int *index, int *inlen)
{
        struct mlx5_ib_create_cq ucmd;
        int page_shift;
        int npages;
        int ncont;
        int err;

        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
                return -EFAULT;

        if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                return -EINVAL;

        *cqe_size = ucmd.cqe_size;

        cq->buf.umem = ib_umem_get(context, ucmd.buf_addr,
                                   entries * ucmd.cqe_size,
                                   IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(cq->buf.umem)) {
                err = PTR_ERR(cq->buf.umem);
                return err;
        }

        err = mlx5_ib_db_map_user(to_mucontext(context), ucmd.db_addr,
                                  &cq->db);
        if (err)
                goto err_umem;

        mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift,
                           &ncont, NULL);
        mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
                    ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);

        *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * ncont;
        *cqb = mlx5_vzalloc(*inlen);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_db;
        }
        mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, (*cqb)->pas, 0);
        (*cqb)->ctx.log_pg_sz = page_shift - PAGE_SHIFT;

        *index = to_mucontext(context)->uuari.uars[0].index;

        return 0;

err_db:
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);

err_umem:
        ib_umem_release(cq->buf.umem);
        return err;
}

static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
        mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
        ib_umem_release(cq->buf.umem);
}

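/*
 * Kernel CQ buffers are initialized with op_own = 0xf1: an invalid
 * opcode in the high nibble and the ownership bit set, so no entry
 * appears software-owned on the first pass around the ring.
 */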
static void init_cq_buf(struct mlx5_ib_cq *cq, int nent)
{
        int i;
        void *cqe;
        struct mlx5_cqe64 *cqe64;

        for (i = 0; i < nent; i++) {
                cqe = get_cqe(cq, i);
                cqe64 = (cq->buf.cqe_size == 64) ? cqe : cqe + 64;
                cqe64->op_own = 0xf1;
        }
}

static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
                            int entries, int cqe_size,
                            struct mlx5_create_cq_mbox_in **cqb,
                            int *index, int *inlen)
{
        int err;

        err = mlx5_db_alloc(&dev->mdev, &cq->db);
        if (err)
                return err;

        cq->mcq.set_ci_db  = cq->db.db;
        cq->mcq.arm_db     = cq->db.db + 1;
        *cq->mcq.set_ci_db = 0;
        *cq->mcq.arm_db    = 0;
        cq->mcq.cqe_sz = cqe_size;

        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
        if (err)
                goto err_db;

        init_cq_buf(cq, entries);

        *inlen = sizeof(**cqb) + sizeof(*(*cqb)->pas) * cq->buf.buf.npages;
        *cqb = mlx5_vzalloc(*inlen);
        if (!*cqb) {
                err = -ENOMEM;
                goto err_buf;
        }
        mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

        (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - PAGE_SHIFT;
        *index = dev->mdev.priv.uuari.uars[0].index;

        return 0;

err_buf:
        free_cq_buf(dev, &cq->buf);

err_db:
        mlx5_db_free(&dev->mdev, &cq->db);
        return err;
}

static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
        free_cq_buf(dev, &cq->buf);
        mlx5_db_free(&dev->mdev, &cq->db);
}

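/*
 * CQ creation: the requested depth is bumped by one and rounded up to a
 * power of two, the create-CQ mailbox is built from either a user buffer
 * or a kernel buffer, and the CQ is then created in firmware and wired
 * to its completion EQ.
 */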
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                                int vector, struct ib_ucontext *context,
                                struct ib_udata *udata)
{
        struct mlx5_create_cq_mbox_in *cqb = NULL;
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_ib_cq *cq;
        int uninitialized_var(index);
        int uninitialized_var(inlen);
        int cqe_size;
        int irqn;
        int eqn;
        int err;

        entries = roundup_pow_of_two(entries + 1);
        if (entries < 1 || entries > dev->mdev.caps.max_cqes)
                return ERR_PTR(-EINVAL);

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq)
                return ERR_PTR(-ENOMEM);

        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
        spin_lock_init(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;

        if (context) {
                err = create_cq_user(dev, udata, context, cq, entries,
                                     &cqb, &cqe_size, &index, &inlen);
                if (err)
                        goto err_create;
        } else {
                /* for now choose 64 bytes till we have a proper interface */
                cqe_size = 64;
                err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
                                       &index, &inlen);
                if (err)
                        goto err_create;
        }

        cq->cqe_size = cqe_size;
        cqb->ctx.cqe_sz_flags = cqe_sz_to_mlx_sz(cqe_size) << 5;
        cqb->ctx.log_sz_usr_page = cpu_to_be32((ilog2(entries) << 24) | index);
        err = mlx5_vector2eqn(dev, vector, &eqn, &irqn);
        if (err)
                goto err_cqb;

        cqb->ctx.c_eqn = cpu_to_be16(eqn);
        cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

        err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
        if (err)
                goto err_cqb;

        mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);

        cq->mcq.comp  = mlx5_ib_cq_comp;
        cq->mcq.event = mlx5_ib_cq_event;

        if (context)
                if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
                        err = -EFAULT;
                        goto err_cmd;
                }

        mlx5_vfree(cqb);
        return &cq->ibcq;

err_cmd:
        mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);

err_cqb:
        mlx5_vfree(cqb);
        if (context)
                destroy_cq_user(cq, context);
        else
                destroy_cq_kernel(dev, cq);

err_create:
        kfree(cq);

        return ERR_PTR(err);
}

int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
        struct mlx5_ib_dev *dev = to_mdev(cq->device);
        struct mlx5_ib_cq *mcq = to_mcq(cq);
        struct ib_ucontext *context = NULL;

        if (cq->uobject)
                context = cq->uobject->context;

        mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
        if (context)
                destroy_cq_user(mcq, context);
        else
                destroy_cq_kernel(dev, mcq);

        kfree(mcq);

        return 0;
}

static int is_equal_rsn(struct mlx5_cqe64 *cqe64, struct mlx5_ib_srq *srq,
                        u32 rsn)
{
        u32 lrsn;

        if (srq)
                lrsn = be32_to_cpu(cqe64->srqn) & 0xffffff;
        else
                lrsn = be32_to_cpu(cqe64->sop_drop_qpn) & 0xffffff;

        return rsn == lrsn;
}

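/*
 * __mlx5_ib_cq_clean() removes all CQEs whose QPN/SRQN matches rsn by
 * compacting the remaining entries; the caller must hold cq->lock.
 * mlx5_ib_cq_clean() below is the locking wrapper.
 */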
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
        struct mlx5_cqe64 *cqe64, *dest64;
        void *cqe, *dest;
        u32 prod_index;
        int nfreed = 0;
        u8 owner_bit;

        if (!cq)
                return;

        /* First we need to find the current producer index, so we
         * know where to start cleaning from.  It doesn't matter if HW
         * adds new entries after this loop -- the QP we're worried
         * about is already in RESET, so the new entries won't come
         * from our QP and therefore don't need to be checked.
         */
        for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
                if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
                        break;

        /* Now sweep backwards through the CQ, removing CQ entries
         * that match our QP by copying older entries on top of them.
         */
        while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
                cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
                cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
                if (is_equal_rsn(cqe64, srq, rsn)) {
                        if (srq)
                                mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
                        ++nfreed;
                } else if (nfreed) {
                        dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
                        dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
                        owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
                        memcpy(dest, cqe, cq->mcq.cqe_sz);
                        dest64->op_own = owner_bit |
                                (dest64->op_own & ~MLX5_CQE_OWNER_MASK);
                }
        }

        if (nfreed) {
                cq->mcq.cons_index += nfreed;
                /* Make sure update of buffer contents is done before
                 * updating consumer index.
                 */
                wmb();
                mlx5_cq_set_ci(&cq->mcq);
        }
}

void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
        if (!cq)
                return;

        spin_lock_irq(&cq->lock);
        __mlx5_ib_cq_clean(cq, qpn, srq);
        spin_unlock_irq(&cq->lock);
}

int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
        /* CQ moderation is not implemented in this version of the driver. */
        return -ENOSYS;
}

int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        /* CQ resize is not implemented in this version of the driver. */
        return -ENOSYS;
}

int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
        struct mlx5_ib_cq *cq;

        cq = to_mcq(ibcq);
        return cq->cqe_size;
}