/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
#include "srq.h"
#include "qp.h"
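/* Completion handler glue: forward a hardware CQ completion event to the
 * ib_cq completion callback registered by the consumer.
 */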
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
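/* Asynchronous CQ event handler. Anything other than a CQ error is
 * unexpected here; a CQ error is translated into IB_EVENT_CQ_ERR and
 * passed to the consumer's event handler, if one was registered.
 */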
static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}
static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}
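/* Return the CQE at index n if it is owned by software, i.e. its opcode is
 * not MLX5_CQE_INVALID and its ownership bit matches the expected polarity
 * for the current pass over the ring; otherwise return NULL.
 */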
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case MLX5_IB_WR_UMR:
		return 0;

	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	case IB_WR_REG_MR:
		return IB_WC_REG_MR;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode    = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		fallthrough;
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode    = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode    = IB_WC_RDMA_READ;
		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode    = IB_WC_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode    = IB_WC_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode    = IB_WC_MASKED_COMP_SWAP;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode    = IB_WC_MASKED_FETCH_ADD;
		wc->byte_len  = 8;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}
enum {
	MLX5_GRH_IN_BUFFER = 1,
	MLX5_GRH_IN_CQE	   = 2,
};
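/* Fill in an ib_wc for a responder (receive queue or SRQ) completion:
 * recover the WR id, byte count, immediate/invalidate data, GRH and pkey
 * information, and for RoCE ports the VLAN and network header type.
 */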
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq = NULL;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
			if (msrq)
				srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq)
				mlx5_core_res_put(&msrq->common);
		}
	} else {
		wq	  = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (get_cqe_opcode(cqe)) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->immediate;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
		break;
	}
	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->slid = be16_to_cpu(cqe->slid);
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	wc->slid = 0;
	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_ROCE_V1;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	mlx5_ib_warn(dev, "dump error cqe\n");
	mlx5_dump_err_cqe(dev->mdev, cqe);
}
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}
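/* Walk the send queue's WQE list from the last polled entry up to the WQE
 * indexed by this completion and advance sq.last_poll past it.
 */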
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else
	if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else
	if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}
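/* Generate flush-error completions in software for the WQEs still
 * outstanding on one work queue of a QP; used when the device is in
 * internal error state and the hardware will not complete them.
 */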
static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
		    int *npolled, bool is_send)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = (is_send) ? &qp->sq : &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		unsigned int idx;

		idx = (is_send) ? wq->last_poll : wq->tail;
		idx &= (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		if (is_send)
			wq->last_poll = wq->w_list[idx].next;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}
static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return flush-error
	 * completions that mimic them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, true);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_comp(qp, num_entries, wc + *npolled, npolled, false);
		if (*npolled >= num_entries)
			return;
	}
}
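/* Poll a single CQE: consume the next software-owned entry, look up the QP
 * it belongs to and translate the entry into an ib_wc. Returns 0 when a
 * completion was returned and -EAGAIN when the CQ is empty.
 */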
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = get_cqe_opcode(cqe64);
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR: {
		struct mlx5_sig_err_cqe *sig_err_cqe =
			(struct mlx5_sig_err_cqe *)cqe64;
		struct mlx5_core_sig_ctx *sig;

		xa_lock(&dev->sig_mrs);
		sig = xa_load(&dev->sig_mrs,
			      mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		get_sig_err_item(sig_err_cqe, &sig->err_item);
		sig->sig_err_exists = true;
		sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, sig->err_item.key,
			     sig->err_item.err_type,
			     sig->err_item.sig_err_offset,
			     sig->err_item.expected,
			     sig->err_item.actual);

		xa_unlock(&dev->sig_mrs);
		break;
	}
	}

	return 0;
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc, bool is_fatal_err)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		if (unlikely(is_fatal_err)) {
			soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
			soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		}
		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}
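/* poll_cq verb: drain software-generated completions first, then poll
 * hardware CQEs. If the device is in internal error state, only flush-error
 * completions are reported for the QPs attached to this CQ.
 */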
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		/* make sure no soft wqe's are waiting */
		if (unlikely(!list_empty(&cq->wc_list)))
			soft_polled = poll_soft_wc(cq, num_entries, wc, true);

		mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
				     wc + soft_polled, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc, false);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}
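/* req_notify_cq verb: record the requested notification type and ring the
 * arm doorbell. With IB_CQ_REPORT_MISSED_EVENTS, returns 1 when software
 * completions are already pending on the CQ.
 */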
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}
static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_cq_buf *buf,
			     int nent,
			     int cqe_size)
{
	struct mlx5_frag_buf *frag_buf = &buf->frag_buf;
	u8 log_wq_stride = 6 + (cqe_size == 128 ? 1 : 0);
	u8 log_wq_sz     = ilog2(cqe_size);
	int err;

	err = mlx5_frag_buf_alloc_node(dev->mdev,
				       nent * cqe_size,
				       frag_buf,
				       dev->mdev->priv.numa_node);
	if (err)
		return err;

	mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc);

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}
enum {
	MLX5_CQE_RES_FORMAT_HASH = 0,
	MLX5_CQE_RES_FORMAT_CSUM = 1,
	MLX5_CQE_RES_FORMAT_CSUM_STRIDX = 3,
};

static int mini_cqe_res_format_to_hw(struct mlx5_ib_dev *dev, u8 format)
{
	switch (format) {
	case MLX5_IB_CQE_RES_FORMAT_HASH:
		return MLX5_CQE_RES_FORMAT_HASH;
	case MLX5_IB_CQE_RES_FORMAT_CSUM:
		return MLX5_CQE_RES_FORMAT_CSUM;
	case MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX:
		if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
			return MLX5_CQE_RES_FORMAT_CSUM_STRIDX;
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}
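/* Create the user-space CQ resources: copy and validate the create command,
 * pin the user buffer, map the doorbell record and build the CREATE_CQ
 * command payload (PAS list, page size/offset, UAR index, CQE compression
 * and padding attributes).
 */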
static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
			  struct mlx5_ib_cq *cq, int entries, u32 **cqb,
			  int *cqe_size, int *index, int *inlen)
{
	struct mlx5_ib_create_cq ucmd = {};
	unsigned long page_size;
	unsigned int page_offset_quantized;
	size_t ucmdlen;
	__be64 *pas;
	int ncont;
	void *cqc;
	int err;
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	ucmdlen = min(udata->inlen, sizeof(ucmd));
	if (ucmdlen < offsetof(struct mlx5_ib_create_cq, flags))
		return -EINVAL;

	if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
		return -EFAULT;

	if ((ucmd.flags & ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD |
			    MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX)))
		return -EINVAL;

	if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||
	    ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	*cqe_size = ucmd.cqe_size;

	cq->buf.umem =
		ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			    entries * ucmd.cqe_size, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(cq->buf.umem)) {
		err = PTR_ERR(cq->buf.umem);
		return err;
	}

	page_size = mlx5_umem_find_best_cq_quantized_pgoff(
		cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT,
		page_offset, 64, &page_offset_quantized);
	if (!page_size) {
		err = -EINVAL;
		goto err_umem;
	}

	err = mlx5_ib_db_map_user(context, udata, ucmd.db_addr, &cq->db);
	if (err)
		goto err_umem;

	ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size);
	mlx5_ib_dbg(
		dev,
		"addr 0x%llx, size %u, npages %zu, page_size %lu, ncont %d\n",
		ucmd.buf_addr, entries * ucmd.cqe_size,
		ib_umem_num_pages(cq->buf.umem), page_size, ncont);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * ncont;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_db;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX) {
		*index = ucmd.uar_page_index;
	} else if (context->bfregi.lib_uar_dyn) {
		err = -EINVAL;
		goto err_cqb;
	} else {
		*index = context->bfregi.sys_pages[0];
	}

	if (ucmd.cqe_comp_en == 1) {
		int mini_cqe_format;

		if (!((*cqe_size == 128 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression_128)) ||
		      (*cqe_size == 64 &&
		       MLX5_CAP_GEN(dev->mdev, cqe_compression)))) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev, "CQE compression is not supported for size %d!\n",
				     *cqe_size);
			goto err_cqb;
		}

		mini_cqe_format =
			mini_cqe_res_format_to_hw(dev,
						  ucmd.cqe_comp_res_format);
		if (mini_cqe_format < 0) {
			err = mini_cqe_format;
			mlx5_ib_dbg(dev, "CQE compression res format %d error: %d\n",
				    ucmd.cqe_comp_res_format, err);
			goto err_cqb;
		}

		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
		MLX5_SET(cqc, cqc, mini_cqe_res_format, mini_cqe_format);
	}

	if (ucmd.flags & MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD) {
		if (*cqe_size != 128 ||
		    !MLX5_CAP_GEN(dev->mdev, cqe_128_always)) {
			err = -EOPNOTSUPP;
			mlx5_ib_warn(dev,
				     "CQE padding is not supported for CQE size of %dB!\n",
				     *cqe_size);
			goto err_cqb;
		}

		cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
	}

	MLX5_SET(create_cq_in, *cqb, uid, context->devx_uid);
	return 0;

err_cqb:
	kvfree(*cqb);

err_db:
	mlx5_ib_db_unmap_user(context, &cq->db);

err_umem:
	ib_umem_release(cq->buf.umem);
	return err;
}
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	mlx5_ib_db_unmap_user(context, &cq->db);
	ib_umem_release(cq->buf.umem);
}
static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
			     struct mlx5_ib_cq_buf *buf)
{
	void *cqe;
	struct mlx5_cqe64 *cqe64;
	int i;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe(cq, i);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}
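/* Allocate the kernel-owned CQ resources: doorbell record and fragmented
 * CQE buffer, and build the CREATE_CQ command payload for them.
 */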
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db  = cq->db.db;
	cq->mcq.arm_db     = cq->db.db + 1;
	cq->mcq.cqe_sz = cqe_size;

	err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_frag_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
		 cq->buf.frag_buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.frag_buf.page_shift -
		 MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}
static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata)
{
	struct ib_device *ibdev = ibcq->device;
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	u32 out[MLX5_ST_SZ_DW(create_cq_out)];
	int index;
	int inlen;
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return -EINVAL;

	if (check_cq_create_flags(attr->flags))
		return -EOPNOTSUPP;

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return -EINVAL;

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (udata) {
		err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size,
				     &index, &inlen);
		if (err)
			return err;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			return err;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out));
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (udata)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp  = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (udata)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return 0;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (udata)
		destroy_cq_user(cq, udata);
	else
		destroy_cq_kernel(dev, cq);
	return err;
}
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int ret;

	ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (ret)
		return ret;

	if (udata)
		destroy_cq_user(mcq, udata);
	else
		destroy_cq_kernel(dev, mcq);

	return 0;
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
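/* Remove all CQEs that belong to the QP (or XRC SRQ) identified by rsn from
 * the CQ, compacting the remaining entries. The caller must hold the CQ
 * lock; mlx5_ib_cq_clean() below is the locking wrapper.
 */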
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -EOPNOTSUPP;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata,
		       int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	/* check multiplication overflow */
	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
		return -EINVAL;

	umem = ib_umem_get(&dev->ib_dev, ucmd.buf_addr,
			   (size_t)ucmd.cqe_size * entries,
			   IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_frag_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}
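/* After a successful RESIZE_CQ command, copy the still-valid CQEs from the
 * old buffer into the resize buffer, fixing up the ownership bit of each
 * copied entry, until the MLX5_CQE_RESIZE_CQ marker is reached.
 */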
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
		dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
					     (i + 1) & cq->resize_buf->nent);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}
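/* resize_cq verb: allocate the new CQE buffer (user or kernel), issue
 * MODIFY_CQ with the resize field-select masks, then switch the CQ over to
 * the new buffer under the CQ lock.
 */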
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	unsigned int page_offset_quantized = 0;
	unsigned int page_shift;
	int inlen;
	int cqe_size;
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		unsigned long page_size;

		err = resize_user(dev, cq, entries, udata, &cqe_size);
		if (err)
			goto ex;

		page_size = mlx5_umem_find_best_cq_quantized_pgoff(
			cq->resize_umem, cqc, log_page_size,
			MLX5_ADAPTER_PAGE_SHIFT, page_offset, 64,
			&page_offset_quantized);
		if (!page_size) {
			err = -EINVAL;
			goto ex_resize;
		}
		npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size);
		page_shift = order_base_2(page_size);
	} else {
		struct mlx5_frag_buf *frag_buf;

		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (err)
			goto ex;
		frag_buf = &cq->resize_buf->frag_buf;
		npas = frag_buf->npages;
		page_shift = frag_buf->page_shift;
	}

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas,
				     0);
	else
		mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, page_offset, page_offset_quantized);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}
	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	ib_umem_release(cq->resize_umem);
	if (!udata) {
		free_cq_buf(dev, cq->resize_buf);
		cq->resize_buf = NULL;
	}
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}
/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}