/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_cache.h>
#include "mlx5_ib.h"
static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq)
{
	struct ib_cq *ibcq = &to_mibcq(cq)->ibcq;

	ibcq->comp_handler(ibcq, ibcq->cq_context);
}
static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
{
	struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct ib_cq *ibcq = &cq->ibcq;
	struct ib_event event;

	if (type != MLX5_EVENT_TYPE_CQ_ERROR) {
		mlx5_ib_warn(dev, "Unexpected event type %d on CQ %06x\n",
			     type, mcq->cqn);
		return;
	}

	if (ibcq->event_handler) {
		event.device     = &dev->ib_dev;
		event.event      = IB_EVENT_CQ_ERR;
		event.element.cq = ibcq;
		ibcq->event_handler(&event, ibcq->cq_context);
	}
}
static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
{
	return mlx5_buf_offset(&buf->buf, n * size);
}
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
	return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
}
static u8 sw_ownership_bit(int n, int nent)
{
	return (n & nent) ? 1 : 0;
}
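
/*
 * CQE ownership: a CQE at index n belongs to software when its opcode is
 * not MLX5_CQE_INVALID and its owner bit matches the wrap parity of n,
 * i.e. bit (n & cqe_cnt) where cqe_cnt = ibcq.cqe + 1 is the power-of-two
 * number of entries.  sw_ownership_bit() computes the same parity when
 * CQEs are rewritten during a resize.
 */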
static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n)
{
	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
	struct mlx5_cqe64 *cqe64;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
		return cqe;
	} else {
		return NULL;
	}
}
static void *next_cqe_sw(struct mlx5_ib_cq *cq)
{
	return get_sw_cqe(cq, cq->mcq.cons_index);
}
static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
{
	switch (wq->wr_data[idx]) {
	case IB_WR_LOCAL_INV:
		return IB_WC_LOCAL_INV;

	default:
		pr_warn("unknown completion status\n");
		return 0;
	}
}
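
/*
 * handle_good_req() translates the opcode carried in the top byte of
 * sop_drop_qpn of a requester CQE into the corresponding ib_wc opcode;
 * UMR completions recover the original verb from wq->wr_data.
 */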
static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			    struct mlx5_ib_wq *wq, int idx)
{
	wc->wc_flags = 0;
	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
	case MLX5_OPCODE_RDMA_WRITE_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_RDMA_WRITE:
		wc->opcode = IB_WC_RDMA_WRITE;
		break;
	case MLX5_OPCODE_SEND_IMM:
		wc->wc_flags |= IB_WC_WITH_IMM;
		/* fall through */
	case MLX5_OPCODE_SEND:
	case MLX5_OPCODE_SEND_INVAL:
		wc->opcode = IB_WC_SEND;
		break;
	case MLX5_OPCODE_RDMA_READ:
		wc->opcode = IB_WC_RDMA_READ;
		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
		break;
	case MLX5_OPCODE_ATOMIC_CS:
		wc->opcode = IB_WC_COMP_SWAP;
		break;
	case MLX5_OPCODE_ATOMIC_FA:
		wc->opcode = IB_WC_FETCH_ADD;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_CS:
		wc->opcode = IB_WC_MASKED_COMP_SWAP;
		break;
	case MLX5_OPCODE_ATOMIC_MASKED_FA:
		wc->opcode = IB_WC_MASKED_FETCH_ADD;
		break;
	case MLX5_OPCODE_UMR:
		wc->opcode = get_umr_comp(wq, idx);
		break;
	}
}
enum {
	MLX5_GRH_IN_BUFFER = 1,
};
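
/*
 * handle_responder() fills a receive work completion: the WR id comes from
 * the (XRC) SRQ or from the receive ring, then opcode-specific fields
 * (immediate data, invalidated rkey, checksum status) are decoded, and on
 * RoCE ports the VLAN and network header type are reported as well.
 */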
static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
			     struct mlx5_ib_qp *qp)
{
	enum rdma_link_layer ll = rdma_port_get_link_layer(qp->ibqp.device, 1);
	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
	struct mlx5_ib_srq *srq;
	struct mlx5_ib_wq *wq;
	u16 wqe_ctr;
	u8 roce_packet_type;
	bool vlan_present;
	u8 g;

	if (qp->ibqp.srq || qp->ibqp.xrcd) {
		struct mlx5_core_srq *msrq = NULL;

		if (qp->ibqp.xrcd) {
			msrq = mlx5_core_get_srq(dev->mdev,
						 be32_to_cpu(cqe->srqn));
			srq = to_mibsrq(msrq);
		} else {
			srq = to_msrq(qp->ibqp.srq);
		}
		if (srq) {
			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
			wc->wr_id = srq->wrid[wqe_ctr];
			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			if (msrq && atomic_dec_and_test(&msrq->refcount))
				complete(&msrq->free);
		}
	} else {
		wq = &qp->rq;
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		++wq->tail;
	}
	wc->byte_len = be32_to_cpu(cqe->byte_cnt);

	switch (cqe->op_own >> 4) {
	case MLX5_CQE_RESP_WR_IMM:
		wc->opcode	= IB_WC_RECV_RDMA_WITH_IMM;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND:
		wc->opcode   = IB_WC_RECV;
		wc->wc_flags = IB_WC_IP_CSUM_OK;
		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
			       (cqe->hds_ip_ext & CQE_L4_OK))))
			wc->wc_flags = 0;
		break;
	case MLX5_CQE_RESP_SEND_IMM:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_IMM;
		wc->ex.imm_data = cqe->imm_inval_pkey;
		break;
	case MLX5_CQE_RESP_SEND_INV:
		wc->opcode	= IB_WC_RECV;
		wc->wc_flags	= IB_WC_WITH_INVALIDATE;
		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
		break;
	}
	wc->slid	   = be16_to_cpu(cqe->slid);
	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
	wc->dlid_path_bits = cqe->ml_path;
	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
	wc->wc_flags |= g ? IB_WC_GRH : 0;
	if (unlikely(is_qp1(qp->ibqp.qp_type))) {
		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;

		ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
				    &wc->pkey_index);
	} else {
		wc->pkey_index = 0;
	}

	if (ll != IB_LINK_LAYER_ETHERNET) {
		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
		return;
	}

	vlan_present = cqe->l4_l3_hdr_type & 0x1;
	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
	if (vlan_present) {
		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	} else {
		wc->sl = 0;
	}

	switch (roce_packet_type) {
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH:
		wc->network_hdr_type = RDMA_NETWORK_IB;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6:
		wc->network_hdr_type = RDMA_NETWORK_IPV6;
		break;
	case MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4:
		wc->network_hdr_type = RDMA_NETWORK_IPV4;
		break;
	}
	wc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
}
static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
{
	__be32 *p = (__be32 *)cqe;
	int i;

	mlx5_ib_warn(dev, "dump error cqe\n");
	for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4)
		pr_info("%08x %08x %08x %08x\n", be32_to_cpu(p[0]),
			be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			be32_to_cpu(p[3]));
}
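
/*
 * mlx5_handle_error_cqe() maps the hardware error syndrome of a completion
 * to an ib_wc status code and records the vendor syndrome; most syndromes
 * also dump the raw CQE above for debugging.
 */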
static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
				  struct mlx5_err_cqe *cqe,
				  struct ib_wc *wc)
{
	int dump = 1;

	switch (cqe->syndrome) {
	case MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR:
		wc->status = IB_WC_LOC_LEN_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR:
		wc->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_PROT_ERR:
		wc->status = IB_WC_LOC_PROT_ERR;
		break;
	case MLX5_CQE_SYNDROME_WR_FLUSH_ERR:
		dump = 0;
		wc->status = IB_WC_WR_FLUSH_ERR;
		break;
	case MLX5_CQE_SYNDROME_MW_BIND_ERR:
		wc->status = IB_WC_MW_BIND_ERR;
		break;
	case MLX5_CQE_SYNDROME_BAD_RESP_ERR:
		wc->status = IB_WC_BAD_RESP_ERR;
		break;
	case MLX5_CQE_SYNDROME_LOCAL_ACCESS_ERR:
		wc->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
		wc->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ACCESS_ERR:
		wc->status = IB_WC_REM_ACCESS_ERR;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_OP_ERR:
		wc->status = IB_WC_REM_OP_ERR;
		break;
	case MLX5_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
		wc->status = IB_WC_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
		wc->status = IB_WC_RNR_RETRY_EXC_ERR;
		dump = 0;
		break;
	case MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR:
		wc->status = IB_WC_REM_ABORT_ERR;
		break;
	default:
		wc->status = IB_WC_GENERAL_ERR;
		break;
	}

	wc->vendor_err = cqe->vendor_err_synd;
	if (dump)
		dump_cqe(dev, cqe);
}
static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
{
	/* TBD: waiting decision
	 */
	return 0;
}
static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
{
	struct mlx5_wqe_data_seg *dpseg;
	void *addr;

	dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
		sizeof(struct mlx5_wqe_raddr_seg) +
		sizeof(struct mlx5_wqe_atomic_seg);
	addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);

	return addr;
}
static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			  uint16_t idx)
{
	void *addr;
	int byte_count;
	int i;

	if (!is_atomic_response(qp, idx))
		return;

	byte_count = be32_to_cpu(cqe64->byte_cnt);
	addr = mlx5_get_atomic_laddr(qp, idx);

	if (byte_count == 4) {
		*(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
	} else {
		for (i = 0; i < byte_count; i += 8) {
			*(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
			addr += 8;
		}
	}
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
			   u16 tail, u16 head)
{
	u16 idx;

	do {
		idx = tail & (qp->sq.wqe_cnt - 1);
		handle_atomic(qp, cqe64, idx);
		if (idx == head)
			break;

		tail = qp->sq.w_list[idx].next;
	} while (1);
	tail = qp->sq.w_list[idx].next;
	qp->sq.last_poll = tail;
}
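
/*
 * Atomic responses are copied back into the caller's buffer in host byte
 * order by walking the send work list from last_poll up to the completed
 * index; the copy itself is currently gated by is_atomic_response(), which
 * always returns 0 (see the TBD above).
 */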
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
	mlx5_buf_free(dev->mdev, &buf->buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
			     struct ib_sig_err *item)
{
	u16 syndrome = be16_to_cpu(cqe->syndrome);

#define GUARD_ERR   (1 << 13)
#define APPTAG_ERR  (1 << 12)
#define REFTAG_ERR  (1 << 11)

	if (syndrome & GUARD_ERR) {
		item->err_type = IB_SIG_BAD_GUARD;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
	} else if (syndrome & REFTAG_ERR) {
		item->err_type = IB_SIG_BAD_REFTAG;
		item->expected = be32_to_cpu(cqe->expected_reftag);
		item->actual = be32_to_cpu(cqe->actual_reftag);
	} else if (syndrome & APPTAG_ERR) {
		item->err_type = IB_SIG_BAD_APPTAG;
		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
	} else {
		pr_err("Got signature completion error with bad syndrome %04x\n",
		       syndrome);
	}

	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
	item->key = be32_to_cpu(cqe->mkey);
}
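
/*
 * When the device is in internal error state the hardware no longer
 * generates completions, so the two helpers below flush outstanding send
 * and receive WQEs in software with IB_WC_WR_FLUSH_ERR.
 */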
static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	unsigned int idx;
	int np;
	int i;

	wq = &qp->sq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		idx = wq->last_poll & (wq->wqe_cnt - 1);
		wc->wr_id = wq->wrid[idx];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
		wq->last_poll = wq->w_list[idx].next;
	}
	*npolled = np;
}
static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
			 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_wq *wq;
	unsigned int cur;
	int np;
	int i;

	wq = &qp->rq;
	cur = wq->head - wq->tail;
	np = *npolled;

	if (cur == 0)
		return;

	for (i = 0; i < cur && np < num_entries; i++) {
		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
		wc->status = IB_WC_WR_FLUSH_ERR;
		wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
		wq->tail++;
		np++;
		wc->qp = &qp->ibqp;
		wc++;
	}
	*npolled = np;
}
static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
				 struct ib_wc *wc, int *npolled)
{
	struct mlx5_ib_qp *qp;

	*npolled = 0;
	/* Find uncompleted WQEs belonging to that cq and return flushed
	 * completions for them.
	 */
	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}

	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
		sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
		if (*npolled >= num_entries)
			return;
	}
}
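
/*
 * mlx5_poll_one() consumes a single hardware CQE: it advances the consumer
 * index, looks up the QP the CQE belongs to (caching it in *cur_qp) and
 * dispatches on the opcode (requester, responder, resize, error or
 * signature error completion).
 */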
static int mlx5_poll_one(struct mlx5_ib_cq *cq,
			 struct mlx5_ib_qp **cur_qp,
			 struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_err_cqe *err_cqe;
	struct mlx5_cqe64 *cqe64;
	struct mlx5_core_qp *mqp;
	struct mlx5_ib_wq *wq;
	struct mlx5_sig_err_cqe *sig_err_cqe;
	struct mlx5_core_mkey *mmkey;
	struct mlx5_ib_mr *mr;
	uint8_t opcode;
	uint32_t qpn;
	u16 wqe_ctr;
	void *cqe;
	int idx;

repoll:
	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;

	++cq->mcq.cons_index;

	/* Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	opcode = cqe64->op_own >> 4;
	if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
		if (likely(cq->resize_buf)) {
			free_cq_buf(dev, &cq->buf);
			cq->buf = *cq->resize_buf;
			kfree(cq->resize_buf);
			cq->resize_buf = NULL;
			goto repoll;
		} else {
			mlx5_ib_warn(dev, "unexpected resize cqe\n");
		}
	}

	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
	if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) {
		/* We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
		*cur_qp = to_mibqp(mqp);
	}

	wc->qp = &(*cur_qp)->ibqp;
	switch (opcode) {
	case MLX5_CQE_REQ:
		wq = &(*cur_qp)->sq;
		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
		idx = wqe_ctr & (wq->wqe_cnt - 1);
		handle_good_req(wc, cqe64, wq, idx);
		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
		wc->wr_id = wq->wrid[idx];
		wq->tail = wq->wqe_head[idx] + 1;
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESP_WR_IMM:
	case MLX5_CQE_RESP_SEND:
	case MLX5_CQE_RESP_SEND_IMM:
	case MLX5_CQE_RESP_SEND_INV:
		handle_responder(wc, cqe64, *cur_qp);
		wc->status = IB_WC_SUCCESS;
		break;
	case MLX5_CQE_RESIZE_CQ:
		break;
	case MLX5_CQE_REQ_ERR:
	case MLX5_CQE_RESP_ERR:
		err_cqe = (struct mlx5_err_cqe *)cqe64;
		mlx5_handle_error_cqe(dev, err_cqe, wc);
		mlx5_ib_dbg(dev, "%s error cqe on cqn 0x%x:\n",
			    opcode == MLX5_CQE_REQ_ERR ?
			    "Requestor" : "Responder", cq->mcq.cqn);
		mlx5_ib_dbg(dev, "syndrome 0x%x, vendor syndrome 0x%x\n",
			    err_cqe->syndrome, err_cqe->vendor_err_synd);
		if (opcode == MLX5_CQE_REQ_ERR) {
			wq = &(*cur_qp)->sq;
			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
			idx = wqe_ctr & (wq->wqe_cnt - 1);
			wc->wr_id = wq->wrid[idx];
			wq->tail = wq->wqe_head[idx] + 1;
		} else {
			struct mlx5_ib_srq *srq;

			if ((*cur_qp)->ibqp.srq) {
				srq = to_msrq((*cur_qp)->ibqp.srq);
				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
				wc->wr_id = srq->wrid[wqe_ctr];
				mlx5_ib_free_srq_wqe(srq, wqe_ctr);
			} else {
				wq = &(*cur_qp)->rq;
				wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
				++wq->tail;
			}
		}
		break;
	case MLX5_CQE_SIG_ERR:
		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

		read_lock(&dev->mdev->priv.mkey_table.lock);
		mmkey = __mlx5_mr_lookup(dev->mdev,
					 mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
		mr = to_mibmr(mmkey);
		get_sig_err_item(sig_err_cqe, &mr->sig->err_item);
		mr->sig->sig_err_exists = true;
		mr->sig->sigerr_count++;

		mlx5_ib_warn(dev, "CQN: 0x%x Got SIGERR on key: 0x%x err_type %x err_offset %llx expected %x actual %x\n",
			     cq->mcq.cqn, mr->sig->err_item.key,
			     mr->sig->err_item.err_type,
			     mr->sig->err_item.sig_err_offset,
			     mr->sig->err_item.expected,
			     mr->sig->err_item.actual);

		read_unlock(&dev->mdev->priv.mkey_table.lock);
		break;
	}

	return 0;
}
static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
			struct ib_wc *wc)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_ib_wc *soft_wc, *next;
	int npolled = 0;

	list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) {
		if (npolled >= num_entries)
			break;

		mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
			    cq->mcq.cqn);

		wc[npolled++] = soft_wc->wc;
		list_del(&soft_wc->list);
		kfree(soft_wc);
	}

	return npolled;
}
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	struct mlx5_ib_qp *cur_qp = NULL;
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_core_dev *mdev = dev->mdev;
	unsigned long flags;
	int soft_polled = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);
	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
		goto out;
	}

	if (unlikely(!list_empty(&cq->wc_list)))
		soft_polled = poll_soft_wc(cq, num_entries, wc);

	for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
		if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
			break;
	}

	if (npolled)
		mlx5_cq_set_ci(&cq->mcq);
out:
	spin_unlock_irqrestore(&cq->lock, flags);

	return soft_polled + npolled;
}
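
/*
 * Arm the CQ for the next (solicited) completion event.  Returns 1 when
 * IB_CQ_REPORT_MISSED_EVENTS was requested and software-generated
 * completions are already pending on wc_list, so the caller knows to poll
 * again.
 */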
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mlx5_core_dev *mdev = to_mdev(ibcq->device)->mdev;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void __iomem *uar_page = mdev->priv.uar->map;
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->lock, irq_flags);
	if (cq->notify_flags != IB_CQ_NEXT_COMP)
		cq->notify_flags = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list))
		ret = 1;
	spin_unlock_irqrestore(&cq->lock, irq_flags);

	mlx5_cq_arm(&cq->mcq,
		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
		    uar_page, to_mcq(ibcq)->mcq.cons_index);

	return ret;
}
static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
			int nent, int cqe_size)
{
	int err;

	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
	if (err)
		return err;

	buf->cqe_size = cqe_size;
	buf->nent = nent;

	return 0;
}
*dev
, struct ib_udata
*udata
,
743 struct ib_ucontext
*context
, struct mlx5_ib_cq
*cq
,
744 int entries
, u32
**cqb
,
745 int *cqe_size
, int *index
, int *inlen
)
747 struct mlx5_ib_create_cq ucmd
= {};
756 ucmdlen
= udata
->inlen
< sizeof(ucmd
) ?
757 (sizeof(ucmd
) - sizeof(ucmd
.flags
)) : sizeof(ucmd
);
759 if (ib_copy_from_udata(&ucmd
, udata
, ucmdlen
))
762 if (ucmdlen
== sizeof(ucmd
) &&
763 (ucmd
.flags
& ~(MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD
)))
766 if (ucmd
.cqe_size
!= 64 && ucmd
.cqe_size
!= 128)
769 *cqe_size
= ucmd
.cqe_size
;
771 cq
->buf
.umem
= ib_umem_get(context
, ucmd
.buf_addr
,
772 entries
* ucmd
.cqe_size
,
773 IB_ACCESS_LOCAL_WRITE
, 1);
774 if (IS_ERR(cq
->buf
.umem
)) {
775 err
= PTR_ERR(cq
->buf
.umem
);
779 err
= mlx5_ib_db_map_user(to_mucontext(context
), ucmd
.db_addr
,
784 mlx5_ib_cont_pages(cq
->buf
.umem
, ucmd
.buf_addr
, 0, &npages
, &page_shift
,
786 mlx5_ib_dbg(dev
, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
787 ucmd
.buf_addr
, entries
* ucmd
.cqe_size
, npages
, page_shift
, ncont
);
789 *inlen
= MLX5_ST_SZ_BYTES(create_cq_in
) +
790 MLX5_FLD_SZ_BYTES(create_cq_in
, pas
[0]) * ncont
;
791 *cqb
= kvzalloc(*inlen
, GFP_KERNEL
);
797 pas
= (__be64
*)MLX5_ADDR_OF(create_cq_in
, *cqb
, pas
);
798 mlx5_ib_populate_pas(dev
, cq
->buf
.umem
, page_shift
, pas
, 0);
800 cqc
= MLX5_ADDR_OF(create_cq_in
, *cqb
, cq_context
);
801 MLX5_SET(cqc
, cqc
, log_page_size
,
802 page_shift
- MLX5_ADAPTER_PAGE_SHIFT
);
804 *index
= to_mucontext(context
)->bfregi
.sys_pages
[0];
806 if (ucmd
.cqe_comp_en
== 1) {
807 if (!((*cqe_size
== 128 &&
808 MLX5_CAP_GEN(dev
->mdev
, cqe_compression_128
)) ||
810 MLX5_CAP_GEN(dev
->mdev
, cqe_compression
)))) {
812 mlx5_ib_warn(dev
, "CQE compression is not supported for size %d!\n",
817 if (unlikely(!ucmd
.cqe_comp_res_format
||
818 !(ucmd
.cqe_comp_res_format
<
819 MLX5_IB_CQE_RES_RESERVED
) ||
820 (ucmd
.cqe_comp_res_format
&
821 (ucmd
.cqe_comp_res_format
- 1)))) {
823 mlx5_ib_warn(dev
, "CQE compression res format %d is not supported!\n",
824 ucmd
.cqe_comp_res_format
);
828 MLX5_SET(cqc
, cqc
, cqe_comp_en
, 1);
829 MLX5_SET(cqc
, cqc
, mini_cqe_res_format
,
830 ilog2(ucmd
.cqe_comp_res_format
));
833 if (ucmd
.flags
& MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD
) {
834 if (*cqe_size
!= 128 ||
835 !MLX5_CAP_GEN(dev
->mdev
, cqe_128_always
)) {
838 "CQE padding is not supported for CQE size of %dB!\n",
843 cq
->private_flags
|= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD
;
852 mlx5_ib_db_unmap_user(to_mucontext(context
), &cq
->db
);
855 ib_umem_release(cq
->buf
.umem
);
static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
{
	mlx5_ib_db_unmap_user(to_mucontext(context), &cq->db);
	ib_umem_release(cq->buf.umem);
}
static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
{
	int i;
	void *cqe;
	struct mlx5_cqe64 *cqe64;

	for (i = 0; i < buf->nent; i++) {
		cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;
	}
}
static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			    int entries, int cqe_size,
			    u32 **cqb, int *index, int *inlen)
{
	__be64 *pas;
	void *cqc;
	int err;

	err = mlx5_db_alloc(dev->mdev, &cq->db);
	if (err)
		return err;

	cq->mcq.set_ci_db = cq->db.db;
	cq->mcq.arm_db    = cq->db.db + 1;
	cq->mcq.cqe_sz    = cqe_size;

	err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
	if (err)
		goto err_db;

	init_cq_buf(cq, &cq->buf);

	*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
	*cqb = kvzalloc(*inlen, GFP_KERNEL);
	if (!*cqb) {
		err = -ENOMEM;
		goto err_buf;
	}

	pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
	mlx5_fill_page_array(&cq->buf.buf, pas);

	cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
	MLX5_SET(cqc, cqc, log_page_size,
		 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);

	*index = dev->mdev->priv.uar->index;

	return 0;

err_buf:
	free_cq_buf(dev, &cq->buf);

err_db:
	mlx5_db_free(dev->mdev, &cq->db);
	return err;
}
static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, &cq->buf);
	mlx5_db_free(dev->mdev, &cq->db);
}
static void notify_soft_wc_handler(struct work_struct *work)
{
	struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq,
					     notify_work);

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
				const struct ib_cq_init_attr *attr,
				struct ib_ucontext *context,
				struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_cq *cq;
	int uninitialized_var(index);
	int uninitialized_var(inlen);
	u32 *cqb = NULL;
	void *cqc;
	int cqe_size;
	unsigned int irqn;
	int eqn;
	int err;

	if (entries < 0 ||
	    (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))))
		return ERR_PTR(-EINVAL);

	if (check_cq_create_flags(attr->flags))
		return ERR_PTR(-EOPNOTSUPP);

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
		return ERR_PTR(-EINVAL);

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	cq->ibcq.cqe = entries - 1;
	mutex_init(&cq->resize_mutex);
	spin_lock_init(&cq->lock);
	cq->resize_buf = NULL;
	cq->resize_umem = NULL;
	cq->create_flags = attr->flags;
	INIT_LIST_HEAD(&cq->list_send_qp);
	INIT_LIST_HEAD(&cq->list_recv_qp);

	if (context) {
		err = create_cq_user(dev, udata, context, cq, entries,
				     &cqb, &cqe_size, &index, &inlen);
		if (err)
			goto err_create;
	} else {
		cqe_size = cache_line_size() == 128 ? 128 : 64;
		err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb,
				       &index, &inlen);
		if (err)
			goto err_create;

		INIT_WORK(&cq->notify_work, notify_soft_wc_handler);
	}

	err = mlx5_vector2eqn(dev->mdev, vector, &eqn, &irqn);
	if (err)
		goto err_cqb;

	cq->cqe_size = cqe_size;

	cqc = MLX5_ADDR_OF(create_cq_in, cqb, cq_context);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));
	MLX5_SET(cqc, cqc, uar_page, index);
	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma);
	if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN)
		MLX5_SET(cqc, cqc, oi, 1);

	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
	if (err)
		goto err_cqb;

	mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn);
	cq->mcq.irqn = irqn;
	if (context)
		cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp;
	else
		cq->mcq.comp = mlx5_ib_cq_comp;
	cq->mcq.event = mlx5_ib_cq_event;

	INIT_LIST_HEAD(&cq->wc_list);

	if (context)
		if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) {
			err = -EFAULT;
			goto err_cmd;
		}

	kvfree(cqb);
	return &cq->ibcq;

err_cmd:
	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

err_cqb:
	kvfree(cqb);
	if (context)
		destroy_cq_user(cq, context);
	else
		destroy_cq_kernel(dev, cq);

err_create:
	kfree(cq);

	return ERR_PTR(err);
}
int mlx5_ib_destroy_cq(struct ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	struct ib_ucontext *context = NULL;

	if (cq->uobject)
		context = cq->uobject->context;

	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
	if (context)
		destroy_cq_user(mcq, context);
	else
		destroy_cq_kernel(dev, mcq);

	kfree(mcq);

	return 0;
}
static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
{
	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
}
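
/*
 * CQ cleaning removes completions for a QP (or SRQ) that is being torn
 * down: entries matching rsn are dropped and the remaining entries are
 * compacted towards the producer side, preserving the ownership bit of
 * each destination slot.
 */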
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq)
{
	struct mlx5_cqe64 *cqe64, *dest64;
	void *cqe, *dest;
	u32 prod_index;
	int nfreed = 0;
	u8 owner_bit;

	if (!cq)
		return;

	/* First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++)
		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
			break;

	/* Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
			dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64;
			owner_bit = dest64->op_own & MLX5_CQE_OWNER_MASK;
			memcpy(dest, cqe, cq->mcq.cqe_sz);
			dest64->op_own = owner_bit |
				(dest64->op_own & ~MLX5_CQE_OWNER_MASK);
		}
	}

	if (nfreed) {
		cq->mcq.cons_index += nfreed;
		/* Make sure update of buffer contents is done before
		 * updating consumer index.
		 */
		wmb();
		mlx5_cq_set_ci(&cq->mcq);
	}
}
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
{
	if (!cq)
		return;

	spin_lock_irq(&cq->lock);
	__mlx5_ib_cq_clean(cq, qpn, srq);
	spin_unlock_irq(&cq->lock);
}
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->device);
	struct mlx5_ib_cq *mcq = to_mcq(cq);
	int err;

	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
		return -ENOSYS;

	if (cq_period > MLX5_MAX_CQ_PERIOD)
		return -EINVAL;

	err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,
					     cq_period, cq_count);
	if (err)
		mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);

	return err;
}
static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
		       int entries, struct ib_udata *udata, int *npas,
		       int *page_shift, int *cqe_size)
{
	struct mlx5_ib_resize_cq ucmd;
	struct ib_umem *umem;
	int err;
	int npages;
	struct ib_ucontext *context = cq->buf.umem->context;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err)
		return err;

	if (ucmd.reserved0 || ucmd.reserved1)
		return -EINVAL;

	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
			   IB_ACCESS_LOCAL_WRITE, 1);
	if (IS_ERR(umem)) {
		err = PTR_ERR(umem);
		return err;
	}

	mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
			   npas, NULL);

	cq->resize_umem = umem;
	*cqe_size = ucmd.cqe_size;

	return 0;
}
static void un_resize_user(struct mlx5_ib_cq *cq)
{
	ib_umem_release(cq->resize_umem);
}
static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
			 int entries, int cqe_size)
{
	int err;

	cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL);
	if (!cq->resize_buf)
		return -ENOMEM;

	err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
	if (err)
		goto ex;

	init_cq_buf(cq, cq->resize_buf);

	return 0;

ex:
	kfree(cq->resize_buf);
	return err;
}
static void un_resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
{
	free_cq_buf(dev, cq->resize_buf);
	cq->resize_buf = NULL;
}
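
/*
 * For kernel CQs the resize is completed in software: pending CQEs are
 * copied from the old buffer into the new one until the RESIZE_CQ CQE is
 * reached, rewriting the ownership bit for the new buffer size.
 */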
static int copy_resize_cqes(struct mlx5_ib_cq *cq)
{
	struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
	struct mlx5_cqe64 *scqe64;
	struct mlx5_cqe64 *dcqe64;
	void *start_cqe;
	void *scqe;
	void *dcqe;
	int ssize;
	int dsize;
	int i;
	u8 sw_own;

	ssize = cq->buf.cqe_size;
	dsize = cq->resize_buf->cqe_size;
	if (ssize != dsize) {
		mlx5_ib_warn(dev, "resize from different cqe size is not supported\n");
		return -EINVAL;
	}

	i = cq->mcq.cons_index;
	scqe = get_sw_cqe(cq, i);
	scqe64 = ssize == 64 ? scqe : scqe + 64;
	start_cqe = scqe;
	if (!scqe) {
		mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
		return -EINVAL;
	}

	while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
		dcqe = get_cqe_from_buf(cq->resize_buf,
					(i + 1) & (cq->resize_buf->nent),
					dsize);
		dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
		sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
		memcpy(dcqe, scqe, dsize);
		dcqe64->op_own = (dcqe64->op_own & ~MLX5_CQE_OWNER_MASK) | sw_own;

		++i;
		scqe = get_sw_cqe(cq, i);
		scqe64 = ssize == 64 ? scqe : scqe + 64;
		if (!scqe) {
			mlx5_ib_warn(dev, "expected cqe in sw ownership\n");
			return -EINVAL;
		}

		if (scqe == start_cqe) {
			pr_warn("resize CQ failed to get resize CQE, CQN 0x%x\n",
				cq->mcq.cqn);
			return -ENOMEM;
		}
	}
	++cq->mcq.cons_index;
	return 0;
}
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	void *cqc;
	u32 *in;
	int err;
	int npas;
	__be64 *pas;
	int page_shift;
	int inlen;
	int uninitialized_var(cqe_size);
	unsigned long flags;

	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
		pr_info("Firmware does not support resize CQ\n");
		return -ENOSYS;
	}

	if (entries < 1 ||
	    entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) {
		mlx5_ib_warn(dev, "wrong entries number %d, max %d\n",
			     entries,
			     1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz));
		return -EINVAL;
	}

	entries = roundup_pow_of_two(entries + 1);
	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
		return -EINVAL;

	if (entries == ibcq->cqe + 1)
		return 0;

	mutex_lock(&cq->resize_mutex);
	if (udata) {
		err = resize_user(dev, cq, entries, udata, &npas, &page_shift,
				  &cqe_size);
	} else {
		cqe_size = 64;
		err = resize_kernel(dev, cq, entries, cqe_size);
		if (!err) {
			npas = cq->resize_buf->buf.npages;
			page_shift = cq->resize_buf->buf.page_shift;
		}
	}

	if (err)
		goto ex;

	inlen = MLX5_ST_SZ_BYTES(modify_cq_in) +
		MLX5_FLD_SZ_BYTES(modify_cq_in, pas[0]) * npas;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto ex_resize;
	}

	pas = (__be64 *)MLX5_ADDR_OF(modify_cq_in, in, pas);
	if (udata)
		mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
				     pas, 0);
	else
		mlx5_fill_page_array(&cq->resize_buf->buf, pas);

	MLX5_SET(modify_cq_in, in,
		 modify_field_select_resize_field_select.resize_field_select.resize_field_select,
		 MLX5_MODIFY_CQ_MASK_LOG_SIZE  |
		 MLX5_MODIFY_CQ_MASK_PG_OFFSET |
		 MLX5_MODIFY_CQ_MASK_PG_SIZE);

	cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);

	MLX5_SET(cqc, cqc, log_page_size,
		 page_shift - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(cqc, cqc, cqe_sz,
		 cqe_sz_to_mlx_sz(cqe_size,
				  cq->private_flags &
				  MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD));
	MLX5_SET(cqc, cqc, log_cq_size, ilog2(entries));

	MLX5_SET(modify_cq_in, in, op_mod, MLX5_CQ_OPMOD_RESIZE);
	MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn);

	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
	if (err)
		goto ex_alloc;

	if (udata) {
		cq->ibcq.cqe = entries - 1;
		ib_umem_release(cq->buf.umem);
		cq->buf.umem = cq->resize_umem;
		cq->resize_umem = NULL;
	} else {
		struct mlx5_ib_cq_buf tbuf;
		int resized = 0;

		spin_lock_irqsave(&cq->lock, flags);
		if (cq->resize_buf) {
			err = copy_resize_cqes(cq);
			if (!err) {
				tbuf = cq->buf;
				cq->buf = *cq->resize_buf;
				kfree(cq->resize_buf);
				cq->resize_buf = NULL;
				resized = 1;
			}
		}
		cq->ibcq.cqe = entries - 1;
		spin_unlock_irqrestore(&cq->lock, flags);
		if (resized)
			free_cq_buf(dev, &tbuf);
	}

	mutex_unlock(&cq->resize_mutex);

	kvfree(in);
	return 0;

ex_alloc:
	kvfree(in);

ex_resize:
	if (udata)
		un_resize_user(cq);
	else
		un_resize_kernel(dev, cq);
ex:
	mutex_unlock(&cq->resize_mutex);
	return err;
}
int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
{
	struct mlx5_ib_cq *cq;

	if (!ibcq)
		return 128;

	cq = to_mcq(ibcq);
	return cq->cqe_size;
}
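
/*
 * mlx5_ib_generate_wc() queues a software-generated completion (used, for
 * example, by the driver's GSI QP emulation) on cq->wc_list; depending on
 * how the CQ was armed, notify_work is scheduled to invoke the completion
 * handler.
 */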
/* Called from atomic context */
int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc)
{
	struct mlx5_ib_wc *soft_wc;
	struct mlx5_ib_cq *cq = to_mcq(ibcq);
	unsigned long flags;

	soft_wc = kmalloc(sizeof(*soft_wc), GFP_ATOMIC);
	if (!soft_wc)
		return -ENOMEM;

	soft_wc->wc = *wc;
	spin_lock_irqsave(&cq->lock, flags);
	list_add_tail(&soft_wc->list, &cq->wc_list);
	if (cq->notify_flags == IB_CQ_NEXT_COMP ||
	    wc->status != IB_WC_SUCCESS) {
		cq->notify_flags = 0;
		schedule_work(&cq->notify_work);
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}