/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
#include <linux/hardirq.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};

enum {
	MTHCA_ATOMIC_BYTE_LEN = 8
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	__be32 flags;
	__be64 start;
	__be32 logsize_usrpage;
	__be32 error_eqn;	/* Tavor only */
	__be32 comp_eqn;
	__be32 pd;
	__be32 lkey;
	__be32 last_notified_index;
	__be32 solicit_producer_index;
	__be32 consumer_index;
	__be32 producer_index;
	__be32 cqn;
	__be32 ci_db;		/* Arbel only */
	__be32 state_db;	/* Arbel only */
	u32    reserved;
} __attribute__((packed));
#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};
enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};
struct mthca_cqe {
	__be32 my_qpn;
	__be32 my_ee;
	__be32 rqpn;
	__be16 sl_g_mlpath;
	__be16 rlid;
	__be32 imm_etype_pkey_eec;
	__be32 byte_cnt;
	__be32 wqe;
	u8     opcode;
	u8     is_send;
	u8     reserved;
	u8     owner;
};
struct mthca_err_cqe {
	__be32 my_qpn;
	u32    reserved1[3];
	u8     syndrome;
	u8     vendor_err;
	__be16 db_cnt;
	u32    reserved2;
	__be32 wqe;
	u8     opcode;
	u8     reserved3[2];
	u8     owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
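
/*
 * The CQ buffer is either a single physically contiguous ("direct")
 * allocation or a list of pages; index into it accordingly.
 */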
static inline struct mthca_cqe *get_cqe_from_buf(struct mthca_cq_buf *buf,
						 int entry)
{
	if (buf->is_direct)
		return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	return get_cqe_from_buf(&cq->buf, entry);
}
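
/*
 * Bit 7 of the owner byte says who owns a CQE: cqe_sw() returns the
 * CQE only if software owns it, and set_cqe_hw() hands it back to the
 * HCA once it has been polled.
 */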
static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
{
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}
static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
}
static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
static void dump_cqe(struct mthca_dev *dev, void *cqe_ptr)
{
	__be32 *cqe = cqe_ptr;

	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
	mthca_dbg(dev, "CQE contents %08x %08x %08x %08x %08x %08x %08x %08x\n",
		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
}
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of CQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}
}
void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	++cq->arm_sn;

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
void mthca_cq_event(struct mthca_dev *dev, u32 cqn,
		    enum ib_event_type event_type)
{
	struct mthca_cq *cq;
	struct ib_event event;

	spin_lock(&dev->cq_table.lock);

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		++cq->refcount;

	spin_unlock(&dev->cq_table.lock);

	if (!cq) {
		mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.cq  = &cq->ibcq;
	if (cq->ibcq.event_handler)
		cq->ibcq.event_handler(&event, cq->ibcq.cq_context);

	spin_lock(&dev->cq_table.lock);
	if (!--cq->refcount)
		wake_up(&cq->wait);
	spin_unlock(&dev->cq_table.lock);
}
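
/*
 * Error CQEs carry the send/receive flag in the low bit of the opcode
 * field; successful CQEs carry it in the high bit of is_send.
 */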
static inline int is_recv_cqe(struct mthca_cqe *cqe)
{
	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
	    MTHCA_ERROR_CQE_OPCODE_MASK)
		return !(cqe->opcode & 0x01);
	else
		return !(cqe->is_send & 0x80);
}
void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
		    struct mthca_srq *srq)
{
	struct mthca_cqe *cqe;
	u32 prod_index;
	int i, nfreed = 0;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	if (0)
		mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
			  qpn, cq->cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while ((int) --prod_index - (int) cq->cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn)) {
			if (srq && is_recv_cqe(cqe))
				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
			++nfreed;
		} else if (nfreed)
			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
			       cqe, MTHCA_CQ_ENTRY_SIZE);
	}

	if (nfreed) {
		for (i = 0; i < nfreed; ++i)
			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
}
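
/*
 * Move CQEs that software has not yet polled from the old CQ buffer
 * into the buffer that the CQ is being resized into.
 */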
void mthca_cq_resize_copy_cqes(struct mthca_cq *cq)
{
	int i;

	/*
	 * In Tavor mode, the hardware keeps the consumer and producer
	 * indices mod the CQ size.  Since we might be making the CQ
	 * bigger, we need to deal with the case where the producer
	 * index wrapped around before the CQ was resized.
	 */
	if (!mthca_is_memfree(to_mdev(cq->ibcq.device)) &&
	    cq->ibcq.cqe < cq->resize_buf->cqe) {
		cq->cons_index &= cq->ibcq.cqe;
		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
			cq->cons_index -= cq->ibcq.cqe + 1;
	}

	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
		memcpy(get_cqe_from_buf(&cq->resize_buf->buf,
					i & cq->resize_buf->cqe),
		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
}
int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)
{
	int ret;
	int i;

	ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
			      MTHCA_MAX_DIRECT_CQ_SIZE,
			      &buf->queue, &buf->is_direct,
			      &dev->driver_pd, 1, &buf->mr);
	if (ret)
		return ret;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe_from_buf(buf, i));

	return 0;
}
void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
{
	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
		       buf->is_direct, &buf->mr);
}
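
/*
 * Translate a hardware error syndrome into an ib_wc status.  On
 * Tavor-family HCAs one error CQE may stand for several flushed WQEs,
 * so the CQE is recycled (*free_cqe = 0) until its doorbell count is
 * used up and the whole chain has been reported.
 */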
static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			     struct mthca_qp *qp, int wqe_index, int is_send,
			     struct mthca_err_cqe *cqe,
			     struct ib_wc *entry, int *free_cqe)
{
	int dbd;
	__be32 new_wqe;

	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
		mthca_dbg(dev, "local QP operation err "
			  "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
			  cq->cqn, cq->cons_index);
		dump_cqe(dev, cqe);
	}

	/*
	 * For completions in error, only work request ID, status, vendor error
	 * (and freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	entry->vendor_err = cqe->vendor_err;

	/*
	 * Mem-free HCAs always generate one CQE per WQE, even in the
	 * error case, so we don't have to check the doorbell count, etc.
	 */
	if (mthca_is_memfree(dev))
		return;

	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return;

	be16_add_cpu(&cqe->db_cnt, -dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;
}
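
/*
 * Consume at most one CQE.  Returns -EAGAIN when no software-owned
 * CQE is ready.  *cur_qp caches the QP of the previous completion so
 * that back-to-back completions on the same QP skip the table lookup,
 * and *freed counts consumed CQEs so the caller can ring the
 * consumer-index doorbell once for the whole batch.
 */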
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;

	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();

	if (0) {
		mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));
		dump_cqe(dev, cqe);
	}

	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;

	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}

	entry->qp = &(*cur_qp)->ibqp;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else if ((*cur_qp)->ibqp.srq) {
		struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq);
		u32 wqe = be32_to_cpu(cqe->wqe);
		wq = NULL;
		wqe_index = wqe >> srq->wqe_shift;
		entry->wr_id = srq->wrid[wqe_index];
		mthca_free_srq_wqe(srq, wqe);
	} else {
		s32 wqe;
		wq = &(*cur_qp)->rq;
		wqe = be32_to_cpu(cqe->wqe);
		wqe_index = wqe >> wq->wqe_shift;
		/*
		 * WQE addr == base - 1 might be reported in receive completion
		 * with error instead of (rq size - 1) by Sinai FW 1.0.800 and
		 * Arbel FW 5.1.400.  This bug should be fixed in later FW revs.
		 */
		if (unlikely(wqe_index < 0))
			wqe_index = wq->max - 1;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}

	if (wq) {
		if (wq->last_comp < wqe_index)
			wq->tail += wqe_index - wq->last_comp;
		else
			wq->tail += wqe_index + wq->max - wq->last_comp;

		wq->last_comp = wqe_index;
	}

	if (is_error) {
		handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				 (struct mthca_err_cqe *) cqe,
				 entry, &free_cqe);
		goto out;
	}

	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = MTHCA_ATOMIC_BYTE_LEN;
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode = IB_WC_RECV;
			break;
		}
		entry->slid = be16_to_cpu(cqe->rlid);
		entry->sl = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
			IB_WC_GRH : 0;
	}

	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	npolled = 0;
repoll:
	while (npolled < num_entries) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
		++npolled;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	/*
	 * If a CQ resize is in progress and we discovered that the
	 * old buffer is empty, then peek in the new buffer, and if
	 * it's not empty, switch to the new buffer and continue
	 * polling there.
	 */
	if (unlikely(err == -EAGAIN && cq->resize_buf &&
		     cq->resize_buf->state == CQ_RESIZE_READY)) {
		/*
		 * In Tavor mode, the hardware keeps the producer
		 * index modulo the CQ size.  Since we might be making
		 * the CQ bigger, we need to mask our consumer index
		 * using the size of the old CQ buffer before looking
		 * in the new CQ buffer.
		 */
		if (!mthca_is_memfree(dev))
			cq->cons_index &= cq->ibcq.cqe;

		if (cqe_sw(get_cqe_from_buf(&cq->resize_buf->buf,
					    cq->cons_index & cq->resize_buf->cqe))) {
			struct mthca_cq_buf tbuf;
			int tcqe;

			tbuf         = cq->buf;
			tcqe         = cq->ibcq.cqe;
			cq->buf      = cq->resize_buf->buf;
			cq->ibcq.cqe = cq->resize_buf->cqe;

			cq->resize_buf->buf   = tbuf;
			cq->resize_buf->cqe   = tcqe;
			cq->resize_buf->state = CQ_RESIZE_SWAPPED;

			goto repoll;
		}
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}
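
/*
 * Request a completion notification: the HCA will generate an event
 * when the next completion (or next solicited completion) is added to
 * this CQ.  On Tavor the request is a single doorbell write.
 */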
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
{
	u32 dbhi = ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		    MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
		    MTHCA_TAVOR_CQ_DB_REQ_NOT) |
		to_mcq(cq)->cqn;

	mthca_write64(dbhi, 0xffffffff, to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}
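
/*
 * Arbel (mem-free) arming writes the arm doorbell record in host
 * memory first, then rings the MMIO doorbell.  The arm sequence
 * number (cq->arm_sn, bumped on each completion event) lets the HCA
 * tell a fresh arm request apart from a stale one.
 */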
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	__be32 db_rec[2];
	u32 dbhi;
	u32 sn = cq->arm_sn & 3;

	db_rec[0] = cpu_to_be32(cq->cons_index);
	db_rec[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				((flags & IB_CQ_SOLICITED_MASK) ==
				 IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(db_rec, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	dbhi = (sn << 28) |
		((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
		 MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
		 MTHCA_ARBEL_CQ_DB_REQ_NOT) | cq->cqn;

	mthca_write64(dbhi, cq->cons_index,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}
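
/*
 * nent must be a power of two: only log2(nent) is written into the CQ
 * context (via ffs(nent) - 1) below.
 */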
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_ucontext *ctx, u32 pdn,
		  struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;

	cq->ibcq.cqe  = nent - 1;
	cq->is_kernel = !ctx;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		if (cq->is_kernel) {
			cq->arm_sn = 1;

			err = -ENOMEM;

			cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
							     cq->cqn, &cq->set_ci_db);
			if (cq->set_ci_db_index < 0)
				goto err_out_icm;

			cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
							  cq->cqn, &cq->arm_db);
			if (cq->arm_db_index < 0)
				goto err_out_ci;
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		goto err_out_arm;

	cq_context = mailbox->buf;

	if (cq->is_kernel) {
		err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
		if (err)
			goto err_out_mailbox;
	}

	spin_lock_init(&cq->lock);
	cq->refcount = 1;
	init_waitqueue_head(&cq->wait);
	mutex_init(&cq->mutex);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
	if (ctx)
		cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
	else
		cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(pdn);
	cq_context->lkey            = cpu_to_be32(cq->buf.mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	mthca_free_mailbox(dev, mailbox);

	return 0;

err_out_free_mr:
	if (cq->is_kernel)
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_arm:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (cq->is_kernel && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}
static inline int get_cq_refcount(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int c;

	spin_lock_irq(&dev->cq_table.lock);
	c = cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	return c;
}
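
/*
 * Teardown order: move the CQ back to software ownership (HW2SW),
 * remove it from the CQ table, wait for any interrupt handlers still
 * running and for all references to drop, then free the buffers and
 * the CQ number.
 */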
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	struct mthca_mailbox *mailbox;
	int err;
	u8 status;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);

	if (0) {
		__be32 *ctx = mailbox->buf;
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index,
		       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	--cq->refcount;
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	wait_event(cq->wait, !get_cq_refcount(dev, cq));

	if (cq->is_kernel) {
		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
		if (mthca_is_memfree(dev)) {
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		}
	}

	mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	mthca_free(&dev->cq_table.alloc, cq->cqn);
	mthca_free_mailbox(dev, mailbox);
}
int mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}
void mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}