/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_cq.c 1369 2004-12-20 16:17:07Z roland $
 */
#include <linux/init.h>
#include <linux/hardirq.h>

#include <ib_pack.h>	/* IB_OPCODE_* used in mthca_poll_one() */

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE
};

enum {
	MTHCA_CQ_ENTRY_SIZE = 0x20
};
/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mthca_cq_context {
	u32 flags;
	u64 start;
	u32 logsize_usrpage;
	u32 error_eqn;		/* Tavor only */
	u32 comp_eqn;
	u32 pd;
	u32 lkey;
	u32 last_notified_index;
	u32 solicit_producer_index;
	u32 consumer_index;
	u32 producer_index;
	u32 cqn;
	u32 ci_db;		/* Arbel only */
	u32 state_db;		/* Arbel only */
	u32 reserved;
} __attribute__((packed));
#define MTHCA_CQ_STATUS_OK          ( 0 << 28)
#define MTHCA_CQ_STATUS_OVERFLOW    ( 9 << 28)
#define MTHCA_CQ_STATUS_WRITE_FAIL  (10 << 28)
#define MTHCA_CQ_FLAG_TR            ( 1 << 18)
#define MTHCA_CQ_FLAG_OI            ( 1 << 17)
#define MTHCA_CQ_STATE_DISARMED     ( 0 <<  8)
#define MTHCA_CQ_STATE_ARMED        ( 1 <<  8)
#define MTHCA_CQ_STATE_ARMED_SOL    ( 4 <<  8)
#define MTHCA_EQ_STATE_FIRED        (10 <<  8)
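/*
 * Note (inferred from how these constants are combined in
 * mthca_init_cq() below, not from hardware documentation): they are
 * pre-shifted fields of the first word of the CQ context -- status in
 * bits 31:28, the TR/OI flags at bits 18 and 17, and the state field
 * starting at bit 8 -- so they can simply be OR'ed together when the
 * context is built.
 */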
enum {
	MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe
};
enum {
	SYNDROME_LOCAL_LENGTH_ERR        = 0x01,
	SYNDROME_LOCAL_QP_OP_ERR         = 0x02,
	SYNDROME_LOCAL_EEC_OP_ERR        = 0x03,
	SYNDROME_LOCAL_PROT_ERR          = 0x04,
	SYNDROME_WR_FLUSH_ERR            = 0x05,
	SYNDROME_MW_BIND_ERR             = 0x06,
	SYNDROME_BAD_RESP_ERR            = 0x10,
	SYNDROME_LOCAL_ACCESS_ERR        = 0x11,
	SYNDROME_REMOTE_INVAL_REQ_ERR    = 0x12,
	SYNDROME_REMOTE_ACCESS_ERR       = 0x13,
	SYNDROME_REMOTE_OP_ERR           = 0x14,
	SYNDROME_RETRY_EXC_ERR           = 0x15,
	SYNDROME_RNR_RETRY_EXC_ERR       = 0x16,
	SYNDROME_LOCAL_RDD_VIOL_ERR      = 0x20,
	SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21,
	SYNDROME_REMOTE_ABORTED_ERR      = 0x22,
	SYNDROME_INVAL_EECN_ERR          = 0x23,
	SYNDROME_INVAL_EEC_STATE_ERR     = 0x24
};
struct mthca_cqe {
	u32 my_qpn;
	u32 my_ee;
	u32 rqpn;
	u16 sl_g_mlpath;
	u16 rlid;
	u32 imm_etype_pkey_eec;
	u32 byte_cnt;
	u32 wqe;
	u8  opcode;
	u8  is_send;
	u8  reserved;
	u8  owner;
};
struct mthca_err_cqe {
	u32 my_qpn;
	u32 reserved1[3];
	u8  syndrome;
	u8  reserved2;
	u16 db_cnt;
	u32 reserved3;
	u32 wqe;
	u8  opcode;
	u8  reserved4[2];
	u8  owner;
};
#define MTHCA_CQ_ENTRY_OWNER_SW      (0 << 7)
#define MTHCA_CQ_ENTRY_OWNER_HW      (1 << 7)

#define MTHCA_TAVOR_CQ_DB_INC_CI       (1 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL  (3 << 24)
#define MTHCA_TAVOR_CQ_DB_SET_CI       (4 << 24)
#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24)

#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL  (1 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT      (2 << 24)
#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24)
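/*
 * Note (inferred from the doorbell writes later in this file): these
 * values occupy the command byte (bits 31:24) of the first CQ doorbell
 * word, with the CQN OR'ed into the low 24 bits; the second word
 * carries a command-specific argument, e.g. "incr - 1" for INC_CI in
 * update_cons_index() or 0xffffffff for a plain request-notification
 * in mthca_tavor_arm_cq().
 */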
static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry)
{
	if (cq->is_direct)
		return cq->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
	else
		return cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
			+ (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE;
}
static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i)
{
	struct mthca_cqe *cqe = get_cqe(cq, i);
	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
}
static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq)
{
	return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe);
}
static inline void set_cqe_hw(struct mthca_cqe *cqe)
{
	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
}
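/*
 * Ownership protocol, as the helpers above imply: each 32-byte CQE has
 * an owner bit in its last byte.  While MTHCA_CQ_ENTRY_OWNER_HW is set
 * the entry belongs to the HCA; hardware clears the bit when it posts
 * a completion, cqe_sw() then hands the entry to the driver, and
 * set_cqe_hw() gives it back once it has been polled.  Because the CQ
 * has a power-of-two number of entries and ibcq.cqe is nent - 1,
 * "cons_index & cq->ibcq.cqe" in next_cqe_sw() is a cheap ring wrap.
 */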
/*
 * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index
 * should be correct before calling update_cons_index().
 */
static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq,
				     int incr)
{
	u32 doorbell[2];

	if (mthca_is_memfree(dev)) {
		*cq->set_ci_db = cpu_to_be32(cq->cons_index);
		wmb();
	} else {
		doorbell[0] = cpu_to_be32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn);
		doorbell[1] = cpu_to_be32(incr - 1);

		mthca_write64(doorbell,
			      dev->kar + MTHCA_CQ_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}
}
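/*
 * Note: the two branches above reflect the two doorbell styles.  On
 * mem-free (Arbel) HCAs the consumer index lives in a doorbell record
 * in host memory (cq->set_ci_db) that the HCA reads on its own, so a
 * plain store plus a write barrier is enough.  On Tavor the driver
 * rings an MMIO doorbell through the UAR ("kar") with an INC_CI
 * command; passing incr - 1 suggests the command adds "argument + 1"
 * to the consumer index, though that reading is an inference from the
 * code rather than something stated in this file.
 */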
void mthca_cq_event(struct mthca_dev *dev, u32 cqn)
{
	struct mthca_cq *cq;

	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));

	if (!cq) {
		mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn)
{
	struct mthca_cq *cq;
	struct mthca_cqe *cqe;
	int prod_index;
	int nfreed = 0;

	spin_lock_irq(&dev->cq_table.lock);
	cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));
	if (cq)
		atomic_inc(&cq->refcount);
	spin_unlock_irq(&dev->cq_table.lock);

	if (!cq)
		return;

	spin_lock_irq(&cq->lock);

	/*
	 * First we need to find the current producer index, so we
	 * know where to start cleaning from.  It doesn't matter if HW
	 * adds new entries after this loop -- the QP we're worried
	 * about is already in RESET, so the new entries won't come
	 * from our QP and therefore don't need to be checked.
	 */
	for (prod_index = cq->cons_index;
	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
	     ++prod_index)
		if (prod_index == cq->cons_index + cq->ibcq.cqe)
			break;

	mthca_dbg(dev, "Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
		  qpn, cqn, cq->cons_index, prod_index);

	/*
	 * Now sweep backwards through the CQ, removing CQ entries
	 * that match our QP by copying older entries on top of them.
	 */
	while (prod_index > cq->cons_index) {
		cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
		if (cqe->my_qpn == cpu_to_be32(qpn))
			++nfreed;
		else if (nfreed)
			memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
				       cq->ibcq.cqe),
			       cqe,
			       MTHCA_CQ_ENTRY_SIZE);
		--prod_index;
	}

	if (nfreed) {
		wmb();
		cq->cons_index += nfreed;
		update_cons_index(dev, cq, nfreed);
	}

	spin_unlock_irq(&cq->lock);
	if (atomic_dec_and_test(&cq->refcount))
		wake_up(&cq->wait);
}
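/*
 * Rough illustration of the sweep above (not from the original file):
 * with cons_index = 0, hardware still owning entry 4 and beyond, and
 * CQEs 1 and 2 belonging to the QP being cleaned,
 *
 *	before:  ci = 0, CQEs 0..3 = [A] [qpn] [qpn] [B]
 *	after:   ci = 2, CQEs 2..3 = [A] [B],  nfreed = 2
 *
 * surviving entries are copied nfreed slots towards the producer side
 * and the consumer index advances past the freed slots, so the
 * relative order of the remaining completions is preserved.
 */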
static int handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
			    struct mthca_qp *qp, int wqe_index, int is_send,
			    struct mthca_err_cqe *cqe,
			    struct ib_wc *entry, int *free_cqe)
{
	int err;
	int dbd;
	u32 new_wqe;

	if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
		int j;

		mthca_dbg(dev, "%x/%d: error CQE -> QPN %06x, WQE @ %08x\n",
			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
			  be32_to_cpu(cqe->wqe));

		for (j = 0; j < 8; ++j)
			printk(KERN_DEBUG "  [%2x] %08x\n",
			       j * 4, be32_to_cpu(((u32 *) cqe)[j]));
	}

	/*
	 * For completions in error, only work request ID, status (and
	 * freed resource count for RD) have to be set.
	 */
	switch (cqe->syndrome) {
	case SYNDROME_LOCAL_LENGTH_ERR:
		entry->status = IB_WC_LOC_LEN_ERR;
		break;
	case SYNDROME_LOCAL_QP_OP_ERR:
		entry->status = IB_WC_LOC_QP_OP_ERR;
		break;
	case SYNDROME_LOCAL_EEC_OP_ERR:
		entry->status = IB_WC_LOC_EEC_OP_ERR;
		break;
	case SYNDROME_LOCAL_PROT_ERR:
		entry->status = IB_WC_LOC_PROT_ERR;
		break;
	case SYNDROME_WR_FLUSH_ERR:
		entry->status = IB_WC_WR_FLUSH_ERR;
		break;
	case SYNDROME_MW_BIND_ERR:
		entry->status = IB_WC_MW_BIND_ERR;
		break;
	case SYNDROME_BAD_RESP_ERR:
		entry->status = IB_WC_BAD_RESP_ERR;
		break;
	case SYNDROME_LOCAL_ACCESS_ERR:
		entry->status = IB_WC_LOC_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_REQ_ERR:
		entry->status = IB_WC_REM_INV_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ACCESS_ERR:
		entry->status = IB_WC_REM_ACCESS_ERR;
		break;
	case SYNDROME_REMOTE_OP_ERR:
		entry->status = IB_WC_REM_OP_ERR;
		break;
	case SYNDROME_RETRY_EXC_ERR:
		entry->status = IB_WC_RETRY_EXC_ERR;
		break;
	case SYNDROME_RNR_RETRY_EXC_ERR:
		entry->status = IB_WC_RNR_RETRY_EXC_ERR;
		break;
	case SYNDROME_LOCAL_RDD_VIOL_ERR:
		entry->status = IB_WC_LOC_RDD_VIOL_ERR;
		break;
	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
		entry->status = IB_WC_REM_INV_RD_REQ_ERR;
		break;
	case SYNDROME_REMOTE_ABORTED_ERR:
		entry->status = IB_WC_REM_ABORT_ERR;
		break;
	case SYNDROME_INVAL_EECN_ERR:
		entry->status = IB_WC_INV_EECN_ERR;
		break;
	case SYNDROME_INVAL_EEC_STATE_ERR:
		entry->status = IB_WC_INV_EEC_STATE_ERR;
		break;
	default:
		entry->status = IB_WC_GENERAL_ERR;
		break;
	}

	err = mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
	if (err)
		return err;

	/*
	 * If we're at the end of the WQE chain, or we've used up our
	 * doorbell count, free the CQE.  Otherwise just update it for
	 * the next poll operation.
	 */
	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
		return 0;

	cqe->db_cnt   = cpu_to_be16(be16_to_cpu(cqe->db_cnt) - dbd);
	cqe->wqe      = new_wqe;
	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;

	*free_cqe = 0;

	return 0;
}
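/*
 * Note (summarising the tail of handle_error_cqe() above): when a WQE
 * fails, the remaining WQEs on that work queue also have to complete,
 * as flush errors.  Rather than consuming one hardware CQE per flushed
 * WQE, the driver recycles this error CQE: it rewrites it as a
 * WR_FLUSH_ERR entry pointing at the next WQE and clears *free_cqe so
 * mthca_poll_one() leaves it in place, until the doorbell count is
 * used up or the end of the WQE chain is reached.
 */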
static void dump_cqe(struct mthca_cqe *cqe)
{
	int j;

	for (j = 0; j < 8; ++j)
		printk(KERN_DEBUG "  [%2x] %08x\n",
		       j * 4, be32_to_cpu(((u32 *) cqe)[j]));
}
static inline int mthca_poll_one(struct mthca_dev *dev,
				 struct mthca_cq *cq,
				 struct mthca_qp **cur_qp,
				 int *freed,
				 struct ib_wc *entry)
{
	struct mthca_wq *wq;
	struct mthca_cqe *cqe;
	int wqe_index;
	int is_error;
	int is_send;
	int free_cqe = 1;
	int err = 0;

	cqe = next_cqe_sw(cq);
	if (!cqe)
		return -EAGAIN;
	/*
	 * Make sure we read CQ entry contents after we've checked the
	 * ownership bit.
	 */
	rmb();
	mthca_dbg(dev, "%x/%d: CQE -> QPN %06x, WQE @ %08x\n",
		  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
		  be32_to_cpu(cqe->wqe));
	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
		MTHCA_ERROR_CQE_OPCODE_MASK;
	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
		/*
		 * We do not have to take the QP table lock here,
		 * because CQs will be locked while QPs are removed
		 * from the table.
		 */
		*cur_qp = mthca_array_get(&dev->qp_table.qp,
					  be32_to_cpu(cqe->my_qpn) &
					  (dev->limits.num_qps - 1));
		if (!*cur_qp) {
			mthca_warn(dev, "CQ entry for unknown QP %06x\n",
				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
			err = -EINVAL;
			goto out;
		}
	}
	entry->qp_num = (*cur_qp)->qpn;

	if (is_send) {
		wq = &(*cur_qp)->sq;
		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
			     >> wq->wqe_shift);
		entry->wr_id = (*cur_qp)->wrid[wqe_index +
					       (*cur_qp)->rq.max];
	} else {
		wq = &(*cur_qp)->rq;
		wqe_index = be32_to_cpu(cqe->wqe) >> wq->wqe_shift;
		entry->wr_id = (*cur_qp)->wrid[wqe_index];
	}
	if (wq->last_comp < wqe_index)
		wq->tail += wqe_index - wq->last_comp;
	else
		wq->tail += wqe_index + wq->max - wq->last_comp;

	wq->last_comp = wqe_index;
	mthca_dbg(dev, "%s completion for QP %06x, index %d (nr %d)\n",
		  is_send ? "Send" : "Receive",
		  (*cur_qp)->qpn, wqe_index, wq->max);
	if (is_error) {
		err = handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send,
				       (struct mthca_err_cqe *) cqe,
				       entry, &free_cqe);
		goto out;
	}
	if (is_send) {
		entry->wc_flags = 0;
		switch (cqe->opcode) {
		case MTHCA_OPCODE_RDMA_WRITE:
			entry->opcode    = IB_WC_RDMA_WRITE;
			break;
		case MTHCA_OPCODE_RDMA_WRITE_IMM:
			entry->opcode    = IB_WC_RDMA_WRITE;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_SEND:
			entry->opcode    = IB_WC_SEND;
			break;
		case MTHCA_OPCODE_SEND_IMM:
			entry->opcode    = IB_WC_SEND;
			entry->wc_flags |= IB_WC_WITH_IMM;
			break;
		case MTHCA_OPCODE_RDMA_READ:
			entry->opcode    = IB_WC_RDMA_READ;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_CS:
			entry->opcode    = IB_WC_COMP_SWAP;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_ATOMIC_FA:
			entry->opcode    = IB_WC_FETCH_ADD;
			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
			break;
		case MTHCA_OPCODE_BIND_MW:
			entry->opcode    = IB_WC_BIND_MW;
			break;
		default:
			entry->opcode    = MTHCA_OPCODE_INVALID;
			break;
		}
	} else {
		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
		switch (cqe->opcode & 0x1f) {
		case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode   = IB_WC_RECV;
			break;
		case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE:
		case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE:
			entry->wc_flags = IB_WC_WITH_IMM;
			entry->imm_data = cqe->imm_etype_pkey_eec;
			entry->opcode   = IB_WC_RECV_RDMA_WITH_IMM;
			break;
		default:
			entry->wc_flags = 0;
			entry->opcode   = IB_WC_RECV;
			break;
		}
		entry->slid           = be16_to_cpu(cqe->rlid);
		entry->sl             = be16_to_cpu(cqe->sl_g_mlpath) >> 12;
		entry->src_qp         = be32_to_cpu(cqe->rqpn) & 0xffffff;
		entry->dlid_path_bits = be16_to_cpu(cqe->sl_g_mlpath) & 0x7f;
		entry->pkey_index     = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
		entry->wc_flags      |= be16_to_cpu(cqe->sl_g_mlpath) & 0x80 ?
					IB_WC_GRH : 0;
	}
	entry->status = IB_WC_SUCCESS;

 out:
	if (likely(free_cqe)) {
		set_cqe_hw(cqe);
		++(*freed);
		++cq->cons_index;
	}

	return err;
}
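/*
 * Note: *cur_qp is carried across calls within one mthca_poll_cq()
 * pass so that back-to-back completions for the same QP skip the QP
 * table lookup.  Per the comment in mthca_poll_one(), this is only
 * safe because cq->lock is held for the whole poll and QPs are removed
 * from the table with their CQs locked.
 */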
int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
		  struct ib_wc *entry)
{
	struct mthca_dev *dev = to_mdev(ibcq->device);
	struct mthca_cq *cq = to_mcq(ibcq);
	struct mthca_qp *qp = NULL;
	unsigned long flags;
	int err = 0;
	int freed = 0;
	int npolled;

	spin_lock_irqsave(&cq->lock, flags);

	for (npolled = 0; npolled < num_entries; ++npolled) {
		err = mthca_poll_one(dev, cq, &qp,
				     &freed, entry + npolled);
		if (err)
			break;
	}

	if (freed) {
		wmb();
		update_cons_index(dev, cq, freed);
	}

	spin_unlock_irqrestore(&cq->lock, flags);

	return err == 0 || err == -EAGAIN ? npolled : err;
}
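/*
 * A minimal consumer-side sketch of how this is reached (assumes the
 * usual kernel verbs API; handle_completion() is a hypothetical
 * callback, not code from this file):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, 16, wc)) > 0)
 *		for (i = 0; i < n; ++i)
 *			handle_completion(&wc[i]);
 *
 * mthca_poll_cq() returns the number of entries polled; -EAGAIN from
 * mthca_poll_one() just means "ring empty" and is folded into that
 * count, while any other error is returned as a negative errno.
 */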
int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
{
	u32 doorbell[2];

	doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ?
				   MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
				   MTHCA_TAVOR_CQ_DB_REQ_NOT)      |
				  to_mcq(cq)->cqn);
	doorbell[1] = 0xffffffff;

	mthca_write64(doorbell,
		      to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock));

	return 0;
}
int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
{
	struct mthca_cq *cq = to_mcq(ibcq);
	u32 doorbell[2];
	u32 sn;
	u32 ci;

	sn = cq->arm_sn & 3;
	ci = cpu_to_be32(cq->cons_index);

	doorbell[0] = ci;
	doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
				  (notify == IB_CQ_SOLICITED ? 1 : 2));

	mthca_write_db_rec(doorbell, cq->arm_db);

	/*
	 * Make sure that the doorbell record in host memory is
	 * written before ringing the doorbell via PCI MMIO.
	 */
	wmb();

	doorbell[0] = cpu_to_be32((sn << 28)                       |
				  (notify == IB_CQ_SOLICITED ?
				   MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
				   MTHCA_ARBEL_CQ_DB_REQ_NOT)      |
				  cq->cqn);
	doorbell[1] = ci;

	mthca_write64(doorbell,
		      to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL,
		      MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock));

	return 0;
}
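/*
 * Note: arming a mem-free CQ is a two step operation.  The doorbell
 * record written with mthca_write_db_rec() tells the HCA which
 * consumer index the request is relative to and whether only solicited
 * completions should trigger an event; the MMIO write afterwards
 * actually rings the doorbell.  The sequence number carried in both
 * writes appears to let the HCA pair them up -- that reading is an
 * inference from the code, not from documentation quoted here.
 */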
static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
{
	int i;
	int size;

	if (cq->is_direct)
		pci_free_consistent(dev->pdev,
				    (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
				    cq->queue.direct.buf,
				    pci_unmap_addr(&cq->queue.direct,
						   mapping));
	else {
		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
			if (cq->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    cq->queue.page_list[i].buf,
						    pci_unmap_addr(&cq->queue.page_list[i],
								   mapping));

		kfree(cq->queue.page_list);
	}
}
static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
			      struct mthca_cq *cq)
{
	int err = -ENOMEM;
	int npages, shift;
	u64 *dma_list = NULL;
	dma_addr_t t;
	int i;

	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
		cq->is_direct = 1;
		npages        = 1;
		shift         = get_order(size) + PAGE_SHIFT;

		cq->queue.direct.buf = pci_alloc_consistent(dev->pdev,
							    size, &t);
		if (!cq->queue.direct.buf)
			return -ENOMEM;

		pci_unmap_addr_set(&cq->queue.direct, mapping, t);

		memset(cq->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		cq->is_direct = 0;
		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
		shift         = PAGE_SHIFT;

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			return -ENOMEM;

		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
					      GFP_KERNEL);
		if (!cq->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i)
			cq->queue.page_list[i].buf = NULL;

		for (i = 0; i < npages; ++i) {
			cq->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!cq->queue.page_list[i].buf)
				goto err_free;

			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);

			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);

			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
				  dma_list, shift, npages,
				  0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &cq->mr);
	if (err)
		goto err_free;

	kfree(dma_list);

	return 0;

err_free:
	mthca_free_cq_buf(dev, cq);

err_out:
	kfree(dma_list);

	return err;
}
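/*
 * Note on the buffer layout above: CQs up to MTHCA_MAX_DIRECT_CQ_SIZE
 * use a single physically contiguous ("direct") allocation, larger
 * ones an array of PAGE_SIZE chunks.  In both cases dma_list[] ends up
 * describing the buffer one "page" at a time so mthca_mr_alloc_phys()
 * can build the memory region the HCA uses to address the CQEs; the
 * while loop in the direct case merely shrinks the reported page size
 * until it matches the allocation's actual alignment.
 */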
int mthca_init_cq(struct mthca_dev *dev, int nent,
		  struct mthca_cq *cq)
{
	int size = nent * MTHCA_CQ_ENTRY_SIZE;
	void *mailbox = NULL;
	struct mthca_cq_context *cq_context;
	int err = -ENOMEM;
	u8 status;
	int i;

	might_sleep();

	cq->ibcq.cqe = nent - 1;

	cq->cqn = mthca_alloc(&dev->cq_table.alloc);
	if (cq->cqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		cq->arm_sn = 1;

		err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
		if (err)
			goto err_out;

		err = -ENOMEM;

		cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
						     cq->cqn, &cq->set_ci_db);
		if (cq->set_ci_db_index < 0)
			goto err_out_icm;

		cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
						  cq->cqn, &cq->arm_db);
		if (cq->arm_db_index < 0)
			goto err_out_ci;
	}

	mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox)
		goto err_out_mailbox;

	cq_context = MAILBOX_ALIGN(mailbox);

	err = mthca_alloc_cq_buf(dev, size, cq);
	if (err)
		goto err_out_mailbox;

	for (i = 0; i < nent; ++i)
		set_cqe_hw(get_cqe(cq, i));

	spin_lock_init(&cq->lock);
	atomic_set(&cq->refcount, 1);
	init_waitqueue_head(&cq->wait);

	memset(cq_context, 0, sizeof *cq_context);
	cq_context->flags           = cpu_to_be32(MTHCA_CQ_STATUS_OK      |
						  MTHCA_CQ_STATE_DISARMED |
						  MTHCA_CQ_FLAG_TR);
	cq_context->start           = cpu_to_be64(0);
	cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
						  dev->driver_uar.index);
	cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
	cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
	cq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
	cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
	cq_context->cqn             = cpu_to_be32(cq->cqn);

	if (mthca_is_memfree(dev)) {
		cq_context->ci_db    = cpu_to_be32(cq->set_ci_db_index);
		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
	}

	err = mthca_SW2HW_CQ(dev, cq_context, cq->cqn, &status);
	if (err) {
		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
		goto err_out_free_mr;
	}

	if (status) {
		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
			   status);
		err = -EINVAL;
		goto err_out_free_mr;
	}

	spin_lock_irq(&dev->cq_table.lock);
	if (mthca_array_set(&dev->cq_table.cq,
			    cq->cqn & (dev->limits.num_cqs - 1),
			    cq)) {
		spin_unlock_irq(&dev->cq_table.lock);
		goto err_out_free_mr;
	}
	spin_unlock_irq(&dev->cq_table.lock);

	cq->cons_index = 0;

	kfree(mailbox);

	return 0;

err_out_free_mr:
	mthca_free_mr(dev, &cq->mr);
	mthca_free_cq_buf(dev, cq);

err_out_mailbox:
	kfree(mailbox);

	if (mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);

err_out_ci:
	if (mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);

err_out_icm:
	mthca_table_put(dev, dev->cq_table.table, cq->cqn);

err_out:
	mthca_free(&dev->cq_table.alloc, cq->cqn);

	return err;
}
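/*
 * Note: mthca_init_cq() assumes nent is a power of two -- the context
 * log-size is programmed as ffs(nent) - 1 and the poll path masks
 * cons_index with ibcq.cqe (nent - 1), neither of which works for
 * other sizes.  Presumably the verbs layer rounds the requested CQ
 * size up before calling in here.
 */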
void mthca_free_cq(struct mthca_dev *dev,
		   struct mthca_cq *cq)
{
	void *mailbox;
	int err;
	u8 status;

	might_sleep();

	mailbox = kmalloc(sizeof (struct mthca_cq_context) + MTHCA_CMD_MAILBOX_EXTRA,
			  GFP_KERNEL);
	if (!mailbox) {
		mthca_warn(dev, "No memory for mailbox to free CQ.\n");
		return;
	}

	err = mthca_HW2SW_CQ(dev, MAILBOX_ALIGN(mailbox), cq->cqn, &status);
	if (err)
		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
	else if (status)
		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n",
			   status);

	if (0) {
		u32 *ctx = MAILBOX_ALIGN(mailbox);
		int j;

		printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
		       cq->cqn, cq->cons_index, !!next_cqe_sw(cq));
		for (j = 0; j < 16; ++j)
			printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
	}

	spin_lock_irq(&dev->cq_table.lock);
	mthca_array_clear(&dev->cq_table.cq,
			  cq->cqn & (dev->limits.num_cqs - 1));
	spin_unlock_irq(&dev->cq_table.lock);

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X)
		synchronize_irq(dev->eq_table.eq[MTHCA_EQ_COMP].msi_x_vector);
	else
		synchronize_irq(dev->pdev->irq);

	atomic_dec(&cq->refcount);
	wait_event(cq->wait, !atomic_read(&cq->refcount));

	mthca_free_mr(dev, &cq->mr);
	mthca_free_cq_buf(dev, cq);

	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
		mthca_table_put(dev, dev->cq_table.table, cq->cqn);
	}

	mthca_free(&dev->cq_table.alloc, cq->cqn);

	kfree(mailbox);
}
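/*
 * Note on the teardown ordering above: the CQ is first handed back by
 * hardware (HW2SW_CQ) and removed from cq_table so no new lookups can
 * find it, the completion interrupt is synchronized so a running
 * mthca_cq_event() has finished, and the refcount drop plus wait_event
 * waits out any remaining users such as mthca_cq_clean() before the
 * buffers, doorbell records and CQN are finally released.
 */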
int __devinit mthca_init_cq_table(struct mthca_dev *dev)
{
	int err;

	spin_lock_init(&dev->cq_table.lock);

	err = mthca_alloc_init(&dev->cq_table.alloc,
			       dev->limits.num_cqs,
			       (1 << 24) - 1,
			       dev->limits.reserved_cqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->cq_table.cq,
			       dev->limits.num_cqs);
	if (err)
		mthca_alloc_cleanup(&dev->cq_table.alloc);

	return err;
}
void __devexit mthca_cleanup_cq_table(struct mthca_dev *dev)
{
	mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs);
	mthca_alloc_cleanup(&dev->cq_table.alloc);
}