/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		      struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	int ret;

	wr_len = sizeof *res_wr + sizeof *res;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_RESET;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);

	c4iw_init_wr_wait(&wr_wait);
	ret = c4iw_ofld_send(rdev, skb);
	if (!ret)
		ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
	c4iw_put_cqid(rdev, cq->cqid, uctx);
	return ret;
}
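/*
 * Like destroy_cq() above, create_cq() drives the firmware with a
 * FW_RI_RES_WR: the work request is built in an skb, posted on the
 * control queue via c4iw_ofld_send(), and the wr_wait cookie carried
 * in the WR lets the firmware reply wake the caller inside
 * c4iw_wait_for_reply().
 */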
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_pa) {
		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
	int flushed = 0;
	int in_use = wq->rq.in_use - count;

	BUG_ON(in_use < 0);
	PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
	     wq, cq, wq->rq.in_use, count);
	while (in_use--) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
			  struct t4_swsqe *swcqe)
{
	struct t4_cqe cqe;

	PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
	     wq, cq, cq->sw_cidx, cq->sw_pidx);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(swcqe->opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_QPID_V(wq->sq.qid));
	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
}
static void advance_oldest_read(struct t4_wq *wq);

int c4iw_flush_sq(struct c4iw_qp *qhp)
{
	int flushed = 0;
	struct t4_wq *wq = &qhp->wq;
	struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
	struct t4_cq *cq = &chp->cq;
	int idx;
	struct t4_swsqe *swsqe;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	idx = wq->sq.flush_cidx;
	BUG_ON(idx >= wq->sq.size);
	while (idx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[idx];
		BUG_ON(swsqe->flushed);
		swsqe->flushed = 1;
		insert_sq_cqe(wq, cq, swsqe);
		if (wq->sq.oldest_read == swsqe) {
			BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
			advance_oldest_read(wq);
		}
		flushed++;
		if (++idx == wq->sq.size)
			idx = 0;
	}
	wq->sq.flush_cidx += flushed;
	if (wq->sq.flush_cidx >= wq->sq.size)
		wq->sq.flush_cidx -= wq->sq.size;
	return flushed;
}
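/*
 * Walk the software SQ from flush_cidx toward pidx: unsignaled WRs are
 * skipped, completed signaled WRs are moved into the software CQ, and
 * the walk stops at the first signaled WR that has not completed yet,
 * since nothing past it can be in order.
 */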
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
	struct t4_swsqe *swsqe;
	int cidx;

	if (wq->sq.flush_cidx == -1)
		wq->sq.flush_cidx = wq->sq.cidx;
	cidx = wq->sq.flush_cidx;
	BUG_ON(cidx > wq->sq.size);

	while (cidx != wq->sq.pidx) {
		swsqe = &wq->sq.sw_sq[cidx];
		if (!swsqe->signaled) {
			if (++cidx == wq->sq.size)
				cidx = 0;
		} else if (swsqe->complete) {

			BUG_ON(swsqe->flushed);

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
			     __func__, cidx, cq->sw_pidx);
			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
			t4_swcq_produce(cq);
			swsqe->flushed = 1;
			if (++cidx == wq->sq.size)
				cidx = 0;
			wq->sq.flush_cidx = cidx;
		} else
			break;
	}
}
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
				struct t4_cqe *read_cqe)
{
	read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
	read_cqe->len = htonl(wq->sq.oldest_read->read_len);
	read_cqe->header = htonl(CQE_QPID_V(CQE_QPID(hw_cqe)) |
				 CQE_SWCQE_V(SW_CQE(hw_cqe)) |
				 CQE_OPCODE_V(FW_RI_READ_REQ) |
				 CQE_TYPE_V(1));
	read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
static void advance_oldest_read(struct t4_wq *wq)
{
	u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

	if (rptr == wq->sq.size)
		rptr = 0;
	while (rptr != wq->sq.pidx) {
		wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

		if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
			return;
		if (++rptr == wq->sq.size)
			rptr = 0;
	}
	wq->sq.oldest_read = NULL;
}
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if it's a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(CQE_SWCQE_V(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
		return 0;

	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
		return 0;

	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
		return 0;
	return 1;
}
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
	struct t4_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_cidx;
	while (ptr != cq->sw_pidx) {
		cqe = &cq->sw_queue[ptr];
		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		if (++ptr == cq->size)
			ptr = 0;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0		    CQE returned ok.
 *     -EAGAIN		    CQE skipped, try again.
 *     -EOVERFLOW	    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip hw cqe's if the wq is flushed.
	 */
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup. So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If it's not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */
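		/*
		 * For example: if wq->rq.msn is 0x11 and a SEND completes
		 * with MSN 0x21, the hardware's 4-bit check passes (both
		 * values end in 0x1), but the full comparison below catches
		 * the gap and completes the CQE with T4_ERR_MSN.
		 */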
		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here it's a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
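	/*
	 * For example: if cidx sits on an unsignaled WR at index 3 and a
	 * CQE arrives for the signaled WR at index 4, that CQE is parked
	 * in sw_sq[4] and marked complete here; flush_completed_wrs()
	 * then walks past the unsignaled entry and moves the parked CQE
	 * into the SW CQ.
	 */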
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		 * Account for any unsignaled completions completed by
		 * this signaled completion.  In this case, cidx points
		 * to the first unsignaled one, and idx points to the
		 * signaled one.  So adjust in_use based on this delta.
		 * if this is not completing any unsigned wrs, then the
		 * delta will be 0. Handle wrapping also!
		 */
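		/*
		 * Worked example: with sq.size == 16, cidx == 14 and
		 * idx == 2, the unsignaled WRs at indices 14, 15, 0 and 1
		 * are reaped here (in_use -= 16 + 2 - 14 == 4); the
		 * signaled WR at index 2 is consumed by t4_sq_consume()
		 * below.
		 */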
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		if (c4iw_wr_log)
			c4iw_log_wr_stats(wq, hw_cqe);
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			wc->status = IB_WC_FATAL_ERR;
		}
	}
out:
	if (wq) {
		if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
			if (SQ_TYPE(cqe))
				complete(&qhp->sq_drained);
			else
				complete(&qhp->rq_drained);
		}
		spin_unlock(&qhp->lock);
	}
	return ret;
}
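/*
 * Note that c4iw_poll_cq_one() returns -EAGAIN when a CQE was consumed
 * without producing a work completion (e.g. a skipped TERMINATE or an
 * unsignaled read), so the poll loop below retries each wc slot until
 * it gets a real completion or an error.
 */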
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp;
	unsigned long flags;
	int npolled;
	int err = 0;

	chp = to_c4iw_cq(ibcq);

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);
	return !err || err == -ENODATA ? npolled : err;
}
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
	struct c4iw_cq *chp;
	struct c4iw_ucontext *ucontext;

	PDBG("%s ib_cq %p\n", __func__, ib_cq);
	chp = to_c4iw_cq(ib_cq);

	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
	atomic_dec(&chp->refcnt);
	wait_event(chp->wait, !atomic_read(&chp->refcnt));

	ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
				  : NULL;
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
		   chp->destroy_skb);
	chp->destroy_skb = NULL;
	kfree(chp);
	return 0;
}
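/*
 * Note that the destroy path above cannot fail on memory allocation:
 * the skb carrying the FW_RI_RES_WR reset is preallocated below at
 * create time and stashed in chp->destroy_skb.
 */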
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	int entries = attr->cqe;
	int vector = attr->comp_vector;
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret, wr_len;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	if (attr->flags)
		return ERR_PTR(-EINVAL);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
	chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!chp->destroy_skb) {
		ret = -ENOMEM;
		goto err1;
	}

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cdix_inc overflows.
	 */
	hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if it's a user cq.
	 */
	if (ucontext)
		memsize = roundup(memsize, PAGE_SIZE);
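	/*
	 * Worked example, assuming 32-byte CQEs and 4KB pages: a request
	 * for 100 CQEs becomes 102 after the status-page and IQ entries,
	 * rounds up to 112, doubles to hwentries = 224 (if under the
	 * t4_max_iq_size cap), giving memsize = 224 * 32 = 7168 bytes,
	 * which a user CQ then rounds up to 8192.
	 */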
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err2;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err3;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err4;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err5;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err6;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.bar2_pa;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err6:
	kfree(mm2);
err5:
	kfree(mm);
err4:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err3:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
		   chp->destroy_skb);
err2:
	kfree_skb(chp->destroy_skb);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
	return -ENOSYS;
}

int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct c4iw_cq *chp;
	int ret;
	unsigned long flag;

	chp = to_c4iw_cq(ibcq);
	spin_lock_irqsave(&chp->lock, flag);
	ret = t4_arm_cq(&chp->cq,
			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
	spin_unlock_irqrestore(&chp->lock, flag);
	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
		ret = 0;
	return ret;
}