/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iw_cxgb4.h"
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct sk_buff *skb;
        int ret;

        wr_len = sizeof *res_wr + sizeof *res;
        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);
        ret = c4iw_ofld_send(rdev, skb);
        if (!ret)
                ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        dma_free_coherent(&(rdev->lldi.pdev->dev),
                          cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return ret;
}
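/*
 * Both destroy_cq() above and create_cq() below use the same
 * "post a firmware work request, then block on a wr_wait" pattern: the
 * address of the on-stack c4iw_wr_wait is stashed in the WR cookie, and
 * the firmware reply handler uses that cookie to find and wake the
 * sleeper.  A minimal sketch of the idea using the generic completion
 * API follows; the names are illustrative only and this is not the
 * driver's actual c4iw_wr_wait implementation:
 *
 *      struct demo_wr_wait {
 *              struct completion completion;
 *              int ret;                // filled in by the reply handler
 *      };
 *
 *      static int demo_post_and_wait(struct demo_wr_wait *w)
 *      {
 *              init_completion(&w->completion);   // c4iw_init_wr_wait()
 *              // ... post skb with cookie = (unsigned long)w ...
 *              if (!wait_for_completion_timeout(&w->completion, 30 * HZ))
 *                      return -ETIMEDOUT;         // c4iw_wait_for_reply()
 *              return w->ret;
 *      }
 */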
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                     struct c4iw_dev_ucontext *uctx)
{
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct sk_buff *skb;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }
        cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                       &cq->dma_addr, GFP_KERNEL);
        if (!cq->queue) {
                ret = -ENOMEM;
                goto err3;
        }
        dma_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb) {
                ret = -ENOMEM;
                goto err4;
        }
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        FW_WR_COMPL(1));
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        V_FW_RI_RES_WR_IQANUS(0) |
                        V_FW_RI_RES_WR_IQANUD(1) |
                        F_FW_RI_RES_WR_IQANDST |
                        V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        F_FW_RI_RES_WR_IQDROPRSS |
                        V_FW_RI_RES_WR_IQPCIECH(2) |
                        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
                        F_FW_RI_RES_WR_IQO |
                        V_FW_RI_RES_WR_IQESIZE(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        ret = c4iw_ofld_send(rdev, skb);
        if (ret)
                goto err4;
        PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
                cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
                           (cq->cqid << rdev->cqshift);
                cq->ugts &= PAGE_MASK;
        }
        return 0;
err4:
        dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
                          dma_unmap_addr(cq, mapping));
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(FW_RI_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
             wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
             wq, cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(swcqe->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
        int in_use = wq->sq.in_use - count;

        BUG_ON(in_use < 0);
        while (in_use--) {
                swsqe->signaled = 0;
                insert_sq_cqe(wq, cq, swsqe);
                swsqe++;
                if (swsqe == (wq->sq.sw_sq + wq->sq.size))
                        swsqe = wq->sq.sw_sq;
                flushed++;
        }
        return flushed;
}
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
        struct t4_cqe *cqe = NULL, *swcqe;
        int ret;

        PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
        ret = t4_next_hw_cqe(cq, &cqe);
        while (!ret) {
                PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
                     __func__, cq->cidx, cq->sw_pidx);
                swcqe = &cq->sw_queue[cq->sw_pidx];
                *swcqe = *cqe;
                swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                t4_swcq_produce(cq);
                t4_hwcq_consume(cq);
                ret = t4_next_hw_cqe(cq, &cqe);
        }
}
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
                                      wq->sq.oldest_read)) &&
                    (CQE_QPID(cqe) == wq->sq.qid))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        PDBG("%s count zero %d\n", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
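/*
 * The count and flush helpers above are designed to be used together by
 * the QP flush path: first drain the hardware CQ into the software
 * queue, then count the CQEs already buffered for this WQ so the flush
 * routines know how many WRs still need software-generated completions.
 * A simplified sketch of that sequence (the real caller in the QP code
 * also handles locking and completion upcalls; rchp/schp/qhp here stand
 * for the RQ's CQ, the SQ's CQ, and the QP, respectively):
 *
 *      c4iw_flush_hw_cq(&rchp->cq);
 *      c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 *      flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 *
 *      c4iw_flush_hw_cq(&schp->cq);
 *      c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
 *      flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
 */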
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        u16 ptr = wq->sq.cidx;
        int count = wq->sq.in_use;
        int unsignaled = 0;

        swsqe = &wq->sq.sw_sq[ptr];
        while (count--)
                if (!swsqe->signaled) {
                        if (++ptr == wq->sq.size)
                                ptr = 0;
                        swsqe = &wq->sq.sw_sq[ptr];
                        unsignaled++;
                } else if (swsqe->complete) {

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
                             __func__, ptr, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->signaled = 0;
                        wq->sq.in_use -= unsignaled;
                        break;
                } else
                        break;
}
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
                                 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                                 V_CQE_OPCODE(FW_RI_READ_REQ) |
                                 V_CQE_TYPE(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}
/*
 * Advance wq->sq.oldest_read to the next read wr in the SWSQ,
 * or set it to NULL if there is none.
 */
static void advance_oldest_read(struct t4_wq *wq)
{
        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}
/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0             CQE returned ok.
 *     -EAGAIN       CQE skipped, try again.
 *     -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
             " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
             __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
             CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
             CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
             CQE_WRID_LOW(hw_cqe));

        /*
         * Skip CQEs not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /*
                 * If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (!wq->sq.oldest_read) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = t4_wq_in_error(wq);
                t4_set_wq_in_error(wq);
                goto proc_cqe;
        }

        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         * (A worked example follows this function.)
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                PDBG("%s out of order completion going in sw_sq at idx %u\n",
                     __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}
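/*
 * Worked example of the out-of-order handling in poll_cq(): suppose
 * sw_sq holds WRs at indices 0 and 1 posted unsignaled and WR 2 posted
 * signaled, with sq.cidx == 0.  The only hardware CQE arrives for index
 * 2, so CQE_WRID_SQ_IDX(hw_cqe) != sq.cidx and the CQE is parked in
 * sw_sq[2] with complete = 1 (poll_cq returns -EAGAIN).
 * flush_completed_wrs() then walks from cidx: indices 0 and 1 are
 * unsignaled and are skipped (counted), index 2 is complete, so its CQE
 * is marked SWCQE and pushed into the software CQ, and sq.in_use is
 * reduced by the two unsignaled WRs.  The next poll finds the CQE in
 * the SW CQ, and proc_cqe advances sq.cidx to 2, reaping all three WRs
 * at once.
 */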
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *     0                   cqe returned
 *     -ENODATA            CQ empty
 *     -EAGAIN             caller must try again
 *     any other -errno    fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
             "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
             CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
             CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

        /* CQE_TYPE 0 is an RQ (receive) completion; otherwise it's SQ. */
        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;
                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                default:
                        printk(KERN_ERR MOD "Unexpected opcode %d "
                               "in the CQE received for QPID=0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printk(KERN_ERR MOD
                               "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        PDBG("%s ib_cq %p\n", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
                             int vector, struct ib_ucontext *ib_context,
                             struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be a multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = entries * 2;

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
                while (hwentries > T4_MAX_IQ_SIZE) {
                        memsize -= PAGE_SIZE;
                        hwentries = memsize / sizeof *chp->cq.queue;
                }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = virt_to_phys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
             chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}
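/*
 * Example of the sizing logic above (kernel CQ, no ucontext): a caller
 * asking for entries = 100 gets 100 + 1 (status page) + 1 (full/empty
 * disambiguation) = 102, rounded up to 112 for the 16-entry hardware
 * granularity, then doubled to hwentries = 224 to absorb cidx_inc
 * overflow.  After create_cq() succeeds, cq.size drops back to 223
 * (hiding the status page) and the verbs layer is told ibcq.cqe =
 * 112 - 2 = 110 usable entries.
 */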
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}
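/*
 * c4iw_poll_cq() and c4iw_arm_cq() back the ib_poll_cq/ib_req_notify_cq
 * verbs.  For reference, a kernel consumer typically drives them
 * through the generic verbs entry points like this (illustrative only,
 * not part of this driver):
 *
 *      static void demo_cq_event(struct ib_cq *cq, void *ctx)
 *      {
 *              struct ib_wc wc;
 *
 *              // Re-arm first so a completion arriving between the
 *              // drain loop and the next interrupt is not missed.
 *              ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 *              while (ib_poll_cq(cq, 1, &wc) > 0) {
 *                      if (wc.status != IB_WC_SUCCESS)
 *                              pr_err("wr_id %llu failed: %d\n",
 *                                     wc.wr_id, wc.status);
 *                      // ... process wc.opcode / wc.byte_len ...
 *              }
 *      }
 */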