/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");
static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");
int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");
static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
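/*
 * db_delay_usecs, db_fc_threshold and db_coalescing_threshold tune the
 * doorbell flow-control logic below: c4iw_create_qp()/c4iw_destroy_qp()
 * move the device between NORMAL and FLOW_CONTROL doorbell states around
 * db_fc_threshold QPs, and max_fr_immd selects DSGL vs. immediate PBL data
 * in build_fastreg().
 */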
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  pci_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	pci_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}
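/*
 * For user QPs, try to place the SQ in on-chip queue memory first and fall
 * back to host DMA memory if the on-chip pool is exhausted or unsupported.
 */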
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
	dealloc_sq(rdev, &wq->sq);
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
	kfree(wq->rq.sw_rq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return 0;
}
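/*
 * create_qp() allocates the SQ/RQ qids, software shadow rings, RQT and queue
 * memory, then posts a single FW_RI_RES_WR carrying two resource commands
 * (one for the SQ EQ, one for the RQ EQ) and sleeps on wr_wait until the
 * firmware acknowledges it.
 */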
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				       GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
	     __func__, wq->sq.queue,
	     (unsigned long long)virt_to_phys(wq->sq.queue),
	     wq->rq.queue,
	     (unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
			     (wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
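/*
 * Copy the SGE payload inline into the immediate data area of the WQE,
 * wrapping at the end of the SQ ring and zero-padding up to a 16-byte
 * boundary so the WQE length stays a multiple of 16.
 */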
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof *immdp, 16) - (plen + sizeof *immdp);
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
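/*
 * Build an immediate SGL: each SGE becomes two 64-bit flits (lkey|length,
 * then the address), wrapping from queue_end back to queue_start.  plenp may
 * be NULL when the caller (the RQ path) does not need the total length.
 */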
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				V_FW_RI_SEND_WR_SENDOP(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->send + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->send + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	wqe->write.r2 = 0;
	wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
	wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof wqe->write + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof wqe->write + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge) {
		wqe->read.stag_src = cpu_to_be32(wr->wr.rdma.rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(wr->wr.rdma.remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)wr->wr.rdma.remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->read, 16);
	return 0;
}
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof wqe->recv +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
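/*
 * Build a FW_RI_FR_NSMR_WR fast-register WQE.  On T5 devices with DSGL
 * enabled, a PBL larger than max_fr_immd is referenced via a DSGL pointing
 * at the page list's DMA address; otherwise the PBL is copied into the WQE
 * as immediate data, wrapping around the SQ ring as needed.
 */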
static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
			 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(wr->wr.fast_reg.page_list_len * sizeof(u64), 32);
	int rem;

	if (wr->wr.fast_reg.page_list_len > T4_MAX_FR_DEPTH)
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = wr->wr.fast_reg.page_shift - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->wr.fast_reg.access_flags);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(wr->wr.fast_reg.length);
	wqe->fr.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
	wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
					0xffffffff);

	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
		struct c4iw_fr_page_list *c4pl =
			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
				cpu_to_be64((u64)
				wr->wr.fast_reg.page_list->page_list[i]);
		}

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
			*p = cpu_to_be64(
				(u64)wr->wr.fast_reg.page_list->page_list[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		BUG_ON(rem < 0);
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}
static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof wqe->inv, 16);
	return 0;
}
void c4iw_qp_add_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	atomic_inc(&(to_c4iw_qp(qp)->refcnt));
}
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	PDBG("%s ib_qp %p\n", __func__, qp);
	if (atomic_dec_and_test(&(to_c4iw_qp(qp)->refcnt)))
		wake_up(&(to_c4iw_qp(qp)->wait));
}
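/*
 * Post send work requests: for each WR, build the WQE in place at the SQ
 * producer index, record bookkeeping in the software SQ entry (swsqe), and
 * ring the kernel doorbell once at the end unless doorbells are currently
 * disabled by flow control.
 */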
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	union t4_wr *wqe;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
		      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			else
				fw_flags = 0;
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_FAST_REG_MR:
			fw_opcode = FW_RI_FR_NSMR_WR;
			swsqe->opcode = FW_RI_FAST_REGISTER;
			err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
					    is_t5(
					    qhp->rhp->rdev.lldi.adapter_type) ?
					    1 : 0);
			break;
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			break;
		default:
			PDBG("%s post of type=%d TBD!\n", __func__,
			     wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
		swsqe->wr_id = wr->wr_id;

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		PDBG("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
		     __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
		     swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_sq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
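/*
 * Post receive work requests: the RQ-side mirror of c4iw_post_send().  Each
 * WR becomes a FW_RI_RECV_WR whose wrid is the RQ producer index, and the
 * caller's wr_id is stashed in the software RQ entry at that index for
 * completion processing.
 */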
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);
	if (t4_wq_in_error(&qhp->wq)) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -EINVAL;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		PDBG("%s cookie 0x%llx pidx %u\n", __func__,
		     (unsigned long long) wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (t4_wq_db_enabled(&qhp->wq))
		t4_ring_rq_db(&qhp->wq, idx);
	spin_unlock_irqrestore(&qhp->lock, flag);
	return err;
}
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw, struct ib_mw_bind *mw_bind)
{
	return -ENOSYS;
}
static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
				    u8 *ecode)
{
	int status;
	int tagged;
	int opcode;
	int rqtype;
	int send_inv;

	if (!err_cqe) {
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		return;
	}

	status = CQE_STATUS(err_cqe);
	opcode = CQE_OPCODE(err_cqe);
	rqtype = RQ_TYPE(err_cqe);
	send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
		   (opcode == FW_RI_SEND_WITH_SE_INV);
	tagged = (opcode == FW_RI_RDMA_WRITE) ||
		 (rqtype && (opcode == FW_RI_READ_RESP));

	switch (status) {
	case T4_ERR_STAG:
		if (send_inv) {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
			*ecode = RDMAP_CANT_INV_STAG;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_INV_STAG;
		}
		break;
	case T4_ERR_PDID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		if ((opcode == FW_RI_SEND_WITH_INV) ||
		    (opcode == FW_RI_SEND_WITH_SE_INV))
			*ecode = RDMAP_CANT_INV_STAG;
		else
			*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_QPID:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_STAG_NOT_ASSOC;
		break;
	case T4_ERR_ACCESS:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_ACC_VIOL;
		break;
	case T4_ERR_WRAP:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
		*ecode = RDMAP_TO_WRAP;
		break;
	case T4_ERR_BOUND:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_BASE_BOUNDS;
		} else {
			*layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
			*ecode = RDMAP_BASE_BOUNDS;
		}
		break;
	case T4_ERR_INVALIDATE_SHARED_MR:
	case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_CANT_INV_STAG;
		break;
	case T4_ERR_ECC:
	case T4_ERR_ECC_PSTAG:
	case T4_ERR_INTERNAL_ERR:
		*layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_OUT_OF_RQE:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_NOBUF;
		break;
	case T4_ERR_PBL_ADDR_BOUND:
		*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
		*ecode = DDPT_BASE_BOUNDS;
		break;
	case T4_ERR_CRC:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_CRC_ERR;
		break;
	case T4_ERR_MARKER:
		*layer_type = LAYER_MPA|DDP_LLP;
		*ecode = MPA_MARKER_ERR;
		break;
	case T4_ERR_PDU_LEN_ERR:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_MSG_TOOBIG;
		break;
	case T4_ERR_DDP_VERSION:
		if (tagged) {
			*layer_type = LAYER_DDP|DDP_TAGGED_ERR;
			*ecode = DDPT_INV_VERS;
		} else {
			*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
			*ecode = DDPU_INV_VERS;
		}
		break;
	case T4_ERR_RDMA_VERSION:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_VERS;
		break;
	case T4_ERR_OPCODE:
		*layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
		*ecode = RDMAP_INV_OPCODE;
		break;
	case T4_ERR_DDP_QUEUE_NUM:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_QN;
		break;
	case T4_ERR_MSN:
	case T4_ERR_MSN_GAP:
	case T4_ERR_MSN_RANGE:
	case T4_ERR_IRD_OVERFLOW:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MSN_RANGE;
		break;
	case T4_ERR_TBIT:
		*layer_type = LAYER_DDP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	case T4_ERR_MO:
		*layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
		*ecode = DDPU_INV_MO;
		break;
	default:
		*layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
		*ecode = 0;
		break;
	}
}
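/*
 * Send a TERMINATE message to the peer.  The layer/etype and ecode come from
 * qhp->attr when an MPA-layer error was already recorded by the driver,
 * otherwise they are derived from the error CQE via build_term_codes().
 */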
static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
			   gfp_t gfp)
{
	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	struct terminate_message *term;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, gfp);
	if (!skb)
		return;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
	term = (struct terminate_message *)wqe->u.terminate.termmsg;
	if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
		term->layer_etype = qhp->attr.layer_etype;
		term->ecode = qhp->attr.ecode;
	} else
		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
	c4iw_ofld_send(&qhp->rhp->rdev, skb);
}
/*
 * Assumes qhp lock is held.
 */
static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
		       struct c4iw_cq *schp)
{
	int count;
	int flushed;
	unsigned long flag;

	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&rchp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&rchp->cq);
	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&rchp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}

	/* locking hierarchy: cq lock first, then qp lock. */
	spin_lock_irqsave(&schp->lock, flag);
	spin_lock(&qhp->lock);
	c4iw_flush_hw_cq(&schp->cq);
	c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
	flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
	spin_unlock(&qhp->lock);
	spin_unlock_irqrestore(&schp->lock, flag);
	if (flushed) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
}
static void flush_qp(struct c4iw_qp *qhp)
{
	struct c4iw_cq *rchp, *schp;
	unsigned long flag;

	rchp = get_chp(qhp->rhp, qhp->attr.rcq);
	schp = get_chp(qhp->rhp, qhp->attr.scq);

	if (qhp->ibqp.uobject) {
		t4_set_wq_in_error(&qhp->wq);
		t4_set_cq_in_error(&rchp->cq);
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
		if (schp != rchp) {
			t4_set_cq_in_error(&schp->cq);
			spin_lock_irqsave(&schp->comp_handler_lock, flag);
			(*schp->ibcq.comp_handler)(&schp->ibcq,
						   schp->ibcq.cq_context);
			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
		}
		return;
	}
	__flush_qp(qhp, rchp, schp);
}
static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		     struct c4iw_ep *ep)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
	wqe->cookie = (unsigned long) &ep->com.wr_wait;

	wqe->u.fini.type = FW_RI_TYPE_FINI;
	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
				  qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
{
	PDBG("%s p2p_type = %d\n", __func__, p2p_type);
	memset(&init->u, 0, sizeof init->u);
	switch (p2p_type) {
	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
		init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
		init->u.write.stag_sink = cpu_to_be32(1);
		init->u.write.to_sink = cpu_to_be64(1);
		init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		init->u.write.len16 = DIV_ROUND_UP(sizeof init->u.write +
						   sizeof(struct fw_ri_immd),
						   16);
		break;
	case FW_RI_INIT_P2PTYPE_READ_REQ:
		init->u.write.opcode = FW_RI_RDMA_READ_WR;
		init->u.read.stag_src = cpu_to_be32(1);
		init->u.read.to_src_lo = cpu_to_be32(1);
		init->u.read.stag_sink = cpu_to_be32(1);
		init->u.read.to_sink_lo = cpu_to_be32(1);
		init->u.read.len16 = DIV_ROUND_UP(sizeof init->u.read, 16);
		break;
	}
}
static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
{
	struct fw_ri_wr *wqe;
	int ret;
	struct sk_buff *skb;

	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
	     qhp->ep->hwtid);

	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);

	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof *wqe);
	wqe->op_compl = cpu_to_be32(
		FW_WR_OP(FW_RI_INIT_WR) |
		FW_WR_COMPL(1));
	wqe->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID(qhp->ep->hwtid) |
		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));

	wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;

	wqe->u.init.type = FW_RI_TYPE_INIT;
	wqe->u.init.mpareqbit_p2ptype =
		V_FW_RI_WR_MPAREQBIT(qhp->attr.mpa_attr.initiator) |
		V_FW_RI_WR_P2PTYPE(qhp->attr.mpa_attr.p2p_type);
	wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
	if (qhp->attr.mpa_attr.recv_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.xmit_marker_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
	if (qhp->attr.mpa_attr.crc_enabled)
		wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;

	wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
			      FW_RI_QP_RDMA_WRITE_ENABLE |
			      FW_RI_QP_BIND_ENABLE;
	if (!qhp->ibqp.uobject)
		wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
				       FW_RI_QP_STAG0_ENABLE;
	wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
	wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
	wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
	wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
	wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
	wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
	wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
	wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
	wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
	wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
	wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
	wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
					   rhp->rdev.lldi.vr->rq.start);
	if (qhp->attr.mpa_attr.initiator)
		build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);

	ret = c4iw_ofld_send(&rhp->rdev, skb);
	if (ret)
		goto out;

	ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
				  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
out:
	PDBG("%s ret %d\n", __func__, ret);
	return ret;
}
/*
 * Called by the library when the qp has user dbs disabled due to
 * a DB_FULL condition.  This function will single-thread all user
 * DB rings to avoid overflowing the hw db-fifo.
 */
static int ring_kernel_db(struct c4iw_qp *qhp, u32 qid, u16 inc)
{
	int delay = db_delay_usecs;

	mutex_lock(&qhp->rhp->db_mutex);
	do {

		/*
		 * The interrupt threshold is dbfifo_int_thresh << 6. So
		 * make sure we don't cross that and generate an interrupt.
		 */
		if (cxgb4_dbfifo_count(qhp->rhp->rdev.lldi.ports[0], 1) <
		    (qhp->rhp->rdev.lldi.dbfifo_int_thresh << 5)) {
			writel(QID(qid) | PIDX(inc), qhp->wq.db);
			break;
		}
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(delay));
		delay = min(delay << 1, 2000);
	} while (1);
	mutex_unlock(&qhp->rhp->db_mutex);
	return 0;
}
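/*
 * Core QP state machine.  IDLE->RTS posts an RDMA INIT WR, RTS->CLOSING
 * posts an RDMA FINI WR, RTS->TERMINATE schedules a terminate message, and
 * transitions into ERROR flush both CQs.  The SQ_DB/RQ_DB attribute masks
 * bypass the state machine and just ring the kernel doorbell on behalf of
 * the user library.
 */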
int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal)
{
	int ret = 0;
	struct c4iw_qp_attributes newattr = qhp->attr;
	int disconnect = 0;
	int terminate = 0;
	int abort = 0;
	int free = 0;
	struct c4iw_ep *ep = NULL;

	PDBG("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n", __func__,
	     qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
	     (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);

	mutex_lock(&qhp->mutex);

	/* Process attr changes if in IDLE */
	if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
		if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
			ret = -EIO;
			goto out;
		}
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
			newattr.enable_rdma_read = attrs->enable_rdma_read;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
			newattr.enable_rdma_write = attrs->enable_rdma_write;
		if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
			newattr.enable_bind = attrs->enable_bind;
		if (mask & C4IW_QP_ATTR_MAX_ORD) {
			if (attrs->max_ord > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ord = attrs->max_ord;
		}
		if (mask & C4IW_QP_ATTR_MAX_IRD) {
			if (attrs->max_ird > c4iw_max_read_depth) {
				ret = -EINVAL;
				goto out;
			}
			newattr.max_ird = attrs->max_ird;
		}
		qhp->attr = newattr;
	}

	if (mask & C4IW_QP_ATTR_SQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.sq.qid, attrs->sq_db_inc);
		goto out;
	}
	if (mask & C4IW_QP_ATTR_RQ_DB) {
		ret = ring_kernel_db(qhp, qhp->wq.rq.qid, attrs->rq_db_inc);
		goto out;
	}

	if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
		goto out;
	if (qhp->attr.state == attrs->next_state)
		goto out;

	switch (qhp->attr.state) {
	case C4IW_QP_STATE_IDLE:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_RTS:
			if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
				ret = -EINVAL;
				goto out;
			}
			if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
				ret = -EINVAL;
				goto out;
			}
			qhp->attr.mpa_attr = attrs->mpa_attr;
			qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
			qhp->ep = qhp->attr.llp_stream_handle;
			set_state(qhp, C4IW_QP_STATE_RTS);

			/*
			 * Ref the endpoint here and deref when we
			 * disassociate the endpoint from the QP.  This
			 * happens in CLOSING->IDLE transition or *->ERROR
			 * transition.
			 */
			c4iw_get_ep(&qhp->ep->com);
			ret = rdma_init(rhp, qhp);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			flush_qp(qhp);
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_RTS:
		switch (attrs->next_state) {
		case C4IW_QP_STATE_CLOSING:
			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
			set_state(qhp, C4IW_QP_STATE_CLOSING);
			ep = qhp->ep;
			if (!internal) {
				abort = 0;
				disconnect = 1;
				c4iw_get_ep(&qhp->ep->com);
			}
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ret = rdma_fini(rhp, qhp, ep);
			if (ret)
				goto err;
			break;
		case C4IW_QP_STATE_TERMINATE:
			set_state(qhp, C4IW_QP_STATE_TERMINATE);
			qhp->attr.layer_etype = attrs->layer_etype;
			qhp->attr.ecode = attrs->ecode;
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			ep = qhp->ep;
			if (!internal)
				terminate = 1;
			disconnect = 1;
			c4iw_get_ep(&qhp->ep->com);
			break;
		case C4IW_QP_STATE_ERROR:
			set_state(qhp, C4IW_QP_STATE_ERROR);
			if (qhp->ibqp.uobject)
				t4_set_wq_in_error(&qhp->wq);
			if (!internal) {
				abort = 1;
				disconnect = 1;
				ep = qhp->ep;
				c4iw_get_ep(&qhp->ep->com);
			}
			goto err;
			break;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_CLOSING:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		switch (attrs->next_state) {
		case C4IW_QP_STATE_IDLE:
			flush_qp(qhp);
			set_state(qhp, C4IW_QP_STATE_IDLE);
			qhp->attr.llp_stream_handle = NULL;
			c4iw_put_ep(&qhp->ep->com);
			qhp->ep = NULL;
			wake_up(&qhp->wait);
			break;
		case C4IW_QP_STATE_ERROR:
			goto err;
		default:
			ret = -EINVAL;
			goto out;
		}
		break;
	case C4IW_QP_STATE_ERROR:
		if (attrs->next_state != C4IW_QP_STATE_IDLE) {
			ret = -EINVAL;
			goto out;
		}
		if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
			ret = -EINVAL;
			goto out;
		}
		set_state(qhp, C4IW_QP_STATE_IDLE);
		break;
	case C4IW_QP_STATE_TERMINATE:
		if (!internal) {
			ret = -EINVAL;
			goto out;
		}
		goto err;
		break;
	default:
		printk(KERN_ERR "%s in a bad state %d\n",
		       __func__, qhp->attr.state);
		ret = -EINVAL;
		goto err;
		break;
	}
	goto out;
err:
	PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
	     qhp->wq.sq.qid);

	/* disassociate the LLP connection */
	qhp->attr.llp_stream_handle = NULL;
	if (!ep)
		ep = qhp->ep;
	qhp->ep = NULL;
	set_state(qhp, C4IW_QP_STATE_ERROR);
	free = 1;
	abort = 1;
	wake_up(&qhp->wait);
	BUG_ON(!ep);
	flush_qp(qhp);
out:
	mutex_unlock(&qhp->mutex);

	if (terminate)
		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);

	/*
	 * If disconnect is 1, then we need to initiate a disconnect
	 * on the EP.  This can be a normal close (RTS->CLOSING) or
	 * an abnormal close (RTS/CLOSING->ERROR).
	 */
	if (disconnect) {
		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
							 GFP_KERNEL);
		c4iw_put_ep(&ep->com);
	}

	/*
	 * If free is 1, then we've disassociated the EP from the QP
	 * and we need to dereference the EP.
	 */
	if (free)
		c4iw_put_ep(&ep->com);
	PDBG("%s exit state %d\n", __func__, qhp->attr.state);
	return ret;
}
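/*
 * idr_for_each() callbacks used to toggle kernel-mode doorbells for every QP
 * when the device enters or leaves doorbell flow-control.
 */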
static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}
int c4iw_destroy_qp(struct ib_qp *ib_qp)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_qp_attributes attrs;
	struct c4iw_ucontext *ucontext;

	qhp = to_c4iw_qp(ib_qp);
	rhp = qhp->rhp;

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	spin_lock_irq(&rhp->lock);
	remove_handle_nolock(rhp, &rhp->qpidr, qhp->wq.sq.qid);
	rhp->qpcnt--;
	BUG_ON(rhp->qpcnt < 0);
	if (rhp->qpcnt <= db_fc_threshold && rhp->db_state == FLOW_CONTROL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = NORMAL;
		idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
	}
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt <= db_coalescing_threshold)
			cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
	spin_unlock_irq(&rhp->lock);
	atomic_dec(&qhp->refcnt);
	wait_event(qhp->wait, !atomic_read(&qhp->refcnt));

	ucontext = ib_qp->uobject ?
		   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);

	PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
	kfree(qhp);
	return 0;
}
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}
struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	struct c4iw_pd *php;
	struct c4iw_cq *schp;
	struct c4iw_cq *rchp;
	struct c4iw_create_qp_resp uresp;
	int sqsize, rqsize;
	struct c4iw_ucontext *ucontext;
	int ret;
	struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL;

	PDBG("%s ib_pd %p\n", __func__, pd);

	if (attrs->qp_type != IB_QPT_RC)
		return ERR_PTR(-EINVAL);

	php = to_c4iw_pd(pd);
	rhp = php->rhp;
	schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
	rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
	if (!schp || !rchp)
		return ERR_PTR(-EINVAL);

	if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
		return ERR_PTR(-EINVAL);

	rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
	if (rqsize > T4_MAX_RQ_SIZE)
		return ERR_PTR(-E2BIG);

	sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
	if (sqsize > T4_MAX_SQ_SIZE)
		return ERR_PTR(-E2BIG);

	ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;

	qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
	if (!qhp)
		return ERR_PTR(-ENOMEM);
	qhp->wq.sq.size = sqsize;
	qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
	qhp->wq.rq.size = rqsize;
	qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;

	if (ucontext) {
		qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
		qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
	}

	PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
	     __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	attrs->cap.max_recv_wr = rqsize - 1;
	attrs->cap.max_send_wr = sqsize - 1;
	attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;

	qhp->rhp = rhp;
	qhp->attr.pd = php->pdid;
	qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
	qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
	qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
	qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
	qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
	qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
	qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
	qhp->attr.state = C4IW_QP_STATE_IDLE;
	qhp->attr.next_state = C4IW_QP_STATE_IDLE;
	qhp->attr.enable_rdma_read = 1;
	qhp->attr.enable_rdma_write = 1;
	qhp->attr.enable_bind = 1;
	qhp->attr.max_ord = 1;
	qhp->attr.max_ird = 1;
	spin_lock_init(&qhp->lock);
	mutex_init(&qhp->mutex);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);

	spin_lock_irq(&rhp->lock);
	if (rhp->db_state != NORMAL)
		t4_disable_wq_db(&qhp->wq);
	rhp->qpcnt++;
	if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
		rhp->rdev.stats.db_state_transitions++;
		rhp->db_state = FLOW_CONTROL;
		idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
	}
	if (db_coalescing_threshold >= 0)
		if (rhp->qpcnt > db_coalescing_threshold)
			cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
	ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
	spin_unlock_irq(&rhp->lock);
	if (ret)
		goto err2;

	if (udata) {
		mm1 = kmalloc(sizeof *mm1, GFP_KERNEL);
		if (!mm1) {
			ret = -ENOMEM;
			goto err3;
		}
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2) {
			ret = -ENOMEM;
			goto err4;
		}
		mm3 = kmalloc(sizeof *mm3, GFP_KERNEL);
		if (!mm3) {
			ret = -ENOMEM;
			goto err5;
		}
		mm4 = kmalloc(sizeof *mm4, GFP_KERNEL);
		if (!mm4) {
			ret = -ENOMEM;
			goto err6;
		}
		if (t4_sq_onchip(&qhp->wq.sq)) {
			mm5 = kmalloc(sizeof *mm5, GFP_KERNEL);
			if (!mm5) {
				ret = -ENOMEM;
				goto err7;
			}
			uresp.flags = C4IW_QPF_ONCHIP;
		} else
			uresp.flags = 0;
		uresp.qid_mask = rhp->rdev.qpmask;
		uresp.sqid = qhp->wq.sq.qid;
		uresp.sq_size = qhp->wq.sq.size;
		uresp.sq_memsize = qhp->wq.sq.memsize;
		uresp.rqid = qhp->wq.rq.qid;
		uresp.rq_size = qhp->wq.rq.size;
		uresp.rq_memsize = qhp->wq.rq.memsize;
		spin_lock(&ucontext->mmap_lock);
		if (mm5) {
			uresp.ma_sync_key = ucontext->key;
			ucontext->key += PAGE_SIZE;
		}
		uresp.sq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.sq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.rq_db_gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err8;
		mm1->key = uresp.sq_key;
		mm1->addr = qhp->wq.sq.phys_addr;
		mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize);
		insert_mmap(ucontext, mm1);
		mm2->key = uresp.rq_key;
		mm2->addr = virt_to_phys(qhp->wq.rq.queue);
		mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
		insert_mmap(ucontext, mm2);
		mm3->key = uresp.sq_db_gts_key;
		mm3->addr = qhp->wq.sq.udb;
		mm3->len = PAGE_SIZE;
		insert_mmap(ucontext, mm3);
		mm4->key = uresp.rq_db_gts_key;
		mm4->addr = qhp->wq.rq.udb;
		mm4->len = PAGE_SIZE;
		insert_mmap(ucontext, mm4);
		if (mm5) {
			mm5->key = uresp.ma_sync_key;
			mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0)
				     + A_PCIE_MA_SYNC) & PAGE_MASK;
			mm5->len = PAGE_SIZE;
			insert_mmap(ucontext, mm5);
		}
	}
	qhp->ibqp.qp_num = qhp->wq.sq.qid;
	init_timer(&(qhp->timer));
	PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
	     __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
	     qhp->wq.sq.qid);
	return &qhp->ibqp;
err8:
	kfree(mm5);
err7:
	kfree(mm4);
err6:
	kfree(mm3);
err5:
	kfree(mm2);
err4:
	kfree(mm1);
err3:
	remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
err2:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(qhp);
	return ERR_PTR(ret);
}
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_qp *qhp;
	enum c4iw_qp_attr_mask mask = 0;
	struct c4iw_qp_attributes attrs;

	PDBG("%s ib_qp %p\n", __func__, ibqp);

	/* iwarp does not support the RTR state */
	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
		attr_mask &= ~IB_QP_STATE;

	/* Make sure we still have something left to do */
	if (!attr_mask)
		return 0;

	memset(&attrs, 0, sizeof attrs);
	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;

	attrs.next_state = c4iw_convert_state(attr->qp_state);
	attrs.enable_rdma_read = (attr->qp_access_flags &
				  IB_ACCESS_REMOTE_READ) ?  1 : 0;
	attrs.enable_rdma_write = (attr->qp_access_flags &
				   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

	mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
	mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
			(C4IW_QP_ATTR_ENABLE_RDMA_READ |
			 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
			 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

	/*
	 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
	 * ringing the queue db when we're in DB_FULL mode.
	 */
	attrs.sq_db_inc = attr->sq_psn;
	attrs.rq_db_inc = attr->rq_psn;
	mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
	mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;

	return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
	PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

	memset(attr, 0, sizeof *attr);
	memset(init_attr, 0, sizeof *init_attr);
	attr->qp_state = to_ib_qp_state(qhp->attr.state);
);