/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"
static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	xa_lock_irq(&dev->qps);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&dev->qps);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}
static void free_ird(struct c4iw_dev *dev, int ird)
{
	xa_lock_irq(&dev->qps);
	dev->avail_ird += ird;
	xa_unlock_irq(&dev->qps);
}
static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}
static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}
static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}
static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}
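/*
 * On-chip SQ memory is only attempted for user-mode QPs; if on-chip SQs are
 * not supported or the on-chip pool allocation fails, the SQ falls back to
 * coherent host DMA memory (see alloc_sq() below).
 */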
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}
static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dealloc_sq(rdev, &wq->sq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	if (has_rq) {
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
		kfree(wq->rq.sw_rq);
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	}
	return 0;
}
/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))
		return NULL;

	return rdev->bar2_kva + bar2_qoffset;
}
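/*
 * create_qp() below allocates the HW queue memory (SQ and, when needed, RQ
 * plus its RQT), maps the BAR2 doorbell/GTS regions, and then posts a
 * FW_RI_RES_WR so firmware writes the egress queue contexts.
 */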
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp,
		     int need_rq)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	if (need_rq) {
		wq->rq.qid = c4iw_get_qpid(rdev, uctx);
		if (!wq->rq.qid) {
			ret = -ENOMEM;
			goto free_sq_qid;
		}
	}

	if (!user) {
		wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;//FIXME
		}

		if (need_rq) {
			wq->rq.sw_rq = kcalloc(wq->rq.size,
					       sizeof(*wq->rq.sw_rq),
					       GFP_KERNEL);
			if (!wq->rq.sw_rq) {
				ret = -ENOMEM;
				goto free_sw_sq;
			}
		}
	}

	if (need_rq) {
		/*
		 * RQT must be a power of 2 and at least 16 deep.
		 */
		wq->rq.rqt_size =
			roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
		wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
		if (!wq->rq.rqt_hwaddr) {
			ret = -ENOMEM;
			goto free_sw_rq;
		}
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	if (need_rq) {
		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
						  wq->rq.memsize,
						  &wq->rq.dma_addr,
						  GFP_KERNEL);
		if (!wq->rq.queue) {
			ret = -ENOMEM;
			goto free_sq;
		}
		pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
			 wq->sq.queue,
			 (unsigned long long)virt_to_phys(wq->sq.queue),
			 wq->rq.queue,
			 (unsigned long long)virt_to_phys(wq->rq.queue));
		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	}

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
					 CXGB4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	if (need_rq)
		wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
						 CXGB4_BAR2_QTYPE_EGRESS,
						 &wq->rq.bar2_qid,
						 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
	if (need_rq)
		wr_len += sizeof(*res);
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);

	if (need_rq) {
		res++;
		res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
		res->u.sqrq.op = FW_RI_RES_OP_WRITE;

		/*
		 * eqsize is the number of 64B entries plus the status page size
		 */
		eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
			rdev->hw_queue.t4_eq_status_entries;
		res->u.sqrq.fetchszm_to_iqid =
			/* no host cidx updates */
			cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
			/* don't keep in chip cache */
			FW_RI_RES_WR_CPRIO_V(0) |
			/* set by uP at ri_init time */
			FW_RI_RES_WR_PCIECHN_V(0) |
			FW_RI_RES_WR_IQID_V(rcq->cqid));
		res->u.sqrq.dcaen_to_eqsize =
			cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
			FW_RI_RES_WR_DCACPU_V(0) |
			FW_RI_RES_WR_FBMIN_V(2) |
			FW_RI_RES_WR_FBMAX_V(3) |
			FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
			FW_RI_RES_WR_CIDXFTHRESH_V(0) |
			FW_RI_RES_WR_EQSIZE_V(eqsize));
		res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
		res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
	}

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	if (need_rq)
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	if (need_rq)
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	if (need_rq)
		kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	if (need_rq)
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
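/*
 * build_immd() copies the send payload inline into the WQE as a
 * FW_RI_DATA_IMMD chunk, wrapping at the end of the SQ ring and
 * zero-padding up to the next 16-byte boundary.
 */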
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      const struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}
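/*
 * build_isgl() builds an immediate SGL (FW_RI_DATA_ISGL) in the WQE from the
 * ib_sge array, wrapping the flit pointer back to queue_start whenever it
 * reaches queue_end; the accumulated payload length is optionally returned
 * through plenp.
 */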
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp;

	if ((__be64 *)isglp == queue_end)
		isglp = (struct fw_ri_isgl *)queue_start;

	flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}
static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}
static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;

	/*
	 * iWARP protocol supports 64 bit immediate data but rdma api
	 * limits it to 32bit.
	 */
	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
	else
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}
static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
			    struct ib_send_wr *wr)
{
	memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
	memset(immdp->r1, 0, 6);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = 16;
}
static void build_rdma_write_cmpl(struct t4_sq *sq,
				  struct fw_ri_rdma_write_cmpl_wr *wcwr,
				  const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;

	/*
	 * This code assumes the struct fields preceding the write isgl
	 * fit in one 64B WR slot.  This is because the WQE is built
	 * directly in the dma queue, and wrapping is only handled
	 * by the code building sgls.  IE the "fixed part" of the wr
	 * structs must all fit in 64B.  The WQE build code should probably be
	 * redesigned to avoid this restriction, but for now just add
	 * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
	 */
	BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);

	wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->next->opcode == IB_WR_SEND)
		wcwr->stag_inv = 0;
	else
		wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);

	if (wr->next->send_flags & IB_SEND_INLINE)
		build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
	else
		build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
			   &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);

	build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
		   wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);

	size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
	       wr->num_sge * sizeof(struct fw_ri_sge);
	wcwr->plen = cpu_to_be32(plen);
	*len16 = DIV_ROUND_UP(size, 16);
}
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
			   u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
	return 0;
}
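/*
 * post_write_cmpl() posts a single FW_RI_RDMA_WRITE_CMPL_WR that fuses a
 * WRITE with the following SEND/SEND_WITH_INV; two software SQ entries are
 * still consumed so completion processing sees both work requests.
 */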
static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
{
	bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
			     qhp->sq_sig_all;
	bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
			      qhp->sq_sig_all;
	struct t4_swsqe *swsqe;
	union t4_wr *wqe;
	u16 write_wrid;
	u8 len16;
	u16 idx;

	/*
	 * The sw_sq entries still look like a WRITE and a SEND and consume
	 * 2 slots. The FW WR, however, will be a single uber-WR.
	 */
	wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
	       qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
	build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);

	/* WRITE swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	swsqe->opcode = FW_RI_RDMA_WRITE;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = write_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	write_wrid = qhp->wq.sq.pidx;

	/* just bump the sw_sq */
	qhp->wq.sq.in_use++;
	if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
		qhp->wq.sq.pidx = 0;

	/* SEND_WITH_INV swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	if (wr->next->opcode == IB_WR_SEND)
		swsqe->opcode = FW_RI_SEND;
	else
		swsqe->opcode = FW_RI_SEND_WITH_INV;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = send_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->next->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
	wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;

	init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
		    write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
	t4_sq_produce(&qhp->wq, len16);
	idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);

	t4_ring_sq_db(&qhp->wq, idx, wqe);
}
static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   const struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(
		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
			  u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}
788 static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr
*fr
,
789 const struct ib_reg_wr
*wr
, struct c4iw_mr
*mhp
,
792 __be64
*p
= (__be64
*)fr
->pbl
;
794 fr
->r2
= cpu_to_be32(0);
795 fr
->stag
= cpu_to_be32(mhp
->ibmr
.rkey
);
797 fr
->tpte
.valid_to_pdid
= cpu_to_be32(FW_RI_TPTE_VALID_F
|
798 FW_RI_TPTE_STAGKEY_V((mhp
->ibmr
.rkey
& FW_RI_TPTE_STAGKEY_M
)) |
799 FW_RI_TPTE_STAGSTATE_V(1) |
800 FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR
) |
801 FW_RI_TPTE_PDID_V(mhp
->attr
.pdid
));
802 fr
->tpte
.locread_to_qpid
= cpu_to_be32(
803 FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr
->access
)) |
804 FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO
) |
805 FW_RI_TPTE_PS_V(ilog2(wr
->mr
->page_size
) - 12));
806 fr
->tpte
.nosnoop_pbladdr
= cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
807 PBL_OFF(&mhp
->rhp
->rdev
, mhp
->attr
.pbl_addr
)>>3));
808 fr
->tpte
.dca_mwbcnt_pstag
= cpu_to_be32(0);
809 fr
->tpte
.len_hi
= cpu_to_be32(0);
810 fr
->tpte
.len_lo
= cpu_to_be32(mhp
->ibmr
.length
);
811 fr
->tpte
.va_hi
= cpu_to_be32(mhp
->ibmr
.iova
>> 32);
812 fr
->tpte
.va_lo_fbo
= cpu_to_be32(mhp
->ibmr
.iova
& 0xffffffff);
814 p
[0] = cpu_to_be64((u64
)mhp
->mpl
[0]);
815 p
[1] = cpu_to_be64((u64
)mhp
->mpl
[1]);
817 *len16
= DIV_ROUND_UP(sizeof(*fr
), 16);
820 static int build_memreg(struct t4_sq
*sq
, union t4_wr
*wqe
,
821 const struct ib_reg_wr
*wr
, struct c4iw_mr
*mhp
,
822 u8
*len16
, bool dsgl_supported
)
824 struct fw_ri_immd
*imdp
;
827 int pbllen
= roundup(mhp
->mpl_len
* sizeof(u64
), 32);
830 if (mhp
->mpl_len
> t4_max_fr_depth(dsgl_supported
&& use_dsgl
))
833 wqe
->fr
.qpbinde_to_dcacpu
= 0;
834 wqe
->fr
.pgsz_shift
= ilog2(wr
->mr
->page_size
) - 12;
835 wqe
->fr
.addr_type
= FW_RI_VA_BASED_TO
;
836 wqe
->fr
.mem_perms
= c4iw_ib_to_tpt_access(wr
->access
);
838 wqe
->fr
.len_lo
= cpu_to_be32(mhp
->ibmr
.length
);
839 wqe
->fr
.stag
= cpu_to_be32(wr
->key
);
840 wqe
->fr
.va_hi
= cpu_to_be32(mhp
->ibmr
.iova
>> 32);
841 wqe
->fr
.va_lo_fbo
= cpu_to_be32(mhp
->ibmr
.iova
&
844 if (dsgl_supported
&& use_dsgl
&& (pbllen
> max_fr_immd
)) {
845 struct fw_ri_dsgl
*sglp
;
847 for (i
= 0; i
< mhp
->mpl_len
; i
++)
848 mhp
->mpl
[i
] = (__force u64
)cpu_to_be64((u64
)mhp
->mpl
[i
]);
850 sglp
= (struct fw_ri_dsgl
*)(&wqe
->fr
+ 1);
851 sglp
->op
= FW_RI_DATA_DSGL
;
853 sglp
->nsge
= cpu_to_be16(1);
854 sglp
->addr0
= cpu_to_be64(mhp
->mpl_addr
);
855 sglp
->len0
= cpu_to_be32(pbllen
);
857 *len16
= DIV_ROUND_UP(sizeof(wqe
->fr
) + sizeof(*sglp
), 16);
859 imdp
= (struct fw_ri_immd
*)(&wqe
->fr
+ 1);
860 imdp
->op
= FW_RI_DATA_IMMD
;
863 imdp
->immdlen
= cpu_to_be32(pbllen
);
864 p
= (__be64
*)(imdp
+ 1);
866 for (i
= 0; i
< mhp
->mpl_len
; i
++) {
867 *p
= cpu_to_be64((u64
)mhp
->mpl
[i
]);
869 if (++p
== (__be64
*)&sq
->queue
[sq
->size
])
870 p
= (__be64
*)sq
->queue
;
875 if (++p
== (__be64
*)&sq
->queue
[sq
->size
])
876 p
= (__be64
*)sq
->queue
;
878 *len16
= DIV_ROUND_UP(sizeof(wqe
->fr
) + sizeof(*imdp
)
884 static int build_inv_stag(union t4_wr
*wqe
, const struct ib_send_wr
*wr
,
887 wqe
->inv
.stag_inv
= cpu_to_be32(wr
->ex
.invalidate_rkey
);
889 *len16
= DIV_ROUND_UP(sizeof(wqe
->inv
), 16);
893 void c4iw_qp_add_ref(struct ib_qp
*qp
)
895 pr_debug("ib_qp %p\n", qp
);
896 refcount_inc(&to_c4iw_qp(qp
)->qp_refcnt
);
899 void c4iw_qp_rem_ref(struct ib_qp
*qp
)
901 pr_debug("ib_qp %p\n", qp
);
902 if (refcount_dec_and_test(&to_c4iw_qp(qp
)->qp_refcnt
))
903 complete(&to_c4iw_qp(qp
)->qp_rel_comp
);
906 static void add_to_fc_list(struct list_head
*head
, struct list_head
*entry
)
908 if (list_empty(entry
))
909 list_add_tail(entry
, head
);
912 static int ring_kernel_sq_db(struct c4iw_qp
*qhp
, u16 inc
)
916 xa_lock_irqsave(&qhp
->rhp
->qps
, flags
);
917 spin_lock(&qhp
->lock
);
918 if (qhp
->rhp
->db_state
== NORMAL
)
919 t4_ring_sq_db(&qhp
->wq
, inc
, NULL
);
921 add_to_fc_list(&qhp
->rhp
->db_fc_list
, &qhp
->db_fc_entry
);
922 qhp
->wq
.sq
.wq_pidx_inc
+= inc
;
924 spin_unlock(&qhp
->lock
);
925 xa_unlock_irqrestore(&qhp
->rhp
->qps
, flags
);
929 static int ring_kernel_rq_db(struct c4iw_qp
*qhp
, u16 inc
)
933 xa_lock_irqsave(&qhp
->rhp
->qps
, flags
);
934 spin_lock(&qhp
->lock
);
935 if (qhp
->rhp
->db_state
== NORMAL
)
936 t4_ring_rq_db(&qhp
->wq
, inc
, NULL
);
938 add_to_fc_list(&qhp
->rhp
->db_fc_list
, &qhp
->db_fc_entry
);
939 qhp
->wq
.rq
.wq_pidx_inc
+= inc
;
941 spin_unlock(&qhp
->lock
);
942 xa_unlock_irqrestore(&qhp
->rhp
->qps
, flags
);
946 static int ib_to_fw_opcode(int ib_opcode
)
951 case IB_WR_SEND_WITH_INV
:
952 opcode
= FW_RI_SEND_WITH_INV
;
957 case IB_WR_RDMA_WRITE
:
958 opcode
= FW_RI_RDMA_WRITE
;
960 case IB_WR_RDMA_WRITE_WITH_IMM
:
961 opcode
= FW_RI_WRITE_IMMEDIATE
;
963 case IB_WR_RDMA_READ
:
964 case IB_WR_RDMA_READ_WITH_INV
:
965 opcode
= FW_RI_READ_REQ
;
968 opcode
= FW_RI_FAST_REGISTER
;
970 case IB_WR_LOCAL_INV
:
971 opcode
= FW_RI_LOCAL_INV
;
979 static int complete_sq_drain_wr(struct c4iw_qp
*qhp
,
980 const struct ib_send_wr
*wr
)
982 struct t4_cqe cqe
= {};
983 struct c4iw_cq
*schp
;
988 schp
= to_c4iw_cq(qhp
->ibqp
.send_cq
);
991 opcode
= ib_to_fw_opcode(wr
->opcode
);
995 cqe
.u
.drain_cookie
= wr
->wr_id
;
996 cqe
.header
= cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH
) |
997 CQE_OPCODE_V(opcode
) |
1001 CQE_QPID_V(qhp
->wq
.sq
.qid
));
1003 spin_lock_irqsave(&schp
->lock
, flag
);
1004 cqe
.bits_type_ts
= cpu_to_be64(CQE_GENBIT_V((u64
)cq
->gen
));
1005 cq
->sw_queue
[cq
->sw_pidx
] = cqe
;
1006 t4_swcq_produce(cq
);
1007 spin_unlock_irqrestore(&schp
->lock
, flag
);
1009 if (t4_clear_cq_armed(&schp
->cq
)) {
1010 spin_lock_irqsave(&schp
->comp_handler_lock
, flag
);
1011 (*schp
->ibcq
.comp_handler
)(&schp
->ibcq
,
1012 schp
->ibcq
.cq_context
);
1013 spin_unlock_irqrestore(&schp
->comp_handler_lock
, flag
);
1018 static int complete_sq_drain_wrs(struct c4iw_qp
*qhp
,
1019 const struct ib_send_wr
*wr
,
1020 const struct ib_send_wr
**bad_wr
)
1025 ret
= complete_sq_drain_wr(qhp
, wr
);
1035 static void complete_rq_drain_wr(struct c4iw_qp
*qhp
,
1036 const struct ib_recv_wr
*wr
)
1038 struct t4_cqe cqe
= {};
1039 struct c4iw_cq
*rchp
;
1043 rchp
= to_c4iw_cq(qhp
->ibqp
.recv_cq
);
1046 cqe
.u
.drain_cookie
= wr
->wr_id
;
1047 cqe
.header
= cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH
) |
1048 CQE_OPCODE_V(FW_RI_SEND
) |
1052 CQE_QPID_V(qhp
->wq
.sq
.qid
));
1054 spin_lock_irqsave(&rchp
->lock
, flag
);
1055 cqe
.bits_type_ts
= cpu_to_be64(CQE_GENBIT_V((u64
)cq
->gen
));
1056 cq
->sw_queue
[cq
->sw_pidx
] = cqe
;
1057 t4_swcq_produce(cq
);
1058 spin_unlock_irqrestore(&rchp
->lock
, flag
);
1060 if (t4_clear_cq_armed(&rchp
->cq
)) {
1061 spin_lock_irqsave(&rchp
->comp_handler_lock
, flag
);
1062 (*rchp
->ibcq
.comp_handler
)(&rchp
->ibcq
,
1063 rchp
->ibcq
.cq_context
);
1064 spin_unlock_irqrestore(&rchp
->comp_handler_lock
, flag
);
1068 static void complete_rq_drain_wrs(struct c4iw_qp
*qhp
,
1069 const struct ib_recv_wr
*wr
)
1072 complete_rq_drain_wr(qhp
, wr
);
1077 int c4iw_post_send(struct ib_qp
*ibqp
, const struct ib_send_wr
*wr
,
1078 const struct ib_send_wr
**bad_wr
)
1082 enum fw_wr_opcodes fw_opcode
= 0;
1083 enum fw_ri_wr_flags fw_flags
;
1084 struct c4iw_qp
*qhp
;
1085 struct c4iw_dev
*rhp
;
1086 union t4_wr
*wqe
= NULL
;
1088 struct t4_swsqe
*swsqe
;
1092 qhp
= to_c4iw_qp(ibqp
);
1094 spin_lock_irqsave(&qhp
->lock
, flag
);
1097 * If the qp has been flushed, then just insert a special
1100 if (qhp
->wq
.flushed
) {
1101 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1102 err
= complete_sq_drain_wrs(qhp
, wr
, bad_wr
);
1105 num_wrs
= t4_sq_avail(&qhp
->wq
);
1107 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1113 * Fastpath for NVMe-oF target WRITE + SEND_WITH_INV wr chain which is
1114 * the response for small NVMEe-oF READ requests. If the chain is
1115 * exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the sgl depths
1116 * and lengths meet the requirements of the fw_ri_write_cmpl_wr work
1117 * request, then build and post the write_cmpl WR. If any of the tests
1118 * below are not true, then we continue on with the tradtional WRITE
1121 if (qhp
->rhp
->rdev
.lldi
.write_cmpl_support
&&
1122 CHELSIO_CHIP_VERSION(qhp
->rhp
->rdev
.lldi
.adapter_type
) >=
1124 wr
&& wr
->next
&& !wr
->next
->next
&&
1125 wr
->opcode
== IB_WR_RDMA_WRITE
&&
1126 wr
->sg_list
[0].length
&& wr
->num_sge
<= T4_WRITE_CMPL_MAX_SGL
&&
1127 (wr
->next
->opcode
== IB_WR_SEND
||
1128 wr
->next
->opcode
== IB_WR_SEND_WITH_INV
) &&
1129 wr
->next
->sg_list
[0].length
== T4_WRITE_CMPL_MAX_CQE
&&
1130 wr
->next
->num_sge
== 1 && num_wrs
>= 2) {
1131 post_write_cmpl(qhp
, wr
);
1132 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1142 wqe
= (union t4_wr
*)((u8
*)qhp
->wq
.sq
.queue
+
1143 qhp
->wq
.sq
.wq_pidx
* T4_EQ_ENTRY_SIZE
);
1146 if (wr
->send_flags
& IB_SEND_SOLICITED
)
1147 fw_flags
|= FW_RI_SOLICITED_EVENT_FLAG
;
1148 if (wr
->send_flags
& IB_SEND_SIGNALED
|| qhp
->sq_sig_all
)
1149 fw_flags
|= FW_RI_COMPLETION_FLAG
;
1150 swsqe
= &qhp
->wq
.sq
.sw_sq
[qhp
->wq
.sq
.pidx
];
1151 switch (wr
->opcode
) {
1152 case IB_WR_SEND_WITH_INV
:
1154 if (wr
->send_flags
& IB_SEND_FENCE
)
1155 fw_flags
|= FW_RI_READ_FENCE_FLAG
;
1156 fw_opcode
= FW_RI_SEND_WR
;
1157 if (wr
->opcode
== IB_WR_SEND
)
1158 swsqe
->opcode
= FW_RI_SEND
;
1160 swsqe
->opcode
= FW_RI_SEND_WITH_INV
;
1161 err
= build_rdma_send(&qhp
->wq
.sq
, wqe
, wr
, &len16
);
1163 case IB_WR_RDMA_WRITE_WITH_IMM
:
1164 if (unlikely(!rhp
->rdev
.lldi
.write_w_imm_support
)) {
1168 fw_flags
|= FW_RI_RDMA_WRITE_WITH_IMMEDIATE
;
1170 case IB_WR_RDMA_WRITE
:
1171 fw_opcode
= FW_RI_RDMA_WRITE_WR
;
1172 swsqe
->opcode
= FW_RI_RDMA_WRITE
;
1173 err
= build_rdma_write(&qhp
->wq
.sq
, wqe
, wr
, &len16
);
1175 case IB_WR_RDMA_READ
:
1176 case IB_WR_RDMA_READ_WITH_INV
:
1177 fw_opcode
= FW_RI_RDMA_READ_WR
;
1178 swsqe
->opcode
= FW_RI_READ_REQ
;
1179 if (wr
->opcode
== IB_WR_RDMA_READ_WITH_INV
) {
1180 c4iw_invalidate_mr(rhp
, wr
->sg_list
[0].lkey
);
1181 fw_flags
= FW_RI_RDMA_READ_INVALIDATE
;
1185 err
= build_rdma_read(wqe
, wr
, &len16
);
1188 swsqe
->read_len
= wr
->sg_list
[0].length
;
1189 if (!qhp
->wq
.sq
.oldest_read
)
1190 qhp
->wq
.sq
.oldest_read
= swsqe
;
1192 case IB_WR_REG_MR
: {
1193 struct c4iw_mr
*mhp
= to_c4iw_mr(reg_wr(wr
)->mr
);
1195 swsqe
->opcode
= FW_RI_FAST_REGISTER
;
1196 if (rhp
->rdev
.lldi
.fr_nsmr_tpte_wr_support
&&
1197 !mhp
->attr
.state
&& mhp
->mpl_len
<= 2) {
1198 fw_opcode
= FW_RI_FR_NSMR_TPTE_WR
;
1199 build_tpte_memreg(&wqe
->fr_tpte
, reg_wr(wr
),
1202 fw_opcode
= FW_RI_FR_NSMR_WR
;
1203 err
= build_memreg(&qhp
->wq
.sq
, wqe
, reg_wr(wr
),
1205 rhp
->rdev
.lldi
.ulptx_memwrite_dsgl
);
1209 mhp
->attr
.state
= 1;
1212 case IB_WR_LOCAL_INV
:
1213 if (wr
->send_flags
& IB_SEND_FENCE
)
1214 fw_flags
|= FW_RI_LOCAL_FENCE_FLAG
;
1215 fw_opcode
= FW_RI_INV_LSTAG_WR
;
1216 swsqe
->opcode
= FW_RI_LOCAL_INV
;
1217 err
= build_inv_stag(wqe
, wr
, &len16
);
1218 c4iw_invalidate_mr(rhp
, wr
->ex
.invalidate_rkey
);
1221 pr_warn("%s post of type=%d TBD!\n", __func__
,
1229 swsqe
->idx
= qhp
->wq
.sq
.pidx
;
1230 swsqe
->complete
= 0;
1231 swsqe
->signaled
= (wr
->send_flags
& IB_SEND_SIGNALED
) ||
1234 swsqe
->wr_id
= wr
->wr_id
;
1236 swsqe
->sge_ts
= cxgb4_read_sge_timestamp(
1237 rhp
->rdev
.lldi
.ports
[0]);
1238 swsqe
->host_time
= ktime_get();
1241 init_wr_hdr(wqe
, qhp
->wq
.sq
.pidx
, fw_opcode
, fw_flags
, len16
);
1243 pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
1244 (unsigned long long)wr
->wr_id
, qhp
->wq
.sq
.pidx
,
1245 swsqe
->opcode
, swsqe
->read_len
);
1248 t4_sq_produce(&qhp
->wq
, len16
);
1249 idx
+= DIV_ROUND_UP(len16
*16, T4_EQ_ENTRY_SIZE
);
1251 if (!rhp
->rdev
.status_page
->db_off
) {
1252 t4_ring_sq_db(&qhp
->wq
, idx
, wqe
);
1253 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1255 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1256 ring_kernel_sq_db(qhp
, idx
);
1261 int c4iw_post_receive(struct ib_qp
*ibqp
, const struct ib_recv_wr
*wr
,
1262 const struct ib_recv_wr
**bad_wr
)
1265 struct c4iw_qp
*qhp
;
1266 union t4_recv_wr
*wqe
= NULL
;
1272 qhp
= to_c4iw_qp(ibqp
);
1273 spin_lock_irqsave(&qhp
->lock
, flag
);
1276 * If the qp has been flushed, then just insert a special
1279 if (qhp
->wq
.flushed
) {
1280 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1281 complete_rq_drain_wrs(qhp
, wr
);
1284 num_wrs
= t4_rq_avail(&qhp
->wq
);
1286 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1291 if (wr
->num_sge
> T4_MAX_RECV_SGE
) {
1296 wqe
= (union t4_recv_wr
*)((u8
*)qhp
->wq
.rq
.queue
+
1297 qhp
->wq
.rq
.wq_pidx
*
1300 err
= build_rdma_recv(qhp
, wqe
, wr
, &len16
);
1308 qhp
->wq
.rq
.sw_rq
[qhp
->wq
.rq
.pidx
].wr_id
= wr
->wr_id
;
1310 qhp
->wq
.rq
.sw_rq
[qhp
->wq
.rq
.pidx
].sge_ts
=
1311 cxgb4_read_sge_timestamp(
1312 qhp
->rhp
->rdev
.lldi
.ports
[0]);
1313 qhp
->wq
.rq
.sw_rq
[qhp
->wq
.rq
.pidx
].host_time
=
1317 wqe
->recv
.opcode
= FW_RI_RECV_WR
;
1319 wqe
->recv
.wrid
= qhp
->wq
.rq
.pidx
;
1320 wqe
->recv
.r2
[0] = 0;
1321 wqe
->recv
.r2
[1] = 0;
1322 wqe
->recv
.r2
[2] = 0;
1323 wqe
->recv
.len16
= len16
;
1324 pr_debug("cookie 0x%llx pidx %u\n",
1325 (unsigned long long)wr
->wr_id
, qhp
->wq
.rq
.pidx
);
1326 t4_rq_produce(&qhp
->wq
, len16
);
1327 idx
+= DIV_ROUND_UP(len16
*16, T4_EQ_ENTRY_SIZE
);
1331 if (!qhp
->rhp
->rdev
.status_page
->db_off
) {
1332 t4_ring_rq_db(&qhp
->wq
, idx
, wqe
);
1333 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1335 spin_unlock_irqrestore(&qhp
->lock
, flag
);
1336 ring_kernel_rq_db(qhp
, idx
);
1341 static void defer_srq_wr(struct t4_srq
*srq
, union t4_recv_wr
*wqe
,
1342 u64 wr_id
, u8 len16
)
1344 struct t4_srq_pending_wr
*pwr
= &srq
->pending_wrs
[srq
->pending_pidx
];
1346 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1347 __func__
, srq
->cidx
, srq
->pidx
, srq
->wq_pidx
,
1348 srq
->in_use
, srq
->ooo_count
,
1349 (unsigned long long)wr_id
, srq
->pending_cidx
,
1350 srq
->pending_pidx
, srq
->pending_in_use
);
1353 memcpy(&pwr
->wqe
, wqe
, len16
* 16);
1354 t4_srq_produce_pending_wr(srq
);
1357 int c4iw_post_srq_recv(struct ib_srq
*ibsrq
, const struct ib_recv_wr
*wr
,
1358 const struct ib_recv_wr
**bad_wr
)
1360 union t4_recv_wr
*wqe
, lwqe
;
1361 struct c4iw_srq
*srq
;
1368 srq
= to_c4iw_srq(ibsrq
);
1369 spin_lock_irqsave(&srq
->lock
, flag
);
1370 num_wrs
= t4_srq_avail(&srq
->wq
);
1372 spin_unlock_irqrestore(&srq
->lock
, flag
);
1376 if (wr
->num_sge
> T4_MAX_RECV_SGE
) {
1383 err
= build_srq_recv(wqe
, wr
, &len16
);
1391 wqe
->recv
.opcode
= FW_RI_RECV_WR
;
1393 wqe
->recv
.wrid
= srq
->wq
.pidx
;
1394 wqe
->recv
.r2
[0] = 0;
1395 wqe
->recv
.r2
[1] = 0;
1396 wqe
->recv
.r2
[2] = 0;
1397 wqe
->recv
.len16
= len16
;
1399 if (srq
->wq
.ooo_count
||
1400 srq
->wq
.pending_in_use
||
1401 srq
->wq
.sw_rq
[srq
->wq
.pidx
].valid
) {
1402 defer_srq_wr(&srq
->wq
, wqe
, wr
->wr_id
, len16
);
1404 srq
->wq
.sw_rq
[srq
->wq
.pidx
].wr_id
= wr
->wr_id
;
1405 srq
->wq
.sw_rq
[srq
->wq
.pidx
].valid
= 1;
1406 c4iw_copy_wr_to_srq(&srq
->wq
, wqe
, len16
);
1407 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1408 __func__
, srq
->wq
.cidx
,
1409 srq
->wq
.pidx
, srq
->wq
.wq_pidx
,
1411 (unsigned long long)wr
->wr_id
);
1412 t4_srq_produce(&srq
->wq
, len16
);
1413 idx
+= DIV_ROUND_UP(len16
* 16, T4_EQ_ENTRY_SIZE
);
1419 t4_ring_srq_db(&srq
->wq
, idx
, len16
, wqe
);
1420 spin_unlock_irqrestore(&srq
->lock
, flag
);
1424 static inline void build_term_codes(struct t4_cqe
*err_cqe
, u8
*layer_type
,
1434 *layer_type
= LAYER_RDMAP
|DDP_LOCAL_CATA
;
1439 status
= CQE_STATUS(err_cqe
);
1440 opcode
= CQE_OPCODE(err_cqe
);
1441 rqtype
= RQ_TYPE(err_cqe
);
1442 send_inv
= (opcode
== FW_RI_SEND_WITH_INV
) ||
1443 (opcode
== FW_RI_SEND_WITH_SE_INV
);
1444 tagged
= (opcode
== FW_RI_RDMA_WRITE
) ||
1445 (rqtype
&& (opcode
== FW_RI_READ_RESP
));
1450 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_OP
;
1451 *ecode
= RDMAP_CANT_INV_STAG
;
1453 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1454 *ecode
= RDMAP_INV_STAG
;
1458 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1459 if ((opcode
== FW_RI_SEND_WITH_INV
) ||
1460 (opcode
== FW_RI_SEND_WITH_SE_INV
))
1461 *ecode
= RDMAP_CANT_INV_STAG
;
1463 *ecode
= RDMAP_STAG_NOT_ASSOC
;
1466 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1467 *ecode
= RDMAP_STAG_NOT_ASSOC
;
1470 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1471 *ecode
= RDMAP_ACC_VIOL
;
1474 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1475 *ecode
= RDMAP_TO_WRAP
;
1479 *layer_type
= LAYER_DDP
|DDP_TAGGED_ERR
;
1480 *ecode
= DDPT_BASE_BOUNDS
;
1482 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_PROT
;
1483 *ecode
= RDMAP_BASE_BOUNDS
;
1486 case T4_ERR_INVALIDATE_SHARED_MR
:
1487 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND
:
1488 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_OP
;
1489 *ecode
= RDMAP_CANT_INV_STAG
;
1492 case T4_ERR_ECC_PSTAG
:
1493 case T4_ERR_INTERNAL_ERR
:
1494 *layer_type
= LAYER_RDMAP
|RDMAP_LOCAL_CATA
;
1497 case T4_ERR_OUT_OF_RQE
:
1498 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1499 *ecode
= DDPU_INV_MSN_NOBUF
;
1501 case T4_ERR_PBL_ADDR_BOUND
:
1502 *layer_type
= LAYER_DDP
|DDP_TAGGED_ERR
;
1503 *ecode
= DDPT_BASE_BOUNDS
;
1506 *layer_type
= LAYER_MPA
|DDP_LLP
;
1507 *ecode
= MPA_CRC_ERR
;
1510 *layer_type
= LAYER_MPA
|DDP_LLP
;
1511 *ecode
= MPA_MARKER_ERR
;
1513 case T4_ERR_PDU_LEN_ERR
:
1514 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1515 *ecode
= DDPU_MSG_TOOBIG
;
1517 case T4_ERR_DDP_VERSION
:
1519 *layer_type
= LAYER_DDP
|DDP_TAGGED_ERR
;
1520 *ecode
= DDPT_INV_VERS
;
1522 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1523 *ecode
= DDPU_INV_VERS
;
1526 case T4_ERR_RDMA_VERSION
:
1527 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_OP
;
1528 *ecode
= RDMAP_INV_VERS
;
1531 *layer_type
= LAYER_RDMAP
|RDMAP_REMOTE_OP
;
1532 *ecode
= RDMAP_INV_OPCODE
;
1534 case T4_ERR_DDP_QUEUE_NUM
:
1535 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1536 *ecode
= DDPU_INV_QN
;
1539 case T4_ERR_MSN_GAP
:
1540 case T4_ERR_MSN_RANGE
:
1541 case T4_ERR_IRD_OVERFLOW
:
1542 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1543 *ecode
= DDPU_INV_MSN_RANGE
;
1546 *layer_type
= LAYER_DDP
|DDP_LOCAL_CATA
;
1550 *layer_type
= LAYER_DDP
|DDP_UNTAGGED_ERR
;
1551 *ecode
= DDPU_INV_MO
;
1554 *layer_type
= LAYER_RDMAP
|DDP_LOCAL_CATA
;
1560 static void post_terminate(struct c4iw_qp
*qhp
, struct t4_cqe
*err_cqe
,
1563 struct fw_ri_wr
*wqe
;
1564 struct sk_buff
*skb
;
1565 struct terminate_message
*term
;
1567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp
, qhp
->wq
.sq
.qid
,
1570 skb
= skb_dequeue(&qhp
->ep
->com
.ep_skb_list
);
1574 set_wr_txq(skb
, CPL_PRIORITY_DATA
, qhp
->ep
->txq_idx
);
1576 wqe
= __skb_put_zero(skb
, sizeof(*wqe
));
1577 wqe
->op_compl
= cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR
));
1578 wqe
->flowid_len16
= cpu_to_be32(
1579 FW_WR_FLOWID_V(qhp
->ep
->hwtid
) |
1580 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe
), 16)));
1582 wqe
->u
.terminate
.type
= FW_RI_TYPE_TERMINATE
;
1583 wqe
->u
.terminate
.immdlen
= cpu_to_be32(sizeof(*term
));
1584 term
= (struct terminate_message
*)wqe
->u
.terminate
.termmsg
;
1585 if (qhp
->attr
.layer_etype
== (LAYER_MPA
|DDP_LLP
)) {
1586 term
->layer_etype
= qhp
->attr
.layer_etype
;
1587 term
->ecode
= qhp
->attr
.ecode
;
1589 build_term_codes(err_cqe
, &term
->layer_etype
, &term
->ecode
);
1590 c4iw_ofld_send(&qhp
->rhp
->rdev
, skb
);
1594 * Assumes qhp lock is held.
1596 static void __flush_qp(struct c4iw_qp
*qhp
, struct c4iw_cq
*rchp
,
1597 struct c4iw_cq
*schp
)
1600 int rq_flushed
= 0, sq_flushed
;
1603 pr_debug("qhp %p rchp %p schp %p\n", qhp
, rchp
, schp
);
1605 /* locking hierarchy: cqs lock first, then qp lock. */
1606 spin_lock_irqsave(&rchp
->lock
, flag
);
1608 spin_lock(&schp
->lock
);
1609 spin_lock(&qhp
->lock
);
1611 if (qhp
->wq
.flushed
) {
1612 spin_unlock(&qhp
->lock
);
1614 spin_unlock(&schp
->lock
);
1615 spin_unlock_irqrestore(&rchp
->lock
, flag
);
1618 qhp
->wq
.flushed
= 1;
1619 t4_set_wq_in_error(&qhp
->wq
, 0);
1621 c4iw_flush_hw_cq(rchp
, qhp
);
1623 c4iw_count_rcqes(&rchp
->cq
, &qhp
->wq
, &count
);
1624 rq_flushed
= c4iw_flush_rq(&qhp
->wq
, &rchp
->cq
, count
);
1628 c4iw_flush_hw_cq(schp
, qhp
);
1629 sq_flushed
= c4iw_flush_sq(qhp
);
1631 spin_unlock(&qhp
->lock
);
1633 spin_unlock(&schp
->lock
);
1634 spin_unlock_irqrestore(&rchp
->lock
, flag
);
1637 if ((rq_flushed
|| sq_flushed
) &&
1638 t4_clear_cq_armed(&rchp
->cq
)) {
1639 spin_lock_irqsave(&rchp
->comp_handler_lock
, flag
);
1640 (*rchp
->ibcq
.comp_handler
)(&rchp
->ibcq
,
1641 rchp
->ibcq
.cq_context
);
1642 spin_unlock_irqrestore(&rchp
->comp_handler_lock
, flag
);
1645 if (rq_flushed
&& t4_clear_cq_armed(&rchp
->cq
)) {
1646 spin_lock_irqsave(&rchp
->comp_handler_lock
, flag
);
1647 (*rchp
->ibcq
.comp_handler
)(&rchp
->ibcq
,
1648 rchp
->ibcq
.cq_context
);
1649 spin_unlock_irqrestore(&rchp
->comp_handler_lock
, flag
);
1651 if (sq_flushed
&& t4_clear_cq_armed(&schp
->cq
)) {
1652 spin_lock_irqsave(&schp
->comp_handler_lock
, flag
);
1653 (*schp
->ibcq
.comp_handler
)(&schp
->ibcq
,
1654 schp
->ibcq
.cq_context
);
1655 spin_unlock_irqrestore(&schp
->comp_handler_lock
, flag
);
1660 static void flush_qp(struct c4iw_qp
*qhp
)
1662 struct c4iw_cq
*rchp
, *schp
;
1665 rchp
= to_c4iw_cq(qhp
->ibqp
.recv_cq
);
1666 schp
= to_c4iw_cq(qhp
->ibqp
.send_cq
);
1668 if (qhp
->ibqp
.uobject
) {
1670 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1671 if (qhp
->wq
.flushed
)
1674 qhp
->wq
.flushed
= 1;
1675 t4_set_wq_in_error(&qhp
->wq
, 0);
1676 t4_set_cq_in_error(&rchp
->cq
);
1677 spin_lock_irqsave(&rchp
->comp_handler_lock
, flag
);
1678 (*rchp
->ibcq
.comp_handler
)(&rchp
->ibcq
, rchp
->ibcq
.cq_context
);
1679 spin_unlock_irqrestore(&rchp
->comp_handler_lock
, flag
);
1681 t4_set_cq_in_error(&schp
->cq
);
1682 spin_lock_irqsave(&schp
->comp_handler_lock
, flag
);
1683 (*schp
->ibcq
.comp_handler
)(&schp
->ibcq
,
1684 schp
->ibcq
.cq_context
);
1685 spin_unlock_irqrestore(&schp
->comp_handler_lock
, flag
);
1689 __flush_qp(qhp
, rchp
, schp
);
1692 static int rdma_fini(struct c4iw_dev
*rhp
, struct c4iw_qp
*qhp
,
1695 struct fw_ri_wr
*wqe
;
1697 struct sk_buff
*skb
;
1699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp
, qhp
->wq
.sq
.qid
, ep
->hwtid
);
1701 skb
= skb_dequeue(&ep
->com
.ep_skb_list
);
1705 set_wr_txq(skb
, CPL_PRIORITY_DATA
, ep
->txq_idx
);
1707 wqe
= __skb_put_zero(skb
, sizeof(*wqe
));
1708 wqe
->op_compl
= cpu_to_be32(
1709 FW_WR_OP_V(FW_RI_INIT_WR
) |
1711 wqe
->flowid_len16
= cpu_to_be32(
1712 FW_WR_FLOWID_V(ep
->hwtid
) |
1713 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe
), 16)));
1714 wqe
->cookie
= (uintptr_t)ep
->com
.wr_waitp
;
1716 wqe
->u
.fini
.type
= FW_RI_TYPE_FINI
;
1718 ret
= c4iw_ref_send_wait(&rhp
->rdev
, skb
, ep
->com
.wr_waitp
,
1719 qhp
->ep
->hwtid
, qhp
->wq
.sq
.qid
, __func__
);
1721 pr_debug("ret %d\n", ret
);
1725 static void build_rtr_msg(u8 p2p_type
, struct fw_ri_init
*init
)
1727 pr_debug("p2p_type = %d\n", p2p_type
);
1728 memset(&init
->u
, 0, sizeof(init
->u
));
1730 case FW_RI_INIT_P2PTYPE_RDMA_WRITE
:
1731 init
->u
.write
.opcode
= FW_RI_RDMA_WRITE_WR
;
1732 init
->u
.write
.stag_sink
= cpu_to_be32(1);
1733 init
->u
.write
.to_sink
= cpu_to_be64(1);
1734 init
->u
.write
.u
.immd_src
[0].op
= FW_RI_DATA_IMMD
;
1735 init
->u
.write
.len16
= DIV_ROUND_UP(
1736 sizeof(init
->u
.write
) + sizeof(struct fw_ri_immd
), 16);
1738 case FW_RI_INIT_P2PTYPE_READ_REQ
:
1739 init
->u
.write
.opcode
= FW_RI_RDMA_READ_WR
;
1740 init
->u
.read
.stag_src
= cpu_to_be32(1);
1741 init
->u
.read
.to_src_lo
= cpu_to_be32(1);
1742 init
->u
.read
.stag_sink
= cpu_to_be32(1);
1743 init
->u
.read
.to_sink_lo
= cpu_to_be32(1);
1744 init
->u
.read
.len16
= DIV_ROUND_UP(sizeof(init
->u
.read
), 16);
1749 static int rdma_init(struct c4iw_dev
*rhp
, struct c4iw_qp
*qhp
)
1751 struct fw_ri_wr
*wqe
;
1753 struct sk_buff
*skb
;
1755 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp
,
1756 qhp
->wq
.sq
.qid
, qhp
->ep
->hwtid
, qhp
->ep
->ird
, qhp
->ep
->ord
);
1758 skb
= alloc_skb(sizeof(*wqe
), GFP_KERNEL
);
1763 ret
= alloc_ird(rhp
, qhp
->attr
.max_ird
);
1765 qhp
->attr
.max_ird
= 0;
1769 set_wr_txq(skb
, CPL_PRIORITY_DATA
, qhp
->ep
->txq_idx
);
1771 wqe
= __skb_put_zero(skb
, sizeof(*wqe
));
1772 wqe
->op_compl
= cpu_to_be32(
1773 FW_WR_OP_V(FW_RI_INIT_WR
) |
1775 wqe
->flowid_len16
= cpu_to_be32(
1776 FW_WR_FLOWID_V(qhp
->ep
->hwtid
) |
1777 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe
), 16)));
1779 wqe
->cookie
= (uintptr_t)qhp
->ep
->com
.wr_waitp
;
1781 wqe
->u
.init
.type
= FW_RI_TYPE_INIT
;
1782 wqe
->u
.init
.mpareqbit_p2ptype
=
1783 FW_RI_WR_MPAREQBIT_V(qhp
->attr
.mpa_attr
.initiator
) |
1784 FW_RI_WR_P2PTYPE_V(qhp
->attr
.mpa_attr
.p2p_type
);
1785 wqe
->u
.init
.mpa_attrs
= FW_RI_MPA_IETF_ENABLE
;
1786 if (qhp
->attr
.mpa_attr
.recv_marker_enabled
)
1787 wqe
->u
.init
.mpa_attrs
|= FW_RI_MPA_RX_MARKER_ENABLE
;
1788 if (qhp
->attr
.mpa_attr
.xmit_marker_enabled
)
1789 wqe
->u
.init
.mpa_attrs
|= FW_RI_MPA_TX_MARKER_ENABLE
;
1790 if (qhp
->attr
.mpa_attr
.crc_enabled
)
1791 wqe
->u
.init
.mpa_attrs
|= FW_RI_MPA_CRC_ENABLE
;
1793 wqe
->u
.init
.qp_caps
= FW_RI_QP_RDMA_READ_ENABLE
|
1794 FW_RI_QP_RDMA_WRITE_ENABLE
|
1795 FW_RI_QP_BIND_ENABLE
;
1796 if (!qhp
->ibqp
.uobject
)
1797 wqe
->u
.init
.qp_caps
|= FW_RI_QP_FAST_REGISTER_ENABLE
|
1798 FW_RI_QP_STAG0_ENABLE
;
1799 wqe
->u
.init
.nrqe
= cpu_to_be16(t4_rqes_posted(&qhp
->wq
));
1800 wqe
->u
.init
.pdid
= cpu_to_be32(qhp
->attr
.pd
);
1801 wqe
->u
.init
.qpid
= cpu_to_be32(qhp
->wq
.sq
.qid
);
1802 wqe
->u
.init
.sq_eqid
= cpu_to_be32(qhp
->wq
.sq
.qid
);
1804 wqe
->u
.init
.rq_eqid
= cpu_to_be32(FW_RI_INIT_RQEQID_SRQ
|
1807 wqe
->u
.init
.rq_eqid
= cpu_to_be32(qhp
->wq
.rq
.qid
);
1808 wqe
->u
.init
.hwrqsize
= cpu_to_be32(qhp
->wq
.rq
.rqt_size
);
1809 wqe
->u
.init
.hwrqaddr
= cpu_to_be32(qhp
->wq
.rq
.rqt_hwaddr
-
1810 rhp
->rdev
.lldi
.vr
->rq
.start
);
1812 wqe
->u
.init
.scqid
= cpu_to_be32(qhp
->attr
.scq
);
1813 wqe
->u
.init
.rcqid
= cpu_to_be32(qhp
->attr
.rcq
);
1814 wqe
->u
.init
.ord_max
= cpu_to_be32(qhp
->attr
.max_ord
);
1815 wqe
->u
.init
.ird_max
= cpu_to_be32(qhp
->attr
.max_ird
);
1816 wqe
->u
.init
.iss
= cpu_to_be32(qhp
->ep
->snd_seq
);
1817 wqe
->u
.init
.irs
= cpu_to_be32(qhp
->ep
->rcv_seq
);
1818 if (qhp
->attr
.mpa_attr
.initiator
)
1819 build_rtr_msg(qhp
->attr
.mpa_attr
.p2p_type
, &wqe
->u
.init
);
1821 ret
= c4iw_ref_send_wait(&rhp
->rdev
, skb
, qhp
->ep
->com
.wr_waitp
,
1822 qhp
->ep
->hwtid
, qhp
->wq
.sq
.qid
, __func__
);
1826 free_ird(rhp
, qhp
->attr
.max_ird
);
1828 pr_debug("ret %d\n", ret
);
1832 int c4iw_modify_qp(struct c4iw_dev
*rhp
, struct c4iw_qp
*qhp
,
1833 enum c4iw_qp_attr_mask mask
,
1834 struct c4iw_qp_attributes
*attrs
,
1838 struct c4iw_qp_attributes newattr
= qhp
->attr
;
1843 struct c4iw_ep
*ep
= NULL
;
1845 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
1846 qhp
, qhp
->wq
.sq
.qid
, qhp
->wq
.rq
.qid
, qhp
->ep
, qhp
->attr
.state
,
1847 (mask
& C4IW_QP_ATTR_NEXT_STATE
) ? attrs
->next_state
: -1);
1849 mutex_lock(&qhp
->mutex
);
1851 /* Process attr changes if in IDLE */
1852 if (mask
& C4IW_QP_ATTR_VALID_MODIFY
) {
1853 if (qhp
->attr
.state
!= C4IW_QP_STATE_IDLE
) {
1857 if (mask
& C4IW_QP_ATTR_ENABLE_RDMA_READ
)
1858 newattr
.enable_rdma_read
= attrs
->enable_rdma_read
;
1859 if (mask
& C4IW_QP_ATTR_ENABLE_RDMA_WRITE
)
1860 newattr
.enable_rdma_write
= attrs
->enable_rdma_write
;
1861 if (mask
& C4IW_QP_ATTR_ENABLE_RDMA_BIND
)
1862 newattr
.enable_bind
= attrs
->enable_bind
;
1863 if (mask
& C4IW_QP_ATTR_MAX_ORD
) {
1864 if (attrs
->max_ord
> c4iw_max_read_depth
) {
1868 newattr
.max_ord
= attrs
->max_ord
;
1870 if (mask
& C4IW_QP_ATTR_MAX_IRD
) {
1871 if (attrs
->max_ird
> cur_max_read_depth(rhp
)) {
1875 newattr
.max_ird
= attrs
->max_ird
;
1877 qhp
->attr
= newattr
;
1880 if (mask
& C4IW_QP_ATTR_SQ_DB
) {
1881 ret
= ring_kernel_sq_db(qhp
, attrs
->sq_db_inc
);
1884 if (mask
& C4IW_QP_ATTR_RQ_DB
) {
1885 ret
= ring_kernel_rq_db(qhp
, attrs
->rq_db_inc
);
1889 if (!(mask
& C4IW_QP_ATTR_NEXT_STATE
))
1891 if (qhp
->attr
.state
== attrs
->next_state
)
1894 switch (qhp
->attr
.state
) {
1895 case C4IW_QP_STATE_IDLE
:
1896 switch (attrs
->next_state
) {
1897 case C4IW_QP_STATE_RTS
:
1898 if (!(mask
& C4IW_QP_ATTR_LLP_STREAM_HANDLE
)) {
1902 if (!(mask
& C4IW_QP_ATTR_MPA_ATTR
)) {
1906 qhp
->attr
.mpa_attr
= attrs
->mpa_attr
;
1907 qhp
->attr
.llp_stream_handle
= attrs
->llp_stream_handle
;
1908 qhp
->ep
= qhp
->attr
.llp_stream_handle
;
1909 set_state(qhp
, C4IW_QP_STATE_RTS
);
1912 * Ref the endpoint here and deref when we
1913 * disassociate the endpoint from the QP. This
1914 * happens in CLOSING->IDLE transition or *->ERROR
1917 c4iw_get_ep(&qhp
->ep
->com
);
1918 ret
= rdma_init(rhp
, qhp
);
1922 case C4IW_QP_STATE_ERROR
:
1923 set_state(qhp
, C4IW_QP_STATE_ERROR
);
1931 case C4IW_QP_STATE_RTS
:
1932 switch (attrs
->next_state
) {
1933 case C4IW_QP_STATE_CLOSING
:
1934 t4_set_wq_in_error(&qhp
->wq
, 0);
1935 set_state(qhp
, C4IW_QP_STATE_CLOSING
);
1940 c4iw_get_ep(&qhp
->ep
->com
);
1942 ret
= rdma_fini(rhp
, qhp
, ep
);
1946 case C4IW_QP_STATE_TERMINATE
:
1947 t4_set_wq_in_error(&qhp
->wq
, 0);
1948 set_state(qhp
, C4IW_QP_STATE_TERMINATE
);
1949 qhp
->attr
.layer_etype
= attrs
->layer_etype
;
1950 qhp
->attr
.ecode
= attrs
->ecode
;
1953 c4iw_get_ep(&ep
->com
);
1957 terminate
= qhp
->attr
.send_term
;
1958 ret
= rdma_fini(rhp
, qhp
, ep
);
1963 case C4IW_QP_STATE_ERROR
:
1964 t4_set_wq_in_error(&qhp
->wq
, 0);
1965 set_state(qhp
, C4IW_QP_STATE_ERROR
);
1969 c4iw_get_ep(&qhp
->ep
->com
);
1978 case C4IW_QP_STATE_CLOSING
:
1981 * Allow kernel users to move to ERROR for qp draining.
1983 if (!internal
&& (qhp
->ibqp
.uobject
|| attrs
->next_state
!=
1984 C4IW_QP_STATE_ERROR
)) {
1988 switch (attrs
->next_state
) {
1989 case C4IW_QP_STATE_IDLE
:
1991 set_state(qhp
, C4IW_QP_STATE_IDLE
);
1992 qhp
->attr
.llp_stream_handle
= NULL
;
1993 c4iw_put_ep(&qhp
->ep
->com
);
1995 wake_up(&qhp
->wait
);
1997 case C4IW_QP_STATE_ERROR
:
2004 case C4IW_QP_STATE_ERROR
:
2005 if (attrs
->next_state
!= C4IW_QP_STATE_IDLE
) {
2009 if (!t4_sq_empty(&qhp
->wq
) || !t4_rq_empty(&qhp
->wq
)) {
2013 set_state(qhp
, C4IW_QP_STATE_IDLE
);
2015 case C4IW_QP_STATE_TERMINATE
:
2023 pr_err("%s in a bad state %d\n", __func__
, qhp
->attr
.state
);
2030 pr_debug("disassociating ep %p qpid 0x%x\n", qhp
->ep
,
2033 /* disassociate the LLP connection */
2034 qhp
->attr
.llp_stream_handle
= NULL
;
2038 set_state(qhp
, C4IW_QP_STATE_ERROR
);
2042 wake_up(&qhp
->wait
);
2044 mutex_unlock(&qhp
->mutex
);
2047 post_terminate(qhp
, NULL
, internal
? GFP_ATOMIC
: GFP_KERNEL
);
2050 * If disconnect is 1, then we need to initiate a disconnect
2051 * on the EP. This can be a normal close (RTS->CLOSING) or
2052 * an abnormal close (RTS/CLOSING->ERROR).
2055 c4iw_ep_disconnect(ep
, abort
, internal
? GFP_ATOMIC
:
2057 c4iw_put_ep(&ep
->com
);
2061 * If free is 1, then we've disassociated the EP from the QP
2062 * and we need to dereference the EP.
2065 c4iw_put_ep(&ep
->com
);
2066 pr_debug("exit state %d\n", qhp
->attr
.state
);
2070 int c4iw_destroy_qp(struct ib_qp
*ib_qp
, struct ib_udata
*udata
)
2072 struct c4iw_dev
*rhp
;
2073 struct c4iw_qp
*qhp
;
2074 struct c4iw_ucontext
*ucontext
;
2075 struct c4iw_qp_attributes attrs
;
2077 qhp
= to_c4iw_qp(ib_qp
);
2079 ucontext
= qhp
->ucontext
;
2081 attrs
.next_state
= C4IW_QP_STATE_ERROR
;
2082 if (qhp
->attr
.state
== C4IW_QP_STATE_TERMINATE
)
2083 c4iw_modify_qp(rhp
, qhp
, C4IW_QP_ATTR_NEXT_STATE
, &attrs
, 1);
2085 c4iw_modify_qp(rhp
, qhp
, C4IW_QP_ATTR_NEXT_STATE
, &attrs
, 0);
2086 wait_event(qhp
->wait
, !qhp
->ep
);
2088 xa_lock_irq(&rhp
->qps
);
2089 __xa_erase(&rhp
->qps
, qhp
->wq
.sq
.qid
);
2090 if (!list_empty(&qhp
->db_fc_entry
))
2091 list_del_init(&qhp
->db_fc_entry
);
2092 xa_unlock_irq(&rhp
->qps
);
2093 free_ird(rhp
, qhp
->attr
.max_ird
);
2095 c4iw_qp_rem_ref(ib_qp
);
2097 wait_for_completion(&qhp
->qp_rel_comp
);
2099 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp
, qhp
->wq
.sq
.qid
);
2100 pr_debug("qhp %p ucontext %p\n", qhp
, ucontext
);
2102 destroy_qp(&rhp
->rdev
, &qhp
->wq
,
2103 ucontext
? &ucontext
->uctx
: &rhp
->rdev
.uctx
, !qhp
->srq
);
2105 c4iw_put_wr_wait(qhp
->wr_waitp
);
2109 int c4iw_create_qp(struct ib_qp
*qp
, struct ib_qp_init_attr
*attrs
,
2110 struct ib_udata
*udata
)
2112 struct ib_pd
*pd
= qp
->pd
;
2113 struct c4iw_dev
*rhp
;
2114 struct c4iw_qp
*qhp
= to_c4iw_qp(qp
);
2115 struct c4iw_pd
*php
;
2116 struct c4iw_cq
*schp
;
2117 struct c4iw_cq
*rchp
;
2118 struct c4iw_create_qp_resp uresp
;
2119 unsigned int sqsize
, rqsize
= 0;
2120 struct c4iw_ucontext
*ucontext
= rdma_udata_to_drv_context(
2121 udata
, struct c4iw_ucontext
, ibucontext
);
2123 struct c4iw_mm_entry
*sq_key_mm
, *rq_key_mm
= NULL
, *sq_db_key_mm
;
2124 struct c4iw_mm_entry
*rq_db_key_mm
= NULL
, *ma_sync_key_mm
= NULL
;
2126 if (attrs
->qp_type
!= IB_QPT_RC
|| attrs
->create_flags
)
2129 php
= to_c4iw_pd(pd
);
2131 schp
= get_chp(rhp
, ((struct c4iw_cq
*)attrs
->send_cq
)->cq
.cqid
);
2132 rchp
= get_chp(rhp
, ((struct c4iw_cq
*)attrs
->recv_cq
)->cq
.cqid
);
2136 if (attrs
->cap
.max_inline_data
> T4_MAX_SEND_INLINE
)
2140 if (attrs
->cap
.max_recv_wr
> rhp
->rdev
.hw_queue
.t4_max_rq_size
)
2142 rqsize
= attrs
->cap
.max_recv_wr
+ 1;
2147 if (attrs
->cap
.max_send_wr
> rhp
->rdev
.hw_queue
.t4_max_sq_size
)
2149 sqsize
= attrs
->cap
.max_send_wr
+ 1;
2153 qhp
->wr_waitp
= c4iw_alloc_wr_wait(GFP_KERNEL
);
2157 qhp
->wq
.sq
.size
= sqsize
;
2158 qhp
->wq
.sq
.memsize
=
2159 (sqsize
+ rhp
->rdev
.hw_queue
.t4_eq_status_entries
) *
2160 sizeof(*qhp
->wq
.sq
.queue
) + 16 * sizeof(__be64
);
2161 qhp
->wq
.sq
.flush_cidx
= -1;
2163 qhp
->wq
.rq
.size
= rqsize
;
2164 qhp
->wq
.rq
.memsize
=
2165 (rqsize
+ rhp
->rdev
.hw_queue
.t4_eq_status_entries
) *
2166 sizeof(*qhp
->wq
.rq
.queue
);
2170 qhp
->wq
.sq
.memsize
= roundup(qhp
->wq
.sq
.memsize
, PAGE_SIZE
);
2172 qhp
->wq
.rq
.memsize
=
2173 roundup(qhp
->wq
.rq
.memsize
, PAGE_SIZE
);
2176 ret
= create_qp(&rhp
->rdev
, &qhp
->wq
, &schp
->cq
, &rchp
->cq
,
2177 ucontext
? &ucontext
->uctx
: &rhp
->rdev
.uctx
,
2178 qhp
->wr_waitp
, !attrs
->srq
);
2180 goto err_free_wr_wait
;
2182 attrs
->cap
.max_recv_wr
= rqsize
- 1;
2183 attrs
->cap
.max_send_wr
= sqsize
- 1;
2184 attrs
->cap
.max_inline_data
= T4_MAX_SEND_INLINE
;
2187 qhp
->attr
.pd
= php
->pdid
;
2188 qhp
->attr
.scq
= ((struct c4iw_cq
*) attrs
->send_cq
)->cq
.cqid
;
2189 qhp
->attr
.rcq
= ((struct c4iw_cq
*) attrs
->recv_cq
)->cq
.cqid
;
2190 qhp
->attr
.sq_num_entries
= attrs
->cap
.max_send_wr
;
2191 qhp
->attr
.sq_max_sges
= attrs
->cap
.max_send_sge
;
2192 qhp
->attr
.sq_max_sges_rdma_write
= attrs
->cap
.max_send_sge
;
2194 qhp
->attr
.rq_num_entries
= attrs
->cap
.max_recv_wr
;
2195 qhp
->attr
.rq_max_sges
= attrs
->cap
.max_recv_sge
;
2197 qhp
->attr
.state
= C4IW_QP_STATE_IDLE
;
2198 qhp
->attr
.next_state
= C4IW_QP_STATE_IDLE
;
2199 qhp
->attr
.enable_rdma_read
= 1;
2200 qhp
->attr
.enable_rdma_write
= 1;
2201 qhp
->attr
.enable_bind
= 1;
2202 qhp
->attr
.max_ord
= 0;
2203 qhp
->attr
.max_ird
= 0;
2204 qhp
->sq_sig_all
= attrs
->sq_sig_type
== IB_SIGNAL_ALL_WR
;
2205 spin_lock_init(&qhp
->lock
);
2206 mutex_init(&qhp
->mutex
);
2207 init_waitqueue_head(&qhp
->wait
);
2208 init_completion(&qhp
->qp_rel_comp
);
2209 refcount_set(&qhp
->qp_refcnt
, 1);
2211 ret
= xa_insert_irq(&rhp
->qps
, qhp
->wq
.sq
.qid
, qhp
, GFP_KERNEL
);
2213 goto err_destroy_qp
;
2215 if (udata
&& ucontext
) {
2216 sq_key_mm
= kmalloc(sizeof(*sq_key_mm
), GFP_KERNEL
);
2219 goto err_remove_handle
;
2222 rq_key_mm
= kmalloc(sizeof(*rq_key_mm
), GFP_KERNEL
);
2225 goto err_free_sq_key
;
2228 sq_db_key_mm
= kmalloc(sizeof(*sq_db_key_mm
), GFP_KERNEL
);
2229 if (!sq_db_key_mm
) {
2231 goto err_free_rq_key
;
2235 kmalloc(sizeof(*rq_db_key_mm
), GFP_KERNEL
);
2236 if (!rq_db_key_mm
) {
2238 goto err_free_sq_db_key
;
2241 memset(&uresp
, 0, sizeof(uresp
));
2242 if (t4_sq_onchip(&qhp
->wq
.sq
)) {
2243 ma_sync_key_mm
= kmalloc(sizeof(*ma_sync_key_mm
),
2245 if (!ma_sync_key_mm
) {
2247 goto err_free_rq_db_key
;
2249 uresp
.flags
= C4IW_QPF_ONCHIP
;
2251 if (rhp
->rdev
.lldi
.write_w_imm_support
)
2252 uresp
.flags
|= C4IW_QPF_WRITE_W_IMM
;
2253 uresp
.qid_mask
= rhp
->rdev
.qpmask
;
2254 uresp
.sqid
= qhp
->wq
.sq
.qid
;
2255 uresp
.sq_size
= qhp
->wq
.sq
.size
;
2256 uresp
.sq_memsize
= qhp
->wq
.sq
.memsize
;
2258 uresp
.rqid
= qhp
->wq
.rq
.qid
;
2259 uresp
.rq_size
= qhp
->wq
.rq
.size
;
2260 uresp
.rq_memsize
= qhp
->wq
.rq
.memsize
;
2262 spin_lock(&ucontext
->mmap_lock
);
2263 if (ma_sync_key_mm
) {
2264 uresp
.ma_sync_key
= ucontext
->key
;
2265 ucontext
->key
+= PAGE_SIZE
;
2267 uresp
.sq_key
= ucontext
->key
;
2268 ucontext
->key
+= PAGE_SIZE
;
2270 uresp
.rq_key
= ucontext
->key
;
2271 ucontext
->key
+= PAGE_SIZE
;
2273 uresp
.sq_db_gts_key
= ucontext
->key
;
2274 ucontext
->key
+= PAGE_SIZE
;
2276 uresp
.rq_db_gts_key
= ucontext
->key
;
2277 ucontext
->key
+= PAGE_SIZE
;
2279 spin_unlock(&ucontext
->mmap_lock
);
2280 ret
= ib_copy_to_udata(udata
, &uresp
, sizeof(uresp
));
2282 goto err_free_ma_sync_key
;
                sq_key_mm->key = uresp.sq_key;
                sq_key_mm->addr = 0;
                sq_key_mm->vaddr = qhp->wq.sq.queue;
                sq_key_mm->dma_addr = qhp->wq.sq.dma_addr;
                sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
                insert_flag_to_mmap(&rhp->rdev, sq_key_mm, sq_key_mm->addr);
                insert_mmap(ucontext, sq_key_mm);
                rq_key_mm->key = uresp.rq_key;
                rq_key_mm->addr = 0;
                rq_key_mm->vaddr = qhp->wq.rq.queue;
                rq_key_mm->dma_addr = qhp->wq.rq.dma_addr;
                rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
                insert_flag_to_mmap(&rhp->rdev, rq_key_mm,
                                    rq_key_mm->addr);
                insert_mmap(ucontext, rq_key_mm);
                sq_db_key_mm->key = uresp.sq_db_gts_key;
                sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
                sq_db_key_mm->vaddr = NULL;
                sq_db_key_mm->dma_addr = 0;
                sq_db_key_mm->len = PAGE_SIZE;
                insert_flag_to_mmap(&rhp->rdev, sq_db_key_mm,
                                    sq_db_key_mm->addr);
                insert_mmap(ucontext, sq_db_key_mm);
                rq_db_key_mm->key = uresp.rq_db_gts_key;
                rq_db_key_mm->addr =
                        (u64)(unsigned long)qhp->wq.rq.bar2_pa;
                rq_db_key_mm->len = PAGE_SIZE;
                rq_db_key_mm->vaddr = NULL;
                rq_db_key_mm->dma_addr = 0;
                insert_flag_to_mmap(&rhp->rdev, rq_db_key_mm,
                                    rq_db_key_mm->addr);
                insert_mmap(ucontext, rq_db_key_mm);
                if (ma_sync_key_mm) {
                        ma_sync_key_mm->key = uresp.ma_sync_key;
                        ma_sync_key_mm->addr =
                                (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
                                 PCIE_MA_SYNC_A) & PAGE_MASK;
                        ma_sync_key_mm->len = PAGE_SIZE;
                        ma_sync_key_mm->vaddr = NULL;
                        ma_sync_key_mm->dma_addr = 0;
                        insert_flag_to_mmap(&rhp->rdev, ma_sync_key_mm,
                                            ma_sync_key_mm->addr);
                        insert_mmap(ucontext, ma_sync_key_mm);
                }

                qhp->ucontext = ucontext;
        }
        if (!attrs->srq) {
                qhp->wq.qp_errp =
                        &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
        } else {
                qhp->wq.qp_errp =
                        &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
                qhp->wq.srqidxp =
                        &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
        }

        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        if (attrs->srq)
                qhp->srq = to_c4iw_srq(attrs->srq);
        INIT_LIST_HEAD(&qhp->db_fc_entry);
        pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
                 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
                 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
                 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
        return 0;
err_free_ma_sync_key:
        kfree(ma_sync_key_mm);
err_free_rq_db_key:
        kfree(rq_db_key_mm);
err_free_sq_db_key:
        kfree(sq_db_key_mm);
err_free_rq_key:
        kfree(rq_key_mm);
err_free_sq_key:
        kfree(sq_key_mm);
err_remove_handle:
        xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
err_destroy_qp:
        destroy_qp(&rhp->rdev, &qhp->wq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
err_free_wr_wait:
        c4iw_put_wr_wait(qhp->wr_waitp);
        return ret;
}
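
/*
 * c4iw_ib_modify_qp() - translate the IB verbs attribute mask into the
 * driver's c4iw_qp_attributes and hand the request to c4iw_modify_qp().
 */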
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_qp *qhp;
        enum c4iw_qp_attr_mask mask = 0;
        struct c4iw_qp_attributes attrs = {};

        pr_debug("ib_qp %p\n", ibqp);

        if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
                return -EOPNOTSUPP;

        /* iwarp does not support the RTR state */
        if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
                attr_mask &= ~IB_QP_STATE;

        /* Make sure we still have something left to do */
        if (!attr_mask)
                return 0;

        qhp = to_c4iw_qp(ibqp);
        rhp = qhp->rhp;

        attrs.next_state = c4iw_convert_state(attr->qp_state);
        attrs.enable_rdma_read = (attr->qp_access_flags &
                                  IB_ACCESS_REMOTE_READ) ? 1 : 0;
        attrs.enable_rdma_write = (attr->qp_access_flags &
                                   IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;

        mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
        mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
                        (C4IW_QP_ATTR_ENABLE_RDMA_READ |
                         C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
                         C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;

        /*
         * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
         * ringing the queue db when we're in DB_FULL mode.
         * Only allow this on T4 devices.
         */
        attrs.sq_db_inc = attr->sq_psn;
        attrs.rq_db_inc = attr->rq_psn;
        mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
        mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
        if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
            (mask & (C4IW_QP_ATTR_SQ_DB | C4IW_QP_ATTR_RQ_DB)))
                return -EINVAL;

        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
}

struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
{
        pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
        return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
}
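
/*
 * Post an IB_EVENT_SRQ_LIMIT_REACHED async event for the given SRQ so any
 * registered event handlers on the device are notified.
 */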
void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
        struct ib_event event = {};

        event.device = &srq->rhp->ibdev;
        event.element.srq = &srq->ibsrq;
        event.event = IB_EVENT_SRQ_LIMIT_REACHED;
        ib_dispatch_event(&event);
}

int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
                    enum ib_srq_attr_mask srq_attr_mask,
                    struct ib_udata *udata)
{
        struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
        int ret = 0;

        /*
         * XXX 0 mask == a SW interrupt for srq_limit reached...
         */
        if (udata && !srq_attr_mask) {
                c4iw_dispatch_srq_limit_reached_event(srq);
                goto out;
        }

        /* no support for this yet */
        if (srq_attr_mask & IB_SRQ_MAX_WR) {
                ret = -EINVAL;
                goto out;
        }

        if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
                srq->srq_limit = attr->srq_limit;
        }
out:
        return ret;
}

int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct c4iw_qp *qhp = to_c4iw_qp(ibqp);

        memset(attr, 0, sizeof(*attr));
        memset(init_attr, 0, sizeof(*init_attr));
        attr->qp_state = to_ib_qp_state(qhp->attr.state);
        attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
        init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
        init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
        init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
        init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
        init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
        init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
        return 0;
}
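
/*
 * Tear down an SRQ's HW resources: issue a FW_RI_RES_WR with
 * FW_RI_RES_OP_RESET to the firmware, then free the queue memory, the RQT
 * entries and the qid.
 */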
static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
                           struct c4iw_wr_wait *wr_waitp)
{
        struct c4iw_rdev *rdev = &srq->rhp->rdev;
        struct sk_buff *skb = srq->destroy_skb;
        struct t4_srq *wq = &srq->wq;
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;

        wr_len = sizeof(*res_wr) + sizeof(*res);
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
                                      FW_RI_RES_WR_NRES_V(1) |
                                      FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
        res->u.srq.op = FW_RI_RES_OP_RESET;
        res->u.srq.srqid = cpu_to_be32(srq->idx);
        res->u.srq.eqid = cpu_to_be32(wq->qid);

        c4iw_init_wr_wait(wr_waitp);
        c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);

        dma_free_coherent(&rdev->lldi.pdev->dev,
                          wq->memsize, wq->queue,
                          dma_unmap_addr(wq, mapping));
        c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
        kfree(wq->sw_rq);
        c4iw_put_qpid(rdev, wq->qid, uctx);
}
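
/*
 * Allocate the host queue memory, RQT entries and qid for an SRQ, then
 * issue a FW_RI_RES_WR (FW_RI_RES_OP_WRITE) so the firmware creates the
 * queue.
 */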
static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
                           struct c4iw_wr_wait *wr_waitp)
{
        struct c4iw_rdev *rdev = &srq->rhp->rdev;
        int user = (uctx != &rdev->uctx);
        struct t4_srq *wq = &srq->wq;
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        struct sk_buff *skb;
        int ret = -ENOMEM;
        int wr_len;
        int eqsize;

        wq->qid = c4iw_get_qpid(rdev, uctx);
        if (!wq->qid)
                return -ENOMEM;

        wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
                            GFP_KERNEL);
        if (!wq->sw_rq)
                goto err_put_qpid;

        wq->pending_wrs = kcalloc(srq->wq.size,
                                  sizeof(*srq->wq.pending_wrs),
                                  GFP_KERNEL);
        if (!wq->pending_wrs)
                goto err_free_sw_rq;

        wq->rqt_size = wq->size;
        wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
        if (!wq->rqt_hwaddr)
                goto err_free_pending_wrs;
        wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
                T4_RQT_ENTRY_SHIFT;

        wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
                                       &wq->dma_addr, GFP_KERNEL);
        if (!wq->queue)
                goto err_free_rqtpool;

        dma_unmap_addr_set(wq, mapping, wq->dma_addr);

        wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
                                      &wq->bar2_qid,
                                      user ? &wq->bar2_pa : NULL);

        /*
         * User mode must have bar2 access.
         */
        if (user && !wq->bar2_va) {
                pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
                        pci_name(rdev->lldi.pdev), wq->qid);
                ret = -EINVAL;
                goto err_free_queue;
        }

        /* build fw_ri_res_wr */
        wr_len = sizeof(*res_wr) + sizeof(*res);

        skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!skb)
                goto err_free_queue;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

        res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
                                      FW_RI_RES_WR_NRES_V(1) |
                                      FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (uintptr_t)wr_waitp;
        res = res_wr->res;
        res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
        res->u.srq.op = FW_RI_RES_OP_WRITE;

        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
        eqsize = wq->size * T4_RQ_NUM_SLOTS +
                rdev->hw_queue.t4_eq_status_entries;
        res->u.srq.eqid = cpu_to_be32(wq->qid);
        res->u.srq.fetchszm_to_iqid =
                                                /* no host cidx updates */
                cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
                FW_RI_RES_WR_CPRIO_V(0) |       /* don't keep in chip cache */
                FW_RI_RES_WR_PCIECHN_V(0) |     /* set by uP at ri_init time */
                FW_RI_RES_WR_FETCHRO_V(0));     /* relaxed_ordering */
        res->u.srq.dcaen_to_eqsize =
                cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
                FW_RI_RES_WR_DCACPU_V(0) |
                FW_RI_RES_WR_FBMIN_V(2) |
                FW_RI_RES_WR_FBMAX_V(3) |
                FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
                FW_RI_RES_WR_CIDXFTHRESH_V(0) |
                FW_RI_RES_WR_EQSIZE_V(eqsize));
        res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
        res->u.srq.srqid = cpu_to_be32(srq->idx);
        res->u.srq.pdid = cpu_to_be32(srq->pdid);
        res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
        res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
                                           rdev->lldi.vr->rq.start);

        c4iw_init_wr_wait(wr_waitp);

        ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
        if (ret)
                goto err_free_queue;

        pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
                 " bar2_addr %p rqt addr 0x%x size %d\n",
                 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
                 (u64)virt_to_phys(wq->queue), wq->bar2_va,
                 wq->rqt_hwaddr, wq->rqt_size);

        return 0;

err_free_queue:
        dma_free_coherent(&rdev->lldi.pdev->dev,
                          wq->memsize, wq->queue,
                          dma_unmap_addr(wq, mapping));
err_free_rqtpool:
        c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
        kfree(wq->pending_wrs);
err_free_sw_rq:
        kfree(wq->sw_rq);
err_put_qpid:
        c4iw_put_qpid(rdev, wq->qid, uctx);
        return ret;
}
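
/*
 * Copy a pending recv WR into the SRQ queue memory 16 bytes at a time,
 * wrapping back to the start of the queue when the end is reached.
 */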
void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
{
        u64 *src, *dst;

        src = (u64 *)wqe;
        dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
        while (len16) {
                *dst++ = *src++;
                if (dst >= (u64 *)&srq->queue[srq->size])
                        dst = (u64 *)srq->queue;
                *dst++ = *src++;
                if (dst >= (u64 *)&srq->queue[srq->size])
                        dst = (u64 *)srq->queue;
                len16--;
        }
}
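
/*
 * Create an SRQ: validate the requested size/SGE limits, allocate the HW
 * queue via alloc_srq_queue(), and for user-mode SRQs pass mmap keys for
 * the queue memory and BAR2 doorbell/GTS page back in the uresp.
 */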
int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
                    struct ib_udata *udata)
{
        struct ib_pd *pd = ib_srq->pd;
        struct c4iw_dev *rhp;
        struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
        struct c4iw_pd *php;
        struct c4iw_create_srq_resp uresp;
        struct c4iw_ucontext *ucontext;
        struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
        int rqsize;
        int ret;
        int wr_len;

        if (attrs->srq_type != IB_SRQT_BASIC)
                return -EINVAL;

        pr_debug("%s ib_pd %p\n", __func__, pd);

        php = to_c4iw_pd(pd);
        rhp = php->rhp;

        if (!rhp->rdev.lldi.vr->srq.size)
                return -EINVAL;
        if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
                return -E2BIG;
        if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
                return -E2BIG;

        /*
         * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
         */
        rqsize = attrs->attr.max_wr + 1;
        rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));

        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);

        srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
        if (!srq->wr_waitp)
                return -ENOMEM;

        srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
        if (srq->idx < 0) {
                ret = -ENOMEM;
                goto err_free_wr_wait;
        }

        wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
        srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
        if (!srq->destroy_skb) {
                ret = -ENOMEM;
                goto err_free_srq_idx;
        }

        srq->rhp = rhp;
        srq->pdid = php->pdid;

        srq->wq.size = rqsize;
        srq->wq.memsize =
                (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
                sizeof(*srq->wq.queue);
        srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);

        ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
                        &rhp->rdev.uctx, srq->wr_waitp);
        if (ret)
                goto err_free_skb;
        attrs->attr.max_wr = rqsize - 1;

        if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
                srq->flags = T4_SRQ_LIMIT_SUPPORT;

        if (udata) {
                srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
                if (!srq_key_mm) {
                        ret = -ENOMEM;
                        goto err_free_queue;
                }
                srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
                if (!srq_db_key_mm) {
                        ret = -ENOMEM;
                        goto err_free_srq_key_mm;
                }
                memset(&uresp, 0, sizeof(uresp));
                uresp.flags = srq->flags;
                uresp.qid_mask = rhp->rdev.qpmask;
                uresp.srqid = srq->wq.qid;
                uresp.srq_size = srq->wq.size;
                uresp.srq_memsize = srq->wq.memsize;
                uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
                spin_lock(&ucontext->mmap_lock);
                uresp.srq_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.srq_db_gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
                if (ret)
                        goto err_free_srq_db_key_mm;
                srq_key_mm->key = uresp.srq_key;
                srq_key_mm->addr = 0;
                srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
                srq_key_mm->vaddr = srq->wq.queue;
                srq_key_mm->dma_addr = srq->wq.dma_addr;
                insert_flag_to_mmap(&rhp->rdev, srq_key_mm, srq_key_mm->addr);
                insert_mmap(ucontext, srq_key_mm);
                srq_db_key_mm->key = uresp.srq_db_gts_key;
                srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
                srq_db_key_mm->len = PAGE_SIZE;
                srq_db_key_mm->vaddr = NULL;
                srq_db_key_mm->dma_addr = 0;
                insert_flag_to_mmap(&rhp->rdev, srq_db_key_mm,
                                    srq_db_key_mm->addr);
                insert_mmap(ucontext, srq_db_key_mm);
        }

        pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
                 __func__, srq->wq.qid, srq->idx, srq->wq.size,
                 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);

        spin_lock_init(&srq->lock);
        return 0;

err_free_srq_db_key_mm:
        kfree(srq_db_key_mm);
err_free_srq_key_mm:
        kfree(srq_key_mm);
err_free_queue:
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
err_free_skb:
        kfree_skb(srq->destroy_skb);
err_free_srq_idx:
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
        c4iw_put_wr_wait(srq->wr_waitp);
        return ret;
}
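
/*
 * Destroy an SRQ: reset and free the HW queue, then release the SRQ index
 * and the wait object.
 */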
int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_srq *srq;
        struct c4iw_ucontext *ucontext;

        srq = to_c4iw_srq(ibsrq);
        rhp = srq->rhp;

        pr_debug("%s id %d\n", __func__, srq->wq.qid);
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
        c4iw_free_srq_idx(&rhp->rdev, srq->idx);
        c4iw_put_wr_wait(srq->wr_waitp);