/*
 * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm/delay.h>

#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

#include "cxio_resource.h"
#include "cxio_hal.h"
#include "cxgb3_offload.h"
#include "sge_defs.h"

static LIST_HEAD(rdev_list);
static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;

static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (!strcmp(rdev->dev_name, dev_name))
			return rdev;
	return NULL;
}

static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
{
	struct cxio_rdev *rdev;

	list_for_each_entry(rdev, &rdev_list, entry)
		if (rdev->t3cdev_p == tdev)
			return rdev;
	return NULL;
}

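/*
 * Issue a CQ operation (an arm/rearm or a credit update) to the SGE via the
 * t3cdev ctl hook.  For a rearm the hardware returns the CQ index it armed
 * at; if that differs from our current rptr, CQEs may still be in flight,
 * so we spin below until the last in-flight entry becomes valid rather than
 * let the consumer miss a notification.
 */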
int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
		   enum t3_cq_opcode op, u32 credit)
{
	int ret;
	struct t3_cqe *cqe;
	u32 rptr;

	struct rdma_cq_op setup;
	setup.id = cq->cqid;
	setup.credits = (op == CQ_CREDIT_UPDATE) ? credit : 0;
	setup.op = op;
	ret = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_OP, &setup);

	if ((ret < 0) || (op == CQ_CREDIT_UPDATE))
		return ret;

	/*
	 * If the rearm returned an index other than our current index,
	 * then there might be CQE's in flight (being DMA'd).  We must wait
	 * here for them to complete or the consumer can miss a notification.
	 */
	if (Q_PTR2IDX((cq->rptr), cq->size_log2) != ret) {
		int i = 0;

		rptr = cq->rptr;

		/*
		 * Keep the generation correct by bumping rptr until it
		 * matches the index returned by the rearm - 1.
		 */
		while (Q_PTR2IDX((rptr + 1), cq->size_log2) != ret)
			rptr++;

		/*
		 * Now rptr is the index for the (last) cqe that was
		 * in-flight at the time the HW rearmed the CQ.  We
		 * spin until that CQE is valid.
		 */
		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
		while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
			udelay(1);
			if (i++ > 1000000) {
				printk(KERN_ERR "%s: stalled rnic\n",
				       rdev_p->dev_name);
				return -EIO;
			}
		}

		return 1;
	}

	return 0;
}

static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
{
	struct rdma_cq_setup setup;
	setup.id = cqid;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 0;		/* disable the CQ */
	setup.credits = 0;
	setup.credit_thres = 0;
	setup.ovfl_mode = 0;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
{
	u64 sge_cmd;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		PDBG("%s alloc_skb failed\n", __func__);
		return -ENOMEM;
	}
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD,
		       T3_COMPLETION_FLAG | T3_NOTIFY_FLAG, 0, qpid, 7,
		       T3_SOPEOP);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = qpid << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	skb->priority = CPL_PRIORITY_CONTROL;
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}

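/*
 * CQ creation: the CQ consists of a DMA-coherent ring shared with the
 * hardware (cq->queue) and, for kernel users, a software shadow ring
 * (cq->sw_queue) used to stage flushed and out-of-order completions.
 * The CQ context is pushed to the adapter with an RDMA_CQ_SETUP ctl call.
 */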
int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
	struct rdma_cq_setup setup;
	int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);

	size += 1; /* one extra page for storing cq-in-err state */
	cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
	if (!cq->cqid)
		return -ENOMEM;
	if (kernel) {
		cq->sw_queue = kzalloc(size, GFP_KERNEL);
		if (!cq->sw_queue)
			return -ENOMEM;
	}
	cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
				       &(cq->dma_addr), GFP_KERNEL);
	if (!cq->queue) {
		kfree(cq->sw_queue);
		return -ENOMEM;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, size);
	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = 65535;
	setup.credit_thres = 1;
	if (rdev_p->t3cdev_p->type != T3A)
		setup.ovfl_mode = 0;
	else
		setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

int cxio_resize_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	struct rdma_cq_setup setup;
	setup.id = cq->cqid;
	setup.base_addr = (u64) (cq->dma_addr);
	setup.size = 1UL << cq->size_log2;
	setup.credits = setup.size;
	setup.credit_thres = setup.size;	/* TBD: overflow recovery */
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

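/*
 * QPID management: QPIDs are handed out in groups sized by rdev_p->qpmask so
 * that every QPID mapping to the same user doorbell page stays with one
 * ucontext.  get_qpid() first reuses a cached id from uctx->qpids and only
 * falls back to the global resource pool when the cache is empty, pre-filling
 * the cache with the rest of the group; put_qpid() returns an id to that
 * per-context cache.
 */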
static u32 get_qpid(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;
	u32 qpid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->qpids)) {
		entry = list_entry(uctx->qpids.next, struct cxio_qpid_list,
				   entry);
		list_del(&entry->entry);
		qpid = entry->qpid;
		kfree(entry);
	} else {
		qpid = cxio_hal_get_qpid(rdev_p->rscp);
		if (!qpid)
			goto out;
		for (i = qpid + 1; i & rdev_p->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				break;
			entry->qpid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qpid 0x%x\n", __func__, qpid);
	return qpid;
}

static void put_qpid(struct cxio_rdev *rdev_p, u32 qpid,
		     struct cxio_ucontext *uctx)
{
	struct cxio_qpid_list *entry;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return;
	PDBG("%s qpid 0x%x\n", __func__, qpid);
	entry->qpid = qpid;
	mutex_lock(&uctx->lock);
	list_add_tail(&entry->entry, &uctx->qpids);
	mutex_unlock(&uctx->lock);
}

void cxio_release_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct cxio_qpid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct cxio_qpid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qpid & rdev_p->qpmask))
			cxio_hal_put_qpid(rdev_p->rscp, entry->qpid);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void cxio_init_ucontext(struct cxio_rdev *rdev_p, struct cxio_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	mutex_init(&uctx->lock);
}

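/*
 * QP (WQ) creation: allocate the software SQ/RQ shadow arrays, a slot in the
 * RQT pool for the hardware receive queue, and the DMA-coherent work queue
 * itself, then record the kernel and (for user QPs) user doorbell addresses.
 * Failures unwind in reverse order of allocation.
 */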
int cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain,
		   struct t3_wq *wq, struct cxio_ucontext *uctx)
{
	int depth = 1UL << wq->size_log2;
	int rqsize = 1UL << wq->rq_size_log2;

	wq->qpid = get_qpid(rdev_p, uctx);
	if (!wq->qpid)
		return -ENOMEM;

	wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL);
	if (!wq->rq)
		goto err1;

	wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
	if (!wq->rq_addr)
		goto err2;

	wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL);
	if (!wq->sq)
		goto err3;

	wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
				       depth * sizeof(union t3_wr),
				       &(wq->dma_addr), GFP_KERNEL);
	if (!wq->queue)
		goto err4;

	memset(wq->queue, 0, depth * sizeof(union t3_wr));
	dma_unmap_addr_set(wq, mapping, wq->dma_addr);
	wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	if (!kernel_domain)
		wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
					(wq->qpid << rdev_p->qpshift);
	wq->rdev = rdev_p;
	PDBG("%s qpid 0x%x doorbell 0x%p udb 0x%llx\n", __func__,
	     wq->qpid, wq->doorbell, (unsigned long long) wq->udb);
	return 0;
err4:
	kfree(wq->sq);
err3:
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
err2:
	kfree(wq->rq);
err1:
	put_qpid(rdev_p, wq->qpid, uctx);
	return -ENOMEM;
}

int cxio_destroy_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
{
	int err;
	err = cxio_hal_clear_cq_ctx(rdev_p, cq->cqid);
	kfree(cq->sw_queue);
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (cq->size_log2))
			  * sizeof(struct t3_cqe), cq->queue,
			  dma_unmap_addr(cq, mapping));
	cxio_hal_put_cqid(rdev_p->rscp, cq->cqid);
	return err;
}

int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
		    struct cxio_ucontext *uctx)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << (wq->size_log2))
			  * sizeof(union t3_wr), wq->queue,
			  dma_unmap_addr(wq, mapping));
	kfree(wq->sq);
	cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
	kfree(wq->rq);
	put_qpid(rdev_p, wq->qpid, uctx);
	return 0;
}

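/*
 * Flush support: when a QP moves to error, the helpers below synthesize
 * TPT_ERR_SWFLUSH completions in the CQ's software queue for any work
 * requests still outstanding on the RQ and SQ, so the consumer sees every
 * posted WR complete.
 */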
static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(T3_SEND) |
				 V_CQE_TYPE(0) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	u32 ptr;
	int flushed = 0;

	PDBG("%s wq %p cq %p\n", __func__, wq, cq);

	/* flush RQ */
	PDBG("%s rq_rptr %u rq_wptr %u skip count %u\n", __func__,
	     wq->rq_rptr, wq->rq_wptr, count);
	ptr = wq->rq_rptr + count;
	while (ptr++ != wq->rq_wptr) {
		insert_recv_cqe(wq, cq);
		flushed++;
	}
	return flushed;
}

static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
			  struct t3_swsq *sqp)
{
	struct t3_cqe cqe;

	PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__,
	     wq, cq, cq->sw_rptr, cq->sw_wptr);
	memset(&cqe, 0, sizeof(cqe));
	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
				 V_CQE_OPCODE(sqp->opcode) |
				 V_CQE_TYPE(1) |
				 V_CQE_SWCQE(1) |
				 V_CQE_QPID(wq->qpid) |
				 V_CQE_GENBIT(Q_GENBIT(cq->sw_wptr,
						       cq->size_log2)));
	cqe.u.scqe.wrid_hi = sqp->sq_wptr;

	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
	cq->sw_wptr++;
}

int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
{
	__u32 ptr;
	int flushed = 0;
	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);

	ptr = wq->sq_rptr + count;
	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
	while (ptr != wq->sq_wptr) {
		sqp->signaled = 0;
		insert_sq_cqe(wq, cq, sqp);
		ptr++;
		sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
		flushed++;
	}
	return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void cxio_flush_hw_cq(struct t3_cq *cq)
{
	struct t3_cqe *cqe, *swcqe;

	PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
	cqe = cxio_next_hw_cqe(cq);
	while (cqe) {
		PDBG("%s flushing hwcq rptr 0x%x to swcq wptr 0x%x\n",
		     __func__, cq->rptr, cq->sw_wptr);
		swcqe = cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2);
		*swcqe = *cqe;
		swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
		cq->sw_wptr++;
		cq->rptr++;
		cqe = cxio_next_hw_cqe(cq);
	}
}

static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
{
	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
		return 0;

	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
		return 0;

	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
	    Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
		return 0;

	return 1;
}

void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if ((SQ_TYPE(*cqe) ||
		     ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
		    (CQE_QPID(*cqe) == wq->qpid))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
{
	struct t3_cqe *cqe;
	u32 ptr;

	*count = 0;
	PDBG("%s count zero %d\n", __func__, *count);
	ptr = cq->sw_rptr;
	while (!Q_EMPTY(ptr, cq->sw_wptr)) {
		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
		if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
		    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
			(*count)++;
		ptr++;
	}
	PDBG("%s cq %p count %d\n", __func__, cq, *count);
}

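/*
 * The control CQ/QP pair is a kernel-only channel used to write adapter
 * memory (TPT entries and PBLs) through T3_WR_BP work requests.  Its CQ is
 * set up so the SGE redirects completions to the response queue, where the
 * event handler advances ctrl_qp.rptr and wakes any waiters.
 */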
static int cxio_hal_init_ctrl_cq(struct cxio_rdev *rdev_p)
{
	struct rdma_cq_setup setup;
	setup.id = 0;
	setup.base_addr = 0;	/* NULL address */
	setup.size = 1;		/* enable the CQ */
	setup.credits = 0;

	/* force SGE to redirect to RspQ and interrupt */
	setup.credit_thres = 0;
	setup.ovfl_mode = 1;
	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
}

static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
{
	int err;
	u64 sge_cmd, ctx0, ctx1;
	u64 base_addr;
	struct t3_modify_qp_wr *wqe;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb) {
		PDBG("%s alloc_skb failed\n", __func__);
		return -ENOMEM;
	}
	err = cxio_hal_init_ctrl_cq(rdev_p);
	if (err) {
		PDBG("%s err %d initializing ctrl_cq\n", __func__, err);
		goto err;
	}
	rdev_p->ctrl_qp.workq = dma_alloc_coherent(
					&(rdev_p->rnic_info.pdev->dev),
					(1 << T3_CTRL_QP_SIZE_LOG2) *
					sizeof(union t3_wr),
					&(rdev_p->ctrl_qp.dma_addr),
					GFP_KERNEL);
	if (!rdev_p->ctrl_qp.workq) {
		PDBG("%s dma_alloc_coherent failed\n", __func__);
		err = -ENOMEM;
		goto err;
	}
	dma_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
			   rdev_p->ctrl_qp.dma_addr);
	rdev_p->ctrl_qp.doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
	memset(rdev_p->ctrl_qp.workq, 0,
	       (1 << T3_CTRL_QP_SIZE_LOG2) * sizeof(union t3_wr));

	mutex_init(&rdev_p->ctrl_qp.lock);
	init_waitqueue_head(&rdev_p->ctrl_qp.waitq);

	/* update HW Ctrl QP context */
	base_addr = rdev_p->ctrl_qp.dma_addr;
	base_addr >>= 12;
	ctx0 = (V_EC_SIZE((1 << T3_CTRL_QP_SIZE_LOG2)) |
		V_EC_BASE_LO((u32) base_addr & 0xffff));
	ctx0 <<= 32;
	ctx0 |= V_EC_CREDITS(FW_WR_NUM);
	base_addr >>= 16;
	ctx1 = (u32) base_addr;
	base_addr >>= 32;
	ctx1 |= ((u64) (V_EC_BASE_HI((u32) base_addr & 0xf) | V_EC_RESPQ(0) |
			V_EC_TYPE(0) | V_EC_GEN(1) |
			V_EC_UP_TOKEN(T3_CTL_QP_TID) | F_EC_VALID)) << 32;
	wqe = (struct t3_modify_qp_wr *) skb_put(skb, sizeof(*wqe));
	memset(wqe, 0, sizeof(*wqe));
	build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_QP_MOD, 0, 0,
		       T3_CTL_QP_TID, 7, T3_SOPEOP);
	wqe->flags = cpu_to_be32(MODQP_WRITE_EC);
	sge_cmd = (3ULL << 56) | FW_RI_SGEEC_START << 8 | 3;
	wqe->sge_cmd = cpu_to_be64(sge_cmd);
	wqe->ctx1 = cpu_to_be64(ctx1);
	wqe->ctx0 = cpu_to_be64(ctx0);
	PDBG("CtrlQP dma_addr 0x%llx workq %p size %d\n",
	     (unsigned long long) rdev_p->ctrl_qp.dma_addr,
	     rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
	skb->priority = CPL_PRIORITY_CONTROL;
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
err:
	kfree_skb(skb);
	return err;
}

static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
{
	dma_free_coherent(&(rdev_p->rnic_info.pdev->dev),
			  (1UL << T3_CTRL_QP_SIZE_LOG2)
			  * sizeof(union t3_wr), rdev_p->ctrl_qp.workq,
			  dma_unmap_addr(&rdev_p->ctrl_qp, mapping));
	return cxio_hal_clear_qp_ctx(rdev_p, T3_CTRL_QP_ID);
}

/* write len bytes of data into addr (32B aligned address)
 * If data is NULL, clear len byte of memory to zero.
 * caller acquires the ctrl_qp lock before the call
 */
static int cxio_hal_ctrl_qp_write_mem(struct cxio_rdev *rdev_p, u32 addr,
				      u32 len, void *data)
{
	u32 i, nr_wqe, copy_len;
	u8 *copy_data;
	u8 wr_len, utx_len;	/* length in 8 byte flit */
	enum t3_wr_flags flag;
	__be64 *wqe;
	u64 utx_cmd;
	addr &= 0x7FFFFFF;
	nr_wqe = len % 96 ? len / 96 + 1 : len / 96;	/* 96B max per WQE */
	PDBG("%s wptr 0x%x rptr 0x%x len %d, nr_wqe %d data %p addr 0x%0x\n",
	     __func__, rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, len,
	     nr_wqe, data, addr);
	utx_len = 3;		/* in 32B unit */
	for (i = 0; i < nr_wqe; i++) {
		if (Q_FULL(rdev_p->ctrl_qp.rptr, rdev_p->ctrl_qp.wptr,
			   T3_CTRL_QP_SIZE_LOG2)) {
			PDBG("%s ctrl_qp full wtpr 0x%0x rptr 0x%0x, "
			     "wait for more space i %d\n", __func__,
			     rdev_p->ctrl_qp.wptr, rdev_p->ctrl_qp.rptr, i);
			if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     !Q_FULL(rdev_p->ctrl_qp.rptr,
						     rdev_p->ctrl_qp.wptr,
						     T3_CTRL_QP_SIZE_LOG2))) {
				PDBG("%s ctrl_qp workq interrupted\n",
				     __func__);
				return -ERESTARTSYS;
			}
			PDBG("%s ctrl_qp wakeup, continue posting work request "
			     "i %d\n", __func__, i);
		}
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
						(1 << T3_CTRL_QP_SIZE_LOG2)));
		flag = 0;
		if (i == (nr_wqe - 1)) {
			/* last WQE */
			flag = T3_COMPLETION_FLAG;
			if (len % 32)
				utx_len = len / 32 + 1;
			else
				utx_len = len / 32;
		}

		/*
		 * Force a CQE to return the credit to the workq in case
		 * we posted more than half the max QP size of WRs
		 */
		if ((i != 0) &&
		    (i % (((1 << T3_CTRL_QP_SIZE_LOG2)) >> 1) == 0)) {
			flag = T3_COMPLETION_FLAG;
			PDBG("%s force completion at i %d\n", __func__, i);
		}

		/* build the utx mem command */
		wqe += (sizeof(struct t3_bypass_wr) >> 3);
		utx_cmd = (T3_UTX_MEM_WRITE << 28) | (addr + i * 3);
		utx_cmd <<= 32;
		utx_cmd |= (utx_len << 28) | ((utx_len << 2) + 1);
		*wqe = cpu_to_be64(utx_cmd);
		wqe++;
		copy_data = (u8 *) data + i * 96;
		copy_len = len > 96 ? 96 : len;

		/* clear memory content if data is NULL */
		if (data)
			memcpy(wqe, copy_data, copy_len);
		else
			memset(wqe, 0, copy_len);
		if (copy_len % 32)
			memset(((u8 *) wqe) + copy_len, 0,
			       32 - (copy_len % 32));
		wr_len = ((sizeof(struct t3_bypass_wr)) >> 3) + 1 +
			 (utx_len << 2);
		wqe = (__be64 *)(rdev_p->ctrl_qp.workq + (rdev_p->ctrl_qp.wptr %
			      (1 << T3_CTRL_QP_SIZE_LOG2)));

		/* wptr in the WRID[31:0] */
		((union t3_wrid *)(wqe + 1))->id0.low = rdev_p->ctrl_qp.wptr;

		/*
		 * This must be the last write with a memory barrier
		 * for the genbit
		 */
		build_fw_riwrh((struct fw_riwrh *) wqe, T3_WR_BP, flag,
			       Q_GENBIT(rdev_p->ctrl_qp.wptr,
					T3_CTRL_QP_SIZE_LOG2), T3_CTRL_QP_ID,
			       wr_len, T3_SOPEOP);
		if (flag == T3_COMPLETION_FLAG)
			ring_doorbell(rdev_p->ctrl_qp.doorbell, T3_CTRL_QP_ID);
		len -= 96;
		rdev_p->ctrl_qp.wptr++;
	}
	return 0;
}

/* IN: stag key, pdid, perm, zbva, to, len, page_size, pbl_size and pbl_addr
 * OUT: stag index
 * TBD: shared memory region support
 */
static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
			 u32 *stag, u8 stag_state, u32 pdid,
			 enum tpt_mem_type type, enum tpt_mem_perm perm,
			 u32 zbva, u64 to, u32 len, u8 page_size,
			 u32 pbl_size, u32 pbl_addr)
{
	int err;
	struct tpt_entry tpt;
	u32 stag_idx;
	u32 wptr;

	if (cxio_fatal_error(rdev_p))
		return -EIO;

	stag_state = stag_state > 0;
	stag_idx = (*stag) >> 8;

	if ((!reset_tpt_entry) && !(*stag != T3_STAG_UNSET)) {
		stag_idx = cxio_hal_get_stag(rdev_p->rscp);
		if (!stag_idx)
			return -ENOMEM;
		*stag = (stag_idx << 8) | ((*stag) & 0xFF);
	}
	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
	     __func__, stag_state, type, pdid, stag_idx);

	mutex_lock(&rdev_p->ctrl_qp.lock);

	/* write TPT entry */
	if (reset_tpt_entry)
		memset(&tpt, 0, sizeof(tpt));
	else {
		tpt.valid_stag_pdid = cpu_to_be32(F_TPT_VALID |
				V_TPT_STAG_KEY((*stag) & M_TPT_STAG_KEY) |
				V_TPT_STAG_STATE(stag_state) |
				V_TPT_STAG_TYPE(type) | V_TPT_PDID(pdid));
		BUG_ON(page_size >= 28);
		tpt.flags_pagesize_qpid = cpu_to_be32(V_TPT_PERM(perm) |
			((perm & TPT_MW_BIND) ? F_TPT_MW_BIND_ENABLE : 0) |
			V_TPT_ADDR_TYPE((zbva ? TPT_ZBTO : TPT_VATO)) |
			V_TPT_PAGE_SIZE(page_size));
		tpt.rsvd_pbl_addr = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_ADDR(PBL_OFF(rdev_p, pbl_addr) >> 3));
		tpt.len = cpu_to_be32(len);
		tpt.va_hi = cpu_to_be32((u32) (to >> 32));
		tpt.va_low_or_fbo = cpu_to_be32((u32) (to & 0xFFFFFFFFULL));
		tpt.rsvd_bind_cnt_or_pstag = 0;
		tpt.rsvd_pbl_size = reset_tpt_entry ? 0 :
			cpu_to_be32(V_TPT_PBL_SIZE(pbl_size >> 2));
	}
	err = cxio_hal_ctrl_qp_write_mem(rdev_p,
					 stag_idx +
					 (rdev_p->rnic_info.tpt_base >> 5),
					 sizeof(tpt), &tpt);

	/* release the stag index to free pool */
	if (reset_tpt_entry)
		cxio_hal_put_stag(rdev_p->rscp, stag_idx);

	wptr = rdev_p->ctrl_qp.wptr;
	mutex_unlock(&rdev_p->ctrl_qp.lock);
	if (!err)
		if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
					     SEQ32_GE(rdev_p->ctrl_qp.rptr,
						      wptr)))
			return -ERESTARTSYS;
	return err;
}

int cxio_write_pbl(struct cxio_rdev *rdev_p, __be64 *pbl,
		   u32 pbl_addr, u32 pbl_size)
{
	u32 wptr;
	int err;

	PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
	     __func__, pbl_addr, rdev_p->rnic_info.pbl_base,
	     pbl_size);

	mutex_lock(&rdev_p->ctrl_qp.lock);
	err = cxio_hal_ctrl_qp_write_mem(rdev_p, pbl_addr >> 5, pbl_size << 3,
					 pbl);
	wptr = rdev_p->ctrl_qp.wptr;
	mutex_unlock(&rdev_p->ctrl_qp.lock);
	if (err)
		return err;

	if (wait_event_interruptible(rdev_p->ctrl_qp.waitq,
				     SEQ32_GE(rdev_p->ctrl_qp.rptr,
					      wptr)))
		return -ERESTARTSYS;

	return 0;
}

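/*
 * The exported memory-registration entry points below are thin wrappers
 * around __cxio_tpt_op(); they differ only in the TPT type, the stag state,
 * and whether an existing entry is being reset.
 */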
int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			   u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl_size, pbl_addr);
}

int cxio_reregister_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
			     enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
			     u8 page_size, u32 pbl_size, u32 pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 0, stag, 1, pdid, TPT_NON_SHARED_MR, perm,
			     zbva, to, len, page_size, pbl_size, pbl_addr);
}

int cxio_dereg_mem(struct cxio_rdev *rdev_p, u32 stag, u32 pbl_size,
		   u32 pbl_addr)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
			     pbl_size, pbl_addr);
}

int cxio_allocate_window(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_MW, 0, 0, 0ULL, 0, 0,
			     0, 0);
}

int cxio_deallocate_window(struct cxio_rdev *rdev_p, u32 stag)
{
	return __cxio_tpt_op(rdev_p, 1, &stag, 0, 0, 0, 0, 0, 0ULL, 0, 0,
			     0, 0);
}

int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr)
{
	*stag = T3_STAG_UNSET;
	return __cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
			     0, 0, 0ULL, 0, 0, pbl_size, pbl_addr);
}

int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
{
	struct t3_rdma_init_wr *wqe;
	struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	PDBG("%s rdev_p %p\n", __func__, rdev_p);
	wqe = (struct t3_rdma_init_wr *) __skb_put(skb, sizeof(*wqe));
	wqe->wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_INIT));
	wqe->wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(attr->tid) |
					   V_FW_RIWR_LEN(sizeof(*wqe) >> 3));
	wqe->wrid.id1 = 0;
	wqe->qpid = cpu_to_be32(attr->qpid);
	wqe->pdid = cpu_to_be32(attr->pdid);
	wqe->scqid = cpu_to_be32(attr->scqid);
	wqe->rcqid = cpu_to_be32(attr->rcqid);
	wqe->rq_addr = cpu_to_be32(attr->rq_addr - rdev_p->rnic_info.rqt_base);
	wqe->rq_size = cpu_to_be32(attr->rq_size);
	wqe->mpaattrs = attr->mpaattrs;
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->rqe_count = cpu_to_be16(attr->rqe_count);
	wqe->flags_rtr_type = cpu_to_be16(attr->flags |
					  V_RTR_TYPE(attr->rtr_type) |
					  V_CHAN(attr->chan));
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
	wqe->qp_dma_size = cpu_to_be32(attr->qp_dma_size);
	wqe->irs = cpu_to_be32(attr->irs);
	skb->priority = 0;	/* 0=>ToeQ; 1=>CtrlQ */
	return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}

void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = ev_cb;
}

void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb)
{
	cxio_ev_cb = NULL;
}

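/*
 * CPL_ASYNC_NOTIF handler: completions for the control QP simply advance
 * ctrl_qp.rptr and wake any thread waiting in cxio_hal_ctrl_qp_write_mem();
 * everything else is passed to the registered event callback, or dropped if
 * no callback is registered.
 */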
static int cxio_hal_ev_handler(struct t3cdev *t3cdev_p, struct sk_buff *skb)
{
	static int cnt;
	struct cxio_rdev *rdev_p = NULL;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	PDBG("%d: %s cq_id 0x%x cq_ptr 0x%x genbit %0x overflow %0x an %0x"
	     " se %0x notify %0x cqbranch %0x creditth %0x\n",
	     cnt, __func__, RSPQ_CQID(rsp_msg), RSPQ_CQPTR(rsp_msg),
	     RSPQ_GENBIT(rsp_msg), RSPQ_OVERFLOW(rsp_msg), RSPQ_AN(rsp_msg),
	     RSPQ_SE(rsp_msg), RSPQ_NOTIFY(rsp_msg), RSPQ_CQBRANCH(rsp_msg),
	     RSPQ_CREDIT_THRESH(rsp_msg));
	PDBG("CQE: QPID 0x%0x genbit %0x type 0x%0x status 0x%0x opcode %d "
	     "len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
	     CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
	     CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
	     CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
	rdev_p = (struct cxio_rdev *)t3cdev_p->ulp;
	if (!rdev_p) {
		PDBG("%s called by t3cdev %p with null ulp\n", __func__,
		     t3cdev_p);
		return 0;
	}
	if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
		rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
		wake_up_interruptible(&rdev_p->ctrl_qp.waitq);
		dev_kfree_skb_irq(skb);
	} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
		dev_kfree_skb_irq(skb);
	else if (cxio_ev_cb)
		(*cxio_ev_cb) (rdev_p, skb);
	else
		dev_kfree_skb_irq(skb);
	cnt++;
	return 0;
}

/* Caller takes care of locking if needed */
int cxio_rdev_open(struct cxio_rdev *rdev_p)
{
	struct net_device *netdev_p = NULL;
	int err = 0;
	if (strlen(rdev_p->dev_name)) {
		if (cxio_hal_find_rdev_by_name(rdev_p->dev_name)) {
			return -EBUSY;
		}
		netdev_p = dev_get_by_name(&init_net, rdev_p->dev_name);
		if (!netdev_p) {
			return -EINVAL;
		}
		dev_put(netdev_p);
	} else if (rdev_p->t3cdev_p) {
		if (cxio_hal_find_rdev_by_t3cdev(rdev_p->t3cdev_p)) {
			return -EBUSY;
		}
		netdev_p = rdev_p->t3cdev_p->lldev;
		strncpy(rdev_p->dev_name, rdev_p->t3cdev_p->name,
			T3_MAX_DEV_NAME_LEN);
	} else {
		PDBG("%s t3cdev_p or dev_name must be set\n", __func__);
		return -EINVAL;
	}

	list_add_tail(&rdev_p->entry, &rdev_list);

	PDBG("%s opening rnic dev %s\n", __func__, rdev_p->dev_name);
	memset(&rdev_p->ctrl_qp, 0, sizeof(rdev_p->ctrl_qp));
	if (!rdev_p->t3cdev_p)
		rdev_p->t3cdev_p = dev2t3cdev(netdev_p);
	rdev_p->t3cdev_p->ulp = (void *) rdev_p;

	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_EMBEDDED_INFO,
				    &(rdev_p->fw_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __func__, rdev_p->t3cdev_p, err);
		goto err1;
	}
	if (G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers) != CXIO_FW_MAJ) {
		printk(KERN_ERR MOD "fatal firmware version mismatch: "
		       "need version %u but adapter has version %u\n",
		       CXIO_FW_MAJ,
		       G_FW_VERSION_MAJOR(rdev_p->fw_info.fw_vers));
		err = -EINVAL;
		goto err1;
	}

	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_GET_PARAMS,
				    &(rdev_p->rnic_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __func__, rdev_p->t3cdev_p, err);
		goto err1;
	}
	err = rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, GET_PORTS,
				    &(rdev_p->port_info));
	if (err) {
		printk(KERN_ERR "%s t3cdev_p(%p)->ctl returned error %d.\n",
		       __func__, rdev_p->t3cdev_p, err);
		goto err1;
	}

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	cxio_init_ucontext(rdev_p, &rdev_p->uctx);
	rdev_p->qpshift = PAGE_SHIFT -
			  ilog2(65536 >>
				    ilog2(rdev_p->rnic_info.udbell_len >>
					      PAGE_SHIFT));
	rdev_p->qpnr = rdev_p->rnic_info.udbell_len >> PAGE_SHIFT;
	rdev_p->qpmask = (65536 >> ilog2(rdev_p->qpnr)) - 1;
	PDBG("%s rnic %s info: tpt_base 0x%0x tpt_top 0x%0x num stags %d "
	     "pbl_base 0x%0x pbl_top 0x%0x rqt_base 0x%0x, rqt_top 0x%0x\n",
	     __func__, rdev_p->dev_name, rdev_p->rnic_info.tpt_base,
	     rdev_p->rnic_info.tpt_top, cxio_num_stags(rdev_p),
	     rdev_p->rnic_info.pbl_base,
	     rdev_p->rnic_info.pbl_top, rdev_p->rnic_info.rqt_base,
	     rdev_p->rnic_info.rqt_top);
	PDBG("udbell_len 0x%0x udbell_physbase 0x%lx kdb_addr %p qpshift %lu "
	     "qpnr %d qpmask 0x%x\n",
	     rdev_p->rnic_info.udbell_len,
	     rdev_p->rnic_info.udbell_physbase, rdev_p->rnic_info.kdb_addr,
	     rdev_p->qpshift, rdev_p->qpnr, rdev_p->qpmask);

	err = cxio_hal_init_ctrl_qp(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing ctrl_qp.\n",
		       __func__, err);
		goto err1;
	}

	err = cxio_hal_init_resource(rdev_p, cxio_num_stags(rdev_p), 0,
				     0, T3_MAX_NUM_QP, T3_MAX_NUM_CQ,
				     T3_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR "%s error %d initializing hal resources.\n",
		       __func__, err);
		goto err2;
	}

	err = cxio_hal_pblpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing pbl mem pool.\n",
		       __func__, err);
		goto err3;
	}

	err = cxio_hal_rqtpool_create(rdev_p);
	if (err) {
		printk(KERN_ERR "%s error %d initializing rqt mem pool.\n",
		       __func__, err);
		goto err4;
	}
	return 0;
err4:
	cxio_hal_pblpool_destroy(rdev_p);
err3:
	cxio_hal_destroy_resource(rdev_p->rscp);
err2:
	cxio_hal_destroy_ctrl_qp(rdev_p);
err1:
	rdev_p->t3cdev_p->ulp = NULL;
	list_del(&rdev_p->entry);
	return err;
}

void cxio_rdev_close(struct cxio_rdev *rdev_p)
{
	if (rdev_p) {
		cxio_hal_pblpool_destroy(rdev_p);
		cxio_hal_rqtpool_destroy(rdev_p);
		list_del(&rdev_p->entry);
		cxio_hal_destroy_ctrl_qp(rdev_p);
		cxio_hal_destroy_resource(rdev_p->rscp);
		rdev_p->t3cdev_p->ulp = NULL;
	}
}

int __init cxio_hal_init(void)
{
	if (cxio_hal_init_rhdl_resource(T3_MAX_NUM_RI))
		return -ENOMEM;
	t3_register_cpl_handler(CPL_ASYNC_NOTIF, cxio_hal_ev_handler);
	return 0;
}

void __exit cxio_hal_exit(void)
{
	struct cxio_rdev *rdev, *tmp;

	t3_register_cpl_handler(CPL_ASYNC_NOTIF, NULL);
	list_for_each_entry_safe(rdev, tmp, &rdev_list, entry)
		cxio_rdev_close(rdev);
	cxio_hal_destroy_rhdl_resource();
}

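/*
 * Walk the software SQ from sq_rptr and move any completions that are now in
 * order into the software CQ: unsignaled WRs are skipped, completed signaled
 * WRs are copied over, and the walk stops at the first WR that has not yet
 * completed.
 */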
static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
{
	struct t3_swsq *sqp;
	__u32 ptr = wq->sq_rptr;
	int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);

	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
	while (count--)
		if (!sqp->signaled) {
			ptr++;
			sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
		} else if (sqp->complete) {

			/*
			 * Insert this completed cqe into the swcq.
			 */
			PDBG("%s moving cqe into swcq sq idx %ld cq idx %ld\n",
			     __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
			     Q_PTR2IDX(cq->sw_wptr, cq->size_log2));
			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
			*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2))
				= sqp->cqe;
			cq->sw_wptr++;
			sqp->signaled = 0;
		} else
			break;
}

*wq
, struct t3_cqe
*hw_cqe
,
1105 struct t3_cqe
*read_cqe
)
1107 read_cqe
->u
.scqe
.wrid_hi
= wq
->oldest_read
->sq_wptr
;
1108 read_cqe
->len
= wq
->oldest_read
->read_len
;
1109 read_cqe
->header
= htonl(V_CQE_QPID(CQE_QPID(*hw_cqe
)) |
1110 V_CQE_SWCQE(SW_CQE(*hw_cqe
)) |
1111 V_CQE_OPCODE(T3_READ_REQ
) |
/*
 * Return a ptr to the next read wr in the SWSQ or NULL.
 */
static void advance_oldest_read(struct t3_wq *wq)
{
	u32 rptr = wq->oldest_read - wq->sq + 1;
	u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);

	while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
		wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);

		if (wq->oldest_read->opcode == T3_READ_REQ)
			return;
		rptr++;
	}
	wq->oldest_read = NULL;
}

/*
 * cxio_poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *     0       CQE returned,
 *    -1       CQE skipped, try again.
 */
int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
		 u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t3_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	hw_cqe = cxio_next_cqe(cq);

	PDBG("%s CQE OOO %d qpid 0x%0x genbit %d type %d status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OOO(*hw_cqe), CQE_QPID(*hw_cqe),
	     CQE_GENBIT(*hw_cqe), CQE_TYPE(*hw_cqe), CQE_STATUS(*hw_cqe),
	     CQE_OPCODE(*hw_cqe), CQE_LEN(*hw_cqe), CQE_WRID_HI(*hw_cqe),
	     CQE_WRID_LOW(*hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -1;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->oldest_read) {
			if (CQE_STATUS(*hw_cqe))
				wq->error = 1;
			ret = -1;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	/*
	 * T3A: Discard TERMINATE CQEs.
	 */
	if (CQE_OPCODE(*hw_cqe) == T3_TERMINATE) {
		ret = -1;
		wq->error = 1;
		goto skip_cqe;
	}

	if (CQE_STATUS(*hw_cqe) || wq->error) {
		*cqe_flushed = wq->error;
		wq->error = 1;

		/*
		 * T3A inserts errors into the CQE.  We cannot return
		 * these as work completions.
		 */
		/* incoming write failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_RDMA_WRITE)
		    && RQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}
		/* incoming read request failures */
		if ((CQE_OPCODE(*hw_cqe) == T3_READ_RESP) && SQ_TYPE(*hw_cqe)) {
			ret = -1;
			goto skip_cqe;
		}

		/* incoming SEND with no receive posted failures */
		if (CQE_SEND_OPCODE(*hw_cqe) && RQ_TYPE(*hw_cqe) &&
		    Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
			ret = -1;
			goto skip_cqe;
		}
		BUG_ON((*cqe_flushed == 0) && !SW_CQE(*hw_cqe));
		goto proc_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(*hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If its not,
		 * then we complete this with TPT_ERR_MSN and mark the wq in
		 * error.
		 */
		if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
			wq->error = 1;
			ret = -1;
			goto skip_cqe;
		}

		if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
			wq->error = 1;
			hw_cqe->header |= htonl(V_CQE_STATUS(TPT_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here its a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ. This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
		struct t3_swsq *sqp;

		PDBG("%s out of order completion going in swsq at idx %ld\n",
		     __func__,
		     Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2));
		sqp = wq->sq +
		      Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
		sqp->cqe = *hw_cqe;
		sqp->complete = 1;
		ret = -1;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(*hw_cqe)) {
		wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
		PDBG("%s completing sq idx %ld\n", __func__,
		     Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
		*cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
		wq->sq_rptr++;
	} else {
		PDBG("%s completing rq idx %ld\n", __func__,
		     Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
		*cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
		if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
			cxio_hal_pblpool_free(wq->rdev,
				wq->rq[Q_PTR2IDX(wq->rq_rptr,
				wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
		BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
		wq->rq_rptr++;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(*hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe sw_rptr 0x%x\n",
		     __func__, cq, cq->cqid, cq->sw_rptr);
		++cq->sw_rptr;
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe rptr 0x%x\n",
		     __func__, cq, cq->cqid, cq->rptr);
		++cq->rptr;

		/*
		 * T3A: compute credits.
		 */
		if (((cq->rptr - cq->wptr) > (1 << (cq->size_log2 - 1)))
		    || ((cq->rptr - cq->wptr) >= 128)) {
			*credit = cq->rptr - cq->wptr;
			cq->wptr = cq->rptr;
		}
	}
	return ret;
}