/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>

#include "c2.h"
#include "c2_vq.h"
#include "c2_status.h"

#define C2_MAX_ORD_PER_QP 128
#define C2_MAX_IRD_PER_QP 128

#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)

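/*
 * Example: C2_HINT_MAKE(3, 5) packs the mq index into the upper 16 bits
 * and the hint count into the lower 16 bits, yielding 0x00030005.
 * C2_HINT_GET_INDEX() recovers 3 (note that its 0x7FFF0000 mask reads
 * back only 15 of the 16 index bits) and C2_HINT_GET_COUNT() recovers 5.
 */
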
static const u8 c2_opcode[] = {
	[IB_WR_SEND] = C2_WR_TYPE_SEND,
	[IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
	[IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
};

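/*
 * Entries mapped to NO_SUPPORT (immediate data and atomics) are IB
 * opcodes with no corresponding CCIL WR type on this adapter.
 */
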
static int to_c2_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
		return C2_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C2_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C2_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C2_QP_STATE_CLOSING;
	case IB_QPS_ERR:
		return C2_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static int to_ib_state(enum c2_qp_state c2_state)
{
	switch (c2_state) {
	case C2_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C2_QP_STATE_CONNECTING:
		return IB_QPS_RTR;
	case C2_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C2_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C2_QP_STATE_ERROR:
		return IB_QPS_ERR;
	case C2_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	default:
		return -1;
	}
}

static const char *to_ib_state_str(int ib_state)
{
	static const char *state_str[] = {
		"IB_QPS_RESET",
		"IB_QPS_INIT",
		"IB_QPS_RTR",
		"IB_QPS_RTS",
		"IB_QPS_SQD",
		"IB_QPS_SQE",
		"IB_QPS_ERR"
	};

	if (ib_state < IB_QPS_RESET ||
	    ib_state > IB_QPS_ERR)
		return "<invalid IB QP state>";

	ib_state -= IB_QPS_RESET;
	return state_str[ib_state];
}

void c2_set_qp_state(struct c2_qp *qp, int c2_state)
{
	int new_state = to_ib_state(c2_state);

	pr_debug("%s: qp[%p] state modify %s --> %s\n",
		 __func__,
		 qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(new_state));
	qp->state = new_state;
}

#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF

int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
		 struct ib_qp_attr *attr, int attr_mask)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	unsigned long flags;
	u8 next_state;
	int err;

	pr_debug("%s:%d qp=%p, %s --> %s\n",
		 __func__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state),
		 to_ib_state_str(attr->qp_state));

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	if (attr_mask & IB_QP_STATE) {
		/* Ensure the state is valid */
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
			err = -EINVAL;
			goto bail0;
		}

		wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));

		if (attr->qp_state == IB_QPS_ERR) {
			spin_lock_irqsave(&qp->lock, flags);
			if (qp->cm_id && qp->state == IB_QPS_RTS) {
				pr_debug("Generating CLOSE event for QP-->ERR, "
					 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
				/* Generate a CLOSE event */
				vq_req->cm_id = qp->cm_id;
				vq_req->event = IW_CM_EVENT_CLOSE;
			}
			spin_unlock_irqrestore(&qp->lock, flags);
		}
		next_state = attr->qp_state;

	} else if (attr_mask & IB_QP_CUR_STATE) {

		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE) {
			err = -EINVAL;
			goto bail0;
		}

		wr.next_qp_state =
		    cpu_to_be32(to_c2_state(attr->cur_qp_state));

		next_state = attr->cur_qp_state;

	} else {
		err = 0;
		goto bail0;
	}

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	if (!err)
		qp->state = next_state;
	else
		pr_debug("%s: c2_errno=%d\n", __func__, err);

	/*
	 * If we're going to error and generating the event here, then
	 * we need to remove the reference because there will be no
	 * close event generated by the adapter.
	 */
	spin_lock_irqsave(&qp->lock, flags);
	if (vq_req->event == IW_CM_EVENT_CLOSE && qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);

	pr_debug("%s:%d qp=%p, cur_state=%s\n",
		 __func__, __LINE__,
		 qp,
		 to_ib_state_str(qp->state));
	return err;
}

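/*
 * A minimal caller sketch (not from this driver): forcing a QP to the
 * error state so outstanding work requests complete in error:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	err = c2_qp_modify(c2dev, qp, &attr, IB_QP_STATE);
 *
 * Only IB_QP_STATE and IB_QP_CUR_STATE are honored; every other QP
 * attribute goes to the adapter as C2_QP_NO_ATTR_CHANGE.
 */
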
int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
			  int ord, int ird)
{
	struct c2wr_qp_modify_req wr;
	struct c2wr_qp_modify_rep *reply;
	struct c2_vq_req *vq_req;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	c2_wr_set_id(&wr, CCWR_QP_MODIFY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;
	wr.ord = cpu_to_be32(ord);
	wr.ird = cpu_to_be32(ird);
	wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
	wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);

	/* reference the request struct */
	vq_req_get(c2dev, vq_req);

	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	reply = (struct c2wr_qp_modify_rep *) (unsigned long)
		vq_req->reply_msg;
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	err = c2_errno(reply);
	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

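/*
 * ORD bounds the number of RDMA reads this QP may have outstanding
 * toward its peer; IRD bounds the number of incoming RDMA reads it
 * will service concurrently. A caller sketch with hypothetical limits:
 *
 *	err = c2_qp_set_read_limits(c2dev, qp, 4, 4);
 */
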
static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_vq_req *vq_req;
	struct c2wr_qp_destroy_req wr;
	struct c2wr_qp_destroy_rep *reply;
	unsigned long flags;
	int err;

	/*
	 * Allocate a verb request message
	 */
	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/*
	 * Initialize the WR
	 */
	c2_wr_set_id(&wr, CCWR_QP_DESTROY);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.qp_handle = qp->adapter_handle;

	/*
	 * reference the request struct.  dereferenced in the int handler.
	 */
	vq_req_get(c2dev, vq_req);

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id && qp->state == IB_QPS_RTS) {
		pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
			 "qp=%p, cm_id=%p\n", qp, qp->cm_id);
		/* Generate a CLOSE event */
		vq_req->qp = qp;
		vq_req->cm_id = qp->cm_id;
		vq_req->event = IW_CM_EVENT_CLOSE;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	/*
	 * Send WR to adapter
	 */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail0;
	}

	/*
	 * Wait for reply from adapter
	 */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail0;

	/*
	 * Process reply
	 */
	reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail0;
	}

	spin_lock_irqsave(&qp->lock, flags);
	if (qp->cm_id) {
		qp->cm_id->rem_ref(qp->cm_id);
		qp->cm_id = NULL;
	}
	spin_unlock_irqrestore(&qp->lock, flags);

	vq_repbuf_free(c2dev, reply);
bail0:
	vq_req_free(c2dev, vq_req);
	return err;
}

static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
	int ret;

	do {
		spin_lock_irq(&c2dev->qp_table.lock);
		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
					c2dev->qp_table.last++, &qp->qpn);
		spin_unlock_irq(&c2dev->qp_table.lock);
	} while ((ret == -EAGAIN) &&
		 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
	return ret;
}

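/*
 * This is the usual two-step idiom for the old IDR API: the allocation
 * itself must run under the spinlock, so idr_pre_get() (which may sleep
 * with GFP_KERNEL) preloads free nodes outside of it, and the loop
 * retries whenever idr_get_new_above() returns -EAGAIN because the
 * preallocation was missing or already consumed.
 */
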
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
{
	spin_lock_irq(&c2dev->qp_table.lock);
	idr_remove(&c2dev->qp_table.idr, qpn);
	spin_unlock_irq(&c2dev->qp_table.lock);
}

struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
{
	struct c2_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&c2dev->qp_table.lock, flags);
	qp = idr_find(&c2dev->qp_table.idr, qpn);
	spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
	return qp;
}

int c2_alloc_qp(struct c2_dev *c2dev,
		struct c2_pd *pd,
		struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
{
	struct c2wr_qp_create_req wr;
	struct c2wr_qp_create_rep *reply;
	struct c2_vq_req *vq_req;
	struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
	struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
	unsigned long peer_pa;
	u32 q_size, msg_size, mmap_size;
	void __iomem *mmap;
	int err;

	err = c2_alloc_qpn(c2dev, qp);
	if (err)
		return err;
	qp->ibqp.qp_num = qp->qpn;
	qp->ibqp.qp_type = IB_QPT_RC;

	/* Allocate the SQ and RQ shared pointers */
	qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->sq_mq.shared_dma, GFP_KERNEL);
	if (!qp->sq_mq.shared) {
		err = -ENOMEM;
		goto bail0;
	}

	qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
					 &qp->rq_mq.shared_dma, GFP_KERNEL);
	if (!qp->rq_mq.shared) {
		err = -ENOMEM;
		goto bail1;
	}

	/* Allocate the verbs request */
	vq_req = vq_req_alloc(c2dev);
	if (vq_req == NULL) {
		err = -ENOMEM;
		goto bail2;
	}

	/* Initialize the work request */
	memset(&wr, 0, sizeof(wr));
	c2_wr_set_id(&wr, CCWR_QP_CREATE);
	wr.hdr.context = (unsigned long) vq_req;
	wr.rnic_handle = c2dev->adapter_handle;
	wr.sq_cq_handle = send_cq->adapter_handle;
	wr.rq_cq_handle = recv_cq->adapter_handle;
	wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
	wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
	wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
			       QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
	wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
	wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
	wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
	wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
	wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
	wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
	wr.pd_id = pd->pd_id;
	wr.user_context = (unsigned long) qp;

	vq_req_get(c2dev, vq_req);

	/* Send the WR to the adapter */
	err = vq_send_wr(c2dev, (union c2wr *) &wr);
	if (err) {
		vq_req_put(c2dev, vq_req);
		goto bail3;
	}

	/* Wait for the verb reply */
	err = vq_wait_for_reply(c2dev, vq_req);
	if (err)
		goto bail3;

	/* Process the reply */
	reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
	if (!reply) {
		err = -ENOMEM;
		goto bail3;
	}

	if ((err = c2_wr_get_result(reply)) != 0) {
		goto bail4;
	}

	/* Fill in the kernel QP struct */
	atomic_set(&qp->refcount, 1);
	qp->adapter_handle = reply->qp_handle;
	qp->state = IB_QPS_RESET;
	qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
	qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;

	/* Initialize the SQ MQ */
	q_size = be32_to_cpu(reply->sq_depth);
	msg_size = be32_to_cpu(reply->sq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail5;
	}

	c2_mq_req_init(&qp->sq_mq,
		       be32_to_cpu(reply->sq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	/* Initialize the RQ mq */
	q_size = be32_to_cpu(reply->rq_depth);
	msg_size = be32_to_cpu(reply->rq_msg_size);
	peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
	mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
	mmap = ioremap_nocache(peer_pa, mmap_size);
	if (!mmap) {
		err = -ENOMEM;
		goto bail6;
	}

	c2_mq_req_init(&qp->rq_mq,
		       be32_to_cpu(reply->rq_mq_index),
		       q_size,
		       msg_size,
		       mmap + sizeof(struct c2_mq_shared),	/* pool start */
		       mmap,					/* peer */
		       C2_MQ_ADAPTER_TARGET);

	vq_repbuf_free(c2dev, reply);
	vq_req_free(c2dev, vq_req);

	return 0;

bail6:
	iounmap(qp->sq_mq.peer);
bail5:
	destroy_qp(c2dev, qp);
bail4:
	vq_repbuf_free(c2dev, reply);
bail3:
	vq_req_free(c2dev, vq_req);
bail2:
	c2_free_mqsp(qp->rq_mq.shared);
bail1:
	c2_free_mqsp(qp->sq_mq.shared);
bail0:
	c2_free_qpn(c2dev, qp->qpn);
	return err;
}

static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_lock_irq(&send_cq->lock);
	else if (send_cq > recv_cq) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
{
	if (send_cq == recv_cq)
		spin_unlock_irq(&send_cq->lock);
	else if (send_cq > recv_cq) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}

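/*
 * Acquiring the two CQ locks in a fixed (pointer) order, and releasing
 * them in the reverse order, avoids an AB-BA deadlock when two threads
 * lock the same pair of CQs concurrently; spin_lock_nested() tells
 * lockdep that taking a second lock of the same class is intentional.
 */
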
void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
{
	struct c2_cq *send_cq;
	struct c2_cq *recv_cq;

	send_cq = to_c2cq(qp->ibqp.send_cq);
	recv_cq = to_c2cq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	c2_lock_cqs(send_cq, recv_cq);
	c2_free_qpn(c2dev, qp->qpn);
	c2_unlock_cqs(send_cq, recv_cq);

	/*
	 * Destroy qp in the rnic...
	 */
	destroy_qp(c2dev, qp);

	/*
	 * Mark any unreaped CQEs as null and void.
	 */
	c2_cq_clean(c2dev, qp, send_cq->cqn);
	if (send_cq != recv_cq)
		c2_cq_clean(c2dev, qp, recv_cq->cqn);

	/*
	 * Unmap the MQs and return the shared pointers
	 * to the message pool.
	 */
	iounmap(qp->sq_mq.peer);
	iounmap(qp->rq_mq.peer);
	c2_free_mqsp(qp->sq_mq.shared);
	c2_free_mqsp(qp->rq_mq.shared);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/*
 * Function: move_sgl
 *
 * Description:
 * Move an SGL from the user's work request struct into a CCIL Work Request
 * message, swapping to WR byte order and ensuring the total length doesn't
 * overflow.
 *
 * IN:
 * dst		- ptr to CCIL Work Request message SGL memory.
 * src		- ptr to the consumer's SGL memory.
 * count	- number of SGEs to move.
 *
 * OUT:
 * p_len		- total length of the SGL, in bytes.
 * actual_count	- number of non-zero-length SGEs actually moved.
 *
 * Return:
 * CCIL status codes.
 */
static int
move_sgl(struct c2_data_addr *dst, struct ib_sge *src, int count, u32 *p_len,
	 u8 *actual_count)
{
	u32 tot = 0;		/* running total */
	u8 acount = 0;		/* running total non-0 len sge's */

	while (count > 0) {
		/*
		 * If the addition of this SGE causes the
		 * total SGL length to exceed 2^32-1, then
		 * fail-n-bail.
		 *
		 * If the current total plus the next element length
		 * wraps, then it will go negative and be less than the
		 * current total...
		 */
		if ((tot + src->length) < tot) {
			return -EINVAL;
		}
		/*
		 * Bug: 1456 (as well as 1498 & 1643)
		 * Skip over any sge's supplied with len=0
		 */
		if (src->length) {
			tot += src->length;
			dst->stag = cpu_to_be32(src->lkey);
			dst->to = cpu_to_be64(src->addr);
			dst->length = cpu_to_be32(src->length);
			dst++;
			acount++;
		} else {
			/*
			 * Bug: 1476 (as well as 1498, 1456 and 1643)
			 * Setup the SGL in the WR to make it easier for the RNIC.
			 * This way, the FW doesn't have to deal with special cases.
			 * Setting length=0 should be sufficient.
			 */
			dst->stag = 0;
			dst->to = 0;
			dst->length = 0;
		}
		src++;
		count--;
	}

	*p_len = tot;
	*actual_count = acount;
	return 0;
}

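/*
 * The overflow check in move_sgl(), worked through: with tot = 0xFFFFFFF0
 * and src->length = 0x20, the 32-bit sum wraps to 0x00000010, which is
 * less than tot, so the SGL is rejected instead of reporting a bogus
 * total length.
 */
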
/*
 * Function: c2_activity (private function)
 *
 * Description:
 * Post an mq index to the host->adapter activity fifo.
 *
 * IN:
 * c2dev	- ptr to c2dev structure
 * mq_index	- mq index to post
 * shared	- value most recently written to shared
 *
 * OUT: none
 *
 * Return:
 * none
 */
static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
{
	/*
	 * First read the register to see if the FIFO is full, and if so,
	 * spin until it's not.  This isn't perfect -- there is no
	 * synchronization among the clients of the register, but in
	 * practice it prevents multiple CPUs from hammering the bus
	 * with PCI RETRY. Note that when this does happen, the card
	 * cannot get on the bus and the card and system hang in a
	 * deadlock -- thus the need for this code. [TOT]
	 */
	while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
		udelay(10);

	__raw_writel(C2_HINT_MAKE(mq_index, shared),
		     c2dev->regs + PCI_BAR0_ADAPTER_HINT);
}

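/*
 * For example, c2_activity(c2dev, 1, 42) writes the hint word
 * C2_HINT_MAKE(1, 42) == 0x0001002A to the adapter hint register,
 * i.e. "mq 1 has a new shared value of 42".
 */
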
/*
 * Function: qp_wr_post
 *
 * Description:
 * This in-line function allocates a MQ msg, then moves the host-copy of
 * the completed WR into msg.  Then it posts the message.
 *
 * IN:
 * q	- ptr to user MQ.
 * wr	- ptr to host-copy of the WR.
 * qp	- ptr to user qp
 * size	- Number of bytes to post.  Assumed to be divisible by 4.
 *
 * OUT: none
 *
 * Return:
 * CCIL status codes.
 */
static int qp_wr_post(struct c2_mq *q, union c2wr *wr, struct c2_qp *qp, u32 size)
{
	union c2wr *msg;

	msg = c2_mq_alloc(q);
	if (msg == NULL) {
		return -EINVAL;
	}
#ifdef CCMSGMAGIC
	((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
#endif

	/*
	 * Since all header fields in the WR are the same as the
	 * CQE, set the following so the adapter need not.
	 */
	c2_wr_set_result(wr, CCERR_PENDING);

	/*
	 * Copy the wr down to the adapter
	 */
	memcpy((void *) msg, (void *) wr, size);

	c2_mq_produce(q);
	return 0;
}

int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
		 struct ib_send_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	u32 flags;
	u32 tot_len;
	u8 actual_sge_count;
	u32 msg_size;
	int err = 0;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	while (ib_wr) {

		flags = 0;
		wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		if (ib_wr->send_flags & IB_SEND_SIGNALED) {
			flags |= SQ_SIGNALED;
		}

		switch (ib_wr->opcode) {
		case IB_WR_SEND:
			if (ib_wr->send_flags & IB_SEND_SOLICITED) {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
				msg_size = sizeof(struct c2wr_send_req);
			} else {
				c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
				msg_size = sizeof(struct c2wr_send_req);
			}

			wr.sqwr.send.remote_stag = 0;
			msg_size += sizeof(struct c2_data_addr) * ib_wr->num_sge;
			if (ib_wr->num_sge > qp->send_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			err = move_sgl((struct c2_data_addr *) &(wr.sqwr.send.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_WRITE:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
			msg_size = sizeof(struct c2wr_rdma_write_req) +
			    (sizeof(struct c2_data_addr) * ib_wr->num_sge);
			if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
				err = -EINVAL;
				break;
			}
			if (ib_wr->send_flags & IB_SEND_FENCE) {
				flags |= SQ_READ_FENCE;
			}
			wr.sqwr.rdma_write.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_write.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			err = move_sgl((struct c2_data_addr *)
				       &(wr.sqwr.rdma_write.data),
				       ib_wr->sg_list,
				       ib_wr->num_sge,
				       &tot_len, &actual_sge_count);
			wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
			c2_wr_set_sge_count(&wr, actual_sge_count);
			break;
		case IB_WR_RDMA_READ:
			c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
			msg_size = sizeof(struct c2wr_rdma_read_req);

			/* iWARP only supports 1 sge for RDMA reads */
			if (ib_wr->num_sge > 1) {
				err = -EINVAL;
				break;
			}

			/*
			 * Move the local and remote stag/to/len into the WR.
			 */
			wr.sqwr.rdma_read.local_stag =
			    cpu_to_be32(ib_wr->sg_list->lkey);
			wr.sqwr.rdma_read.local_to =
			    cpu_to_be64(ib_wr->sg_list->addr);
			wr.sqwr.rdma_read.remote_stag =
			    cpu_to_be32(ib_wr->wr.rdma.rkey);
			wr.sqwr.rdma_read.remote_to =
			    cpu_to_be64(ib_wr->wr.rdma.remote_addr);
			wr.sqwr.rdma_read.length =
			    cpu_to_be32(ib_wr->sg_list->length);
			break;
		default:
			/* error */
			msg_size = 0;
			err = -EINVAL;
			break;
		}

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		/*
		 * Store flags
		 */
		c2_wr_set_flags(&wr, flags);

		/*
		 * Post the puppy!
		 */
		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO.
		 */
		c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	return err;
}

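/*
 * A minimal caller sketch (addresses and keys are hypothetical),
 * posting one signaled SEND with a single SGE:
 *
 *	struct ib_sge sge = {
 *		.addr = dma_addr,
 *		.length = len,
 *		.lkey = mr->lkey,
 *	};
 *	struct ib_send_wr swr = {
 *		.wr_id = 1,
 *		.opcode = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	}, *bad;
 *
 *	err = c2_post_send(ibqp, &swr, &bad);
 */
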
int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
		    struct ib_recv_wr **bad_wr)
{
	struct c2_dev *c2dev = to_c2dev(ibqp->device);
	struct c2_qp *qp = to_c2qp(ibqp);
	union c2wr wr;
	unsigned long lock_flags;
	u32 tot_len;
	u8 actual_sge_count;
	int err = 0;

	if (qp->state > IB_QPS_RTS)
		return -EINVAL;

	/*
	 * Try and post each work request
	 */
	while (ib_wr) {

		if (ib_wr->num_sge > qp->recv_sgl_depth) {
			err = -EINVAL;
			break;
		}

		/*
		 * Create local host-copy of the WR
		 */
		wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
		c2_wr_set_id(&wr, CCWR_RECV);
		c2_wr_set_flags(&wr, 0);

		/* sge_count is limited to eight bits. */
		BUG_ON(ib_wr->num_sge >= 256);
		err = move_sgl((struct c2_data_addr *) &(wr.rqwr.data),
			       ib_wr->sg_list,
			       ib_wr->num_sge, &tot_len, &actual_sge_count);
		c2_wr_set_sge_count(&wr, actual_sge_count);

		/*
		 * If we had an error on the last wr build, then
		 * break out.  Possible errors include bogus WR
		 * type, and a bogus SGL length...
		 */
		if (err) {
			break;
		}

		spin_lock_irqsave(&qp->lock, lock_flags);
		err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
		if (err) {
			spin_unlock_irqrestore(&qp->lock, lock_flags);
			break;
		}

		/*
		 * Enqueue mq index to activity FIFO
		 */
		c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
		spin_unlock_irqrestore(&qp->lock, lock_flags);

		ib_wr = ib_wr->next;
	}

	if (err)
		*bad_wr = ib_wr;
	return err;
}

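/*
 * Receives follow the same shape (values again hypothetical):
 *
 *	struct ib_sge sge = { .addr = dma_addr, .length = len, .lkey = lkey };
 *	struct ib_recv_wr rwr = { .wr_id = 2, .sg_list = &sge, .num_sge = 1 };
 *	struct ib_recv_wr *bad;
 *
 *	err = c2_post_receive(ibqp, &rwr, &bad);
 *
 * Unlike sends, every receive is posted at the RQ's fixed message size
 * (qp->rq_mq.msg_size) rather than a size computed per WR.
 */
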
void __devinit c2_init_qp_table(struct c2_dev *c2dev)
{
	spin_lock_init(&c2dev->qp_table.lock);
	idr_init(&c2dev->qp_table.idr);
}

void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
{
	idr_destroy(&c2dev->qp_table.idr);
}