/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *     - Redistributions of source code must retain the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer.
 *
 *     - Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
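
/* printable names for the driver's internal QP states, indexed by the
 * QP_STATE_* values
 */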
char *rxe_qp_state_name[] = {
        [QP_STATE_RESET]        = "RESET",
        [QP_STATE_INIT]         = "INIT",
        [QP_STATE_READY]        = "READY",
        [QP_STATE_DRAIN]        = "DRAIN",
        [QP_STATE_DRAINED]      = "DRAINED",
        [QP_STATE_ERROR]        = "ERROR",
};
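
/* check the requested capacities (work requests, SGEs, inline data)
 * against the limits advertised by the rxe device; the recv limits are
 * skipped when the QP will use an SRQ
 */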
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}
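
/* validate the create_qp attributes: both CQs must be present, the
 * requested capacities must fit the device, and at most one SMI and one
 * GSI QP may exist per port
 */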
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (port_num != 1) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
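
/* allocate the responder resource array used to hold state for inbound
 * RDMA READ and atomic requests; callers size it by max_dest_rd_atomic
 */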
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}
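
/* free the contents of each responder resource and then the array itself */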
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}
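
/* release whatever a single responder resource still holds: the saved
 * skb of an atomic reply or the MR reference taken for a READ reply
 */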
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}
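
/* free the contents of the responder resources without freeing the
 * array itself; used when the QP is reset
 */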
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}
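
/* initialization common to the requester and responder sides of a new
 * QP: QPN selection, default MTU, packet lists, locks and counters
 */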
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = 1;
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->pelem.index;
        port = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}
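
/* set up the requester side: the kernel UDP socket used to send packets,
 * the send queue and its mmap info, the requester and completer tasks
 * and, for RC QPs, the retransmit and RNR NAK timers
 */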
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init,
                           struct ib_ucontext *context, struct ib_udata *udata)
{
        int err;
        int wqe_size;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        qp->sq.max_wr = init->cap.max_send_wr;
        qp->sq.max_sge = init->cap.max_send_sge;
        qp->sq.max_inline = init->cap.max_inline_data;

        wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
                         qp->sq.max_sge * sizeof(struct ib_sge),
                         sizeof(struct rxe_send_wqe) +
                         qp->sq.max_inline);

        qp->sq.queue = rxe_queue_init(rxe,
                                      &qp->sq.max_wr,
                                      wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, udata, true,
                           context, qp->sq.queue->buf,
                           qp->sq.queue->buf_size, &qp->sq.queue->ip);
        if (err) {
                kvfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                return err;
        }

        qp->req.wqe_index = producer_index(qp->sq.queue);
        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }

        return 0;
}
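
/* set up the responder side: the receive queue and its mmap info
 * (skipped when an SRQ is attached) and the responder task
 */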
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_ucontext *context, struct ib_udata *udata)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
                                              wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, udata, false, context,
                                   qp->rq.queue->buf,
                                   qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        kvfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init, struct ib_udata *udata,
                     struct ib_pd *ibpd)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
        struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, context, udata);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, context, udata);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
        rxe_drop_ref(rcq);
        rxe_drop_ref(pd);

        return err;
}
/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;
        init->port_num = 1;

        return 0;
}
/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
                                IB_LINK_LAYER_ETHERNET)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (attr->port_num != 1) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (attr->alt_port_num != 1) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let state machines reset themselves, drain work and packet
         * queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        union ib_gid sgid;
        struct ib_gid_attr sgid_attr;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic =
                        __roundup_pow_of_two(attr->max_dest_rd_atomic);

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }
        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV) {
                ib_get_cached_gid(&rxe->ib_dev, 1,
                                  rdma_ah_read_grh(&attr->ah_attr)->sgid_index,
                                  &sgid, &sgid_attr);
                rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
                rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr,
                                    &sgid_attr, &sgid);
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);
        }

        if (mask & IB_QP_ALT_PATH) {
                u8 sgid_index =
                        rdma_ah_read_grh(&attr->alt_ah_attr)->sgid_index;

                ib_get_cached_gid(&rxe->ib_dev, 1, sgid_index,
                                  &sgid, &sgid_attr);

                rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
                                 &attr->alt_ah_attr);
                rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr,
                                    &sgid_attr, &sgid);
                if (sgid_attr.ndev)
                        dev_put(sgid_attr.ndev);

                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }
        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }
        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }
        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}
/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}