/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

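/* called by the create qp verb to validate the init attributes: both
 * completion queues must be present, the capacities must fit the device,
 * and at most one SMI and one GSI QP may exist per port
 */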
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

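/* The responder keeps an array of resources, one per outstanding inbound
 * RDMA read or atomic operation, sized by max_dest_rd_atomic.
 */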
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

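/* Release whatever a single responder resource holds: the reply skb for
 * an atomic operation or the MR reference taken for an RDMA read.
 */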
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                rxe_drop_ref(qp);
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

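/* Initialize state common to all QP types and assign the QP number.
 * SMI and GSI QPs also record themselves in the port's special QP slots
 * (qp_smi_index/qp_gsi_index) checked by rxe_qp_chk_init().
 */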
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        int qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = 1;
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->pelem.index;
        port = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

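/* Set up the requester side: the kernel UDP socket used to transmit
 * packets, the send queue, and the requester and completer tasks.  The
 * send WQE size is the larger of the WQE header plus the SGE array and
 * the WQE header plus the inline data area, so inline sends reuse the
 * SGE space.
 */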
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT +
                (hash_32_generic(qp_num(qp), 14) & 0x3fff);

        qp->sq.max_wr = init->cap.max_send_wr;
        qp->sq.max_sge = init->cap.max_send_sge;
        qp->sq.max_inline = init->cap.max_inline_data;

        wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
                         qp->sq.max_sge * sizeof(struct ib_sge),
                         sizeof(struct rxe_send_wqe) +
                         qp->sq.max_inline);

        qp->sq.queue = rxe_queue_init(rxe,
                                      &qp->sq.max_wr,
                                      wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                return err;
        }

        qp->req.wqe_index = producer_index(qp->sq.queue);
        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }

        return 0;
}

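/* Set up the responder side: the receive queue (unless an SRQ supplies
 * receive WQEs) and the responder task.
 */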
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
                                              wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        rxe_drop_ref(pd);
        rxe_drop_ref(rcq);
        rxe_drop_ref(scq);
        if (srq)
                rxe_drop_ref(srq);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                     attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                     attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let state machines reset themselves drain work and packet queues
         * etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic =
                        __roundup_pow_of_two(attr->max_dest_rd_atomic);

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV) {
                rxe_init_av(&attr->ah_attr, &qp->pri_av);
        }

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
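                        /* e.g. attr->timeout = 14 gives 4096ns << 14,
                         * about 67.1ms.  A nonzero timeout that rounds
                         * down to zero jiffies is bumped to one jiffy so
                         * the retransmit timer still fires.
                         */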
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

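/* Teardown is routed through execute_in_process_context() so that blocking
 * operations such as sock_release() always run in process context, no
 * matter which context drops the last QP reference.
 */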
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}