/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
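
/* Validate the capabilities requested for a new or modified QP against the
 * device limits advertised in rxe->attr.  When the QP is attached to an SRQ
 * (has_srq), the receive queue limits are not checked because the SRQ
 * supplies the receive buffers instead of the QP's own receive queue.
 */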
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		return -EINVAL;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		return -EINVAL;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			return -EINVAL;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			return -EINVAL;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		return -EINVAL;
	}

	return 0;
}
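
/* Check the init attributes for a new QP.  Beyond the generic capability
 * checks, the special QPs (SMI/QP0 and GSI/QP1) may exist at most once per
 * port, so creation is rejected if the port already has one registered.
 */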
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		return -EINVAL;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		return -EINVAL;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (port_num != 1) {
			pr_warn("invalid port = %d\n", port_num);
			return -EINVAL;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			return -EINVAL;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			return -EINVAL;
		}
	}

	return 0;
}
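
/* The responder keeps an array of "rd atomic" resources, one slot per
 * outstanding inbound RDMA READ or atomic operation, so that a response can
 * be replayed when the requester retries.  The array is sized from
 * max_dest_rd_atomic when the QP is modified.
 */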
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}
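
/* Set up the state common to all QP types.  SMI and GSI QPs receive the
 * well-known QP numbers 0 and 1 respectively; ordinary QPs use the index
 * allocated from the rxe pool as their QPN.
 */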
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}
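
/* Initialize the requester side of the QP: a kernel UDP socket used as the
 * source of outgoing packets, the send queue itself, and the requester and
 * completer tasks.  The send WQE size must cover either the scatter/gather
 * list or the inline data area, whichever is larger.
 */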
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init,
			   struct ib_ucontext *context,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}
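
/* Initialize the responder side of the QP: the receive queue (skipped when
 * the QP uses an SRQ, which then supplies all receive buffers) and the
 * responder task.
 */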
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_ucontext *context,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}
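
/* Creation takes a reference on the PD, the CQs and the optional SRQ up
 * front; the error paths and rxe_qp_do_cleanup() drop those references
 * again on failure or destroy.
 */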
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
	struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, context, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	qp->pd = NULL;
	qp->rcq = NULL;
	qp->scq = NULL;
	qp->srq = NULL;

	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}
/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}
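
/* rxe is a single-port device, so any port or alt_port number other than 1
 * is rejected below, and RC timeouts are limited to the 5-bit field defined
 * by the IBA (0..31).
 */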
/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
				IB_LINK_LAYER_ETHERNET)) {
		pr_warn("invalid mask or state for qp\n");
		return -EINVAL;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				return -EINVAL;
		}
	}

	if (mask & IB_QP_PORT) {
		if (attr->port_num != 1) {
			pr_warn("invalid port %d\n", attr->port_num);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		return -EINVAL;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		return -EINVAL;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			return -EINVAL;

		if (attr->alt_port_num != 1) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			return -EINVAL;
		}

		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			return -EINVAL;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			return -EINVAL;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			return -EINVAL;
		}
	}

	return 0;
}
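
/* Resetting a QP requires its tasks to be quiesced: they are disabled,
 * both sides are moved to QP_STATE_RESET, each state machine is then run
 * synchronously once so it can drain its work and packet queues, and
 * finally the tasks are re-enabled.
 */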
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves drain work and packet queues
	 * etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
		rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
				 &attr->alt_ah_attr);
		rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
		}
	}
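
	/* Worked example (values assumed for illustration): attr->timeout = 14
	 * gives 4096ns << 14 = 4096 * 16384 ns ~= 67.1 ms, i.e. the
	 * 4.096us * 2^14 that the IBA formula prescribes; with HZ=250 that
	 * comes to roughly 16 jiffies.  A computed value of 0 jiffies is
	 * bumped to 1 so that a configured timeout never becomes "no timeout".
	 */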
	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}
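
/* While the requester is draining (SQD), repeated ib_query_qp() calls will
 * see sq_draining = 1; applications typically poll on that flag, so the
 * query path yields the processor instead of busy-looping.
 */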
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}
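
/* Destroy stops everything that could still touch the QP: the RC timers
 * are cancelled, the tasks are cleaned up, and each state machine is run
 * one last time to flush out pending receive WRs and requests.
 */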
/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}
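
/* execute_in_process_context() either runs the cleanup synchronously (when
 * the final reference is dropped from process context) or defers it to the
 * cleanup_work work item, since the socket release above can sleep and so
 * must not run from softirq context.
 */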
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}