// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

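        /* a QP created on an SRQ posts its receive work requests to the SRQ,
         * so the receive caps are only checked when no SRQ is attached.
         */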
        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

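/* resources[] backs the responder's ring (indexed by res_head/res_tail) of
 * saved RDMA read/atomic replies; n is taken from max_dest_rd_atomic when
 * the QP is modified (see rxe_qp_from_attr() below).
 */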
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                rxe_drop_ref(qp);
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = 1;
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->pelem.index;
        port = &rxe->port;

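        /* SMI and GSI QPs are exposed as the well-known QP0 and QP1; the
         * per-port qp_smi_index/qp_gsi_index fields below still record the
         * real pool index (qpn).
         */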
        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range.
         */
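        /* note: RXE_ROCE_V2_SPORT (0xc000) is the base of that range, and
         * masking the hash to 14 bits keeps src_port within the 16384 ports
         * above it.
         */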
        qp->src_port = RXE_ROCE_V2_SPORT +
                (hash_32_generic(qp_num(qp), 14) & 0x3fff);
        qp->sq.max_wr = init->cap.max_send_wr;

        /* These caps are limited by rxe_qp_chk_cap() done by the caller */
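        /* the inline data and the SGE array occupy the same space in the
         * send WQE, so both caps are derived below from whichever of the
         * two requested sizes is larger.
         */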
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = init->cap.max_send_sge =
                wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);

        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                return err;
        }

        qp->req.wqe_index = producer_index(qp->sq.queue);
        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }

        return 0;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
                                              wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

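        /* the QP takes its own references on the PD, both CQs and the
         * optional SRQ; they are dropped on the error path below or in
         * rxe_qp_do_cleanup() when the last QP reference goes away.
         */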
        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        rxe_drop_ref(pd);
        rxe_drop_ref(rcq);
        rxe_drop_ref(scq);
        if (srq)
                rxe_drop_ref(srq);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;
        init->port_num = 1;

        return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                     attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                     attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let state machines reset themselves drain work and packet queues
         * etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
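        /* once req.state is QP_STATE_DRAIN the requester stops issuing new
         * WQEs; kicking the tasks below lets outstanding work complete, after
         * which the requester is expected to report QP_STATE_DRAINED (that
         * transition lives in the requester/completer code, not in this file).
         */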
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
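        /* running the tasks while in the ERROR state should flush any
         * outstanding WQEs to the CQs as flush-error completions (handled by
         * the requester/completer/responder code, not shown here).
         */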
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
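                        /* worked example: attr->timeout = 14 gives
                         * 4096 ns << 14 = 67,108,864 ns, i.e. roughly 67 ms.
                         */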
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

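        /* execute_in_process_context() runs rxe_qp_do_cleanup() right away
         * when called from process context and otherwise defers it to the
         * cleanup_work item (general behavior of that helper).
         */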
        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}