drivers/infiniband/sw/rxe/rxe_qp.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

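/* Validate the init attributes passed to the create qp verb: both CQs
 * must be present, the capabilities must fit the device, and only one
 * SMI and one GSI QP may exist per port.
 */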
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

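/* Responder resources hold the state needed to replay RDMA READ and
 * atomic responses; one slot is allocated per allowed outstanding
 * destination read/atomic operation (max_dest_rd_atomic).
 */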
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

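/* Release whatever a single resource slot holds: the skb and qp
 * reference saved for an atomic reply, or the MR reference taken for
 * an RDMA READ reply.
 */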
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

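/* Unlike free_rd_atomic_resources(), this only releases what each slot
 * holds and leaves the resources array itself allocated.
 */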
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

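/* Initialize state common to all QP types: signalling mode, default
 * path MTU, QP number selection, and the per-QP lists, queues and locks.
 */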
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type = init->sq_sig_type;
	qp->attr.path_mtu = 1;
	qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn = qp->pelem.index;
	port = &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num = 0;
		port->qp_smi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num = 1;
		port->qp_gsi_index = qpn;
		qp->attr.port_num = init->port_num;
		break;

	default:
		qp->ibqp.qp_num = qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

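/* Initialize the requester side of the QP: the kernel UDP socket used
 * to send packets, the send queue and its mmap info for user QPs, the
 * req/comp tasks, and the RC retransmit timers.
 */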
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
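	/* RXE_ROCE_V2_SPORT is the base of that range (0xc000) and the
	 * 14-bit hash adds at most 0x3fff, so src_port always falls in
	 * 0xc000 - 0xffff.
	 */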
	qp->sq.max_wr = init->cap.max_send_wr;

	/* These caps are limited by rxe_qp_chk_cap() done by the caller */
	wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
			 init->cap.max_inline_data);
	qp->sq.max_sge = init->cap.max_send_sge =
		wqe_size / sizeof(struct ib_sge);
	qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
	wqe_size += sizeof(struct rxe_send_wqe);

	qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index = producer_index(qp->sq.queue);
	qp->req.state = QP_STATE_RESET;
	qp->req.opcode = -1;
	qp->comp.opcode = -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

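/* Initialize the responder side of the QP: the receive queue and its
 * mmap info (unless an SRQ is used) and the resp task.
 */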
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr = init->cap.max_recv_wr;
		qp->rq.max_sge = init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode = OPCODE_NONE;
	qp->resp.msn = 0;
	qp->resp.state = QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd = pd;
	qp->rcq = rcq;
	qp->scq = scq;
	qp->srq = srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler = qp->ibqp.event_handler;
	init->qp_context = qp->ibqp.qp_context;
	init->send_cq = qp->ibqp.send_cq;
	init->recv_cq = qp->ibqp.recv_cq;
	init->srq = qp->ibqp.srq;

	init->cap.max_send_wr = qp->sq.max_wr;
	init->cap.max_send_sge = qp->sq.max_sge;
	init->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr = qp->rq.max_wr;
		init->cap.max_recv_sge = qp->rq.max_sge;
	}

	init->sq_sig_type = qp->sq_sig_type;

	init->qp_type = qp->ibqp.qp_type;
	init->port_num = 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = attr->max_rd_atomic ?
			roundup_pow_of_two(attr->max_rd_atomic) : 0;

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
			roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV)
		rxe_init_av(&attr->ah_attr, &qp->pri_av);

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
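			/* e.g. attr->timeout == 14 gives 4096 << 14 ns,
			 * roughly a 67 ms local ACK timeout
			 */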
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn = qp->resp.psn;
	attr->sq_psn = qp->req.psn;

	attr->cap.max_send_wr = qp->sq.max_wr;
	attr->cap.max_send_sge = qp->sq.max_sge;
	attr->cap.max_inline_data = qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr = qp->rq.max_wr;
		attr->cap.max_recv_sge = qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}