/* Linux 4.19.133 - drivers/infiniband/sw/rxe/rxe_qp.c */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                pr_warn("invalid send wr = %d > %d\n",
                        cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                pr_warn("invalid send sge = %d > %d\n",
                        cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        pr_warn("invalid recv wr = %d > %d\n",
                                cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        pr_warn("invalid recv sge = %d > %d\n",
                                cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                pr_warn("invalid max inline data = %d > %d\n",
                        cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}
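
/*
 * Usage sketch (hypothetical values, illustration only): a caller checking
 * requested capabilities against device limits before building a QP.  Real
 * callers pass the ib_qp_cap supplied by the ULP, as rxe_qp_chk_init()
 * does below.  With has_srq = 0 the recv limits are checked as well.
 *
 *	struct ib_qp_cap cap = {
 *		.max_send_wr	 = 64,
 *		.max_send_sge	 = 4,
 *		.max_recv_wr	 = 64,
 *		.max_recv_sge	 = 4,
 *		.max_inline_data = 128,
 *	};
 *
 *	if (rxe_qp_chk_cap(rxe, &cap, 0))
 *		return -EINVAL;
 */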
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        if (!init->recv_cq || !init->send_cq) {
                pr_warn("missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
                if (port_num != 1) {
                        pr_warn("invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;

                if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
                        pr_warn("SMI QP exists for port %d\n", port_num);
                        goto err1;
                }

                if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
                        pr_warn("GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
/* allocate the responder's array of resources for inbound RDMA read and
 * atomic operations, one slot per outstanding request (max_dest_rd_atomic)
 */
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}
static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(qp, res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
        if (res->type == RXE_ATOMIC_MASK) {
                rxe_drop_ref(qp);
                kfree_skb(res->atomic.skb);
        } else if (res->type == RXE_READ_MASK) {
                if (res->read.mr)
                        rxe_drop_ref(res->read.mr);
        }
        res->type = 0;
}
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(qp, res);
                }
        }
}
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type = init->sq_sig_type;
        qp->attr.path_mtu = 1;
        qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn = qp->pelem.index;
        port = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_SMI:
                qp->ibqp.qp_num = 0;
                port->qp_smi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        case IB_QPT_GSI:
                qp->ibqp.qp_num = 1;
                port->qp_gsi_index = qpn;
                qp->attr.port_num = init->port_num;
                break;

        default:
                qp->ibqp.qp_num = qpn;
                break;
        }

        INIT_LIST_HEAD(&qp->grp_list);

        skb_queue_head_init(&qp->send_pkts);

        spin_lock_init(&qp->grp_lock);
        spin_lock_init(&qp->state_lock);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init,
                           struct ib_ucontext *context,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        /* kernel UDP socket used to send this QP's packets; it also caches
         * the route (see sk_dst_reset() in rxe_qp_do_cleanup())
         */
        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = qp;

        qp->sq.max_wr = init->cap.max_send_wr;
        qp->sq.max_sge = init->cap.max_send_sge;
        qp->sq.max_inline = init->cap.max_inline_data;

        wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
                         qp->sq.max_sge * sizeof(struct ib_sge),
                         sizeof(struct rxe_send_wqe) +
                         qp->sq.max_inline);

        qp->sq.queue = rxe_queue_init(rxe,
                                      &qp->sq.max_wr,
                                      wqe_size);
        if (!qp->sq.queue)
                return -ENOMEM;

        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, context,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);

        if (err) {
                vfree(qp->sq.queue->buf);
                kfree(qp->sq.queue);
                return err;
        }

        qp->req.wqe_index = producer_index(qp->sq.queue);
        qp->req.state = QP_STATE_RESET;
        qp->req.opcode = -1;
        qp->comp.opcode = -1;

        spin_lock_init(&qp->sq.sq_lock);
        skb_queue_head_init(&qp->req_pkts);

        rxe_init_task(rxe, &qp->req.task, qp,
                      rxe_requester, "req");
        rxe_init_task(rxe, &qp->comp.task, qp,
                      rxe_completer, "comp");

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}
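
/*
 * Worked example for the wqe_size computation above (illustrative numbers,
 * not taken from a real device): a send WQE must hold either a gather list
 * or inline data in the same slot, so the two layouts are compared:
 *
 *	sizeof(struct rxe_send_wqe) + max_send_sge * sizeof(struct ib_sge)
 *	sizeof(struct rxe_send_wqe) + max_inline_data
 *
 * With max_send_sge = 4, max_inline_data = 128 and a 16-byte struct ib_sge,
 * that is hdr + 64 vs. hdr + 128, so the inline layout sets wqe_size.
 */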
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_ucontext *context,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;
        int wqe_size;

        if (!qp->srq) {
                qp->rq.max_wr = init->cap.max_recv_wr;
                qp->rq.max_sge = init->cap.max_recv_sge;

                wqe_size = rcv_wqe_size(qp->rq.max_sge);

                pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
                         qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

                qp->rq.queue = rxe_queue_init(rxe,
                                              &qp->rq.max_wr,
                                              wqe_size);
                if (!qp->rq.queue)
                        return -ENOMEM;

                err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, context,
                                   qp->rq.queue->buf, qp->rq.queue->buf_size,
                                   &qp->rq.queue->ip);
                if (err) {
                        vfree(qp->rq.queue->buf);
                        kfree(qp->rq.queue);
                        return err;
                }
        }

        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->resp_pkts);

        rxe_init_task(rxe, &qp->resp.task, qp,
                      rxe_responder, "resp");

        qp->resp.opcode = OPCODE_NONE;
        qp->resp.msn = 0;
        qp->resp.state = QP_STATE_RESET;

        return 0;
}
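
/*
 * Note (assumption, based on the rcv_wqe_size() helper in rxe_loc.h): a
 * receive WQE is sized as the fixed header plus the scatter list,
 * approximately:
 *
 *	sizeof(struct rxe_recv_wqe) + max_recv_sge * sizeof(struct ib_sge)
 *
 * so max_recv_sge = 4 with a 16-byte struct ib_sge adds 64 bytes of SGE
 * space per receive WQE.
 */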
/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
        struct ib_ucontext *context = ibpd->uobject ? ibpd->uobject->context : NULL;

        rxe_add_ref(pd);
        rxe_add_ref(rcq);
        rxe_add_ref(scq);
        if (srq)
                rxe_add_ref(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, context, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, context, uresp);
        if (err)
                goto err2;

        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
err1:
        if (srq)
                rxe_drop_ref(srq);
        rxe_drop_ref(scq);
        rxe_drop_ref(rcq);
        rxe_drop_ref(pd);

        return err;
}
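
/*
 * Call-path sketch (assumed, simplified): the verbs layer reaches
 * rxe_qp_from_init() from ib_create_qp() through the driver's create_qp
 * method in rxe_verbs.c, roughly:
 *
 *	err = rxe_qp_chk_init(rxe, init);   (validate caps, port, SMI/GSI)
 *	qp = rxe_alloc(&rxe->qp_pool);      (pool object; sets qp->pelem)
 *	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
 *
 * On failure the caller drops its pool reference; the pd/scq/rcq/srq
 * references taken above are released on the err1/err2 paths here.
 */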
/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler = qp->ibqp.event_handler;
        init->qp_context = qp->ibqp.qp_context;
        init->send_cq = qp->ibqp.send_cq;
        init->recv_cq = qp->ibqp.recv_cq;
        init->srq = qp->ibqp.srq;

        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr = qp->rq.max_wr;
                init->cap.max_recv_sge = qp->rq.max_sge;
        }

        init->sq_sig_type = qp->sq_sig_type;

        init->qp_type = qp->ibqp.qp_type;
        init->port_num = 1;

        return 0;
}
/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
                                        attr->cur_qp_state : qp->attr.qp_state;
        enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
                                        attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
                                IB_LINK_LAYER_ETHERNET)) {
                pr_warn("invalid mask or state for qp\n");
                goto err1;
        }

        if (mask & IB_QP_STATE) {
                if (cur_state == IB_QPS_SQD) {
                        if (qp->req.state == QP_STATE_DRAIN &&
                            new_state != IB_QPS_ERR)
                                goto err1;
                }
        }

        if (mask & IB_QP_PORT) {
                if (attr->port_num != 1) {
                        pr_warn("invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
                        goto err1;
                if (attr->alt_port_num != 1) {
                        pr_warn("invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        pr_warn("invalid QP alt timeout %d > 31\n",
                                attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        pr_debug("invalid mtu (%d) > (%d)\n",
                                 ib_mtu_enum_to_int(mtu),
                                 ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        pr_warn("invalid max_rd_atomic %d > %d\n",
                                attr->max_rd_atomic,
                                rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        pr_warn("invalid QP timeout %d > 31\n",
                                attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}
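
/*
 * Example (hypothetical attribute set): a RESET -> INIT transition as a ULP
 * would request it.  ib_modify_qp_is_ok() verifies that the mask carries
 * exactly the attributes this transition requires for the QP type.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state	 = IB_QPS_INIT,
 *		.pkey_index	 = 0,
 *		.port_num	 = 1,		(rxe exposes port 1 only)
 *		.qp_access_flags = IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *		   IB_QP_ACCESS_FLAGS;
 *
 *	err = rxe_qp_chk_attr(rxe, qp, &attr, mask);
 */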
/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->resp.task);

        /* stop request/comp */
        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_disable_task(&qp->comp.task);
                rxe_disable_task(&qp->req.task);
        }

        /* move qp to the reset state */
        qp->req.state = QP_STATE_RESET;
        qp->resp.state = QP_STATE_RESET;

        /* let the state machines reset themselves, drain the work and
         * packet queues, etc.
         */
        __rxe_do_task(&qp->resp.task);

        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
                rxe_queue_reset(qp->sq.queue);
        }

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->resp.task);

        if (qp->sq.queue) {
                if (qp_type(qp) == IB_QPT_RC)
                        rxe_enable_task(&qp->comp.task);

                rxe_enable_task(&qp->req.task);
        }
}
/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
        if (qp->sq.queue) {
                if (qp->req.state != QP_STATE_DRAINED) {
                        qp->req.state = QP_STATE_DRAIN;
                        if (qp_type(qp) == IB_QPT_RC)
                                rxe_run_task(&qp->comp.task, 1);
                        else
                                __rxe_do_task(&qp->comp.task);
                        rxe_run_task(&qp->req.task, 1);
                }
        }
}
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        qp->req.state = QP_STATE_ERROR;
        qp->resp.state = QP_STATE_ERROR;
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_run_task(&qp->resp.task, 1);

        if (qp_type(qp) == IB_QPT_RC)
                rxe_run_task(&qp->comp.task, 1);
        else
                __rxe_do_task(&qp->comp.task);
        rxe_run_task(&qp->req.task, 1);
}
/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic =
                        __roundup_pow_of_two(attr->max_dest_rd_atomic);

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV) {
                rxe_av_from_attr(attr->port_num, &qp->pri_av, &attr->ah_attr);
                rxe_av_fill_ip_info(&qp->pri_av, &attr->ah_attr);
        }

        if (mask & IB_QP_ALT_PATH) {
                rxe_av_from_attr(attr->alt_port_num, &qp->alt_av,
                                 &attr->alt_ah_attr);
                rxe_av_fill_ip_info(&qp->alt_av, &attr->alt_ah_attr);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
                }
        }
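
        /*
         * Worked example for the timeout conversion above: attr->timeout = 14
         * gives 4096ULL << 14 = 67,108,864 ns, about 67 ms; with HZ = 250
         * (4 ms per jiffy, an assumed config) nsecs_to_jiffies() yields 16.
         * A result of 0 is bumped to 1 so a small non-zero timeout never
         * degenerates into "no timeout".
         */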

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
                         attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
                         attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
                         qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
                         attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        if (mask & IB_QP_STATE) {
                qp->attr.qp_state = attr->qp_state;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        pr_debug("qp#%d state -> RESET\n", qp_num(qp));
                        rxe_qp_reset(qp);
                        break;

                case IB_QPS_INIT:
                        pr_debug("qp#%d state -> INIT\n", qp_num(qp));
                        qp->req.state = QP_STATE_INIT;
                        qp->resp.state = QP_STATE_INIT;
                        break;

                case IB_QPS_RTR:
                        pr_debug("qp#%d state -> RTR\n", qp_num(qp));
                        qp->resp.state = QP_STATE_READY;
                        break;

                case IB_QPS_RTS:
                        pr_debug("qp#%d state -> RTS\n", qp_num(qp));
                        qp->req.state = QP_STATE_READY;
                        break;

                case IB_QPS_SQD:
                        pr_debug("qp#%d state -> SQD\n", qp_num(qp));
                        rxe_qp_drain(qp);
                        break;

                case IB_QPS_SQE:
                        pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
                        /* Not possible from modify_qp. */
                        break;

                case IB_QPS_ERR:
                        pr_debug("qp#%d state -> ERR\n", qp_num(qp));
                        rxe_qp_error(qp);
                        break;
                }
        }

        return 0;
}
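
/*
 * Usage sketch (hypothetical, connection setup from a ULP's point of view):
 * the canonical RC bring-up drives rxe_qp_from_attr() three times through
 * ib_modify_qp():
 *
 *	RESET -> INIT	mask: IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT |
 *			      IB_QP_ACCESS_FLAGS
 *	INIT  -> RTR	mask adds IB_QP_AV, IB_QP_PATH_MTU, IB_QP_DEST_QPN,
 *			      IB_QP_RQ_PSN, IB_QP_MAX_DEST_RD_ATOMIC,
 *			      IB_QP_MIN_RNR_TIMER
 *	RTR   -> RTS	mask adds IB_QP_SQ_PSN, IB_QP_TIMEOUT,
 *			      IB_QP_RETRY_CNT, IB_QP_RNR_RETRY,
 *			      IB_QP_MAX_QP_RD_ATOMIC
 *
 * Each call lands in the IB_QP_STATE switch above; RTR readies the
 * responder (resp.state) and RTS readies the requester (req.state).
 */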
/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        *attr = qp->attr;

        attr->rq_psn = qp->resp.psn;
        attr->sq_psn = qp->req.psn;

        attr->cap.max_send_wr = qp->sq.max_wr;
        attr->cap.max_send_sge = qp->sq.max_sge;
        attr->cap.max_inline_data = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr = qp->rq.max_wr;
                attr->cap.max_recv_sge = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        if (qp->req.state == QP_STATE_DRAIN) {
                attr->sq_draining = 1;
                /* applications that get this state
                 * typically spin on it. yield the
                 * processor
                 */
                cond_resched();
        } else {
                attr->sq_draining = 0;
        }

        pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

        return 0;
}
/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
        qp->valid = 0;
        qp->qp_timeout_jiffies = 0;
        rxe_cleanup_task(&qp->resp.task);

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        rxe_cleanup_task(&qp->req.task);
        rxe_cleanup_task(&qp->comp.task);

        /* flush out any receive wr's or pending requests */
        __rxe_do_task(&qp->req.task);
        if (qp->sq.queue) {
                __rxe_do_task(&qp->comp.task);
                __rxe_do_task(&qp->req.task);
        }
}
/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

        rxe_drop_all_mcast_groups(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_drop_ref(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq)
                rxe_drop_ref(qp->scq);
        if (qp->rcq)
                rxe_drop_ref(qp->rcq);
        if (qp->pd)
                rxe_drop_ref(qp->pd);

        if (qp->resp.mr) {
                rxe_drop_ref(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        if (qp_type(qp) == IB_QPT_RC)
                sk_dst_reset(qp->sk->sk);

        free_rd_atomic_resources(qp);

        kernel_sock_shutdown(qp->sk, SHUT_RDWR);
        sock_release(qp->sk);
}
/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
        struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

        /* sock_release() can sleep, so run the real cleanup in process
         * context: execute_in_process_context() calls rxe_qp_do_cleanup()
         * directly when that is safe, otherwise it defers to a workqueue.
         */
        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}