/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

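/* Check a create-time capability request (WR, SGE and inline limits)
 * against the device maximums advertised in rxe->attr. The receive-side
 * limits are skipped when the QP is attached to an SRQ, since the SRQ
 * owns them.
 */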
static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
			  int has_srq)
{
	if (cap->max_send_wr > rxe->attr.max_qp_wr) {
		pr_warn("invalid send wr = %d > %d\n",
			cap->max_send_wr, rxe->attr.max_qp_wr);
		goto err1;
	}

	if (cap->max_send_sge > rxe->attr.max_send_sge) {
		pr_warn("invalid send sge = %d > %d\n",
			cap->max_send_sge, rxe->attr.max_send_sge);
		goto err1;
	}

	if (!has_srq) {
		if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
			pr_warn("invalid recv wr = %d > %d\n",
				cap->max_recv_wr, rxe->attr.max_qp_wr);
			goto err1;
		}

		if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
			pr_warn("invalid recv sge = %d > %d\n",
				cap->max_recv_sge, rxe->attr.max_recv_sge);
			goto err1;
		}
	}

	if (cap->max_inline_data > rxe->max_inline_data) {
		pr_warn("invalid max inline data = %d > %d\n",
			cap->max_inline_data, rxe->max_inline_data);
		goto err1;
	}

	return 0;

err1:
	return -EINVAL;
}

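/* Validate the create-qp attributes: both CQs must be present, the
 * requested capabilities must fit the device, and at most one SMI and
 * one GSI QP may exist per port.
 */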
int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
	struct ib_qp_cap *cap = &init->cap;
	struct rxe_port *port;
	int port_num = init->port_num;

	if (!init->recv_cq || !init->send_cq) {
		pr_warn("missing cq\n");
		goto err1;
	}

	if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
		goto err1;

	if (init->qp_type == IB_QPT_SMI || init->qp_type == IB_QPT_GSI) {
		if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
			pr_warn("invalid port = %d\n", port_num);
			goto err1;
		}

		port = &rxe->port;

		if (init->qp_type == IB_QPT_SMI && port->qp_smi_index) {
			pr_warn("SMI QP exists for port %d\n", port_num);
			goto err1;
		}

		if (init->qp_type == IB_QPT_GSI && port->qp_gsi_index) {
			pr_warn("GSI QP exists for port %d\n", port_num);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

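/* The responder keeps a ring of n resources so it can build, and if
 * necessary replay, responses to inbound RDMA read and atomic requests;
 * n comes from the QP's max_dest_rd_atomic attribute.
 */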
static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
	qp->resp.res_head = 0;
	qp->resp.res_tail = 0;
	qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

	if (!qp->resp.resources)
		return -ENOMEM;

	return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
	if (qp->resp.resources) {
		int i;

		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			struct resp_res *res = &qp->resp.resources[i];

			free_rd_atomic_resource(qp, res);
		}
		kfree(qp->resp.resources);
		qp->resp.resources = NULL;
	}
}

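/* Release whatever a single resource still holds: the skb and QP
 * reference cached for an atomic response, or the MR reference for an
 * in-progress read reply.
 */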
void free_rd_atomic_resource(struct rxe_qp *qp, struct resp_res *res)
{
	if (res->type == RXE_ATOMIC_MASK) {
		rxe_drop_ref(qp);
		kfree_skb(res->atomic.skb);
	} else if (res->type == RXE_READ_MASK) {
		if (res->read.mr)
			rxe_drop_ref(res->read.mr);
	}
	res->type = 0;
}

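/* Unlike free_rd_atomic_resources(), this releases what each entry
 * holds but leaves the resources array itself allocated for reuse.
 */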
static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
	int i;
	struct resp_res *res;

	if (qp->resp.resources) {
		for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
			res = &qp->resp.resources[i];
			free_rd_atomic_resource(qp, res);
		}
	}
}

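/* Initialize state common to the send and receive sides: QP numbering
 * (fixed numbers 0 and 1 for SMI and GSI), the default MTU, and the
 * QP's lists, locks and counters.
 */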
static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
			     struct ib_qp_init_attr *init)
{
	struct rxe_port *port;
	u32 qpn;

	qp->sq_sig_type		= init->sq_sig_type;
	qp->attr.path_mtu	= 1;
	qp->mtu			= ib_mtu_enum_to_int(qp->attr.path_mtu);

	qpn			= qp->pelem.index;
	port			= &rxe->port;

	switch (init->qp_type) {
	case IB_QPT_SMI:
		qp->ibqp.qp_num		= 0;
		port->qp_smi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	case IB_QPT_GSI:
		qp->ibqp.qp_num		= 1;
		port->qp_gsi_index	= qpn;
		qp->attr.port_num	= init->port_num;
		break;

	default:
		qp->ibqp.qp_num		= qpn;
		break;
	}

	INIT_LIST_HEAD(&qp->grp_list);

	skb_queue_head_init(&qp->send_pkts);

	spin_lock_init(&qp->grp_lock);
	spin_lock_init(&qp->state_lock);

	atomic_set(&qp->ssn, 0);
	atomic_set(&qp->skb_out, 0);
}

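/* Set up the requester side: the kernel UDP socket used to transmit,
 * the send queue and its mmap info for userspace, the requester and
 * completer tasks, and the RC retry timers.
 */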
static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
			   struct ib_qp_init_attr *init, struct ib_udata *udata,
			   struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
	if (err < 0)
		return err;
	qp->sk->sk->sk_user_data = qp;

	/* pick a source UDP port number for this QP based on
	 * the source QPN. this spreads traffic for different QPs
	 * across different NIC RX queues (while using a single
	 * flow for a given QP to maintain packet order).
	 * the port number must be in the Dynamic Ports range
	 * (0xc000 - 0xffff).
	 */
	qp->src_port = RXE_ROCE_V2_SPORT +
		(hash_32_generic(qp_num(qp), 14) & 0x3fff);
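
	/* For illustration (hash value purely hypothetical): if the
	 * 14-bit hash of the QPN came out as 0x1234, this QP would send
	 * from UDP port 0xc000 + 0x1234 = 0xd234. The hash never exceeds
	 * 0x3fff, so the result always stays within 0xc000 - 0xffff.
	 */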

	qp->sq.max_wr		= init->cap.max_send_wr;
	qp->sq.max_sge		= init->cap.max_send_sge;
	qp->sq.max_inline	= init->cap.max_inline_data;

	wqe_size = max_t(int, sizeof(struct rxe_send_wqe) +
			 qp->sq.max_sge * sizeof(struct ib_sge),
			 sizeof(struct rxe_send_wqe) +
			 qp->sq.max_inline);

	qp->sq.queue = rxe_queue_init(rxe,
				      &qp->sq.max_wr,
				      wqe_size);
	if (!qp->sq.queue)
		return -ENOMEM;

	err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
			   qp->sq.queue->buf, qp->sq.queue->buf_size,
			   &qp->sq.queue->ip);

	if (err) {
		vfree(qp->sq.queue->buf);
		kfree(qp->sq.queue);
		return err;
	}

	qp->req.wqe_index	= producer_index(qp->sq.queue);
	qp->req.state		= QP_STATE_RESET;
	qp->req.opcode		= -1;
	qp->comp.opcode		= -1;

	spin_lock_init(&qp->sq.sq_lock);
	skb_queue_head_init(&qp->req_pkts);

	rxe_init_task(rxe, &qp->req.task, qp,
		      rxe_requester, "req");
	rxe_init_task(rxe, &qp->comp.task, qp,
		      rxe_completer, "comp");

	qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
	if (init->qp_type == IB_QPT_RC) {
		timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
		timer_setup(&qp->retrans_timer, retransmit_timer, 0);
	}
	return 0;
}

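/* Set up the responder side: the receive queue and its mmap info
 * (unless an SRQ supplies the receive queue) and the responder task.
 */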
static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
			    struct ib_qp_init_attr *init,
			    struct ib_udata *udata,
			    struct rxe_create_qp_resp __user *uresp)
{
	int err;
	int wqe_size;

	if (!qp->srq) {
		qp->rq.max_wr		= init->cap.max_recv_wr;
		qp->rq.max_sge		= init->cap.max_recv_sge;

		wqe_size = rcv_wqe_size(qp->rq.max_sge);

		pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
			 qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);

		qp->rq.queue = rxe_queue_init(rxe,
					      &qp->rq.max_wr,
					      wqe_size);
		if (!qp->rq.queue)
			return -ENOMEM;

		err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
				   qp->rq.queue->buf, qp->rq.queue->buf_size,
				   &qp->rq.queue->ip);
		if (err) {
			vfree(qp->rq.queue->buf);
			kfree(qp->rq.queue);
			return err;
		}
	}

	spin_lock_init(&qp->rq.producer_lock);
	spin_lock_init(&qp->rq.consumer_lock);

	skb_queue_head_init(&qp->resp_pkts);

	rxe_init_task(rxe, &qp->resp.task, qp,
		      rxe_responder, "resp");

	qp->resp.opcode		= OPCODE_NONE;
	qp->resp.msn		= 0;
	qp->resp.state		= QP_STATE_RESET;

	return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
		     struct ib_qp_init_attr *init,
		     struct rxe_create_qp_resp __user *uresp,
		     struct ib_pd *ibpd,
		     struct ib_udata *udata)
{
	int err;
	struct rxe_cq *rcq = to_rcq(init->recv_cq);
	struct rxe_cq *scq = to_rcq(init->send_cq);
	struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;

	rxe_add_ref(pd);
	rxe_add_ref(rcq);
	rxe_add_ref(scq);
	if (srq)
		rxe_add_ref(srq);

	qp->pd			= pd;
	qp->rcq			= rcq;
	qp->scq			= scq;
	qp->srq			= srq;

	rxe_qp_init_misc(rxe, qp, init);

	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
	if (err)
		goto err1;

	err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
	if (err)
		goto err2;

	qp->attr.qp_state = IB_QPS_RESET;
	qp->valid = 1;

	return 0;

err2:
	rxe_queue_cleanup(qp->sq.queue);
err1:
	if (srq)
		rxe_drop_ref(srq);
	rxe_drop_ref(scq);
	rxe_drop_ref(rcq);
	rxe_drop_ref(pd);

	return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
	init->event_handler		= qp->ibqp.event_handler;
	init->qp_context		= qp->ibqp.qp_context;
	init->send_cq			= qp->ibqp.send_cq;
	init->recv_cq			= qp->ibqp.recv_cq;
	init->srq			= qp->ibqp.srq;

	init->cap.max_send_wr		= qp->sq.max_wr;
	init->cap.max_send_sge		= qp->sq.max_sge;
	init->cap.max_inline_data	= qp->sq.max_inline;

	if (!qp->srq) {
		init->cap.max_recv_wr		= qp->rq.max_wr;
		init->cap.max_recv_sge		= qp->rq.max_sge;
	}

	init->sq_sig_type		= qp->sq_sig_type;

	init->qp_type			= qp->ibqp.qp_type;
	init->port_num			= 1;

	return 0;
}

/* called by the modify qp verb, this routine checks all the parameters before
 * making any changes
 */
int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
		    struct ib_qp_attr *attr, int mask)
{
	enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
					attr->cur_qp_state : qp->attr.qp_state;
	enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
					attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
		pr_warn("invalid mask or state for qp\n");
		goto err1;
	}

	if (mask & IB_QP_STATE) {
		if (cur_state == IB_QPS_SQD) {
			if (qp->req.state == QP_STATE_DRAIN &&
			    new_state != IB_QPS_ERR)
				goto err1;
		}
	}

	if (mask & IB_QP_PORT) {
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
			pr_warn("invalid port %d\n", attr->port_num);
			goto err1;
		}
	}

	if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
		goto err1;

	if (mask & IB_QP_AV && rxe_av_chk_attr(rxe, &attr->ah_attr))
		goto err1;

	if (mask & IB_QP_ALT_PATH) {
		if (rxe_av_chk_attr(rxe, &attr->alt_ah_attr))
			goto err1;
		if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
			pr_warn("invalid alt port %d\n", attr->alt_port_num);
			goto err1;
		}
		if (attr->alt_timeout > 31) {
			pr_warn("invalid QP alt timeout %d > 31\n",
				attr->alt_timeout);
			goto err1;
		}
	}

	if (mask & IB_QP_PATH_MTU) {
		struct rxe_port *port = &rxe->port;

		enum ib_mtu max_mtu = port->attr.max_mtu;
		enum ib_mtu mtu = attr->path_mtu;

		if (mtu > max_mtu) {
			pr_debug("invalid mtu (%d) > (%d)\n",
				 ib_mtu_enum_to_int(mtu),
				 ib_mtu_enum_to_int(max_mtu));
			goto err1;
		}
	}

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
			pr_warn("invalid max_rd_atomic %d > %d\n",
				attr->max_rd_atomic,
				rxe->attr.max_qp_rd_atom);
			goto err1;
		}
	}

	if (mask & IB_QP_TIMEOUT) {
		if (attr->timeout > 31) {
			pr_warn("invalid QP timeout %d > 31\n",
				attr->timeout);
			goto err1;
		}
	}

	return 0;

err1:
	return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
	/* stop tasks from running */
	rxe_disable_task(&qp->resp.task);

	/* stop request/comp */
	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_disable_task(&qp->comp.task);
		rxe_disable_task(&qp->req.task);
	}

	/* move qp to the reset state */
	qp->req.state = QP_STATE_RESET;
	qp->resp.state = QP_STATE_RESET;

	/* let state machines reset themselves, drain work and packet
	 * queues, etc.
	 */
	__rxe_do_task(&qp->resp.task);

	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
		rxe_queue_reset(qp->sq.queue);
	}

	/* cleanup attributes */
	atomic_set(&qp->ssn, 0);
	qp->req.opcode = -1;
	qp->req.need_retry = 0;
	qp->req.noack_pkts = 0;
	qp->resp.msn = 0;
	qp->resp.opcode = -1;
	qp->resp.drop_msg = 0;
	qp->resp.goto_error = 0;
	qp->resp.sent_psn_nak = 0;

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	cleanup_rd_atomic_resources(qp);

	/* reenable tasks */
	rxe_enable_task(&qp->resp.task);

	if (qp->sq.queue) {
		if (qp_type(qp) == IB_QPT_RC)
			rxe_enable_task(&qp->comp.task);

		rxe_enable_task(&qp->req.task);
	}
}

/* drain the send queue */
static void rxe_qp_drain(struct rxe_qp *qp)
{
	if (qp->sq.queue) {
		if (qp->req.state != QP_STATE_DRAINED) {
			qp->req.state = QP_STATE_DRAIN;
			if (qp_type(qp) == IB_QPT_RC)
				rxe_run_task(&qp->comp.task, 1);
			else
				__rxe_do_task(&qp->comp.task);
			rxe_run_task(&qp->req.task, 1);
		}
	}
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
	qp->req.state = QP_STATE_ERROR;
	qp->resp.state = QP_STATE_ERROR;
	qp->attr.qp_state = IB_QPS_ERR;

	/* drain work and packet queues */
	rxe_run_task(&qp->resp.task, 1);

	if (qp_type(qp) == IB_QPT_RC)
		rxe_run_task(&qp->comp.task, 1);
	else
		__rxe_do_task(&qp->comp.task);
	rxe_run_task(&qp->req.task, 1);
}

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
		     struct ib_udata *udata)
{
	int err;

	if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
		int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);

		qp->attr.max_rd_atomic = max_rd_atomic;
		atomic_set(&qp->req.rd_atomic, max_rd_atomic);
	}

	if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		int max_dest_rd_atomic =
			__roundup_pow_of_two(attr->max_dest_rd_atomic);

		qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

		free_rd_atomic_resources(qp);

		err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
		if (err)
			return err;
	}

	if (mask & IB_QP_CUR_STATE)
		qp->attr.cur_qp_state = attr->qp_state;

	if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
		qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

	if (mask & IB_QP_ACCESS_FLAGS)
		qp->attr.qp_access_flags = attr->qp_access_flags;

	if (mask & IB_QP_PKEY_INDEX)
		qp->attr.pkey_index = attr->pkey_index;

	if (mask & IB_QP_PORT)
		qp->attr.port_num = attr->port_num;

	if (mask & IB_QP_QKEY)
		qp->attr.qkey = attr->qkey;

	if (mask & IB_QP_AV) {
		rxe_init_av(&attr->ah_attr, &qp->pri_av);
	}

	if (mask & IB_QP_ALT_PATH) {
		rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
		qp->attr.alt_port_num = attr->alt_port_num;
		qp->attr.alt_pkey_index = attr->alt_pkey_index;
		qp->attr.alt_timeout = attr->alt_timeout;
	}

	if (mask & IB_QP_PATH_MTU) {
		qp->attr.path_mtu = attr->path_mtu;
		qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
	}

	if (mask & IB_QP_TIMEOUT) {
		qp->attr.timeout = attr->timeout;
		if (attr->timeout == 0) {
			qp->qp_timeout_jiffies = 0;
		} else {
			/* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
			int j = nsecs_to_jiffies(4096ULL << attr->timeout);

			qp->qp_timeout_jiffies = j ? j : 1;
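
			/* Worked example: attr->timeout = 14 gives
			 * 4096 ns << 14 = 67,108,864 ns, i.e. a retry
			 * timeout of roughly 67 ms.
			 */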
		}
	}

	if (mask & IB_QP_RETRY_CNT) {
		qp->attr.retry_cnt = attr->retry_cnt;
		qp->comp.retry_cnt = attr->retry_cnt;
		pr_debug("qp#%d set retry count = %d\n", qp_num(qp),
			 attr->retry_cnt);
	}

	if (mask & IB_QP_RNR_RETRY) {
		qp->attr.rnr_retry = attr->rnr_retry;
		qp->comp.rnr_retry = attr->rnr_retry;
		pr_debug("qp#%d set rnr retry count = %d\n", qp_num(qp),
			 attr->rnr_retry);
	}

	if (mask & IB_QP_RQ_PSN) {
		qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
		qp->resp.psn = qp->attr.rq_psn;
		pr_debug("qp#%d set resp psn = 0x%x\n", qp_num(qp),
			 qp->resp.psn);
	}

	if (mask & IB_QP_MIN_RNR_TIMER) {
		qp->attr.min_rnr_timer = attr->min_rnr_timer;
		pr_debug("qp#%d set min rnr timer = 0x%x\n", qp_num(qp),
			 attr->min_rnr_timer);
	}

	if (mask & IB_QP_SQ_PSN) {
		qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
		qp->req.psn = qp->attr.sq_psn;
		qp->comp.psn = qp->attr.sq_psn;
		pr_debug("qp#%d set req psn = 0x%x\n", qp_num(qp), qp->req.psn);
	}

	if (mask & IB_QP_PATH_MIG_STATE)
		qp->attr.path_mig_state = attr->path_mig_state;

	if (mask & IB_QP_DEST_QPN)
		qp->attr.dest_qp_num = attr->dest_qp_num;

	if (mask & IB_QP_STATE) {
		qp->attr.qp_state = attr->qp_state;

		switch (attr->qp_state) {
		case IB_QPS_RESET:
			pr_debug("qp#%d state -> RESET\n", qp_num(qp));
			rxe_qp_reset(qp);
			break;

		case IB_QPS_INIT:
			pr_debug("qp#%d state -> INIT\n", qp_num(qp));
			qp->req.state = QP_STATE_INIT;
			qp->resp.state = QP_STATE_INIT;
			break;

		case IB_QPS_RTR:
			pr_debug("qp#%d state -> RTR\n", qp_num(qp));
			qp->resp.state = QP_STATE_READY;
			break;

		case IB_QPS_RTS:
			pr_debug("qp#%d state -> RTS\n", qp_num(qp));
			qp->req.state = QP_STATE_READY;
			break;

		case IB_QPS_SQD:
			pr_debug("qp#%d state -> SQD\n", qp_num(qp));
			rxe_qp_drain(qp);
			break;

		case IB_QPS_SQE:
			pr_warn("qp#%d state -> SQE !!?\n", qp_num(qp));
			/* Not possible from modify_qp. */
			break;

		case IB_QPS_ERR:
			pr_debug("qp#%d state -> ERR\n", qp_num(qp));
			rxe_qp_error(qp);
			break;
		}
	}

	return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
	*attr = qp->attr;

	attr->rq_psn				= qp->resp.psn;
	attr->sq_psn				= qp->req.psn;

	attr->cap.max_send_wr			= qp->sq.max_wr;
	attr->cap.max_send_sge			= qp->sq.max_sge;
	attr->cap.max_inline_data		= qp->sq.max_inline;

	if (!qp->srq) {
		attr->cap.max_recv_wr		= qp->rq.max_wr;
		attr->cap.max_recv_sge		= qp->rq.max_sge;
	}

	rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
	rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

	if (qp->req.state == QP_STATE_DRAIN) {
		attr->sq_draining = 1;
		/* applications that get this state
		 * typically spin on it. yield the
		 * processor
		 */
		cond_resched();
	} else {
		attr->sq_draining = 0;
	}

	pr_debug("attr->sq_draining = %d\n", attr->sq_draining);

	return 0;
}

/* called by the destroy qp verb */
void rxe_qp_destroy(struct rxe_qp *qp)
{
	qp->valid = 0;
	qp->qp_timeout_jiffies = 0;
	rxe_cleanup_task(&qp->resp.task);

	if (qp_type(qp) == IB_QPT_RC) {
		del_timer_sync(&qp->retrans_timer);
		del_timer_sync(&qp->rnr_nak_timer);
	}

	rxe_cleanup_task(&qp->req.task);
	rxe_cleanup_task(&qp->comp.task);

	/* flush out any receive wr's or pending requests */
	__rxe_do_task(&qp->req.task);
	if (qp->sq.queue) {
		__rxe_do_task(&qp->comp.task);
		__rxe_do_task(&qp->req.task);
	}
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
	struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);

	rxe_drop_all_mcast_groups(qp);

	if (qp->sq.queue)
		rxe_queue_cleanup(qp->sq.queue);

	if (qp->srq)
		rxe_drop_ref(qp->srq);

	if (qp->rq.queue)
		rxe_queue_cleanup(qp->rq.queue);

	if (qp->scq)
		rxe_drop_ref(qp->scq);
	if (qp->rcq)
		rxe_drop_ref(qp->rcq);
	if (qp->pd)
		rxe_drop_ref(qp->pd);

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_reset(qp->sk->sk);

	free_rd_atomic_resources(qp);

	kernel_sock_shutdown(qp->sk, SHUT_RDWR);
	sock_release(qp->sk);
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_entry *arg)
{
	struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);

	execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}