/*
 * Copyright (c) 2012, 2013 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

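/*
 * Each qpn_map page is used as a bitmap of QPNs, so one map covers
 * PAGE_SIZE * BITS_PER_BYTE QPNs and BITS_PER_PAGE_MASK extracts a
 * QPN's bit offset within its map page.
 */
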
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}

static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};

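/*
 * The AETH credit code is a 5-bit field: codes 0x0-0x1E index this
 * table (e.g. code 0x10 advertises 256 RWQEs, 0x1E the maximum of
 * 32768) and QIB_AETH_CREDIT_INVAL means "no credit limit".  The
 * roughly exponential spacing lets the small code cover the whole
 * range of receive queue sizes.
 */
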
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}

/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}
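
	/*
	 * Ordinary QPs: QPN 0 and 1 are reserved for the SMI and GSI
	 * QPs handled above, so allocation starts at 2.  When a QPN
	 * mask is in use, bits of the QPN select the kernel receive
	 * context, so candidate QPNs whose masked bits select a
	 * context >= n_krcv_queues are skipped (here and in
	 * find_next_offset()).
	 */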
	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}

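/*
 * The hash value is masked with (qp_table_size - 1) below, which
 * relies on the table size being a power of two; qp_rnd seeds jhash
 * so the bucket layout differs from device to device.
 */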
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
	       (dev->qp_table_size - 1);
}

/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}

/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				rcu_assign_pointer(*qpp,
					rcu_dereference_protected(qp->next,
						lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
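
	/*
	 * The QP has been unlinked under qpt_lock above; wait for any
	 * RCU readers still walking the old pointers to finish before
	 * dropping the table's reference below.
	 */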
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}

/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the qlogic_ib device data
 *
 * There should not be any QPs still in use.
 * Returns the number of QPs found to still be in use.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		rcu_assign_pointer(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
				lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	synchronize_rcu();

	return qp_inuse;
}

/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}

/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}

static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}

/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}

/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;
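
	/*
	 * The IBTA local ACK timeout is 4.096 usec * 2^timeout; the
	 * expression below computes (4096 << timeout) nsec / 1000 to
	 * get microseconds before converting to jiffies (e.g. a
	 * timeout value of 14 is roughly 67 msec).
	 */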
	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}

int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}

/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads is not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
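		/*
		 * The loop below ends on an exact match or when the
		 * interval collapses, leaving x as the largest code
		 * whose entry does not exceed the credit count, e.g.
		 * 100 free RWQEs encodes as 0xD (96).
		 */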
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}

/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
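		/*
		 * Each send WQE is laid out as a struct qib_swqe
		 * followed by max_send_sge struct qib_sge entries, and
		 * the ring gets one slot more than max_send_wr so that
		 * s_head == s_last always means "empty".
		 */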
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}

/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the qlogic_ib device data
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;          /* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}

/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}

/**
 * qib_get_credit - handle a credit update from an incoming AETH
 * @qp: the QP the credit applies to
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
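	/*
	 * A valid credit code encodes how many RWQEs the peer has
	 * available: the new send limit (s_lsn) becomes the AETH MSN
	 * plus credit_table[credit], and a send that was blocked
	 * waiting for credits is rescheduled.
	 */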
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}

#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

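/*
 * The iterator holds a reference on the QP it most recently returned;
 * qib_qp_iter_next() drops that reference when it advances and
 * returns 0 when a QP was found, 1 once the table is exhausted.
 */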
struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	rcu_read_lock();
	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			if (iter->qp)
				atomic_dec(&iter->qp->refcount);
			atomic_inc(&qp->refcount);
			rcu_read_unlock();
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	rcu_read_unlock();
	if (iter->qp)
		atomic_dec(&iter->qp->refcount);
	return ret;
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif