/*
 * Copyright (c) 2012, 2013 Intel Corporation.  All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/jhash.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif

#include "qib.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
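
/*
 * QP numbers are tracked with one bitmap page per qpn_map entry, so a
 * single page covers BITS_PER_PAGE QPNs.  mk_qpn() below turns a
 * (map, bit offset) pair back into a QP number, and find_next_offset()
 * scans a page for the next free bit, stepping over QPNs excluded by
 * qpt->mask (compare the n_krcv_queues check in alloc_qpn()).
 */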
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned n)
{
	if (qpt->mask) {
		off++;
		if (((off & qpt->mask) >> 1) >= n)
			off = (off | qpt->mask) + 2;
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	/* 31 credit values elided */
};
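
/*
 * credit_table maps an AETH credit code (an index into this 31-entry
 * table) onto an RWQE count: qib_compute_aeth() binary-searches it to
 * encode how many receive WQEs are currently posted, and
 * qib_get_credit() indexes it to recover the credit limit advertised by
 * the remote end.
 */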
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	int ret;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	qpn = qpt->last + 2;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
		qpn = (qpn | qpt->mask) + 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset,
						  dd->n_krcv_queues);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
{
	return jhash_1word(qpn, dev->qp_rnd) &
		(dev->qp_table_size - 1);
}
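
/*
 * qpn_hash() relies on dev->qp_table_size being a power of two, since
 * the bucket index is formed by masking with (qp_table_size - 1);
 * dev->qp_rnd seeds the jhash so bucket placement does not follow
 * directly from the QPN value.
 */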
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned long flags;
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);

	atomic_inc(&qp->refcount);
	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		rcu_assign_pointer(ibp->qp0, qp);
	else if (qp->ibqp.qp_num == 1)
		rcu_assign_pointer(ibp->qp1, qp);
	else {
		qp->next = dev->qp_table[n];
		rcu_assign_pointer(dev->qp_table[n], qp);
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
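
/*
 * Hash-table updates here and in remove_qp() are serialized by
 * dev->qpt_lock and published with rcu_assign_pointer(); lookups in
 * qib_lookup_qpn() walk the chains under rcu_read_lock() only, taking a
 * QP reference before the pointer can go stale.
 */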
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qpn_hash(dev, qp->ibqp.qp_num);
	unsigned long flags;
	int removed = 1;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (rcu_dereference_protected(ibp->qp0,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp0, NULL);
	} else if (rcu_dereference_protected(ibp->qp1,
			lockdep_is_held(&dev->qpt_lock)) == qp) {
		rcu_assign_pointer(ibp->qp1, NULL);
	} else {
		struct qib_qp *q;
		struct qib_qp __rcu **qpp;

		removed = 0;
		qpp = &dev->qp_table[n];
		for (; (q = rcu_dereference_protected(*qpp,
				lockdep_is_held(&dev->qpt_lock))) != NULL;
				qpp = &q->next)
			if (q == qp) {
				rcu_assign_pointer(*qpp,
					rcu_dereference_protected(qp->next,
					 lockdep_is_held(&dev->qpt_lock)));
				removed = 1;
				break;
			}
	}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	if (removed) {
		synchronize_rcu();
		atomic_dec(&qp->refcount);
	}
}
/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device whose QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		rcu_read_lock();
		if (rcu_dereference(ibp->qp0))
			qp_inuse++;
		if (rcu_dereference(ibp->qp1))
			qp_inuse++;
		rcu_read_unlock();
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = rcu_dereference_protected(dev->qp_table[n],
			lockdep_is_held(&dev->qpt_lock));
		rcu_assign_pointer(dev->qp_table[n], NULL);

		for (; qp; qp = rcu_dereference_protected(qp->next,
					lockdep_is_held(&dev->qpt_lock)))
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);

	return qp_inuse;
}
/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = NULL;

	rcu_read_lock();
	if (unlikely(qpn <= 1)) {
		if (qpn == 0)
			qp = rcu_dereference(ibp->qp0);
		else
			qp = rcu_dereference(ibp->qp1);
		if (qp)
			atomic_inc(&qp->refcount);
	} else {
		struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
		unsigned n = qpn_hash(dev, qpn);

		for (qp = rcu_dereference(dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn) {
				atomic_inc(&qp->refcount);
				break;
			}
	}
	rcu_read_unlock();
	return qp;
}
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		qib_put_ss(&qp->s_rdma_read_sge);

	qib_put_ss(&qp->r_sge);

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				qib_put_mr(sge->mr);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			qib_put_mr(e->rdma_sge.mr);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}

	if (qp->s_flags & QIB_S_ANY_WAIT_SEND)
		qp->s_flags &= ~QIB_S_ANY_WAIT_SEND;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		if (qp->s_rdma_mr) {
			qib_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
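
/*
 * Locking note for qib_modify_qp() below: r_lock is taken with IRQs
 * disabled and s_lock is nested inside it; both are dropped temporarily
 * during a transition to RESET so the send work queue, retry timer and
 * outstanding DMA can be drained without spinlocks held.
 */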
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;
	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}
	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		qp->path_mtu = pmtu;
		qp->pmtu = ib_mtu_enum_to_int(pmtu);
	}

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;
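
	/*
	 * The IB timeout attribute is an exponent: a value t means
	 * 4.096 usec * 2^t, which the expression below computes as
	 * (4096 << t) / 1000 microseconds before converting to jiffies.
	 */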
	if (attr_mask & IB_QP_TIMEOUT) {
		qp->timeout = attr->timeout;
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
	}

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
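
/*
 * AETH layout as used below: the MSN occupies the bits selected by
 * QIB_MSN_MASK and the credit field sits above QIB_AETH_CREDIT_SHIFT,
 * holding either an index into credit_table or QIB_AETH_CREDIT_INVAL
 * when an SRQ is attached and no credits are advertised.
 */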
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		RCU_INIT_POINTER(qp->next, NULL);
		qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
		if (!qp->s_hdr) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_qp;
		}
		qp->timeout_jiffies =
			usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
				1000UL);
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp->s_hdr);
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp->s_hdr);
	kfree(qp);
	return 0;
}
/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}
/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}
/**
 * qib_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
#ifdef CONFIG_DEBUG_FS

struct qib_qp_iter {
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	int n;
};

struct qib_qp_iter *qib_qp_iter_init(struct qib_ibdev *dev)
{
	struct qib_qp_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	if (qib_qp_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int qib_qp_iter_next(struct qib_qp_iter *iter)
{
	struct qib_ibdev *dev = iter->dev;
	int n = iter->n;
	int ret = 1;
	struct qib_qp *pqp = iter->qp;
	struct qib_qp *qp;

	for (; n < dev->qp_table_size; n++) {
		if (pqp)
			qp = rcu_dereference(pqp->next);
		else
			qp = rcu_dereference(dev->qp_table[n]);
		pqp = qp;
		if (qp) {
			if (iter->qp)
				atomic_dec(&iter->qp->refcount);
			atomic_inc(&qp->refcount);
			iter->qp = qp;
			iter->n = n;
			return 0;
		}
	}
	if (iter->qp)
		atomic_dec(&iter->qp->refcount);
	return ret;
}
static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

void qib_qp_iter_print(struct seq_file *s, struct qib_qp_iter *iter)
{
	struct qib_swqe *wqe;
	struct qib_qp *qp = iter->qp;

	wqe = get_swqe_ptr(qp, qp->s_last);
	seq_printf(s,
		   "N %d QP%u %s %u %u %u f=%x %u %u %u %u %u PSN %x %x %x %x %x (%u %u %u %u %u %u) QP%u LID %x\n",
		   iter->n,
		   qp->ibqp.qp_num,
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe->wr.opcode,
		   qp->s_hdrwords,
		   qp->s_flags,
		   atomic_read(&qp->s_dma_busy),
		   !list_empty(&qp->iowait),
		   qp->timeout,
		   wqe->ssn,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->remote_qpn,
		   qp->remote_ah_attr.dlid);
}

#endif