/*
 * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
 * All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "qib.h"
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
			      struct qpn_map *map, unsigned off)
{
	return (map - qpt->map) * BITS_PER_PAGE + off;
}
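
/*
 * Editorial sketch (not part of the original driver): a QPN decomposes into
 * a qpn_map index and a bit offset within that map's page-sized bitmap.
 * Assuming 4 KiB pages, BITS_PER_PAGE is 32768, so QPN 70000 lives in
 * qpt->map[2] at bit 4464, and mk_qpn(qpt, &qpt->map[2], 4464) recovers
 * 2 * 32768 + 4464 = 70000.  The hypothetical helper below shows the same
 * decomposition; it is illustrative only and is not built.
 */
#if 0
static inline void qpn_decompose_example(u32 qpn, u32 *map_idx, u32 *off)
{
	*map_idx = qpn / BITS_PER_PAGE;		/* which qpn_map */
	*off = qpn & BITS_PER_PAGE_MASK;	/* bit within its page */
}
#endif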
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
					struct qpn_map *map, unsigned off,
					unsigned r)
{
	if (qpt->mask) {
		off++;
		if ((off & qpt->mask) >> 1 != r)
			off = ((off & qpt->mask) ?
				(off | qpt->mask) + 1 : off) | (r << 1);
	} else
		off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
	return off;
}
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
	12288, 16384, 24576, 32768
};
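
/*
 * Editorial note on the encoding (the credit values above are reconstructed
 * from the standard IBA AETH credit table, and the relationship below is a
 * sketch, not driver code): each pair of codes covers one power of two, so
 * code 2k advertises 2^k credits and code 2k+1 advertises 1.5 * 2^k, e.g.
 * code 9 -> 24 credits and code 16 -> 256 credits.
 */
#if 0
static u32 aeth_code_to_credits_example(u32 code)	/* valid for code >= 1 */
{
	return (1u << (code / 2)) + ((code & 1) ? (1u << (code / 2)) / 2 : 0);
}
#endif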
static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock(&qpt->lock);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock(&qpt->lock);
}
/*
 * Allocate the next available QPN or
 * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
 */
static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
		     enum ib_qp_type type, u8 port)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	int ret;
	int r;

	if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
		unsigned n;

		ret = type == IB_QPT_GSI;
		n = 1 << (ret + 2 * (port - 1));
		spin_lock(&qpt->lock);
		if (qpt->flags & n)
			ret = -EINVAL;
		else
			qpt->flags |= n;
		spin_unlock(&qpt->lock);
		goto bail;
	}

	r = smp_processor_id();
	if (r >= dd->n_krcv_queues)
		r %= dd->n_krcv_queues;
	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	if (qpt->mask && ((qpn & qpt->mask) >> 1) != r)
		qpn = ((qpn & qpt->mask) ? (qpn | qpt->mask) + 1 : qpn) |
			(r << 1);
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		do {
			if (!test_and_set_bit(offset, map->page)) {
				qpt->last = qpn;
				ret = qpn;
				goto bail;
			}
			offset = find_next_offset(qpt, map, offset, r);
			qpn = mk_qpn(qpt, map, offset);
			/*
			 * This test differs from alloc_pidmap().
			 * If find_next_offset() does find a zero
			 * bit, we don't need to check for QPN
			 * wrapping around past our starting QPN.
			 * We just need to be sure we don't loop
			 * forever.
			 */
		} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = qpt->mask ? (r << 1) : 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = qpt->mask ? (r << 1) : 0;
		} else {
			map = &qpt->map[0];
			offset = qpt->mask ? (r << 1) : 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
static void free_qpn(struct qib_qpn_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
}
/*
 * Put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static void insert_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	unsigned n = qp->ibqp.qp_num % dev->qp_table_size;
	unsigned long flags;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qp->ibqp.qp_num == 0)
		ibp->qp0 = qp;
	else if (qp->ibqp.qp_num == 1)
		ibp->qp1 = qp;
	else {
		qp->next = dev->qp_table[n];
		dev->qp_table[n] = qp;
	}
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
/*
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
{
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_qp *q, **qpp;
	unsigned long flags;

	qpp = &dev->qp_table[qp->ibqp.qp_num % dev->qp_table_size];

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (ibp->qp0 == qp) {
		ibp->qp0 = NULL;
		atomic_dec(&qp->refcount);
	} else if (ibp->qp1 == qp) {
		ibp->qp1 = NULL;
		atomic_dec(&qp->refcount);
	} else
		for (; (q = *qpp) != NULL; qpp = &q->next)
			if (q == qp) {
				*qpp = qp->next;
				qp->next = NULL;
				atomic_dec(&qp->refcount);
				break;
			}

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
}
/**
 * qib_free_all_qps - check for QPs still in use
 * @dd: the device data structure
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned qib_free_all_qps(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;
	unsigned n, qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct qib_ibport *ibp = &dd->pport[n].ibport_data;

		if (!qib_mcast_tree_empty(ibp))
			qp_inuse++;
		if (ibp->qp0)
			qp_inuse++;
		if (ibp->qp1)
			qp_inuse++;
	}

	spin_lock_irqsave(&dev->qpt_lock, flags);
	for (n = 0; n < dev->qp_table_size; n++) {
		qp = dev->qp_table[n];
		dev->qp_table[n] = NULL;

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&dev->qpt_lock, flags);

	return qp_inuse;
}
/**
 * qib_lookup_qpn - return the QP with the given QPN
 * @ibp: the IB port the QP is associated with
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_ibdev *dev = &ppd_from_ibp(ibp)->dd->verbs_dev;
	unsigned long flags;
	struct qib_qp *qp;

	spin_lock_irqsave(&dev->qpt_lock, flags);

	if (qpn == 0)
		qp = ibp->qp0;
	else if (qpn == 1)
		qp = ibp->qp1;
	else
		for (qp = dev->qp_table[qpn % dev->qp_table_size]; qp;
		     qp = qp->next)
			if (qp->ibqp.qp_num == qpn)
				break;
	if (qp)
		atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&dev->qpt_lock, flags);
	return qp;
}
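
/*
 * Editorial sketch (assumed caller pattern, not part of this file): a user
 * of qib_lookup_qpn() must drop the reference it was handed once it is done
 * with the QP, waking any waiter that is draining references.
 */
#if 0
static void lookup_usage_example(struct qib_ibport *ibp, u32 qpn)
{
	struct qib_qp *qp = qib_lookup_qpn(ibp, qpn);

	if (!qp)
		return;
	/* ... process the packet against qp ... */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
#endif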
/**
 * qib_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void qib_reset_qp(struct qib_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= QIB_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_draining = 0;
	qp->s_next_psn = 0;
	qp->s_last_psn = 0;
	qp->s_sending_psn = 0;
	qp->s_sending_hpsn = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_acked = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_mig_state = IB_MIG_MIGRATED;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_sge.num_sge = 0;
}
static void clear_mr_refs(struct qib_qp *qp, int clr_sends)
{
	unsigned n;

	if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags))
		while (qp->s_rdma_read_sge.num_sge) {
			atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount);
			if (--qp->s_rdma_read_sge.num_sge)
				qp->s_rdma_read_sge.sge =
					*qp->s_rdma_read_sge.sg_list++;
		}

	while (qp->r_sge.num_sge) {
		atomic_dec(&qp->r_sge.sge.mr->refcount);
		if (--qp->r_sge.num_sge)
			qp->r_sge.sge = *qp->r_sge.sg_list++;
	}

	if (clr_sends) {
		while (qp->s_last != qp->s_head) {
			struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
			unsigned i;

			for (i = 0; i < wqe->wr.num_sge; i++) {
				struct qib_sge *sge = &wqe->sg_list[i];

				atomic_dec(&sge->mr->refcount);
			}
			if (qp->ibqp.qp_type == IB_QPT_UD ||
			    qp->ibqp.qp_type == IB_QPT_SMI ||
			    qp->ibqp.qp_type == IB_QPT_GSI)
				atomic_dec(&to_iah(wqe->wr.wr.ud.ah)->refcount);
			if (++qp->s_last >= qp->s_size)
				qp->s_last = 0;
		}
		if (qp->s_rdma_mr) {
			atomic_dec(&qp->s_rdma_mr->refcount);
			qp->s_rdma_mr = NULL;
		}
	}

	if (qp->ibqp.qp_type != IB_QPT_RC)
		return;

	for (n = 0; n < ARRAY_SIZE(qp->s_ack_queue); n++) {
		struct qib_ack_entry *e = &qp->s_ack_queue[n];

		if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST &&
		    e->rdma_sge.mr) {
			atomic_dec(&e->rdma_sge.mr->refcount);
			e->rdma_sge.mr = NULL;
		}
	}
}
/**
 * qib_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP r_lock and s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR || qp->state == IB_QPS_RESET)
		goto bail;

	qp->state = IB_QPS_ERR;

	if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) {
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR);
		del_timer(&qp->s_timer);
	}
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->iowait) && !(qp->s_flags & QIB_S_BUSY)) {
		qp->s_flags &= ~QIB_S_ANY_WAIT_IO;
		list_del_init(&qp->iowait);
	}
	spin_unlock(&dev->pending_lock);

	if (!(qp->s_flags & QIB_S_BUSY)) {
		qp->s_hdrwords = 0;
		if (qp->s_rdma_mr) {
			atomic_dec(&qp->s_rdma_mr->refcount);
			qp->s_rdma_mr = NULL;
		}
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
	}

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		qib_schedule_send(qp);

	clear_mr_refs(qp, 0);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct qib_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
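
/*
 * Editorial sketch (assumed caller pattern, mirroring qib_modify_qp() below;
 * not additional driver code): qib_error_qp() is called with r_lock and
 * s_lock held, and a nonzero return means the caller owes the consumer an
 * IB_EVENT_QP_LAST_WQE_REACHED event once the locks are dropped:
 *
 *	int lastwqe;
 *
 *	spin_lock_irq(&qp->r_lock);
 *	spin_lock(&qp->s_lock);
 *	lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
 *	spin_unlock(&qp->s_lock);
 *	spin_unlock_irq(&qp->r_lock);
 *	if (lastwqe) {
 *		struct ib_event ev = {
 *			.device = qp->ibqp.device,
 *			.element.qp = &qp->ibqp,
 *			.event = IB_EVENT_QP_LAST_WQE_REACHED,
 *		};
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 */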
/**
 * qib_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for libibverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		  int attr_mask, struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibqp->device);
	struct qib_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct ib_event ev;
	int lastwqe = 0;
	int mig = 0;
	int ret;
	u32 pmtu = 0; /* for gcc warning only */

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->ah_attr))
			goto inval;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_ah_attr.dlid >= QIB_MULTICAST_LID_BASE)
			goto inval;
		if (qib_check_ah(qp->ibqp.device, &attr->alt_ah_attr))
			goto inval;
		if (attr->alt_pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= qib_get_npkeys(dd_from_dev(dev)))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (qp->ibqp.qp_type == IB_QPT_SMI ||
		    qp->ibqp.qp_type == IB_QPT_GSI ||
		    attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	if (attr_mask & IB_QP_DEST_QPN)
		if (attr->dest_qp_num > QIB_QPN_MASK)
			goto inval;

	if (attr_mask & IB_QP_RETRY_CNT)
		if (attr->retry_cnt > 7)
			goto inval;

	if (attr_mask & IB_QP_RNR_RETRY)
		if (attr->rnr_retry > 7)
			goto inval;

	/*
	 * Don't allow invalid path_mtu values.  OK to set greater
	 * than the active mtu (or even the max_cap, if we have tuned
	 * that to a small mtu).  We'll set qp->path_mtu
	 * to the lesser of requested attribute mtu and active,
	 * for packetizing messages.
	 * Note that the QP port has to be set in INIT and MTU in RTR.
	 */
	if (attr_mask & IB_QP_PATH_MTU) {
		struct qib_devdata *dd = dd_from_dev(dev);
		int mtu, pidx = qp->port_num - 1;

		mtu = ib_mtu_enum_to_int(attr->path_mtu);
		if (mtu == -1)
			goto inval;
		if (mtu > dd->pport[pidx].ibmtu) {
			switch (dd->pport[pidx].ibmtu) {
			case 4096:
				pmtu = IB_MTU_4096;
				break;
			case 2048:
				pmtu = IB_MTU_2048;
				break;
			case 1024:
				pmtu = IB_MTU_1024;
				break;
			case 512:
				pmtu = IB_MTU_512;
				break;
			case 256:
				pmtu = IB_MTU_256;
				break;
			default:
				pmtu = IB_MTU_2048;
			}
		} else
			pmtu = attr->path_mtu;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state == IB_MIG_REARM) {
			if (qp->s_mig_state == IB_MIG_ARMED)
				goto inval;
			if (new_state != IB_QPS_RTS)
				goto inval;
		} else if (attr->path_mig_state == IB_MIG_MIGRATED) {
			if (qp->s_mig_state == IB_MIG_REARM)
				goto inval;
			if (new_state != IB_QPS_RTS && new_state != IB_QPS_SQD)
				goto inval;
			if (qp->s_mig_state == IB_MIG_ARMED)
				mig = 1;
		} else
			goto inval;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > QIB_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->iowait))
				list_del_init(&qp->iowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
			spin_unlock(&qp->s_lock);
			spin_unlock_irq(&qp->r_lock);
			/* Stop the sending work queue and retry timer */
			cancel_work_sync(&qp->s_work);
			del_timer_sync(&qp->s_timer);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			if (qp->s_tx) {
				qib_put_txreq(qp->s_tx);
				qp->s_tx = NULL;
			}
			remove_qp(dev, qp);
			wait_event(qp->wait, !atomic_read(&qp->refcount));
			spin_lock_irq(&qp->r_lock);
			spin_lock(&qp->s_lock);
			clear_mr_refs(qp, 1);
			qib_reset_qp(qp, ibqp->qp_type);
		}
		break;

	case IB_QPS_RTR:
		/* Allow event to retrigger if QP set to RTR more than once */
		qp->r_flags &= ~QIB_R_COMM_EST;
		qp->state = new_state;
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = qib_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_PORT)
		qp->port_num = attr->port_num;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_next_psn = attr->sq_psn & QIB_PSN_MASK;
		qp->s_psn = qp->s_next_psn;
		qp->s_sending_psn = qp->s_next_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
		qp->s_sending_hpsn = qp->s_last_psn;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn & QIB_PSN_MASK;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_srate = attr->ah_attr.static_rate;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		qp->alt_ah_attr = attr->alt_ah_attr;
		qp->s_alt_pkey_index = attr->alt_pkey_index;
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		qp->s_mig_state = attr->path_mig_state;
		if (mig) {
			qp->remote_ah_attr = qp->alt_ah_attr;
			qp->port_num = qp->alt_ah_attr.port_num;
			qp->s_pkey_index = qp->s_alt_pkey_index;
		}
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = pmtu;

	if (attr_mask & IB_QP_RETRY_CNT) {
		qp->s_retry_cnt = attr->retry_cnt;
		qp->s_retry = attr->retry_cnt;
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry_cnt = attr->rnr_retry;
		qp->s_rnr_retry = attr->rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);

	if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		insert_qp(dev, qp);

	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	if (mig) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_PATH_MIG;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock(&qp->s_lock);
	spin_unlock_irq(&qp->r_lock);
	ret = -EINVAL;

bail:
	return ret;
}
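
/*
 * Editorial sketch (hypothetical consumer code, not part of this driver):
 * a ULP reaches this routine through the core verbs entry point, e.g. to
 * push a QP into the error state so both work queues are flushed:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
 *		pr_warn("failed to move QP into error state\n");
 *
 * The IB_QPS_ERR arm above then calls qib_error_qp() and, if that returns
 * true, delivers IB_EVENT_QP_LAST_WQE_REACHED to the consumer's event
 * handler.
 */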
int qib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		 int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct qib_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = qp->s_mig_state;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn & QIB_PSN_MASK;
	attr->sq_psn = qp->s_next_psn & QIB_PSN_MASK;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	attr->alt_ah_attr = qp->alt_ah_attr;
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = qp->s_alt_pkey_index;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = qp->port_num;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = qp->alt_ah_attr.port_num;
	attr->alt_timeout = qp->alt_timeout;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & QIB_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = qp->port_num;
	return 0;
}
/**
 * qib_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 qib_compute_aeth(struct qib_qp *qp)
{
	u32 aeth = qp->r_msn & QIB_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= QIB_AETH_CREDIT_INVAL << QIB_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct qib_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << QIB_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
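
/*
 * Editorial worked example (values assumed for illustration, using the
 * credit table as reconstructed above): with an MSN of 0x000123 and 24
 * receive credits available, the binary search lands on credit code 9
 * (credit_table[9] == 24), so the AETH on the wire is
 *
 *	(9 << QIB_AETH_CREDIT_SHIFT) | 0x000123
 *
 * i.e. the credit syndrome in the top bits and the 24-bit MSN below it,
 * byte-swapped to big endian by cpu_to_be32().
 */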
/**
 * qib_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for libibverbs.so
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata)
{
	struct qib_qp *qp;
	int err;
	struct qib_swqe *swq = NULL;
	struct qib_ibdev *dev;
	struct qib_devdata *dd;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
	    init_attr->cap.max_send_wr > ib_qib_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_qib_max_sges ||
		    init_attr->cap.max_recv_wr > ib_qib_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		if (init_attr->port_num == 0 ||
		    init_attr->port_num > ibpd->device->phys_port_cnt) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
		sz = sizeof(struct qib_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct qib_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct qib_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq)
			sz = 0;
		else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct qib_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->r_lock);
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		init_timer(&qp->s_timer);
		qp->s_timer.data = (unsigned long)qp;
		INIT_WORK(&qp->s_work, qib_do_send);
		INIT_LIST_HEAD(&qp->iowait);
		INIT_LIST_HEAD(&qp->rspwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = QIB_S_SIGNAL_REQ_WR;
		dev = to_idev(ibpd->device);
		dd = dd_from_dev(dev);
		err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
				init_attr->port_num);
		if (err < 0) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_qp;
		}
		qp->ibqp.qp_num = err;
		qp->port_num = init_attr->port_num;
		qp->processor_id = smp_processor_id();
		qib_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See qib_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct qib_rwq) + qp->r_rq.size * sz;

			qp->ip = qib_create_mmap_info(dev, s,
						      ibpd->uobject->context,
						      qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_qib_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
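
/*
 * Editorial sketch (hypothetical consumer code, not part of this driver):
 * a kernel ULP reaches qib_create_qp() through ib_create_qp(), e.g.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.event_handler	= my_qp_event_handler,
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.qp_type	= IB_QPT_RC,
 *		.port_num	= 1,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 2, .max_recv_sge = 2 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 * my_qp_event_handler, send_cq, recv_cq and pd are placeholders; the cap
 * values are only examples and must stay within the ib_qib_max_qp_wrs and
 * ib_qib_max_sges limits checked above.
 */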
/**
 * qib_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int qib_destroy_qp(struct ib_qp *ibqp)
{
	struct qib_qp *qp = to_iqp(ibqp);
	struct qib_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->iowait))
			list_del_init(&qp->iowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~(QIB_S_TIMER | QIB_S_ANY_WAIT);
		spin_unlock_irq(&qp->s_lock);
		cancel_work_sync(&qp->s_work);
		del_timer_sync(&qp->s_timer);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
		if (qp->s_tx) {
			qib_put_txreq(qp->s_tx);
			qp->s_tx = NULL;
		}
		remove_qp(dev, qp);
		wait_event(qp->wait, !atomic_read(&qp->refcount));
		clear_mr_refs(qp, 1);
	} else
		spin_unlock_irq(&qp->s_lock);

	/* all user's cleaned up, mark it available */
	free_qpn(&dev->qpn_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, qib_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
/**
 * qib_init_qpn_table - initialize the QP number table for a device
 * @dd: the device data structure
 * @qpt: the QPN table
 */
void qib_init_qpn_table(struct qib_devdata *dd, struct qib_qpn_table *qpt)
{
	spin_lock_init(&qpt->lock);
	qpt->last = 1;		/* start with QPN 2 */
	qpt->nmaps = 1;
	qpt->mask = dd->qpn_mask;
}
/**
 * qib_free_qpn_table - free the QP number table for a device
 * @qpt: the QPN table
 */
void qib_free_qpn_table(struct qib_qpn_table *qpt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qpt->map); i++)
		if (qpt->map[i].page)
			free_page((unsigned long) qpt->map[i].page);
}
/**
 * qib_get_credit - update the send credit state of a QP from an AETH
 * @qp: the qp whose send credits to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void qib_get_credit(struct qib_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == QIB_AETH_CREDIT_INVAL) {
		if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
			qp->s_flags |= QIB_S_UNLIMITED_CREDIT;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	} else if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & QIB_MSN_MASK;
		if (qib_cmp24(credit, qp->s_lsn) > 0) {
			qp->s_lsn = credit;
			if (qp->s_flags & QIB_S_WAIT_SSN_CREDIT) {
				qp->s_flags &= ~QIB_S_WAIT_SSN_CREDIT;
				qib_schedule_send(qp);
			}
		}
	}
}
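
/*
 * Editorial worked example (values assumed for illustration, using the
 * credit table as reconstructed above): if the peer's AETH carries MSN 100
 * and credit code 9, credit_table[9] is 24, so the new limit sequence
 * number becomes (100 + 24) & QIB_MSN_MASK = 124; the sender may keep
 * posting until its SSN catches up with that LSN, at which point it sets
 * QIB_S_WAIT_SSN_CREDIT and waits for the next credit update.
 */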