/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE           (PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)   (((map) - (qpt)->map) * BITS_PER_PAGE + \
                                 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
                                                      BITS_PER_PAGE, off)
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
        0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
        256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192,
        12288, 16384, 24576, 32768
};
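/*
 * Note: credit_table[] follows the IB AETH credit-code encoding: each
 * 5-bit code selects a predefined credit count, growing in roughly 1.5x
 * steps.  For example, code 7 advertises 12 RWQE credits and code 0x1E
 * advertises 32768, while code 0x1F (IPATH_AETH_CREDIT_INVAL) means no
 * credit limit is being advertised at all.
 */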
static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);
        unsigned long flags;

        /*
         * Free the page if someone raced with us installing it.
         */

        spin_lock_irqsave(&qpt->lock, flags);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock_irqrestore(&qpt->lock, flags);
}
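/*
 * get_zeroed_page() can sleep with GFP_KERNEL, so it is called without
 * qpt->lock held; two callers can therefore race to install a page for
 * the same map.  The loser simply frees its page under the lock, so
 * map->page is written at most once.
 */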
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
        u32 i, offset, max_scan, qpn;
        struct qpn_map *map;
        u32 ret = -1;

        if (type == IB_QPT_SMI)
                ret = 0;
        else if (type == IB_QPT_GSI)
                ret = 1;
        if (ret != -1) {
                map = &qpt->map[0];
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page)) {
                                ret = -ENOMEM;
                                goto bail;
                        }
                }
                if (!test_and_set_bit(ret, map->page))
                        atomic_dec(&map->n_free);
                else
                        ret = -EBUSY;
                goto bail;
        }

        qpn = qpt->last + 1;
        if (qpn >= QPN_MAX)
                qpn = 2;
        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;
        for (i = 0;;) {
                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->n_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->n_free);
                                        qpt->last = qpn;
                                        ret = qpn;
                                        goto bail;
                                }
                                offset = find_next_offset(map, offset);
                                qpn = mk_qpn(qpt, map, offset);
                                /*
                                 * This test differs from alloc_pidmap().
                                 * If find_next_offset() does find a zero
                                 * bit, we don't need to check for QPN
                                 * wrapping around past our starting QPN.
                                 * We just need to be sure we don't loop
                                 * forever.
                                 */
                        } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                }
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all the existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                        offset = 0;
                } else if (map < &qpt->map[qpt->nmaps]) {
                        ++map;
                        offset = 0;
                } else {
                        map = &qpt->map[0];
                        offset = 2;
                }
                qpn = mk_qpn(qpt, map, offset);
        }

        ret = -ENOMEM;

bail:
        return ret;
}
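/*
 * The QPN allocator keeps one bitmap page per BITS_PER_PAGE QPNs.  With
 * 4K pages that is 32768 QPNs per qpn_map, so, for example, QPN 40000
 * lives in qpt->map[1] at bit offset 7232, and mk_qpn() maps a
 * (map, offset) pair back to the QPN.  Scanning starts at qpt->last + 1
 * so QP numbers are handed out round-robin instead of immediately
 * reusing a just-freed value.
 */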
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        if (map->page)
                clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
        atomic_inc(&map->n_free);
}
/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
                           enum ib_qp_type type)
{
        unsigned long flags;
        int ret;

        ret = alloc_qpn(qpt, type);
        if (ret < 0)
                goto bail;
        qp->ibqp.qp_num = ret;

        /* Add the QP to the hash table. */
        spin_lock_irqsave(&qpt->lock, flags);

        ret %= qpt->max;
        qp->next = qpt->table[ret];
        qpt->table[ret] = qp;
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&qpt->lock, flags);
        ret = 0;

bail:
        return ret;
}
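/*
 * The QP table is a simple chained hash: the bucket is qp_num % qpt->max
 * and collisions are linked through qp->next.  The reference taken above
 * is the table's own reference on the QP; it is dropped when the QP is
 * unhashed in ipath_free_qp().
 */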
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
        struct ipath_qp *q, **qpp;
        unsigned long flags;
        int fnd = 0;

        spin_lock_irqsave(&qpt->lock, flags);

        /* Remove QP from the hash table. */
        qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
        for (; (q = *qpp) != NULL; qpp = &q->next) {
                if (q == qp) {
                        *qpp = qp->next;
                        qp->next = NULL;
                        atomic_dec(&qp->refcount);
                        fnd = 1;
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);

        if (!fnd)
                return;

        free_qpn(qpt, qp->ibqp.qp_num);

        wait_event(qp->wait, !atomic_read(&qp->refcount));
}
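/*
 * The wait_event() above is what makes teardown safe against concurrent
 * lookups: ipath_lookup_qpn() takes a reference under qpt->lock, so once
 * the QP has been unhashed we only need to wait for those outstanding
 * references to be dropped before the QP can be freed.
 */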
/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
        unsigned long flags;
        struct ipath_qp *qp, *nqp;
        u32 n;

        for (n = 0; n < qpt->max; n++) {
                spin_lock_irqsave(&qpt->lock, flags);
                qp = qpt->table[n];
                qpt->table[n] = NULL;
                spin_unlock_irqrestore(&qpt->lock, flags);

                while (qp) {
                        nqp = qp->next;
                        free_qpn(qpt, qp->ibqp.qp_num);
                        if (!atomic_dec_and_test(&qp->refcount) ||
                            !ipath_destroy_qp(&qp->ibqp))
                                ipath_dbg("QP memory leak!\n");
                        qp = nqp;
                }
        }

        for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
                if (qpt->map[n].page)
                        free_page((unsigned long)qpt->map[n].page);
        }
}
/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        unsigned long flags;
        struct ipath_qp *qp;

        spin_lock_irqsave(&qpt->lock, flags);

        for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
                if (qp->ibqp.qp_num == qpn) {
                        atomic_inc(&qp->refcount);
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);
        return qp;
}
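/*
 * A typical caller pairs the lookup with a reference drop once it is done
 * with the QP, along these lines (a sketch, not a quote of the receive
 * path):
 *
 *      qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
 *      if (qp) {
 *              ... process the packet for qp ...
 *              if (atomic_dec_and_test(&qp->refcount))
 *                      wake_up(&qp->wait);
 *      }
 *
 * The wake_up() lets ipath_free_qp() finish its wait_event() on the
 * reference count.
 */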
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
        qp->qp_access_flags = 0;
        qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_wrid_valid = 0;
        qp->s_rnr_timeout = 0;
        qp->s_wait_credit = 0;
        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
        }
}
/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 */
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        ipath_dbg("QP%d/%d in error state (%d)\n",
                  qp->ibqp.qp_num, qp->remote_qpn, err);

        spin_lock(&dev->pending_lock);
        /* XXX What if it's already removed by the timeout code? */
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        wc.dlid_path_bits = 0;
        if (qp->r_wrid_valid) {
                qp->r_wrid_valid = 0;
                wc.wr_id = qp->r_wr_id;
                wc.opcode = IB_WC_RECV;
                wc.status = err;
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        while (qp->s_last != qp->s_head) {
                struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

                wc.wr_id = wqe->wr.wr_id;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

        if (qp->r_rq.wq) {
                struct ipath_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                wc.opcode = IB_WC_RECV;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

        return ret;
}
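/*
 * ipath_error_qp() only flushes the queues and reports whether a
 * "last WQE reached" event is needed; it does not call the event handler
 * itself.  ipath_modify_qp() below generates IB_EVENT_QP_LAST_WQE_REACHED
 * when this function returns true.
 */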
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        unsigned long flags;
        int lastwqe = 0;
        int ret;

        spin_lock_irqsave(&qp->s_lock, flags);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        /*
         * Note: the chips support a maximum MTU of 4096, but the driver
         * hasn't implemented this feature yet, so don't allow Path MTU
         * values greater than 2048.
         */
        if (attr_mask & IB_QP_PATH_MTU)
                if (attr->path_mtu > IB_MTU_2048)
                        goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                ipath_reset_qp(qp, ibqp->qp_type);
                break;

        case IB_QPS_ERR:
                lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV)
                qp->remote_ah_attr = attr->ah_attr;

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        qp->state = new_state;
        spin_unlock_irqrestore(&qp->s_lock, flags);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        ret = -EINVAL;

bail:
        return ret;
}
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                   int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct ipath_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = 0;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn;
        attr->sq_psn = qp->s_next_psn;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = 0;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = 0;
        attr->alt_timeout = 0;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
}
/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
        u32 aeth = qp->r_msn & IPATH_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
        } else {
                u32 min, max, x;
                u32 credits;
                struct ipath_rwq *wq = qp->r_rq.wq;
                u32 head;
                u32 tail;

                /* sanity check pointers before trusting them */
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */
                min = 0;
                max = 31;
                for (;;) {
                        x = (min + max) / 2;
                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;
                        else if (min == x)
                                break;
                        else
                                min = x;
                }
                aeth |= x << IPATH_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}
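/*
 * Worked example of the credit computation above: with r_rq.size = 8,
 * head = 5 and tail = 2 there are 3 unconsumed RWQEs, so credits = 3 and
 * the binary search picks code 3 (credit_table[3] == 3).  The returned
 * AETH carries that 5-bit code above the 24-bit MSN, per the shift/mask
 * constants defined in ipath_verbs.h.
 */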
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                              struct ib_qp_init_attr *init_attr,
                              struct ib_udata *udata)
{
        struct ipath_qp *qp;
        int err;
        struct ipath_swqe *swq = NULL;
        struct ipath_ibdev *dev;
        size_t sz;
        struct ib_qp *ret;

        if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
            init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
            init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
            init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
        }

        if (init_attr->cap.max_send_sge +
            init_attr->cap.max_recv_sge +
            init_attr->cap.max_send_wr +
            init_attr->cap.max_recv_wr == 0) {
                ret = ERR_PTR(-EINVAL);
                goto bail;
        }

        switch (init_attr->qp_type) {
        case IB_QPT_UC:
        case IB_QPT_RC:
        case IB_QPT_UD:
        case IB_QPT_SMI:
        case IB_QPT_GSI:
                sz = sizeof(struct ipath_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct ipath_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (swq == NULL) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail;
                }
                sz = sizeof(*qp);
                if (init_attr->srq) {
                        struct ipath_srq *srq = to_isrq(init_attr->srq);

                        sz += sizeof(*qp->r_sg_list) *
                                srq->rq.max_sge;
                } else
                        sz += sizeof(*qp->r_sg_list) *
                                init_attr->cap.max_recv_sge;
                qp = kmalloc(sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                        goto bail_swq;
                }
                if (init_attr->srq) {
                        sz = 0;
                        qp->r_rq.size = 0;
                        qp->r_rq.max_sge = 0;
                        qp->r_rq.wq = NULL;
                        init_attr->cap.max_recv_wr = 0;
                        init_attr->cap.max_recv_sge = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct ipath_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_qp;
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
                INIT_LIST_HEAD(&qp->piowait);
                INIT_LIST_HEAD(&qp->timerwait);
                qp->state = IB_QPS_RESET;
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
                else
                        qp->s_flags = 0;
                dev = to_idev(ibpd->device);
                err = ipath_alloc_qpn(&dev->qp_table, qp,
                                      init_attr->qp_type);
                if (err) {
                        ret = ERR_PTR(err);
                        vfree(qp->r_rq.wq);
                        goto bail_qp;
                }
                qp->ip = NULL;
                ipath_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
                goto bail;
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {
                if (!qp->r_rq.wq) {
                        __u64 offset = 0;

                        err = ib_copy_to_udata(udata, &offset,
                                               sizeof(offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                } else {
                        u32 s = sizeof(struct ipath_rwq) +
                                qp->r_rq.size * sz;

                        qp->ip =
                            ipath_create_mmap_info(dev, s,
                                                   ibpd->uobject->context,
                                                   qp->r_rq.wq);
                        if (!qp->ip) {
                                ret = ERR_PTR(-ENOMEM);
                                goto bail_ip;
                        }

                        err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                               sizeof(qp->ip->offset));
                        if (err) {
                                ret = ERR_PTR(err);
                                goto bail_ip;
                        }
                }
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_ipath_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
                goto bail_ip;
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip) {
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
                spin_unlock_irq(&dev->pending_lock);
        }

        ret = &qp->ibqp;
        goto bail;

bail_ip:
        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        ipath_free_qp(&dev->qp_table, qp);
bail_qp:
        kfree(qp);
bail_swq:
        vfree(swq);
bail:
        return ret;
}
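/*
 * For userspace QPs the receive work queue is allocated with
 * vmalloc_user() above, and the offset copied back through udata is what
 * the userspace verbs library (ipathverbs.so) later passes to mmap();
 * ipath_mmap() resolves that offset via the dev->pending_mmaps list the
 * ipath_mmap_info entry was added to here.
 */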
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        unsigned long flags;

        spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
        spin_unlock_irqrestore(&qp->s_lock, flags);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        /* Stop the sending tasklet. */
        tasklet_kill(&qp->s_task);

        /* Make sure the QP isn't on the timeout list. */
        spin_lock_irqsave(&dev->pending_lock, flags);
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);

        /*
         * Make sure that the QP is not in the QPN table so receive
         * interrupts will discard packets for this QP.  XXX Also remove QP
         * from multicast table.
         */
        if (atomic_read(&qp->refcount) != 0)
                ipath_free_qp(&dev->qp_table, qp);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);
        else
                vfree(qp->r_rq.wq);
        vfree(qp->s_wq);
        kfree(qp);
        return 0;
}
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
        int i;
        int ret;

        idev->qp_table.last = 1;        /* QPN 0 and 1 are special. */
        idev->qp_table.max = size;
        idev->qp_table.nmaps = 1;
        idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
                                       GFP_KERNEL);
        if (idev->qp_table.table == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
                atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
                idev->qp_table.map[i].page = NULL;
        }

        ret = 0;

bail:
        return ret;
}
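/*
 * qp_table.last starts at 1 because QPN 0 and QPN 1 are reserved for the
 * SMI and GSI special QPs; alloc_qpn() only hands those out when the QP
 * type explicitly asks for them, and general allocation begins at QPN 2.
 */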
/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held and interrupts disabled.
 */
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

        ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
                  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

        spin_lock(&dev->pending_lock);
        /* XXX What if it's already removed by the timeout code? */
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
        if (++qp->s_last >= qp->s_size)
                qp->s_last = 0;

        wc->status = IB_WC_WR_FLUSH_ERR;

        while (qp->s_last != qp->s_head) {
                wqe = get_swqe_ptr(qp, qp->s_last);
                wc->wr_id = wqe->wr.wr_id;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->state = IB_QPS_SQE;
}
/**
 * ipath_get_credit - handle a credit update in an incoming AETH
 * @qp: the QP the AETH was received for
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == IPATH_AETH_CREDIT_INVAL)
                qp->s_lsn = (u32) -1;
        else if (qp->s_lsn != (u32) -1) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
                if (ipath_cmp24(credit, qp->s_lsn) > 0)
                        qp->s_lsn = credit;
        }

        /* Restart sending if it was blocked due to lack of credits. */
        if (qp->s_cur != qp->s_head &&
            (qp->s_lsn == (u32) -1 ||
             ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
                         qp->s_lsn + 1) <= 0))
                tasklet_hi_schedule(&qp->s_task);
}