/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
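/*
 * Usage sketch for the helpers above (mirrors alloc_qpn() below): a QPN is
 * treated as a bit index into an array of per-page bitmaps, so converting
 * between a QPN and a (map, offset) pair is plain arithmetic, roughly:
 *
 *	map    = &qpt->map[qpn / BITS_PER_PAGE];
 *	offset = qpn & BITS_PER_PAGE_MASK;
 *	qpn    = mk_qpn(qpt, map, offset);
 */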
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
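/*
 * Sketch of how this table is consumed (see ipath_compute_aeth() and
 * ipath_get_credit() below): the 5-bit credit code carried in an AETH
 * indexes credit_table[] to recover an approximate RWQE count, roughly:
 *
 *	credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
 *	rwqes  = credit_table[credit];
 */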
static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);
        unsigned long flags;

        /*
         * Free the page if someone raced with us installing it.
         */
        spin_lock_irqsave(&qpt->lock, flags);
        if (map->page)
                free_page(page);
        else
                map->page = (void *)page;
        spin_unlock_irqrestore(&qpt->lock, flags);
}
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
        u32 i, offset, max_scan, qpn;

        if (type == IB_QPT_SMI)
                ret = 0;
        else if (type == IB_QPT_GSI)
                ret = 1;

                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page)) {

                if (!test_and_set_bit(ret, map->page))
                        atomic_dec(&map->n_free);

        offset = qpn & BITS_PER_PAGE_MASK;
        map = &qpt->map[qpn / BITS_PER_PAGE];
        max_scan = qpt->nmaps - !offset;

                if (unlikely(!map->page)) {
                        get_map_page(qpt, map);
                        if (unlikely(!map->page))
                                break;
                }
                if (likely(atomic_read(&map->n_free))) {
                        do {
                                if (!test_and_set_bit(offset, map->page)) {
                                        atomic_dec(&map->n_free);

                                offset = find_next_offset(map, offset);
                                qpn = mk_qpn(qpt, map, offset);
                                /*
                                 * This test differs from alloc_pidmap().
                                 * If find_next_offset() does find a zero
                                 * bit, we don't need to check for QPN
                                 * wrapping around past our starting QPN.
                                 * We just need to be sure we don't loop
                                 * forever.
                                 */
                        } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
                }
                /*
                 * In order to keep the number of pages allocated to a
                 * minimum, we scan all existing pages before increasing
                 * the size of the bitmap table.
                 */
                if (++i > max_scan) {
                        if (qpt->nmaps == QPNMAP_ENTRIES)
                                break;
                        map = &qpt->map[qpt->nmaps++];
                } else if (map < &qpt->map[qpt->nmaps]) {

                qpn = mk_qpn(qpt, map, offset);
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        struct qpn_map *map;

        map = qpt->map + qpn / BITS_PER_PAGE;
        clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
        atomic_inc(&map->n_free);
}
/**
 * ipath_alloc_qpn - allocate a QP number
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
        ret = alloc_qpn(qpt, type);

        qp->ibqp.qp_num = ret;

        /* Add the QP to the hash table. */
        spin_lock_irqsave(&qpt->lock, flags);

        qp->next = qpt->table[ret];
        qpt->table[ret] = qp;
        atomic_inc(&qp->refcount);

        spin_unlock_irqrestore(&qpt->lock, flags);
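/*
 * Placement sketch: the hash table is indexed by QPN modulo the table
 * size, so with qpt->max == 256 (for example) QP number 0x1234 lands in
 * bucket 0x34 and is pushed onto that bucket's singly linked list.
 * ipath_free_qp() and ipath_lookup_qpn() below walk the same chains.
 */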
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
        struct ipath_qp *q, **qpp;

        spin_lock_irqsave(&qpt->lock, flags);

        /* Remove QP from the hash table. */
        qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
        for (; (q = *qpp) != NULL; qpp = &q->next) {

                        atomic_dec(&qp->refcount);

        spin_unlock_irqrestore(&qpt->lock, flags);
/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
        unsigned long flags;
        struct ipath_qp *qp;
        u32 n, qp_inuse = 0;

        spin_lock_irqsave(&qpt->lock, flags);
        for (n = 0; n < qpt->max; n++) {
                qp = qpt->table[n];
                qpt->table[n] = NULL;

                for (; qp; qp = qp->next)
                        qp_inuse++;
        }
        spin_unlock_irqrestore(&qpt->lock, flags);

        for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
                if (qpt->map[n].page)
                        free_page((unsigned long) qpt->map[n].page);
        return qp_inuse;
}
/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
        unsigned long flags;
        struct ipath_qp *qp;

        spin_lock_irqsave(&qpt->lock, flags);

        for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
                if (qp->ibqp.qp_num == qpn) {
                        atomic_inc(&qp->refcount);
                        break;
                }
        }

        spin_unlock_irqrestore(&qpt->lock, flags);
        return qp;
}
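/*
 * Caller-side sketch: the reference taken here pins the QP until the
 * caller drops it again, e.g.
 *
 *	qp = ipath_lookup_qpn(&dev->qp_table, qpn);
 *	if (qp) {
 *		... process the packet against qp ...
 *		atomic_dec(&qp->refcount);
 *	}
 *
 * ipath_destroy_qp() below waits for this reference count to drain
 * before tearing the QP down.
 */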
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
        qp->qp_access_flags = 0;
        atomic_set(&qp->s_dma_busy, 0);
        qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;

        if (type == IB_QPT_RC) {
                qp->s_state = IB_OPCODE_RC_SEND_LAST;
                qp->r_state = IB_OPCODE_RC_SEND_LAST;
        } else {
                qp->s_state = IB_OPCODE_UC_SEND_LAST;
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

        qp->s_rnr_timeout = 0;

        memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;

        qp->r_rq.wq->head = 0;
        qp->r_rq.wq->tail = 0;
/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
        int ret = 0;

        if (qp->state == IB_QPS_ERR)
                goto bail;

        qp->state = IB_QPS_ERR;

        spin_lock(&dev->pending_lock);
        if (!list_empty(&qp->timerwait))
                list_del_init(&qp->timerwait);
        if (!list_empty(&qp->piowait))
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);

        /* Schedule the sending tasklet to drain the send work queue. */
        if (qp->s_last != qp->s_head)
                ipath_schedule_send(qp);

        memset(&wc, 0, sizeof(wc));
        wc.opcode = IB_WC_RECV;

        if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
                wc.wr_id = qp->r_wr_id;
                wc.status = err;
                ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
        }
        wc.status = IB_WC_WR_FLUSH_ERR;

        if (qp->r_rq.wq) {
                struct ipath_rwq *wq;
                u32 head;
                u32 tail;

                spin_lock(&qp->r_rq.lock);

                /* sanity check pointers before trusting them */
                wq = qp->r_rq.wq;
                head = wq->head;
                if (head >= qp->r_rq.size)
                        head = 0;
                tail = wq->tail;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                while (tail != head) {
                        wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
                        if (++tail >= qp->r_rq.size)
                                tail = 0;
                        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
                }
                wq->tail = tail;

                spin_unlock(&qp->r_rq.lock);
        } else if (qp->ibqp.event_handler)
                ret = 1;

bail:
        return ret;
}
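/*
 * Caller-side sketch: the return value tells the caller whether to raise
 * the last-WQE-reached event, roughly:
 *
 *	if (ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR)) {
 *		struct ib_event ev;
 *
 *		ev.device = qp->ibqp.device;
 *		ev.element.qp = &qp->ibqp;
 *		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 *		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 *	}
 *
 * which is the pattern ipath_modify_qp() below follows when moving a QP
 * into the error state.
 */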
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        struct ipath_qp *qp = to_iqp(ibqp);
        enum ib_qp_state cur_state, new_state;
        int lastwqe = 0;
        int ret;

        spin_lock_irq(&qp->s_lock);

        cur_state = attr_mask & IB_QP_CUR_STATE ?
                attr->cur_qp_state : qp->state;
        new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

        if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
                                attr_mask, IB_LINK_LAYER_UNSPECIFIED))
                goto inval;

        if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;

                if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
                    (attr->ah_attr.grh.sgid_index > 1))
                        goto inval;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                if (attr->min_rnr_timer > 31)
                        goto inval;

        if (attr_mask & IB_QP_PORT)
                if (attr->port_num == 0 ||
                    attr->port_num > ibqp->device->phys_port_cnt)
                        goto inval;

        /*
         * don't allow invalid Path MTU values or greater than 2048
         * unless we are configured for a 4KB MTU
         */
        if ((attr_mask & IB_QP_PATH_MTU) &&
            (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
             (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
                goto inval;

        if (attr_mask & IB_QP_PATH_MIG_STATE)
                if (attr->path_mig_state != IB_MIG_MIGRATED &&
                    attr->path_mig_state != IB_MIG_REARM)
                        goto inval;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
                        goto inval;

        switch (new_state) {
        case IB_QPS_RESET:
                if (qp->state != IB_QPS_RESET) {
                        qp->state = IB_QPS_RESET;
                        spin_lock(&dev->pending_lock);
                        if (!list_empty(&qp->timerwait))
                                list_del_init(&qp->timerwait);
                        if (!list_empty(&qp->piowait))
                                list_del_init(&qp->piowait);
                        spin_unlock(&dev->pending_lock);
                        qp->s_flags &= ~IPATH_S_ANY_WAIT;
                        spin_unlock_irq(&qp->s_lock);
                        /* Stop the sending tasklet */
                        tasklet_kill(&qp->s_task);
                        wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
                        spin_lock_irq(&qp->s_lock);
                }
                ipath_reset_qp(qp, ibqp->qp_type);
                break;

        case IB_QPS_SQD:
                qp->s_draining = qp->s_last != qp->s_cur;
                qp->state = new_state;
                break;

        case IB_QPS_SQE:
                if (qp->ibqp.qp_type == IB_QPT_RC)
                        goto inval;
                qp->state = new_state;
                break;

        case IB_QPS_ERR:
                lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;

        default:
                qp->state = new_state;
                break;
        }

        if (attr_mask & IB_QP_PKEY_INDEX)
                qp->s_pkey_index = attr->pkey_index;

        if (attr_mask & IB_QP_DEST_QPN)
                qp->remote_qpn = attr->dest_qp_num;

        if (attr_mask & IB_QP_SQ_PSN) {
                qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }

        if (attr_mask & IB_QP_RQ_PSN)
                qp->r_psn = attr->rq_psn;

        if (attr_mask & IB_QP_ACCESS_FLAGS)
                qp->qp_access_flags = attr->qp_access_flags;

        if (attr_mask & IB_QP_AV) {
                qp->remote_ah_attr = attr->ah_attr;
                qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
        }

        if (attr_mask & IB_QP_PATH_MTU)
                qp->path_mtu = attr->path_mtu;

        if (attr_mask & IB_QP_RETRY_CNT)
                qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

        if (attr_mask & IB_QP_RNR_RETRY) {
                qp->s_rnr_retry = attr->rnr_retry;
                if (qp->s_rnr_retry > 7)
                        qp->s_rnr_retry = 7;
                qp->s_rnr_retry_cnt = qp->s_rnr_retry;
        }

        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;

        if (attr_mask & IB_QP_TIMEOUT)
                qp->timeout = attr->timeout;

        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;

        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
                qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
                qp->s_max_rd_atomic = attr->max_rd_atomic;

        spin_unlock_irq(&qp->s_lock);

        if (lastwqe) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
        }
        ret = 0;
        goto bail;

inval:
        spin_unlock_irq(&qp->s_lock);
        ret = -EINVAL;

bail:
        return ret;
}
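/*
 * Consumer sketch: verbs clients reach this function through the core
 * ib_modify_qp() entry point, which dispatches via the device's modify_qp
 * method.  For example, flushing a QP by moving it to the error state
 * looks roughly like:
 *
 *	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
 *
 *	err = ib_modify_qp(ibqp, &attr, IB_QP_STATE);
 */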
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
        struct ipath_qp *qp = to_iqp(ibqp);

        attr->qp_state = qp->state;
        attr->cur_qp_state = attr->qp_state;
        attr->path_mtu = qp->path_mtu;
        attr->path_mig_state = 0;
        attr->qkey = qp->qkey;
        attr->rq_psn = qp->r_psn;
        attr->sq_psn = qp->s_next_psn;
        attr->dest_qp_num = qp->remote_qpn;
        attr->qp_access_flags = qp->qp_access_flags;
        attr->cap.max_send_wr = qp->s_size - 1;
        attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
        attr->cap.max_send_sge = qp->s_max_sge;
        attr->cap.max_recv_sge = qp->r_rq.max_sge;
        attr->cap.max_inline_data = 0;
        attr->ah_attr = qp->remote_ah_attr;
        memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
        attr->pkey_index = qp->s_pkey_index;
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = qp->s_draining;
        attr->max_rd_atomic = qp->s_max_rd_atomic;
        attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;

        attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry_cnt;
        attr->alt_port_num = 0;
        attr->alt_timeout = 0;

        init_attr->event_handler = qp->ibqp.event_handler;
        init_attr->qp_context = qp->ibqp.qp_context;
        init_attr->send_cq = qp->ibqp.send_cq;
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
        if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
                init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
        else
                init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
}
/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
        u32 aeth = qp->r_msn & IPATH_MSN_MASK;

        if (qp->ibqp.srq) {
                /*
                 * Shared receive queues don't generate credits.
                 * Set the credit field to the invalid value.
                 */
                aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
        } else {
                struct ipath_rwq *wq = qp->r_rq.wq;

                /* sanity check pointers before trusting them */
                if (head >= qp->r_rq.size)
                        head = 0;
                if (tail >= qp->r_rq.size)
                        tail = 0;
                /*
                 * Compute the number of credits available (RWQEs).
                 * XXX Not holding the r_rq.lock here so there is a small
                 * chance that the pair of reads are not atomic.
                 */
                credits = head - tail;
                if ((int)credits < 0)
                        credits += qp->r_rq.size;
                /*
                 * Binary search the credit table to find the code to
                 * use.
                 */

                        if (credit_table[x] == credits)
                                break;
                        if (credit_table[x] > credits)
                                max = x;

                aeth |= x << IPATH_AETH_CREDIT_SHIFT;
        }
        return cpu_to_be32(aeth);
}
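/*
 * Worked example for the credit computation above: with r_rq.size == 8,
 * head == 2 and tail == 5, the unsigned subtraction wraps:
 *
 *	credits = head - tail;		(int)credits == -3
 *	credits += qp->r_rq.size;	credits == 5
 *
 * i.e. five RWQEs are still available, and the search then picks the
 * largest credit code whose table entry does not exceed that count.
 */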
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
        struct ipath_swqe *swq = NULL;
        struct ipath_ibdev *dev;

        if (init_attr->create_flags) {
                ret = ERR_PTR(-EINVAL);
        }

        if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
            init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
                ret = ERR_PTR(-EINVAL);
        }

        /* Check receive queue parameters if no SRQ is specified. */
        if (!init_attr->srq) {
                if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
                    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
                        ret = ERR_PTR(-EINVAL);
                }
                if (init_attr->cap.max_send_sge +
                    init_attr->cap.max_send_wr +
                    init_attr->cap.max_recv_sge +
                    init_attr->cap.max_recv_wr == 0) {
                        ret = ERR_PTR(-EINVAL);
                }
        }

        switch (init_attr->qp_type) {

                sz = sizeof(struct ipath_sge) *
                        init_attr->cap.max_send_sge +
                        sizeof(struct ipath_swqe);
                swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
                if (!swq) {
                        ret = ERR_PTR(-ENOMEM);
                }

                if (init_attr->srq) {
                        struct ipath_srq *srq = to_isrq(init_attr->srq);

                        if (srq->rq.max_sge > 1)
                                sg_list_sz = sizeof(*qp->r_sg_list) *
                                        (srq->rq.max_sge - 1);
                } else if (init_attr->cap.max_recv_sge > 1)
                        sg_list_sz = sizeof(*qp->r_sg_list) *
                                (init_attr->cap.max_recv_sge - 1);
                qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
                if (!qp) {
                        ret = ERR_PTR(-ENOMEM);
                }
                if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
                    init_attr->qp_type == IB_QPT_SMI ||
                    init_attr->qp_type == IB_QPT_GSI)) {
                        qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
                        if (!qp->r_ud_sg_list) {
                                ret = ERR_PTR(-ENOMEM);
                        }
                } else
                        qp->r_ud_sg_list = NULL;
                if (init_attr->srq) {
                        qp->r_rq.max_sge = 0;
                        init_attr->cap.max_recv_wr = 0;
                        init_attr->cap.max_recv_sge = 0;
                } else {
                        qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
                        qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
                        sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
                                sizeof(struct ipath_rwqe);
                        qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
                                                   qp->r_rq.size * sz);
                        if (!qp->r_rq.wq) {
                                ret = ERR_PTR(-ENOMEM);
                        }
                }

                /*
                 * ib_create_qp() will initialize qp->ibqp
                 * except for qp->ibqp.qp_num.
                 */
                spin_lock_init(&qp->s_lock);
                spin_lock_init(&qp->r_rq.lock);
                atomic_set(&qp->refcount, 0);
                init_waitqueue_head(&qp->wait);
                init_waitqueue_head(&qp->wait_dma);
                tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
                INIT_LIST_HEAD(&qp->piowait);
                INIT_LIST_HEAD(&qp->timerwait);
                qp->state = IB_QPS_RESET;

                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
                if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
                        qp->s_flags = IPATH_S_SIGNAL_REQ_WR;

                dev = to_idev(ibpd->device);
                err = ipath_alloc_qpn(&dev->qp_table, qp,
                                      init_attr->qp_type);

                ipath_reset_qp(qp, init_attr->qp_type);
                break;

        default:
                /* Don't support raw QPs */
                ret = ERR_PTR(-ENOSYS);
        }

        init_attr->cap.max_inline_data = 0;

        /*
         * Return the address of the RWQ as the offset to mmap.
         * See ipath_mmap() for details.
         */
        if (udata && udata->outlen >= sizeof(__u64)) {

                err = ib_copy_to_udata(udata, &offset,
                                       sizeof(offset));

                u32 s = sizeof(struct ipath_rwq) +
                        qp->r_rq.size * sz;

                qp->ip = ipath_create_mmap_info(dev, s,
                                                ibpd->uobject->context,
                                                qp->r_rq.wq);
                if (!qp->ip) {
                        ret = ERR_PTR(-ENOMEM);
                }

                err = ib_copy_to_udata(udata, &(qp->ip->offset),
                                       sizeof(qp->ip->offset));
        }

        spin_lock(&dev->n_qps_lock);
        if (dev->n_qps_allocated == ib_ipath_max_qps) {
                spin_unlock(&dev->n_qps_lock);
                ret = ERR_PTR(-ENOMEM);
        }

        dev->n_qps_allocated++;
        spin_unlock(&dev->n_qps_lock);

        spin_lock_irq(&dev->pending_lock);
        list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
        spin_unlock_irq(&dev->pending_lock);

        kref_put(&qp->ip->ref, ipath_release_mmap_info);

        ipath_free_qp(&dev->qp_table, qp);
        free_qpn(&dev->qp_table, qp->ibqp.qp_num);

        kfree(qp->r_ud_sg_list);
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_ibdev *dev = to_idev(ibqp->device);

        /* Make sure HW and driver activity is stopped. */
        spin_lock_irq(&qp->s_lock);
        if (qp->state != IB_QPS_RESET) {
                qp->state = IB_QPS_RESET;
                spin_lock(&dev->pending_lock);
                if (!list_empty(&qp->timerwait))
                        list_del_init(&qp->timerwait);
                if (!list_empty(&qp->piowait))
                        list_del_init(&qp->piowait);
                spin_unlock(&dev->pending_lock);
                qp->s_flags &= ~IPATH_S_ANY_WAIT;
                spin_unlock_irq(&qp->s_lock);
                /* Stop the sending tasklet */
                tasklet_kill(&qp->s_task);
                wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
        } else
                spin_unlock_irq(&qp->s_lock);

        ipath_free_qp(&dev->qp_table, qp);

        if (qp->s_tx) {
                atomic_dec(&qp->refcount);
                if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
                        kfree(qp->s_tx->txreq.map_addr);
                spin_lock_irq(&dev->pending_lock);
                list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
                spin_unlock_irq(&dev->pending_lock);
        }

        wait_event(qp->wait, !atomic_read(&qp->refcount));

        /* all users cleaned up, mark it available */
        free_qpn(&dev->qp_table, qp->ibqp.qp_num);
        spin_lock(&dev->n_qps_lock);
        dev->n_qps_allocated--;
        spin_unlock(&dev->n_qps_lock);

        if (qp->ip)
                kref_put(&qp->ip->ref, ipath_release_mmap_info);

        kfree(qp->r_ud_sg_list);
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
        idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
        idev->qp_table.max = size;
        idev->qp_table.nmaps = 1;
        idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
                                       GFP_KERNEL);
        if (idev->qp_table.table == NULL) {

        }

        for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
                atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
                idev->qp_table.map[i].page = NULL;
        }
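/*
 * Sizing sketch: each qpn_map starts out with BITS_PER_PAGE free QPNs and
 * no bitmap page allocated; pages are only pulled in by get_map_page()
 * when alloc_qpn() first touches that range.  With 4 KB pages that is
 * PAGE_SIZE * BITS_PER_BYTE == 32768 QPNs per map, so the QPN space can
 * grow up to QPNMAP_ENTRIES maps (bounded by QPN_MAX) while memory use
 * stays proportional to the ranges actually in use.
 */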
/**
 * ipath_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
        u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

        /*
         * If the credit is invalid, we can send
         * as many packets as we like.  Otherwise, we have to
         * honor the credit field.
         */
        if (credit == IPATH_AETH_CREDIT_INVAL)
                qp->s_lsn = (u32) -1;
        else if (qp->s_lsn != (u32) -1) {
                /* Compute new LSN (i.e., MSN + credit) */
                credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
                if (ipath_cmp24(credit, qp->s_lsn) > 0)
                        qp->s_lsn = credit;
        }

        /* Restart sending if it was blocked due to lack of credits. */
        if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
            qp->s_cur != qp->s_head &&
            (qp->s_lsn == (u32) -1 ||
             ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
                         qp->s_lsn + 1) <= 0))
                ipath_schedule_send(qp);
}
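/*
 * Worked example for the limit update above: if an ACK arrives whose AETH
 * carries credit code 5, and the result advances the previous limit, the
 * new limit becomes
 *
 *	qp->s_lsn = (aeth + credit_table[5]) & IPATH_MSN_MASK;
 *
 * i.e. the peer's MSN plus the decoded RWQE count, modulo 24 bits.  The
 * send path is expected to stop and set IPATH_S_WAIT_SSN_CREDIT once the
 * next WQE's SSN would pass s_lsn, and the check above restarts it when a
 * later ACK extends the window.
 */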