/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)
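
/*
 * QP numbers are handed out from a set of bitmap pages: each page of
 * bits covers BITS_PER_PAGE QPNs, and mk_qpn() reconstructs a QPN from
 * the page's index in qpt->map[] plus the bit offset within that page.
 */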
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
	256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144,
	8192, 12288, 16384, 24576, 32768
};
static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */
	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *) page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}
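
/*
 * alloc_qpn() below hands out QPN 0/1 directly for the special SMI/GSI
 * QPs; for all other QPs it scans the bitmap pages starting just past
 * the last QPN handed out, and only grows qpt->map[] once every existing
 * page has been scanned without finding a free bit.
 */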
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}
/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}
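
/*
 * The QP table is a simple hash: a QP lives in table[qp_num % max] and
 * collisions are chained through qp->next.  Each entry holds a reference,
 * so a QP unlinked from the table is only freed once its refcount drops
 * to zero (see ipath_destroy_qp()).
 */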
/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
}
/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp;
	u32 n, qp_inuse = 0;

	spin_lock_irqsave(&qpt->lock, flags);
	for (n = 0; n < qpt->max; n++) {
		qp = qpt->table[n];
		qpt->table[n] = NULL;

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&qpt->lock, flags);

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	return qp_inuse;
}
/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->s_rnr_timeout = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
}
/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */
int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
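
/*
 * Locking in ipath_modify_qp(): the QP's s_lock is held with interrupts
 * disabled for the whole attribute update; the device pending_lock is
 * only taken nested inside it when a transition to RESET pulls the QP
 * off the timer and PIO wait lists.
 */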
/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Don't allow invalid Path MTU values or greater than 2048
	 * unless we are configured for a 4KB MTU.
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	     (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}
int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}
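
/*
 * AETH layout used below: the low IPATH_MSN_MASK bits carry the MSN and
 * the 5-bit credit code sits at IPATH_AETH_CREDIT_SHIFT.  The code is an
 * index into credit_table[]; for example, with 5 free RWQEs the search
 * settles on the largest table entry not above 5 (index 4, value 4), so
 * 4 credits are advertised.
 */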
/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int)credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
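
/*
 * ipath_create_qp() below makes three allocations: the send work queue
 * (vmalloc'ed, swq), the ipath_qp itself with any SRQ/UD scatter-gather
 * list appended (kmalloc'ed), and, when no SRQ is used, the receive work
 * queue (vmalloc_user'ed so it can later be mmap'ed to userspace).
 */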
/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->timerwait))
			list_del_init(&qp->timerwait);
		if (!list_empty(&qp->piowait))
			list_del_init(&qp->piowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
		spin_unlock_irq(&qp->s_lock);
		/* Stop the sending tasklet */
		tasklet_kill(&qp->s_task);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	} else
		spin_unlock_irq(&qp->s_lock);

	ipath_free_qp(&dev->qp_table, qp);

	if (qp->s_tx) {
		atomic_dec(&qp->refcount);
		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
			kfree(qp->s_tx->txreq.map_addr);
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
		spin_unlock_irq(&dev->pending_lock);
		qp->s_tx = NULL;
	}

	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	kfree(qp->r_ud_sg_list);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}
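
/*
 * RC flow control: ipath_get_credit() below converts the credit code in
 * an incoming AETH back into a limit sequence number (s_lsn becomes
 * MSN + credit_table[code]); IPATH_AETH_CREDIT_INVAL lifts the limit
 * entirely by setting s_lsn to ~0.
 */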
/**
 * ipath_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
	    qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		ipath_schedule_send(qp);
}