/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_iverbs.h"

static struct kmem_cache *qp_cache;
58 * attributes not supported by query qp
60 #define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
61 IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};
/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}
/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};

/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;

	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}
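/*
 * Illustrative sketch (not part of the driver): get_modqp_statetrans()
 * collapses a (from, to) state pair into a single transition index. For
 * example, a RESET -> INIT modify request maps to IB_QPST_RESET2INIT,
 * while an unsupported pair such as RTR -> SQD falls through and yields
 * -EINVAL, which callers report as an invalid state change:
 *
 *	int t;
 *
 *	t = get_modqp_statetrans(IB_QPS_RESET, IB_QPS_INIT);
 *	// t == IB_QPST_RESET2INIT
 *	t = get_modqp_statetrans(IB_QPS_RTR, IB_QPS_SQD);
 *	// t == -EINVAL
 */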
/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	case IB_QPT_RAW_ETHERTYPE:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}
/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}
/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = __pa(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}
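/*
 * Note on the expected_hret contract above (illustrative): when a QP owns
 * both queues, the send queue is registered first and its final page must
 * complete with H_PAGE_REGISTERED, because the receive queue's pages are
 * still to come; only the overall last page may return H_SUCCESS. The
 * create path therefore calls, for the send queue,
 *
 *	init_qp_queue(shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
 *		      HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
 *		      &parms.squeue, swqe_size);
 *
 * and passes H_SUCCESS unconditionally for the receive queue.
 */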
static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}
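/*
 * Worked example (illustrative): for a low latency QP the WQE size is a
 * pure power of two derived from the SGE count, e.g. act_nr_sge = 2 gives
 * 128 << 2 = 512 bytes. For a regular QP the size is instead the offset
 * of the end of the scatter/gather list inside struct ehca_wqe, so it
 * grows linearly with act_nr_sge.
 */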
static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
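/*
 * Illustrative: the rounding loop above walks the series 4, 12, 28, 60,
 * 124, 252 (act_nr_sge = 4 + 2 * act_nr_sge), i.e. the SGE counts whose
 * resulting regular WQE sizes are powers of two. A request for, say, 10
 * SGEs is rounded up to 12; only if the total queue size then fits a
 * small page category (page_size 2 or 3) is the queue marked is_small.
 */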
/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);
}
static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;
		qmap->map[i].cqe_req = 0;
	}
}
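/*
 * Illustrative: after reset_queue_map(), qmap->tail sits on the last
 * entry, so the first next_index(qmap->tail, qmap->entries) wraps to
 * index 0; the map then reports "nothing pending" because every entry is
 * marked reported = 1 and cqe_req = 0, which is what suppresses bogus
 * flush CQEs on a freshly reset queue.
 */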
/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0, is_user = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ  pd=%p  max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	if (pd->uobject && udata) {
		is_user = 1;
		context = pd->uobject->context;
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	idr_preload(GFP_KERNEL);
	write_lock_irqsave(&ehca_qp_idr_lock, flags);

	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
	if (ret >= 0)
		my_qp->token = ret;

	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	idr_preload_end();
	if (ret < 0) {
		if (ret == -ENOSPC) {
			ret = -EINVAL;
			ehca_err(pd->device, "Invalid number of qp");
		} else {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't allocate new idr entry.");
		}
		goto create_qp_exit0;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}
		break;
	default:
		break;
	}

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		if (!is_user) {
			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
				my_qp->ipz_squeue.qe_size;
			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->sq_map.map) {
				ehca_err(pd->device, "Couldn't allocate squeue "
					 "map ret=%i", ret);
				ret = -ENOMEM;
				goto create_qp_exit3;
			}
			INIT_LIST_HEAD(&my_qp->sq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->sq_map);
		}
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}
		if (!is_user) {
			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
				my_qp->ipz_rqueue.qe_size;
			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->rq_map.map) {
				ehca_err(pd->device, "Couldn't allocate rqueue "
					 "map ret=%i", ret);
				ret = -ENOMEM;
				goto create_qp_exit5;
			}
			INIT_LIST_HEAD(&my_qp->rq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->rq_map);
		}
	} else if (init_attr->srq && !is_user) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ret = -ENOMEM;
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp) && !is_user)
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp) && !is_user)
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}
struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
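/*
 * Usage sketch (illustrative, from a hypothetical kernel consumer):
 * ehca_create_qp() is reached through the standard verbs entry point,
 * e.g.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.qp_type     = IB_QPT_RC,
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.send_cq     = send_cq,
 *		.recv_cq     = recv_cq,
 *		.cap = {
 *			.max_send_wr  = 64,
 *			.max_recv_wr  = 64,
 *			.max_send_sge = 4,
 *			.max_recv_sge = 4,
 *		},
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 * On return, init_attr.cap holds the actual (possibly larger) values
 * granted by the firmware, as filled in by internal_create_qp().
 */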
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);
struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}
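/*
 * Illustrative: unlike a regular QP, which the consumer drives through
 * RESET -> INIT -> RTR itself, an SRQ is made usable here at create time;
 * the three hipz_h_modify_qp() calls above correspond to INIT, ENABLE and
 * RTR. A consumer then only needs something like
 *
 *	struct ib_srq_init_attr srq_attr = {
 *		.attr = { .max_wr = 256, .max_sge = 3 },
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &srq_attr);
 */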
/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = __va((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);

	return 0;
}
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;
	unsigned int tail_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = __va(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;

	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	return 0;
}
static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				     &my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				     &my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}
/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int is_user = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	if (ibqp->uobject)
		is_user = 1;

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable con
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
	    && !is_user) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR && !is_user) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		if (!is_user)
			reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp) && !is_user)
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
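/*
 * Usage sketch (illustrative): a consumer brings an RC QP to a working
 * state with the usual three-step verbs sequence, each step landing in
 * internal_modify_qp() above:
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.port_num        = 1,
 *		.pkey_index      = 0,
 *		.qp_access_flags = 0,
 *	};
 *	ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT |
 *		     IB_QP_PKEY_INDEX | IB_QP_ACCESS_FLAGS);
 *	// then IB_QPS_RTR (with path/AV attributes), then IB_QPS_RTS
 *
 * Each state pair is validated via ib_modify_qp_is_ok() and
 * get_modqp_statetrans() before the control block is sent to firmware.
 */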
int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    to be connected and module option nr_ports=1 to be given by
	 *    user, which is very inconvenient for end user.
	 * 2) Firmware accepts modify_qp() only if respective port has become
	 *    active. Older code had a wait loop of 30sec create_qp()/
	 *    define_aqp1(), which is not appropriate in practice. This
	 *    code now removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}
void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
	/* UD_AV CIRCUMVENTION */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index = qpcb->prim_p_key_idx;
	qp_attr->port_num = qpcb->prim_phys_port;
	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag)
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al)
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit = attr->srq_limit;
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = qpcb->curr_srq_limit;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	int is_user = 0;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		is_user = 1;
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp) && !is_user)
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type  = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
		if (!is_user)
			vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
		if (!is_user)
			vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}

int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	kmem_cache_destroy(qp_cache);
}
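/*
 * Illustrative: the module init path is expected to call
 * ehca_init_qp_cache() once before any QP can be created and
 * ehca_cleanup_qp_cache() on unload, along the lines of
 *
 *	if (ehca_init_qp_cache())
 *		goto cleanup;	// hypothetical error path in module init
 *
 * kmem_cache_zalloc(qp_cache, GFP_KERNEL) in internal_create_qp() then
 * serves all struct ehca_qp allocations from this cache.
 */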