/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  QP functions
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

static struct kmem_cache *qp_cache;

/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)

/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};

/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};

/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;
	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_SQE:
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}
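
/*
 * Illustrative note (not from the original source): a RESET -> RTR
 * request falls through the IB_QPS_RTR case above because ib_fromstate
 * is not IB_QPS_INIT, so the function returns -EINVAL and
 * internal_modify_qp() rejects the request as "<INVALID STATE CHANGE>".
 */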

/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	case IB_QPT_RAW_ETHERTYPE:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}

/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = __pa(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}
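
/*
 * Page-registration convention (a summary of the call sites in
 * internal_create_qp() below, added for clarity): the hypervisor answers
 * every hipz_h_register_rpage_qp() call except the last one with
 * H_PAGE_REGISTERED. For the final page the caller passes expected_hret:
 * H_PAGE_REGISTERED while another queue of the same QP still has to be
 * registered, H_SUCCESS when this call completes the QP's registration.
 */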

static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}
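
/*
 * Example (illustrative, not part of the original comments): for a
 * low-latency QP act_nr_sge encodes the WQE size directly, e.g.
 * act_nr_sge = 2 yields 128 << 2 = 512 bytes. For a regular QP the size
 * is the WQE header plus act_nr_sge scatter/gather entries, i.e.
 * offsetof(struct ehca_wqe, u.nud.sg_list[act_nr_sge]).
 */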

static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);

	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}
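
/*
 * Worked example (illustrative): the rounding loop visits act_nr_sge =
 * 4, 12, 28, 60, 124, 252, so req_nr_sge = 5 is rounded up to 12. With
 * page_size = 2 the whole queue fits in a 128 << 2 = 512 byte "small"
 * page, with page_size = 3 in a 1024 byte page; page_size = 0 falls back
 * to full EHCA_PAGESIZE pages (cf. init_qp_queue() above).
 */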

/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);

	return;
}

static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}

static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;
		qmap->map[i].cqe_req = 0;
	}
}
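
/*
 * Note (summary, not in the original comments): tail = entries - 1 marks
 * the map as empty, so the first posted WQE lands at index 0, and
 * pre-setting reported = 1 keeps poll_cq() from generating bogus flush
 * CQEs for slots that never carried a work request.
 */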

/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0, is_user = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ pd=%p max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	if (pd->uobject && udata) {
		is_user = 1;
		context = pd->uobject->context;
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	idr_preload(GFP_KERNEL);
	write_lock_irqsave(&ehca_qp_idr_lock, flags);

	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
	if (ret >= 0)
		my_qp->token = ret;

	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	idr_preload_end();
	if (ret < 0) {
		if (ret == -ENOSPC) {
			ret = -EINVAL;
			ehca_err(pd->device, "Invalid number of qp");
		} else {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't allocate new idr entry.");
		}
		goto create_qp_exit0;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		if (!is_user) {
			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
				my_qp->ipz_squeue.qe_size;
			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->sq_map.map) {
				ehca_err(pd->device, "Couldn't allocate squeue "
					 "map ret=%i", ret);
				goto create_qp_exit3;
			}
			INIT_LIST_HEAD(&my_qp->sq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->sq_map);
		}
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}
		if (!is_user) {
			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
				my_qp->ipz_rqueue.qe_size;
			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->rq_map.map) {
				ehca_err(pd->device, "Couldn't allocate rqueue "
					 "map ret=%i", ret);
				goto create_qp_exit5;
			}
			INIT_LIST_HEAD(&my_qp->rq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->rq_map);
		}
	} else if (init_attr->srq && !is_user) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				ret = -ENOMEM;
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp) && !is_user)
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp) && !is_user)
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}

struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}
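
/*
 * Illustrative caller's view (hypothetical values, not part of this
 * file): kernel consumers reach this entry point through the core verbs
 * layer, e.g.
 *
 *	struct ib_qp_init_attr init_attr = {
 *		.send_cq = cq, .recv_cq = cq,
 *		.cap = { .max_send_wr = 64, .max_recv_wr = 64,
 *			 .max_send_sge = 4, .max_recv_sge = 4 },
 *		.sq_sig_type = IB_SIGNAL_REQ_WR,
 *		.qp_type = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &init_attr);
 *
 * ib_create_qp() dispatches to ehca_create_qp() for eHCA devices, and
 * init_attr.cap is updated with the actually allocated values on return.
 */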

static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);

struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}

/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = __va((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when poll_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
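
/*
 * Note (summary of the interplay with internal_modify_qp(), not in the
 * original comments): the purge loop above stops at the next free WQE,
 * which internal_modify_qp() marks with optype = wqef = 0xff before
 * calling this function, so only WQEs posted before the SQE transition
 * are purged.
 */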

static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;
	unsigned int tail_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = __va(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;

	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	return 0;
}
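
/*
 * Worked example (illustrative): with entries = 8, tail = 2 and the
 * hardware stopped at wqe_idx = 5, the walk visits indices 3 and 4; each
 * of those with cqe_req set still owes the consumer a real CQE (counted
 * in left_to_poll), while flushing later starts at next_wqe_idx = 5.
 */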

static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				&my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				&my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					flags);
		}
	}

	return 0;
}

/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int is_user = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	if (ibqp->uobject)
		is_user = 1;

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = qp_attr_idx;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable con
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->alt_pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = 0;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}

	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
	    && !is_user) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR && !is_user) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		if (!is_user)
			reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp) && !is_user)
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    to be connected and module option nr_ports=1 to be given by
	 *    user, which is very inconvenient for end user.
	 * 2) Firmware accepts modify_qp() only if respective port has become
	 *    active. Older code had a wait loop of 30sec create_qp()/
	 *    define_aqp1(), which is not appropriate in practice. This
	 *    code now removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}

void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
		  struct ib_qp_attr *qp_attr,
		  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int cnt, ret = 0;
	u64 h_ret;

	if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
		ehca_err(qp->device, "Invalid attribute mask "
			 "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
			 my_qp, qp->qp_num, qp_attr_mask);
		return -EINVAL;
	}

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(qp->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(qp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp->qp_num, h_ret);
		goto query_qp_exit1;
	}

	qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
	qp_attr->qp_state = qp_attr->cur_qp_state;

	if (qp_attr->cur_qp_state == -EINVAL) {
		ret = -EINVAL;
		ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qpcb->qp_state, my_qp, qp->qp_num);
		goto query_qp_exit1;
	}

	if (qp_attr->qp_state == IB_QPS_SQD)
		qp_attr->sq_draining = 1;

	qp_attr->qkey = qpcb->qkey;
	qp_attr->path_mtu = qpcb->path_mtu;
	qp_attr->path_mig_state = qpcb->path_migration_state - 1;
	qp_attr->rq_psn = qpcb->receive_psn;
	qp_attr->sq_psn = qpcb->send_psn;
	qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
	qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr - 1;
	qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr - 1;
	/*
	 * UD_AV CIRCUMVENTION: the create path reserves two extra SGEs
	 * per UD WQE to carry the address vector, so subtract them here
	 * and report only the SGEs usable by the consumer.
	 */
	if (my_qp->qp_type == IB_QPT_UD) {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe - 2;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe - 2;
	} else {
		qp_attr->cap.max_send_sge =
			qpcb->actual_nr_sges_in_sq_wqe;
		qp_attr->cap.max_recv_sge =
			qpcb->actual_nr_sges_in_rq_wqe;
	}

	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
	qp_attr->dest_qp_num = qpcb->dest_qp_nr;

	qp_attr->pkey_index = qpcb->prim_p_key_idx;
	qp_attr->port_num = qpcb->prim_phys_port;
	qp_attr->timeout = qpcb->timeout;
	qp_attr->retry_cnt = qpcb->retry_count;
	qp_attr->rnr_retry = qpcb->rnr_retry_count;

	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
	qp_attr->alt_port_num = qpcb->alt_phys_port;
	qp_attr->alt_timeout = qpcb->timeout_al;

	qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
	qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

	/* primary av */
	qp_attr->ah_attr.sl = qpcb->service_level;

	if (qpcb->send_grh_flag) {
		qp_attr->ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
	qp_attr->ah_attr.dlid = qpcb->dlid;
	qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
	qp_attr->ah_attr.port_num = qp_attr->port_num;

	/* primary GRH */
	qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
	qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
	qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
	qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid.byte[cnt];

	/* alternate AV */
	qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
	if (qpcb->send_grh_flag_al) {
		qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
	}

	qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
	qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
	qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

	/* alternate GRH */
	qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
	qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
	qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
	qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

	for (cnt = 0; cnt < 16; cnt++)
		qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
			qpcb->dest_gid_al.byte[cnt];

	/* return init attributes given in ehca_create_qp */
	if (qp_init_attr)
		*qp_init_attr = my_qp->init_attr;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}

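/*
 * Modify an SRQ. Only IB_SRQ_LIMIT is handled: the new limit is written
 * to the firmware control block and, as the mask name suggests,
 * QPX_AAELOG_RESET_SRQ_LIMIT re-arms the SRQ limit event; any other
 * attr_mask bit is rejected with -EINVAL. A caller would typically get
 * here via the verbs layer, e.g. (illustrative values only):
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *	int err = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 */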
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct ehca_qp *my_qp =
		container_of(ibsrq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca =
		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 update_mask;
	u64 h_ret;
	int ret = 0;

	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	update_mask = 0;
	if (attr_mask & IB_SRQ_LIMIT) {
		attr_mask &= ~IB_SRQ_LIMIT;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
		mqpcb->curr_srq_limit = attr->srq_limit;
		mqpcb->qp_aff_asyn_ev_log_reg =
			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
	}

	/* by now, all bits in attr_mask should have been cleared */
	if (attr_mask) {
		ehca_err(ibsrq->device, "invalid attribute mask bits set "
			 "attr_mask=%x", attr_mask);
		ret = -EINVAL;
		goto modify_srq_exit0;
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
				 NULL, update_mask, mqpcb,
				 my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x",
			 h_ret, my_qp, my_qp->real_qp_num);
	}

modify_srq_exit0:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

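/*
 * Query SRQ attributes from firmware. Note that max_sge is reported as
 * a fixed 3, and max_wr as one less than the firmware's outstanding
 * recv WR count, mirroring ehca_query_qp() above.
 */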
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
					      ib_device);
	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
	struct hcp_modify_qp_control_block *qpcb;
	int ret = 0;
	u64 h_ret;

	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!qpcb) {
		ehca_err(srq->device, "Out of memory for qpcb "
			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
				NULL, qpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(srq->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, h_ret);
		goto query_srq_exit1;
	}

	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
	srq_attr->max_sge = 3;
	srq_attr->srq_limit = qpcb->curr_srq_limit;

	if (ehca_debug_level >= 2)
		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
	ehca_free_fw_ctrlblock(qpcb);

	return ret;
}

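/*
 * Common teardown for QPs and SRQs (both backed by struct ehca_qp):
 * refuse if user space still has the queues mapped, detach from the
 * send CQ, drop the idr token, unlink from the CQs' error lists, wait
 * for pending events to drain, then destroy the firmware QP and free
 * the queue memory.
 */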
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject)
{
	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
					     ib_pd);
	struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
	u32 qp_num = my_qp->real_qp_num;
	int ret;
	u64 h_ret;
	u8 port_num;
	int is_user = 0;
	enum ib_qp_type qp_type;
	unsigned long flags;

	if (uobject) {
		is_user = 1;
		if (my_qp->mm_count_galpa ||
		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
			ehca_err(dev, "Resources still referenced in "
				 "user space qp_num=%x", qp_num);
			return -EINVAL;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
		if (ret) {
			ehca_err(dev, "Couldn't unassign qp from "
				 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
				 qp_num, my_qp->send_cq->cq_number);
			return ret;
		}
	}

	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

	/*
	 * SRQs will never get into an error list and do not have a recv_cq,
	 * so we need to skip them here.
	 */
	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

	if (HAS_SQ(my_qp) && !is_user)
		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

	/* now wait until all pending events have completed */
	wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
	if (h_ret != H_SUCCESS) {
		ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
		return ehca2ib_return_code(h_ret);
	}

	port_num = my_qp->init_attr.port_num;
	qp_type = my_qp->init_attr.qp_type;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		kfree(my_qp->mod_qp_parm);
		my_qp->mod_qp_parm = NULL;
		shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	/* no support for IB_QPT_SMI yet */
	if (qp_type == IB_QPT_GSI) {
		struct ib_event event;
		ehca_info(dev, "device %s: port %x is inactive.",
			  shca->ib_device.name, port_num);
		event.device = &shca->ib_device;
		event.event = IB_EVENT_PORT_ERR;
		event.element.port_num = port_num;
		shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
		ib_dispatch_event(&event);
	}

	if (HAS_RQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
		if (!is_user)
			vfree(my_qp->rq_map.map);
	}
	if (HAS_SQ(my_qp)) {
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
		if (!is_user)
			vfree(my_qp->sq_map.map);
	}
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return 0;
}

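/*
 * Both verbs destroy entry points funnel into internal_destroy_qp(),
 * since QPs and SRQs share struct ehca_qp; only the embedded ib_qp /
 * ib_srq and the uobject differ.
 */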
int ehca_destroy_qp(struct ib_qp *qp)
{
	return internal_destroy_qp(qp->device,
				   container_of(qp, struct ehca_qp, ib_qp),
				   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
	return internal_destroy_qp(srq->device,
				   container_of(srq, struct ehca_qp, ib_srq),
				   srq->uobject);
}

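/*
 * Slab cache for struct ehca_qp, shared by QPs and SRQs; presumably
 * created and destroyed from the driver's module init/exit path in
 * ehca_main.c.
 */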
int ehca_init_qp_cache(void)
{
	qp_cache = kmem_cache_create("ehca_cache_qp",
				     sizeof(struct ehca_qp), 0,
				     SLAB_HWCACHE_ALIGN,
				     NULL);
	if (!qp_cache)
		return -ENOMEM;
	return 0;
}

void ehca_cleanup_qp_cache(void)
{
	if (qp_cache)
		kmem_cache_destroy(qp_cache);
}