/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  QP functions
 *
 *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
 *           Stefan Roscher <stefan.roscher@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *           Heiko J Schick <schickhj@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials
 * provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/slab.h>

#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"
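
/* slab cache for struct ehca_qp objects (see internal_create_qp() below) */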
static struct kmem_cache *qp_cache;

/*
 * attributes not supported by query qp
 */
#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
				     IB_QP_EN_SQD_ASYNC_NOTIFY)

/*
 * ehca (internal) qp state values
 */
enum ehca_qp_state {
	EHCA_QPS_RESET = 1,
	EHCA_QPS_INIT = 2,
	EHCA_QPS_RTR = 3,
	EHCA_QPS_RTS = 5,
	EHCA_QPS_SQD = 6,
	EHCA_QPS_SQE = 8,
	EHCA_QPS_ERR = 128
};

/*
 * qp state transitions as defined by IB Arch Rel 1.1 page 431
 */
enum ib_qp_statetrans {
	IB_QPST_ANY2RESET,
	IB_QPST_ANY2ERR,
	IB_QPST_RESET2INIT,
	IB_QPST_INIT2RTR,
	IB_QPST_INIT2INIT,
	IB_QPST_RTR2RTS,
	IB_QPST_RTS2SQD,
	IB_QPST_RTS2RTS,
	IB_QPST_SQD2RTS,
	IB_QPST_SQE2RTS,
	IB_QPST_SQD2SQD,
	IB_QPST_MAX	/* nr of transitions, this must be last!!! */
};

/*
 * ib2ehca_qp_state maps IB to ehca qp_state
 * returns ehca qp state corresponding to given ib qp state
 */
static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
{
	switch (ib_qp_state) {
	case IB_QPS_RESET:
		return EHCA_QPS_RESET;
	case IB_QPS_INIT:
		return EHCA_QPS_INIT;
	case IB_QPS_RTR:
		return EHCA_QPS_RTR;
	case IB_QPS_RTS:
		return EHCA_QPS_RTS;
	case IB_QPS_SQD:
		return EHCA_QPS_SQD;
	case IB_QPS_SQE:
		return EHCA_QPS_SQE;
	case IB_QPS_ERR:
		return EHCA_QPS_ERR;
	default:
		ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca2ib_qp_state maps ehca to IB qp_state
 * returns ib qp state corresponding to given ehca qp state
 */
static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
						ehca_qp_state)
{
	switch (ehca_qp_state) {
	case EHCA_QPS_RESET:
		return IB_QPS_RESET;
	case EHCA_QPS_INIT:
		return IB_QPS_INIT;
	case EHCA_QPS_RTR:
		return IB_QPS_RTR;
	case EHCA_QPS_RTS:
		return IB_QPS_RTS;
	case EHCA_QPS_SQD:
		return IB_QPS_SQD;
	case EHCA_QPS_SQE:
		return IB_QPS_SQE;
	case EHCA_QPS_ERR:
		return IB_QPS_ERR;
	default:
		ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
		return -EINVAL;
	}
}

/*
 * ehca_qp_type used as index for req_attr and opt_attr of
 * struct ehca_modqp_statetrans
 */
enum ehca_qp_type {
	QPT_RC = 0,
	QPT_UC = 1,
	QPT_UD = 2,
	QPT_SQP = 3,
	QPT_MAX
};

/*
 * ib2ehcaqptype maps IB to ehca qp_type
 * returns ehca qp type corresponding to ib qp type
 */
static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return QPT_SQP;
	case IB_QPT_RC:
		return QPT_RC;
	case IB_QPT_UC:
		return QPT_UC;
	case IB_QPT_UD:
		return QPT_UD;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
							 int ib_tostate)
{
	int index = -EINVAL;
	switch (ib_tostate) {
	case IB_QPS_RESET:
		index = IB_QPST_ANY2RESET;
		break;
	case IB_QPS_INIT:
		switch (ib_fromstate) {
		case IB_QPS_RESET:
			index = IB_QPST_RESET2INIT;
			break;
		case IB_QPS_INIT:
			index = IB_QPST_INIT2INIT;
			break;
		}
		break;
	case IB_QPS_RTR:
		if (ib_fromstate == IB_QPS_INIT)
			index = IB_QPST_INIT2RTR;
		break;
	case IB_QPS_RTS:
		switch (ib_fromstate) {
		case IB_QPS_RTR:
			index = IB_QPST_RTR2RTS;
			break;
		case IB_QPS_RTS:
			index = IB_QPST_RTS2RTS;
			break;
		case IB_QPS_SQD:
			index = IB_QPST_SQD2RTS;
			break;
		case IB_QPS_SQE:
			index = IB_QPST_SQE2RTS;
			break;
		}
		break;
	case IB_QPS_SQD:
		if (ib_fromstate == IB_QPS_RTS)
			index = IB_QPST_RTS2SQD;
		break;
	case IB_QPS_SQE:
		break;
	case IB_QPS_ERR:
		index = IB_QPST_ANY2ERR;
		break;
	default:
		break;
	}
	return index;
}

/*
 * ibqptype2servicetype returns hcp service type corresponding to given
 * ib qp type used by create_qp()
 */
static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
{
	switch (ibqptype) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		return ST_UD;
	case IB_QPT_RC:
		return ST_RC;
	case IB_QPT_UC:
		return ST_UC;
	case IB_QPT_UD:
		return ST_UD;
	case IB_QPT_RAW_IPV6:
		return -EINVAL;
	case IB_QPT_RAW_ETHERTYPE:
		return -EINVAL;
	default:
		ehca_gen_err("Invalid ibqptype=%x", ibqptype);
		return -EINVAL;
	}
}

/*
 * init userspace queue info from ipz_queue data
 */
static inline void queue2resp(struct ipzu_queue_resp *resp,
			      struct ipz_queue *queue)
{
	resp->qe_size = queue->qe_size;
	resp->act_nr_of_sg = queue->act_nr_of_sg;
	resp->queue_length = queue->queue_length;
	resp->pagesize = queue->pagesize;
	resp->toggle_state = queue->toggle_state;
	resp->offset = queue->offset;
}

/*
 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
 */
static inline int init_qp_queue(struct ehca_shca *shca,
				struct ehca_pd *pd,
				struct ehca_qp *my_qp,
				struct ipz_queue *queue,
				int q_type,
				u64 expected_hret,
				struct ehca_alloc_queue_parms *parms,
				int wqe_size)
{
	int ret, cnt, ipz_rc, nr_q_pages;
	void *vpage;
	u64 rpage, h_ret;
	struct ib_device *ib_dev = &shca->ib_device;
	struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;

	if (!parms->queue_size)
		return 0;

	if (parms->is_small) {
		nr_q_pages = 1;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					128 << parms->page_size,
					wqe_size, parms->act_nr_sges, 1);
	} else {
		nr_q_pages = parms->queue_size;
		ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
					EHCA_PAGESIZE, wqe_size,
					parms->act_nr_sges, 0);
	}

	if (!ipz_rc) {
		ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
			 ipz_rc);
		return -EBUSY;
	}

	/* register queue pages */
	for (cnt = 0; cnt < nr_q_pages; cnt++) {
		vpage = ipz_qpageit_get_inc(queue);
		if (!vpage) {
			ehca_err(ib_dev, "ipz_qpageit_get_inc() "
				 "failed p_vpage= %p", vpage);
			ret = -EINVAL;
			goto init_qp_queue1;
		}
		rpage = virt_to_abs(vpage);

		h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
						 my_qp->ipz_qp_handle,
						 NULL, 0, q_type,
						 rpage, parms->is_small ? 0 : 1,
						 my_qp->galpas.kernel);
		if (cnt == (nr_q_pages - 1)) {	/* last page! */
			if (h_ret != expected_hret) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
			vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
			if (vpage) {
				ehca_err(ib_dev, "ipz_qpageit_get_inc() "
					 "should not succeed vpage=%p", vpage);
				ret = -EINVAL;
				goto init_qp_queue1;
			}
		} else {
			if (h_ret != H_PAGE_REGISTERED) {
				ehca_err(ib_dev, "hipz_qp_register_rpage() "
					 "h_ret=%lli", h_ret);
				ret = ehca2ib_return_code(h_ret);
				goto init_qp_queue1;
			}
		}
	}

	ipz_qeit_reset(queue);

	return 0;

init_qp_queue1:
	ipz_queue_dtor(pd, queue);
	return ret;
}

static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return 128 << act_nr_sge;
	else
		return offsetof(struct ehca_wqe,
				u.nud.sg_list[act_nr_sge]);
}

static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
				       int req_nr_sge, int is_llqp)
{
	u32 wqe_size, q_size;
	int act_nr_sge = req_nr_sge;

	if (!is_llqp)
		/* round up #SGEs so WQE size is a power of 2 */
		for (act_nr_sge = 4; act_nr_sge <= 252;
		     act_nr_sge = 4 + 2 * act_nr_sge)
			if (act_nr_sge >= req_nr_sge)
				break;
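	/*
	 * The recurrence above visits 4, 12, 28, 60, 124, 252; each step
	 * doubles the non-LLQP WQE size returned by ehca_calc_wqe_size(),
	 * provided the WQE header is the size of four SGEs.
	 */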

	wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
	q_size = wqe_size * (queue->max_wr + 1);
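
	/*
	 * page_size is an encoded value here: init_qp_queue() places small
	 * queues in pages of 128 << page_size bytes, so 2 selects 512-byte
	 * and 3 selects 1024-byte pages, while 0 means a regular full page.
	 */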
	if (q_size <= 512)
		queue->page_size = 2;
	else if (q_size <= 1024)
		queue->page_size = 3;
	else
		queue->page_size = 0;

	queue->is_small = (queue->page_size != 0);
}

/* needs to be called with cq->spinlock held */
void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
{
	struct list_head *list, *node;

	/* TODO: support low latency QPs */
	if (qp->ext_type == EQPT_LLQP)
		return;

	if (on_sq) {
		list = &qp->send_cq->sqp_err_list;
		node = &qp->sq_err_node;
	} else {
		list = &qp->recv_cq->rqp_err_list;
		node = &qp->rq_err_node;
	}

	if (list_empty(node))
		list_add_tail(node, list);

	return;
}

static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&cq->spinlock, flags);

	if (!list_empty(node))
		list_del_init(node);

	spin_unlock_irqrestore(&cq->spinlock, flags);
}
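
/*
 * reset_queue_map() rewinds the map to "empty": every entry is marked as
 * already reported and not requesting a CQE, so the flush-CQE machinery
 * will not fabricate completions for WQEs that were never posted.
 */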
static void reset_queue_map(struct ehca_queue_map *qmap)
{
	int i;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;
		qmap->map[i].cqe_req = 0;
	}
}

/*
 * Create an ib_qp struct that is either a QP or an SRQ, depending on
 * the value of the is_srq parameter. If init_attr and srq_init_attr share
 * fields, the field out of init_attr is used.
 */
static struct ehca_qp *internal_create_qp(
	struct ib_pd *pd,
	struct ib_qp_init_attr *init_attr,
	struct ib_srq_init_attr *srq_init_attr,
	struct ib_udata *udata, int is_srq)
{
	struct ehca_qp *my_qp, *my_srq = NULL;
	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct ib_ucontext *context = NULL;
	u64 h_ret;
	int is_llqp = 0, has_srq = 0, is_user = 0;
	int qp_type, max_send_sge, max_recv_sge, ret;

	/* h_call's out parameters */
	struct ehca_alloc_qp_parms parms;
	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
	unsigned long flags;

	if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
		ehca_err(pd->device, "Unable to create QP, max number of %i "
			 "QPs reached.", shca->max_num_qps);
		ehca_err(pd->device, "To increase the maximum number of QPs "
			 "use the number_of_qps module parameter.\n");
		return ERR_PTR(-ENOSPC);
	}

	if (init_attr->create_flags) {
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	memset(&parms, 0, sizeof(parms));
	qp_type = init_attr->qp_type;

	if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
	    init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
		ehca_err(pd->device, "init_attr->sq_sig_type=%x not allowed",
			 init_attr->sq_sig_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* save LLQP info */
	if (qp_type & 0x80) {
		is_llqp = 1;
		parms.ext_type = EQPT_LLQP;
		parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
	}
	qp_type &= 0x1F;
	init_attr->qp_type &= 0x1F;

	/* handle SRQ base QPs */
	if (init_attr->srq) {
		my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);

		if (qp_type == IB_QPT_UC) {
			ehca_err(pd->device, "UC with SRQ not supported");
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}

		has_srq = 1;
		parms.ext_type = EQPT_SRQBASE;
		parms.srq_qpn = my_srq->real_qp_num;
	}

	if (is_llqp && has_srq) {
		ehca_err(pd->device, "LLQPs can't have an SRQ");
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	/* handle SRQs */
	if (is_srq) {
		parms.ext_type = EQPT_SRQ;
		parms.srq_limit = srq_init_attr->attr.srq_limit;
		if (init_attr->cap.max_recv_sge > 3) {
			ehca_err(pd->device, "no more than three SGEs "
				 "supported for SRQ pd=%p max_sge=%x",
				 pd, init_attr->cap.max_recv_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	/* check QP type */
	if (qp_type != IB_QPT_UD &&
	    qp_type != IB_QPT_UC &&
	    qp_type != IB_QPT_RC &&
	    qp_type != IB_QPT_SMI &&
	    qp_type != IB_QPT_GSI) {
		ehca_err(pd->device, "wrong QP Type=%x", qp_type);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-EINVAL);
	}

	if (is_llqp) {
		switch (qp_type) {
		case IB_QPT_RC:
			if ((init_attr->cap.max_send_wr > 255) ||
			    (init_attr->cap.max_recv_wr > 255)) {
				ehca_err(pd->device,
					 "Invalid Number of max_sq_wr=%x "
					 "or max_rq_wr=%x for RC LLQP",
					 init_attr->cap.max_send_wr,
					 init_attr->cap.max_recv_wr);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		case IB_QPT_UD:
			if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
				ehca_err(pd->device, "UD LLQP not supported "
					 "by this adapter");
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-ENOSYS);
			}
			if (!(init_attr->cap.max_send_sge <= 5
			      && init_attr->cap.max_send_sge >= 1
			      && init_attr->cap.max_recv_sge <= 5
			      && init_attr->cap.max_recv_sge >= 1)) {
				ehca_err(pd->device,
					 "Invalid Number of max_send_sge=%x "
					 "or max_recv_sge=%x for UD LLQP",
					 init_attr->cap.max_send_sge,
					 init_attr->cap.max_recv_sge);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			} else if (init_attr->cap.max_send_wr > 255) {
				ehca_err(pd->device,
					 "Invalid Number of "
					 "max_send_wr=%x for UD QP_TYPE=%x",
					 init_attr->cap.max_send_wr, qp_type);
				atomic_dec(&shca->num_qps);
				return ERR_PTR(-EINVAL);
			}
			break;
		default:
			ehca_err(pd->device, "unsupported LL QP Type=%x",
				 qp_type);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	} else {
		int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
			       || qp_type == IB_QPT_GSI) ? 250 : 252;

		if (init_attr->cap.max_send_sge > max_sge
		    || init_attr->cap.max_recv_sge > max_sge) {
			ehca_err(pd->device, "Invalid number of SGEs requested "
				 "send_sge=%x recv_sge=%x max_sge=%x",
				 init_attr->cap.max_send_sge,
				 init_attr->cap.max_recv_sge, max_sge);
			atomic_dec(&shca->num_qps);
			return ERR_PTR(-EINVAL);
		}
	}

	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
	if (!my_qp) {
		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
		atomic_dec(&shca->num_qps);
		return ERR_PTR(-ENOMEM);
	}

	if (pd->uobject && udata) {
		is_user = 1;
		context = pd->uobject->context;
	}

	atomic_set(&my_qp->nr_events, 0);
	init_waitqueue_head(&my_qp->wait_completion);
	spin_lock_init(&my_qp->spinlock_s);
	spin_lock_init(&my_qp->spinlock_r);
	my_qp->qp_type = qp_type;
	my_qp->ext_type = parms.ext_type;
	my_qp->state = IB_QPS_RESET;

	if (init_attr->recv_cq)
		my_qp->recv_cq =
			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
	if (init_attr->send_cq)
		my_qp->send_cq =
			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);

	do {
		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
			ret = -ENOMEM;
			ehca_err(pd->device, "Can't reserve idr resources.");
			goto create_qp_exit0;
		}

		write_lock_irqsave(&ehca_qp_idr_lock, flags);
		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token);
		write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
	} while (ret == -EAGAIN);

	if (ret) {
		ret = -ENOMEM;
		ehca_err(pd->device, "Can't allocate new idr entry.");
		goto create_qp_exit0;
	}

	if (my_qp->token > 0x1FFFFFF) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid number of qp");
		goto create_qp_exit1;
	}

	if (has_srq)
		parms.srq_token = my_qp->token;

	parms.servicetype = ibqptype2servicetype(qp_type);
	if (parms.servicetype < 0) {
		ret = -EINVAL;
		ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
		goto create_qp_exit1;
	}

	/* Always signal by WQE so we can hide circ. WQEs */
	parms.sigtype = HCALL_SIGT_BY_WQE;

	/* UD_AV CIRCUMVENTION */
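	/*
	 * As the name suggests, non-LL UD WQEs carry two extra SGEs that the
	 * driver reserves for its address-vector handling; they are requested
	 * here and hidden from the consumer again after the allocation
	 * (act_nr_sges -= 2 in the switch below).
	 */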
	max_send_sge = init_attr->cap.max_send_sge;
	max_recv_sge = init_attr->cap.max_recv_sge;
	if (parms.servicetype == ST_UD && !is_llqp) {
		max_send_sge += 2;
		max_recv_sge += 2;
	}

	parms.token = my_qp->token;
	parms.eq_handle = shca->eq.ipz_eq_handle;
	parms.pd = my_pd->fw_pd;
	if (my_qp->send_cq)
		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
	if (my_qp->recv_cq)
		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;

	parms.squeue.max_wr = init_attr->cap.max_send_wr;
	parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
	parms.squeue.max_sge = max_send_sge;
	parms.rqueue.max_sge = max_recv_sge;

	/* RC QPs need one more SWQE for unsolicited ack circumvention */
	if (qp_type == IB_QPT_RC)
		parms.squeue.max_wr++;

	if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
		if (HAS_SQ(my_qp))
			ehca_determine_small_queue(
				&parms.squeue, max_send_sge, is_llqp);
		if (HAS_RQ(my_qp))
			ehca_determine_small_queue(
				&parms.rqueue, max_recv_sge, is_llqp);
		parms.qp_storage =
			(parms.squeue.is_small || parms.rqueue.is_small);
	}

	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
	if (h_ret != H_SUCCESS) {
		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
			 h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto create_qp_exit1;
	}

	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
	my_qp->ipz_qp_handle = parms.qp_handle;
	my_qp->galpas = parms.galpas;

	swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
	rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);

	switch (qp_type) {
	case IB_QPT_RC:
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		}
		/* hide the extra WQE */
		parms.squeue.act_nr_wqes--;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
	case IB_QPT_SMI:
		/* UD circumvention */
		if (is_llqp) {
			parms.squeue.act_nr_sges = 1;
			parms.rqueue.act_nr_sges = 1;
		} else {
			parms.squeue.act_nr_sges -= 2;
			parms.rqueue.act_nr_sges -= 2;
		}

		if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
			parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
			parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
			parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
			parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
		}

		break;

	default:
		break;
	}

	/* initialize r/squeue and register queue pages */
	if (HAS_SQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
			&parms.squeue, swqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize squeue "
				 "and pages ret=%i", ret);
			goto create_qp_exit2;
		}

		if (!is_user) {
			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
				my_qp->ipz_squeue.qe_size;
			my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->sq_map.map) {
				ret = -ENOMEM;
				ehca_err(pd->device, "Couldn't allocate squeue "
					 "map ret=%i", ret);
				goto create_qp_exit3;
			}
			INIT_LIST_HEAD(&my_qp->sq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->sq_map);
		}
	}

	if (HAS_RQ(my_qp)) {
		ret = init_qp_queue(
			shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
			H_SUCCESS, &parms.rqueue, rwqe_size);
		if (ret) {
			ehca_err(pd->device, "Couldn't initialize rqueue "
				 "and pages ret=%i", ret);
			goto create_qp_exit4;
		}
		if (!is_user) {
			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
				my_qp->ipz_rqueue.qe_size;
			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
						    sizeof(struct ehca_qmap_entry));
			if (!my_qp->rq_map.map) {
				ret = -ENOMEM;
				ehca_err(pd->device, "Couldn't allocate rqueue "
					 "map ret=%i", ret);
				goto create_qp_exit5;
			}
			INIT_LIST_HEAD(&my_qp->rq_err_node);
			/* to avoid the generation of bogus flush CQEs */
			reset_queue_map(&my_qp->rq_map);
		}
	} else if (init_attr->srq && !is_user) {
		/* this is a base QP, use the queue map of the SRQ */
		my_qp->rq_map = my_srq->rq_map;
		INIT_LIST_HEAD(&my_qp->rq_err_node);

		my_qp->ipz_rqueue = my_srq->ipz_rqueue;
	}

	if (is_srq) {
		my_qp->ib_srq.pd = &my_pd->ib_pd;
		my_qp->ib_srq.device = my_pd->ib_pd.device;

		my_qp->ib_srq.srq_context = init_attr->qp_context;
		my_qp->ib_srq.event_handler = init_attr->event_handler;
	} else {
		my_qp->ib_qp.qp_num = ib_qp_num;
		my_qp->ib_qp.pd = &my_pd->ib_pd;
		my_qp->ib_qp.device = my_pd->ib_pd.device;

		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
		my_qp->ib_qp.send_cq = init_attr->send_cq;

		my_qp->ib_qp.qp_type = qp_type;
		my_qp->ib_qp.srq = init_attr->srq;

		my_qp->ib_qp.qp_context = init_attr->qp_context;
		my_qp->ib_qp.event_handler = init_attr->event_handler;
	}

	init_attr->cap.max_inline_data = 0; /* not supported yet */
	init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
	init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
	init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
	init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
	my_qp->init_attr = *init_attr;

	if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
		shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
			&my_qp->ib_qp;
		if (ehca_nr_ports < 0) {
			/* alloc array to cache subsequent modify qp parms
			 * for autodetect mode
			 */
			my_qp->mod_qp_parm =
				kzalloc(EHCA_MOD_QP_PARM_MAX *
					sizeof(*my_qp->mod_qp_parm),
					GFP_KERNEL);
			if (!my_qp->mod_qp_parm) {
				ret = -ENOMEM;
				ehca_err(pd->device,
					 "Could not alloc mod_qp_parm");
				goto create_qp_exit5;
			}
		}
	}

	/* NOTE: define_apq0() not supported yet */
	if (qp_type == IB_QPT_GSI) {
		h_ret = ehca_define_sqp(shca, my_qp, init_attr);
		if (h_ret != H_SUCCESS) {
			kfree(my_qp->mod_qp_parm);
			my_qp->mod_qp_parm = NULL;
			/* the QP pointer is no longer valid */
			shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
				NULL;
			ret = ehca2ib_return_code(h_ret);
			goto create_qp_exit6;
		}
	}

	if (my_qp->send_cq) {
		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
		if (ret) {
			ehca_err(pd->device,
				 "Couldn't assign qp to send_cq ret=%i", ret);
			goto create_qp_exit7;
		}
	}

	/* copy queues, galpa data to user space */
	if (context && udata) {
		struct ehca_create_qp_resp resp;
		memset(&resp, 0, sizeof(resp));

		resp.qp_num = my_qp->real_qp_num;
		resp.token = my_qp->token;
		resp.qp_type = my_qp->qp_type;
		resp.ext_type = my_qp->ext_type;
		resp.qkey = my_qp->qkey;
		resp.real_qp_num = my_qp->real_qp_num;

		if (HAS_SQ(my_qp))
			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
		if (HAS_RQ(my_qp))
			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
		resp.fw_handle_ofs = (u32)
			(my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));

		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
			ehca_err(pd->device, "Copy to udata failed");
			ret = -EINVAL;
			goto create_qp_exit8;
		}
	}

	return my_qp;

create_qp_exit8:
	ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

create_qp_exit7:
	kfree(my_qp->mod_qp_parm);

create_qp_exit6:
	if (HAS_RQ(my_qp) && !is_user)
		vfree(my_qp->rq_map.map);

create_qp_exit5:
	if (HAS_RQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);

create_qp_exit4:
	if (HAS_SQ(my_qp) && !is_user)
		vfree(my_qp->sq_map.map);

create_qp_exit3:
	if (HAS_SQ(my_qp))
		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);

create_qp_exit2:
	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);

create_qp_exit1:
	write_lock_irqsave(&ehca_qp_idr_lock, flags);
	idr_remove(&ehca_qp_idr, my_qp->token);
	write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

create_qp_exit0:
	kmem_cache_free(qp_cache, my_qp);
	atomic_dec(&shca->num_qps);
	return ERR_PTR(ret);
}

struct ib_qp *ehca_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *qp_init_attr,
			     struct ib_udata *udata)
{
	struct ehca_qp *ret;

	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
	return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
}

static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
			       struct ib_uobject *uobject);

struct ib_srq *ehca_create_srq(struct ib_pd *pd,
			       struct ib_srq_init_attr *srq_init_attr,
			       struct ib_udata *udata)
{
	struct ib_qp_init_attr qp_init_attr;
	struct ehca_qp *my_qp;
	struct ib_srq *ret;
	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
					      ib_device);
	struct hcp_modify_qp_control_block *mqpcb;
	u64 hret, update_mask;

	if (srq_init_attr->srq_type != IB_SRQT_BASIC)
		return ERR_PTR(-ENOSYS);

	/* For common attributes, internal_create_qp() takes its info
	 * out of qp_init_attr, so copy all common attrs there.
	 */
	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	qp_init_attr.event_handler = srq_init_attr->event_handler;
	qp_init_attr.qp_context = srq_init_attr->srq_context;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
	qp_init_attr.qp_type = IB_QPT_RC;
	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;

	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
	if (IS_ERR(my_qp))
		return (struct ib_srq *)my_qp;

	/* copy back return values */
	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
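	/* the adapter supports no more than three SGEs per SRQ WQE
	 * (see the corresponding check in internal_create_qp()) */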
	srq_init_attr->attr.max_sge = 3;

	/* drive SRQ into RTR state */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!mqpcb) {
		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
		ret = ERR_PTR(-ENOMEM);
		goto create_srq1;
	}

	mqpcb->qp_state = EHCA_QPS_INIT;
	mqpcb->prim_phys_port = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to INIT "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_enable = 1;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not enable SRQ "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	mqpcb->qp_state = EHCA_QPS_RTR;
	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				update_mask,
				mqpcb, my_qp->galpas.kernel);
	if (hret != H_SUCCESS) {
		ehca_err(pd->device, "Could not modify SRQ to RTR "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, my_qp->real_qp_num, hret);
		goto create_srq2;
	}

	ehca_free_fw_ctrlblock(mqpcb);

	return &my_qp->ib_srq;

create_srq2:
	ret = ERR_PTR(ehca2ib_return_code(hret));
	ehca_free_fw_ctrlblock(mqpcb);

create_srq1:
	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);

	return ret;
}

/*
 * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
 * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
 * returns total number of bad wqes in bad_wqe_cnt
 */
static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
			   int *bad_wqe_cnt)
{
	u64 h_ret;
	struct ipz_queue *squeue;
	void *bad_send_wqe_p, *bad_send_wqe_v;
	u64 q_ofs;
	struct ehca_wqe *wqe;
	int qp_num = my_qp->ib_qp.qp_num;

	/* get send wqe pointer */
	h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
					   my_qp->ipz_qp_handle, &my_qp->pf,
					   &bad_send_wqe_p, NULL, 2);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
			 " ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, qp_num, h_ret);
		return ehca2ib_return_code(h_ret);
	}
	bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
	ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
		 qp_num, bad_send_wqe_p);
	/* convert wqe pointer to vadr */
	bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
	if (ehca_debug_level >= 2)
		ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
	squeue = &my_qp->ipz_squeue;
	if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
		ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
			 " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
		return -EFAULT;
	}

	/* loop sets wqe's purge bit */
	wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
	*bad_wqe_cnt = 0;
	while (wqe->optype != 0xff && wqe->wqef != 0xff) {
		if (ehca_debug_level >= 2)
			ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
		wqe->nr_of_data_seg = 0; /* suppress data access */
		wqe->wqef = WQEF_PURGE; /* WQE to be purged */
		q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
		wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
		*bad_wqe_cnt = (*bad_wqe_cnt)+1;
	}
	/*
	 * bad wqe will be reprocessed and ignored when pol_cq() is called,
	 * i.e. nr of wqes with flush error status is one less
	 */
	ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
		 qp_num, (*bad_wqe_cnt)-1);
	wqe->wqef = 0;

	return 0;
}
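
/*
 * calc_left_cqes() counts the already-processed WQEs between the current
 * queue-map tail and the WQE the adapter reported back that still expect a
 * completion, and records the index at which the flush code has to start
 * generating flush CQEs.
 */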
static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
			  struct ehca_queue_map *qmap)
{
	void *wqe_v;
	u64 q_ofs;
	u32 wqe_idx;
	unsigned int tail_idx;

	/* convert real to abs address */
	wqe_p = wqe_p & (~(1UL << 63));

	wqe_v = abs_to_virt(wqe_p);

	if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
		ehca_gen_err("Invalid offset for calculating left cqes "
			     "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
		return -EFAULT;
	}

	tail_idx = next_index(qmap->tail, qmap->entries);
	wqe_idx = q_ofs / ipz_queue->qe_size;

	/* check all processed wqes, whether a cqe is requested or not */
	while (tail_idx != wqe_idx) {
		if (qmap->map[tail_idx].cqe_req)
			qmap->left_to_poll++;
		tail_idx = next_index(tail_idx, qmap->entries);
	}
	/* save index in queue, where we have to start flushing */
	qmap->next_wqe_idx = wqe_idx;
	return 0;
}

static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
{
	u64 h_ret;
	void *send_wqe_p, *recv_wqe_p;
	int ret;
	unsigned long flags;
	int qp_num = my_qp->ib_qp.qp_num;

	/* this hcall is not supported on base QPs */
	if (my_qp->ext_type != EQPT_SRQBASE) {
		/* get send and receive wqe pointer */
		h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle, &my_qp->pf,
				&send_wqe_p, &recv_wqe_p, 4);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "disable_and_get_wqe() "
				 "failed ehca_qp=%p qp_num=%x h_ret=%lli",
				 my_qp, qp_num, h_ret);
			return ehca2ib_return_code(h_ret);
		}

		/*
		 * acquire lock to ensure that nobody is polling the cq which
		 * could mean that the qmap->tail pointer is in an
		 * inconsistent state.
		 */
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
				&my_qp->sq_map);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
		if (ret)
			return ret;

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
				&my_qp->rq_map);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
		if (ret)
			return ret;
	} else {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		my_qp->sq_map.left_to_poll = 0;
		my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
							my_qp->sq_map.entries);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
		my_qp->rq_map.left_to_poll = 0;
		my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
							my_qp->rq_map.entries);
		spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
	}

	/* this assures flush cqes being generated only for pending wqes */
	if ((my_qp->sq_map.left_to_poll == 0) &&
	    (my_qp->rq_map.left_to_poll == 0)) {
		spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
		ehca_add_to_err_list(my_qp, 1);
		spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);

		if (HAS_RQ(my_qp)) {
			spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
			ehca_add_to_err_list(my_qp, 0);
			spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
					       flags);
		}
	}

	return 0;
}

/*
 * internal_modify_qp with circumvention to handle aqp0 properly
 * smi_reset2init indicates if this is an internal reset-to-init-call for
 * smi. This flag must always be zero if called from ehca_modify_qp()!
 * This internal func was introduced to avoid recursion of ehca_modify_qp()!
 */
static int internal_modify_qp(struct ib_qp *ibqp,
			      struct ib_qp_attr *attr,
			      int attr_mask, int smi_reset2init)
{
	enum ib_qp_state qp_cur_state, qp_new_state;
	int cnt, qp_attr_idx, ret = 0;
	enum ib_qp_statetrans statetrans;
	struct hcp_modify_qp_control_block *mqpcb;
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
	struct ehca_shca *shca =
		container_of(ibqp->pd->device, struct ehca_shca, ib_device);
	u64 update_mask;
	u64 h_ret;
	int bad_wqe_cnt = 0;
	int is_user = 0;
	int squeue_locked = 0;
	unsigned long flags = 0;

	/* do query_qp to obtain current attr values */
	mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
	if (!mqpcb) {
		ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
			 "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
		return -ENOMEM;
	}

	h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
				my_qp->ipz_qp_handle,
				&my_qp->pf,
				mqpcb, my_qp->galpas.kernel);
	if (h_ret != H_SUCCESS) {
		ehca_err(ibqp->device, "hipz_h_query_qp() failed "
			 "ehca_qp=%p qp_num=%x h_ret=%lli",
			 my_qp, ibqp->qp_num, h_ret);
		ret = ehca2ib_return_code(h_ret);
		goto modify_qp_exit1;
	}
	if (ibqp->uobject)
		is_user = 1;

	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);

	if (qp_cur_state == -EINVAL) {	/* invalid qp state */
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
			 "ehca_qp=%p qp_num=%x",
			 mqpcb->qp_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}
	/*
	 * circumvention to set aqp0 initial state to init
	 * as expected by IB spec
	 */
	if (smi_reset2init == 0 &&
	    ibqp->qp_type == IB_QPT_SMI &&
	    qp_cur_state == IB_QPS_RESET &&
	    (attr_mask & IB_QP_STATE) &&
	    attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
		struct ib_qp_attr smiqp_attr = {
			.qp_state = IB_QPS_INIT,
			.port_num = my_qp->init_attr.port_num,
			.pkey_index = 0,
			.qkey = 0
		};
		int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
			IB_QP_PKEY_INDEX | IB_QP_QKEY;
		int smirc = internal_modify_qp(
			ibqp, &smiqp_attr, smiqp_attr_mask, 1);
		if (smirc) {
			ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
				 "ehca_modify_qp() rc=%i", smirc);
			ret = H_PARAMETER;
			goto modify_qp_exit1;
		}
		qp_cur_state = IB_QPS_INIT;
		ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
	}
	/* is transmitted current state equal to "real" current state */
	if ((attr_mask & IB_QP_CUR_STATE) &&
	    qp_cur_state != attr->cur_qp_state) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
			 " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
			 attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
		 "new qp_state=%x attribute_mask=%x",
		 my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);

	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
	if (!smi_reset2init &&
	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
				attr_mask)) {
		ret = -EINVAL;
		ehca_err(ibqp->device,
			 "Invalid qp transition new_state=%x cur_state=%x "
			 "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
			 qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
		goto modify_qp_exit1;
	}

	mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
	if (mqpcb->qp_state)
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
	else {
		ret = -EINVAL;
		ehca_err(ibqp->device, "Invalid new qp state=%x "
			 "ehca_qp=%p qp_num=%x",
			 qp_new_state, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	/* retrieve state transition struct to get req and opt attrs */
	statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
	if (statetrans < 0) {
		ret = -EINVAL;
		ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
			 "new_qp_state=%x State_xsition=%x ehca_qp=%p "
			 "qp_num=%x", qp_cur_state, qp_new_state,
			 statetrans, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);

	if (qp_attr_idx < 0) {
		ret = qp_attr_idx;
		ehca_err(ibqp->device,
			 "Invalid QP type=%x ehca_qp=%p qp_num=%x",
			 ibqp->qp_type, my_qp, ibqp->qp_num);
		goto modify_qp_exit1;
	}

	ehca_dbg(ibqp->device,
		 "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
		 my_qp, ibqp->qp_num, statetrans);

	/* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
	 * in non-LL UD QPs.
	 */
	if ((my_qp->qp_type == IB_QPT_UD) &&
	    (my_qp->ext_type != EQPT_LLQP) &&
	    (statetrans == IB_QPST_INIT2RTR) &&
	    (shca->hw_level >= 0x22)) {
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
		mqpcb->send_grh_flag = 1;
	}

	/* sqe -> rts: set purge bit of bad wqe before actual trans */
	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* mark next free wqe if kernel */
		if (!ibqp->uobject) {
			struct ehca_wqe *wqe;
			/* lock send queue */
			spin_lock_irqsave(&my_qp->spinlock_s, flags);
			squeue_locked = 1;
			/* mark next free wqe */
			wqe = (struct ehca_wqe *)
				ipz_qeit_get(&my_qp->ipz_squeue);
			wqe->optype = wqe->wqef = 0xff;
			ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
				 ibqp->qp_num, wqe);
		}
		ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
		if (ret) {
			ehca_err(ibqp->device, "prepare_sqe_rts() failed "
				 "ehca_qp=%p qp_num=%x ret=%i",
				 my_qp, ibqp->qp_num, ret);
			goto modify_qp_exit2;
		}
	}

	/*
	 * enable RDMA_Atomic_Control if reset->init and reliable connection
	 * this is necessary since gen2 does not provide that flag,
	 * but pHyp requires it
	 */
	if (statetrans == IB_QPST_RESET2INIT &&
	    (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
		mqpcb->rdma_atomic_ctrl = 3;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
	}
	/* circ. pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
	if (statetrans == IB_QPST_INIT2RTR &&
	    (ibqp->qp_type == IB_QPT_UC) &&
	    !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
		mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		if (attr->pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_p_key_idx = attr->pkey_index;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
	}
	if (attr_mask & IB_QP_PORT) {
		struct ehca_sport *sport;
		struct ehca_qp *aqp1;
		if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		sport = &shca->sport[attr->port_num - 1];
		if (!sport->ibqp_sqp[IB_QPT_GSI]) {
			/* should not occur */
			ret = -EFAULT;
			ehca_err(ibqp->device, "AQP1 was not created for "
				 "port=%x", attr->port_num);
			goto modify_qp_exit2;
		}
		aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
				    struct ehca_qp, ib_qp);
		if (ibqp->qp_type != IB_QPT_GSI &&
		    ibqp->qp_type != IB_QPT_SMI &&
		    aqp1->mod_qp_parm) {
			/*
			 * firmware will reject this modify_qp() because
			 * port is not activated/initialized fully
			 */
			ret = -EFAULT;
			ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
				  "either port is being activated (try again) "
				  "or cabling issue", attr->port_num);
			goto modify_qp_exit2;
		}
		mqpcb->prim_phys_port = attr->port_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
	}
	if (attr_mask & IB_QP_QKEY) {
		mqpcb->qkey = attr->qkey;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
	}
	if (attr_mask & IB_QP_AV) {
		mqpcb->dlid = attr->ah_attr.dlid;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
		mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
		mqpcb->service_level = attr->ah_attr.sl;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);

		if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
				  attr->ah_attr.static_rate,
				  &mqpcb->max_static_rate)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag = 1;

			mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid.byte[cnt] =
					attr->ah_attr.grh.dgid.raw[cnt];

			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
			mqpcb->flow_label = attr->ah_attr.grh.flow_label;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
			mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
			update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
			mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
		}
	}

	if (attr_mask & IB_QP_PATH_MTU) {
		/* store ld(MTU) */
		my_qp->mtu_shift = attr->path_mtu + 7;
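		/* enum ib_mtu starts at IB_MTU_256 == 1, i.e. 2^8 bytes,
		 * hence the +7 above yields log2 of the MTU in bytes */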
		mqpcb->path_mtu = attr->path_mtu;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		mqpcb->timeout = attr->timeout;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		mqpcb->retry_count = attr->retry_cnt;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		mqpcb->rnr_retry_count = attr->rnr_retry;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		mqpcb->receive_psn = attr->rq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
			attr->max_dest_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
			attr->max_rd_atomic : 2;
		update_mask |=
			EHCA_BMASK_SET
			(MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
	}
	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num < 1
		    || attr->alt_port_num > shca->num_ports) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_port=%x. "
				 "ehca_qp=%p qp_num=%x num_ports=%x",
				 attr->alt_port_num, my_qp, ibqp->qp_num,
				 shca->num_ports);
			goto modify_qp_exit2;
		}
		mqpcb->alt_phys_port = attr->alt_port_num;

		if (attr->alt_pkey_index >= 16) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
				 "ehca_qp=%p qp_num=%x max_pkey_index=f",
				 attr->pkey_index, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
		mqpcb->alt_p_key_idx = attr->alt_pkey_index;

		mqpcb->timeout_al = attr->alt_timeout;
		mqpcb->dlid_al = attr->alt_ah_attr.dlid;
		mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
		mqpcb->service_level_al = attr->alt_ah_attr.sl;

		if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
				  attr->alt_ah_attr.static_rate,
				  &mqpcb->max_static_rate_al)) {
			ret = -EINVAL;
			goto modify_qp_exit2;
		}

		/* OpenIB doesn't support alternate retry counts - copy them */
		mqpcb->retry_count_al = mqpcb->retry_count;
		mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;

		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
			| EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);

		/*
		 * Always supply the GRH flag, even if it's zero, to give the
		 * hypervisor a clear "yes" or "no" instead of a "perhaps"
		 */
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);

		/*
		 * only if GRH is TRUE we might consider SOURCE_GID_IDX
		 * and DEST_GID otherwise pHyp will return H_ATTR_PARM!!!
		 */
		if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
			mqpcb->send_grh_flag_al = 1;

			for (cnt = 0; cnt < 16; cnt++)
				mqpcb->dest_gid_al.byte[cnt] =
					attr->alt_ah_attr.grh.dgid.raw[cnt];
			mqpcb->source_gid_idx_al =
				attr->alt_ah_attr.grh.sgid_index;
			mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
			mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
			mqpcb->traffic_class_al =
				attr->alt_ah_attr.grh.traffic_class;

			update_mask |=
				EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
				| EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
				EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
		}
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
	}

	if (attr_mask & IB_QP_SQ_PSN) {
		mqpcb->send_psn = attr->sq_psn;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
	}

	if (attr_mask & IB_QP_DEST_QPN) {
		mqpcb->dest_qp_nr = attr->dest_qp_num;
		update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE) {
		if (attr->path_mig_state != IB_MIG_REARM
		    && attr->path_mig_state != IB_MIG_MIGRATED) {
			ret = -EINVAL;
			ehca_err(ibqp->device, "Invalid mig_state=%x",
				 attr->path_mig_state);
			goto modify_qp_exit2;
		}
		mqpcb->path_migration_state = attr->path_mig_state + 1;
		if (attr->path_mig_state == IB_MIG_REARM)
			my_qp->mig_armed = 1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
	}

	if (attr_mask & IB_QP_CAP) {
		mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
		mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
		update_mask |=
			EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
		/* no support for max_send/recv_sge yet */
	}

	if (ehca_debug_level >= 2)
		ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);

	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
				 my_qp->ipz_qp_handle,
				 &my_qp->pf,
				 update_mask,
				 mqpcb, my_qp->galpas.kernel);

	if (h_ret != H_SUCCESS) {
		ret = ehca2ib_return_code(h_ret);
		ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
		goto modify_qp_exit2;
	}

	if ((my_qp->qp_type == IB_QPT_UD ||
	     my_qp->qp_type == IB_QPT_GSI ||
	     my_qp->qp_type == IB_QPT_SMI) &&
	    statetrans == IB_QPST_SQE2RTS) {
		/* doorbell to reprocessing wqes */
		iosync(); /* serialize GAL register access */
		hipz_update_sqa(my_qp, bad_wqe_cnt-1);
		ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
	}

	if (statetrans == IB_QPST_RESET2INIT ||
	    statetrans == IB_QPST_INIT2INIT) {
		mqpcb->qp_enable = 1;
		mqpcb->qp_state = EHCA_QPS_INIT;
		update_mask = 0;
		update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);

		h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
					 my_qp->ipz_qp_handle,
					 &my_qp->pf,
					 update_mask,
					 mqpcb,
					 my_qp->galpas.kernel);

		if (h_ret != H_SUCCESS) {
			ret = ehca2ib_return_code(h_ret);
			ehca_err(ibqp->device, "ENABLE in context of "
				 "RESET_2_INIT failed! Maybe you didn't get "
				 "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
				 h_ret, my_qp, ibqp->qp_num);
			goto modify_qp_exit2;
		}
	}
	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
	    && !is_user) {
		ret = check_for_left_cqes(my_qp, shca);
		if (ret)
			goto modify_qp_exit2;
	}

	if (statetrans == IB_QPST_ANY2RESET) {
		ipz_qeit_reset(&my_qp->ipz_rqueue);
		ipz_qeit_reset(&my_qp->ipz_squeue);

		if (qp_cur_state == IB_QPS_ERR && !is_user) {
			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

			if (HAS_RQ(my_qp))
				del_from_err_list(my_qp->recv_cq,
						  &my_qp->rq_err_node);
		}
		if (!is_user)
			reset_queue_map(&my_qp->sq_map);

		if (HAS_RQ(my_qp) && !is_user)
			reset_queue_map(&my_qp->rq_map);
	}

	if (attr_mask & IB_QP_QKEY)
		my_qp->qkey = attr->qkey;

modify_qp_exit2:
	if (squeue_locked) { /* this means: sqe -> rts */
		spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
		my_qp->sqerr_purgeflag = 1;
	}

modify_qp_exit1:
	ehca_free_fw_ctrlblock(mqpcb);

	return ret;
}

int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		   struct ib_udata *udata)
{
	int ret = 0;

	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
					      ib_device);
	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);

	/* The if-block below caches qp_attr to be modified for GSI and SMI
	 * qps during the initialization by ib_mad. When the respective port
	 * is activated, ie we got an event PORT_ACTIVE, we'll replay the
	 * cached modify calls sequence, see ehca_recover_sqp() below.
	 * Why that is required:
	 * 1) If one port is connected, older code requires that port one
	 *    to be connected and module option nr_ports=1 to be given by
	 *    user, which is very inconvenient for end user.
	 * 2) Firmware accepts modify_qp() only if respective port has become
	 *    active. Older code had a wait loop of 30sec create_qp()/
	 *    define_aqp1(), which is not appropriate in practice. This
	 *    code now removes that wait loop, see define_aqp1(), and always
	 *    reports all ports to ib_mad resp. users. Only activated ports
	 *    will then be usable for the users.
	 */
	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		int port = my_qp->init_attr.port_num;
		struct ehca_sport *sport = &shca->sport[port - 1];
		unsigned long flags;
		spin_lock_irqsave(&sport->mod_sqp_lock, flags);
		/* cache qp_attr only during init */
		if (my_qp->mod_qp_parm) {
			struct ehca_mod_qp_parm *p;
			if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
				ehca_err(&shca->ib_device,
					 "mod_qp_parm overflow state=%x port=%x"
					 " type=%x", attr->qp_state,
					 my_qp->init_attr.port_num,
					 ibqp->qp_type);
				spin_unlock_irqrestore(&sport->mod_sqp_lock,
						       flags);
				return -EINVAL;
			}
			p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
			p->mask = attr_mask;
			p->attr = *attr;
			my_qp->mod_qp_parm_idx++;
			ehca_dbg(&shca->ib_device,
				 "Saved qp_attr for state=%x port=%x type=%x",
				 attr->qp_state, my_qp->init_attr.port_num,
				 ibqp->qp_type);
			spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
			goto out;
		}
		spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
	}

	ret = internal_modify_qp(ibqp, attr, attr_mask, 0);

out:
	if ((ret == 0) && (attr_mask & IB_QP_STATE))
		my_qp->state = attr->qp_state;

	return ret;
}

void ehca_recover_sqp(struct ib_qp *sqp)
{
	struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
	int port = my_sqp->init_attr.port_num;
	struct ib_qp_attr attr;
	struct ehca_mod_qp_parm *qp_parm;
	int i, qp_parm_idx, ret;
	unsigned long flags, wr_cnt;

	if (!my_sqp->mod_qp_parm)
		return;
	ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);

	qp_parm = my_sqp->mod_qp_parm;
	qp_parm_idx = my_sqp->mod_qp_parm_idx;
	for (i = 0; i < qp_parm_idx; i++) {
		attr = qp_parm[i].attr;
		ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
		if (ret) {
			ehca_err(sqp->device, "Could not modify SQP port=%x "
				 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
			goto free_qp_parm;
		}
		ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
			 port, sqp->qp_num, attr.qp_state);
	}

	/* re-trigger posted recv wrs */
	wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
		my_sqp->ipz_rqueue.qe_size;
	if (wr_cnt) {
		spin_lock_irqsave(&my_sqp->spinlock_r, flags);
		hipz_update_rqa(my_sqp, wr_cnt);
		spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
		ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
			 port, sqp->qp_num, wr_cnt);
	}

free_qp_parm:
	kfree(qp_parm);
	/* this prevents subsequent calls to modify_qp() to cache qp_attr */
	my_sqp->mod_qp_parm = NULL;
}
int ehca_query_qp(struct ib_qp *qp,
                  struct ib_qp_attr *qp_attr,
                  int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
        struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
        struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
                                              ib_device);
        struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
        struct hcp_modify_qp_control_block *qpcb;
        int cnt, ret = 0;
        u64 h_ret;

        if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
                ehca_err(qp->device, "Invalid attribute mask "
                         "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
                         my_qp, qp->qp_num, qp_attr_mask);
                return -EINVAL;
        }

        qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!qpcb) {
                ehca_err(qp->device, "Out of memory for qpcb "
                         "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
                return -ENOMEM;
        }

        h_ret = hipz_h_query_qp(adapter_handle,
                                my_qp->ipz_qp_handle,
                                &my_qp->pf,
                                qpcb, my_qp->galpas.kernel);
        if (h_ret != H_SUCCESS) {
                ret = ehca2ib_return_code(h_ret);
                ehca_err(qp->device, "hipz_h_query_qp() failed "
                         "ehca_qp=%p qp_num=%x h_ret=%lli",
                         my_qp, qp->qp_num, h_ret);
                goto query_qp_exit1;
        }

        qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
        qp_attr->qp_state = qp_attr->cur_qp_state;

        if (qp_attr->cur_qp_state == -EINVAL) {
                ret = -EINVAL;
                ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
                         "ehca_qp=%p qp_num=%x",
                         qpcb->qp_state, my_qp, qp->qp_num);
                goto query_qp_exit1;
        }

        if (qp_attr->qp_state == IB_QPS_SQD)
                qp_attr->sq_draining = 1;

        qp_attr->qkey = qpcb->qkey;
        qp_attr->path_mtu = qpcb->path_mtu;
        qp_attr->path_mig_state = qpcb->path_migration_state - 1;
        qp_attr->rq_psn = qpcb->receive_psn;
        qp_attr->sq_psn = qpcb->send_psn;
        qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
        qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr - 1;
        qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr - 1;
        /* UD_AV CIRCUMVENTION: the driver reserves two extra SGEs per
         * WQE on UD QPs (see the matching +2 in internal_create_qp()),
         * so subtract them before reporting to the caller */
        if (my_qp->qp_type == IB_QPT_UD) {
                qp_attr->cap.max_send_sge =
                        qpcb->actual_nr_sges_in_sq_wqe - 2;
                qp_attr->cap.max_recv_sge =
                        qpcb->actual_nr_sges_in_rq_wqe - 2;
        } else {
                qp_attr->cap.max_send_sge =
                        qpcb->actual_nr_sges_in_sq_wqe;
                qp_attr->cap.max_recv_sge =
                        qpcb->actual_nr_sges_in_rq_wqe;
        }

        qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
        qp_attr->dest_qp_num = qpcb->dest_qp_nr;

        qp_attr->pkey_index = qpcb->prim_p_key_idx;
        qp_attr->port_num = qpcb->prim_phys_port;
        qp_attr->timeout = qpcb->timeout;
        qp_attr->retry_cnt = qpcb->retry_count;
        qp_attr->rnr_retry = qpcb->rnr_retry_count;

        qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
        qp_attr->alt_port_num = qpcb->alt_phys_port;
        qp_attr->alt_timeout = qpcb->timeout_al;

        qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
        qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;

        /* primary av */
        qp_attr->ah_attr.sl = qpcb->service_level;

        if (qpcb->send_grh_flag) {
                qp_attr->ah_attr.ah_flags = IB_AH_GRH;
        }

        qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
        qp_attr->ah_attr.dlid = qpcb->dlid;
        qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
        qp_attr->ah_attr.port_num = qp_attr->port_num;

        /* primary GRH */
        qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
        qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
        qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
        qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;

        for (cnt = 0; cnt < 16; cnt++)
                qp_attr->ah_attr.grh.dgid.raw[cnt] =
                        qpcb->dest_gid.byte[cnt];

        /* alternate AV */
        qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
        if (qpcb->send_grh_flag_al) {
                qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
        }

        qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
        qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
        qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;

        /* alternate GRH */
        qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
        qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
        qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
        qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;

        for (cnt = 0; cnt < 16; cnt++)
                qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
                        qpcb->dest_gid_al.byte[cnt];

        /* return init attributes given in ehca_create_qp */
        if (qp_init_attr)
                *qp_init_attr = my_qp->init_attr;

        if (ehca_debug_level >= 2)
                ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);

query_qp_exit1:
        ehca_free_fw_ctrlblock(qpcb);

        return ret;
}
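
/*
 * ehca_modify_srq() - change SRQ attributes; only IB_SRQ_LIMIT is
 * supported. Setting the limit also resets the affiliated async event
 * logging so that a fresh SRQ-limit-reached event can be delivered.
 *
 * A minimal caller-side sketch (consumer code, not part of this driver;
 * "srq" is assumed to be an SRQ created earlier via ib_create_srq()):
 *
 *	struct ib_srq_attr attr = { .srq_limit = 16 };
 *	int ret = ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
 *
 * IB_EVENT_SRQ_LIMIT_REACHED is then generated once fewer than 16 WRs
 * remain posted on the SRQ.
 */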
int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct ehca_qp *my_qp =
                container_of(ibsrq, struct ehca_qp, ib_srq);
        struct ehca_shca *shca =
                container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
        struct hcp_modify_qp_control_block *mqpcb;
        u64 update_mask;
        u64 h_ret;
        int ret = 0;

        mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!mqpcb) {
                ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
                         "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
                return -ENOMEM;
        }

        update_mask = 0;
        if (attr_mask & IB_SRQ_LIMIT) {
                attr_mask &= ~IB_SRQ_LIMIT;
                update_mask |=
                        EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
                        | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
                mqpcb->curr_srq_limit = attr->srq_limit;
                mqpcb->qp_aff_asyn_ev_log_reg =
                        EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
        }

        /* by now, all bits in attr_mask should have been cleared */
        if (attr_mask) {
                ehca_err(ibsrq->device, "invalid attribute mask bits set "
                         "attr_mask=%x", attr_mask);
                ret = -EINVAL;
                goto modify_srq_exit0;
        }

        if (ehca_debug_level >= 2)
                ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

        h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
                                 NULL, update_mask, mqpcb,
                                 my_qp->galpas.kernel);
        if (h_ret != H_SUCCESS) {
                ret = ehca2ib_return_code(h_ret);
                ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
                         "ehca_qp=%p qp_num=%x",
                         h_ret, my_qp, my_qp->real_qp_num);
        }

modify_srq_exit0:
        ehca_free_fw_ctrlblock(mqpcb);

        return ret;
}
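
/*
 * ehca_query_srq() - report current SRQ attributes. max_wr mirrors the
 * firmware's outstanding-WR count minus one (the same adjustment
 * ehca_query_qp() applies above), max_sge is the fixed value this
 * driver creates SRQs with, and srq_limit is the currently armed limit.
 */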
int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
{
        struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
        struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
                                              ib_device);
        struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
        struct hcp_modify_qp_control_block *qpcb;
        int ret = 0;
        u64 h_ret;

        qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!qpcb) {
                ehca_err(srq->device, "Out of memory for qpcb "
                         "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
                return -ENOMEM;
        }

        h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
                                NULL, qpcb, my_qp->galpas.kernel);
        if (h_ret != H_SUCCESS) {
                ret = ehca2ib_return_code(h_ret);
                ehca_err(srq->device, "hipz_h_query_qp() failed "
                         "ehca_qp=%p qp_num=%x h_ret=%lli",
                         my_qp, my_qp->real_qp_num, h_ret);
                goto query_srq_exit1;
        }

        srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
        srq_attr->max_sge = 3; /* fixed; matches the value used at SRQ create time */
        srq_attr->srq_limit = qpcb->curr_srq_limit;

        if (ehca_debug_level >= 2)
                ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);

query_srq_exit1:
        ehca_free_fw_ctrlblock(qpcb);

        return ret;
}
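
/*
 * internal_destroy_qp() - common teardown for QPs and SRQs. The order
 * matters: refuse if user space still maps the queues, detach from the
 * send CQ and the idr, drop the QP from the CQs' error lists, wait for
 * in-flight events to drain, and only then destroy the firmware QP and
 * free the queue memory.
 */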
static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
                               struct ib_uobject *uobject)
{
        struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
        struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
                                             ib_pd);
        struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
        u32 qp_num = my_qp->real_qp_num;
        int ret;
        u64 h_ret;
        u8 port_num;
        int is_user = 0;
        enum ib_qp_type qp_type;
        unsigned long flags;

        if (uobject) {
                is_user = 1;
                if (my_qp->mm_count_galpa ||
                    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
                        ehca_err(dev, "Resources still referenced in "
                                 "user space qp_num=%x", qp_num);
                        return -EINVAL;
                }
        }

        if (my_qp->send_cq) {
                ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
                if (ret) {
                        ehca_err(dev, "Couldn't unassign qp from "
                                 "send_cq ret=%i qp_num=%x cq_num=%x", ret,
                                 qp_num, my_qp->send_cq->cq_number);
                        return ret;
                }
        }

        write_lock_irqsave(&ehca_qp_idr_lock, flags);
        idr_remove(&ehca_qp_idr, my_qp->token);
        write_unlock_irqrestore(&ehca_qp_idr_lock, flags);

        /*
         * SRQs will never get into an error list and do not have a recv_cq,
         * so we need to skip them here.
         */
        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
                del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);

        if (HAS_SQ(my_qp) && !is_user)
                del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);

        /* now wait until all pending events have completed */
        wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));

        h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
        if (h_ret != H_SUCCESS) {
                ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
                         "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
                return ehca2ib_return_code(h_ret);
        }

        port_num = my_qp->init_attr.port_num;
        qp_type  = my_qp->init_attr.qp_type;

        if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
                spin_lock_irqsave(&sport->mod_sqp_lock, flags);
                kfree(my_qp->mod_qp_parm);
                my_qp->mod_qp_parm = NULL;
                shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
                spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
        }

        /* no support for IB_QPT_SMI yet */
        if (qp_type == IB_QPT_GSI) {
                struct ib_event event;
                ehca_info(dev, "device %s: port %x is inactive.",
                          shca->ib_device.name, port_num);
                event.device = &shca->ib_device;
                event.event = IB_EVENT_PORT_ERR;
                event.element.port_num = port_num;
                shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
                ib_dispatch_event(&event);
        }

        if (HAS_RQ(my_qp)) {
                ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
                if (!is_user)
                        vfree(my_qp->rq_map.map);
        }
        if (HAS_SQ(my_qp)) {
                ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
                if (!is_user)
                        vfree(my_qp->sq_map.map);
        }
        kmem_cache_free(qp_cache, my_qp);
        atomic_dec(&shca->num_qps);
        return 0;
}
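
/*
 * Both destroy verbs funnel into internal_destroy_qp(): struct ehca_qp
 * embeds an ib_qp as well as an ib_srq, so container_of() recovers the
 * same ehca_qp from either handle.
 */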
int ehca_destroy_qp(struct ib_qp *qp)
{
        return internal_destroy_qp(qp->device,
                                   container_of(qp, struct ehca_qp, ib_qp),
                                   qp->uobject);
}

int ehca_destroy_srq(struct ib_srq *srq)
{
        return internal_destroy_qp(srq->device,
                                   container_of(srq, struct ehca_qp, ib_srq),
                                   srq->uobject);
}
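
/*
 * Slab cache for struct ehca_qp, created once per module lifetime
 * (presumably from the driver's module init/exit path; the callers are
 * outside this file). SLAB_HWCACHE_ALIGN starts each QP on a cache
 * line boundary.
 */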
int ehca_init_qp_cache(void)
{
        qp_cache = kmem_cache_create("ehca_cache_qp",
                                     sizeof(struct ehca_qp), 0,
                                     SLAB_HWCACHE_ALIGN,
                                     NULL);
        if (!qp_cache)
                return -ENOMEM;
        return 0;
}

void ehca_cleanup_qp_cache(void)
{
        if (qp_cache)
                kmem_cache_destroy(qp_cache);
}