/*
 *  IBM eServer eHCA Infiniband device driver for Linux on POWER
 *
 *  post_send/recv, poll_cq, req_notify
 *
 *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 *           Waleri Fomin <fomin@de.ibm.com>
 *           Joachim Fenkes <fenkes@de.ibm.com>
 *           Reinhard Ernst <rernst@de.ibm.com>
 *
 *  Copyright (c) 2005 IBM Corporation
 *
 *  All rights reserved.
 *
 *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
 *  BSD.
 *
 * OpenIB BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <asm-powerpc/system.h>
#include "ehca_classes.h"
#include "ehca_tools.h"
#include "ehca_qes.h"
#include "ehca_iverbs.h"
#include "hcp_if.h"
#include "hipz_fns.h"

/* in RC traffic, insert an empty RDMA READ every this many packets */
#define ACK_CIRC_THRESHOLD 2000000
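
/*
 * ehca_write_rwqe() - build one receive WQE in the hardware receive queue.
 * Validates the SG list length of @recv_wr, then copies wr_id and all
 * scatter/gather entries into the WQE slot @wqe_p taken from @ipz_rqueue.
 * Returns 0 on success or -EINVAL on a bad SG list.
 */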
static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
				  struct ehca_wqe *wqe_p,
				  struct ib_recv_wr *recv_wr)
{
	u8 cnt_ds;
	if (unlikely((recv_wr->num_sge < 0) ||
		     (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = recv_wr->wr_id;
	wqe_p->nr_of_data_seg = recv_wr->num_sge;

	for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
		wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
			recv_wr->sg_list[cnt_ds].addr;
		wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
			recv_wr->sg_list[cnt_ds].lkey;
		wqe_p->u.all_rcv.sg_list[cnt_ds].length =
			recv_wr->sg_list[cnt_ds].length;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
			     ipz_rqueue);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
	}

	return 0;
}

#if defined(DEBUG_GSI_SEND_WR)

/* need ib_mad struct */
#include <rdma/ib_mad.h>

static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
{
	int idx = 0;	/* index of the WR within the chain, for tracing */
	int j;
	while (send_wr) {
		struct ib_mad_hdr *mad_hdr = send_wr->wr.ud.mad_hdr;
		struct ib_sge *sge = send_wr->sg_list;
		ehca_gen_dbg("send_wr#%x wr_id=%lx num_sge=%x "
			     "send_flags=%x opcode=%x", idx, send_wr->wr_id,
			     send_wr->num_sge, send_wr->send_flags,
			     send_wr->opcode);
		if (mad_hdr) {
			ehca_gen_dbg("send_wr#%x mad_hdr base_version=%x "
				     "mgmt_class=%x class_version=%x method=%x "
				     "status=%x class_specific=%x tid=%lx "
				     "attr_id=%x resv=%x attr_mod=%x",
				     idx, mad_hdr->base_version,
				     mad_hdr->mgmt_class,
				     mad_hdr->class_version, mad_hdr->method,
				     mad_hdr->status, mad_hdr->class_specific,
				     mad_hdr->tid, mad_hdr->attr_id,
				     mad_hdr->resv,
				     mad_hdr->attr_mod);
		}
		for (j = 0; j < send_wr->num_sge; j++) {
			u8 *data = (u8 *)abs_to_virt(sge->addr);
			ehca_gen_dbg("send_wr#%x sge#%x addr=%p length=%x "
				     "lkey=%x",
				     idx, j, data, sge->length, sge->lkey);
			/* assume length is n*16 */
			ehca_dmp(data, sge->length, "send_wr#%x sge#%x",
				 idx, j);
			sge++;
		} /* eof for j */
		idx++;
		send_wr = send_wr->next;
	} /* eof while send_wr */
}

#endif /* DEBUG_GSI_SEND_WR */
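
/*
 * ehca_write_swqe() - build one send WQE in the hardware send queue.
 * Maps the opcode, flags, immediate data, and SG list of @send_wr into the
 * WQE slot @wqe_p; @hidden marks driver-internal WQEs (e.g. the empty RDMA
 * READ used for ack circumvention) that must not request a signaled
 * completion.  Returns 0 on success or -EINVAL on bad input.
 */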
static inline int ehca_write_swqe(struct ehca_qp *qp,
				  struct ehca_wqe *wqe_p,
				  const struct ib_send_wr *send_wr,
				  int hidden)
{
	u32 idx;
	u64 dma_length;
	struct ehca_av *my_av;
	u32 remote_qkey = send_wr->wr.ud.remote_qkey;

	if (unlikely((send_wr->num_sge < 0) ||
		     (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
		ehca_gen_err("Invalid number of WQE SGE. "
			     "num_sge=%x max_nr_of_sg=%x",
			     send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
		return -EINVAL; /* invalid SG list length */
	}

	/* clear wqe header until sglist */
	memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));

	wqe_p->work_request_id = send_wr->wr_id;

	switch (send_wr->opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_SEND;
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
		break;
	case IB_WR_RDMA_READ:
		wqe_p->optype = WQE_OPTYPE_RDMAREAD;
		break;
	default:
		ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
		return -EINVAL; /* invalid opcode */
	}

	wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;

	wqe_p->wr_flag = 0;

	if ((send_wr->send_flags & IB_SEND_SIGNALED ||
	     qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
	    && !hidden)
		wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;

	if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
	    send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
		/* this might not work as long as HW does not support it */
		wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
		wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
	}

	wqe_p->nr_of_data_seg = send_wr->num_sge;

	switch (qp->qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* no break is intentional here */
	case IB_QPT_UD:
		/* IB 1.2 spec C10-15 compliance */
		if (send_wr->wr.ud.remote_qkey & 0x80000000)
			remote_qkey = qp->qkey;

		wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
		wqe_p->local_ee_context_qkey = remote_qkey;
		if (unlikely(!send_wr->wr.ud.ah)) {
			ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
			return -EINVAL;
		}
		if (unlikely(send_wr->wr.ud.remote_qpn == 0)) {
			ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
			return -EINVAL;
		}
		my_av = container_of(send_wr->wr.ud.ah, struct ehca_av, ib_ah);
		wqe_p->u.ud_av.ud_av = my_av->av;

		/*
		 * omitted check of IB_SEND_INLINE
		 * since HW does not support it
		 */
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.ud_av.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.ud_av.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.ud_av.sg_list[idx].length =
				send_wr->sg_list[idx].length;
		} /* eof for idx */
		if (qp->qp_type == IB_QPT_SMI ||
		    qp->qp_type == IB_QPT_GSI)
			wqe_p->u.ud_av.ud_av.pmtu = 1;
		if (qp->qp_type == IB_QPT_GSI) {
			wqe_p->pkeyi = send_wr->wr.ud.pkey_index;
#ifdef DEBUG_GSI_SEND_WR
			trace_send_wr_ud(send_wr);
#endif /* DEBUG_GSI_SEND_WR */
		}
		break;

	case IB_QPT_UC:
		if (send_wr->send_flags & IB_SEND_FENCE)
			wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
		/* no break is intentional here */
	case IB_QPT_RC:
		/* TODO: atomic not implemented */
		wqe_p->u.nud.remote_virtual_adress =
			send_wr->wr.rdma.remote_addr;
		wqe_p->u.nud.rkey = send_wr->wr.rdma.rkey;

		/*
		 * omitted checking of IB_SEND_INLINE
		 * since HW does not support it
		 */
		dma_length = 0;
		for (idx = 0; idx < send_wr->num_sge; idx++) {
			wqe_p->u.nud.sg_list[idx].vaddr =
				send_wr->sg_list[idx].addr;
			wqe_p->u.nud.sg_list[idx].lkey =
				send_wr->sg_list[idx].lkey;
			wqe_p->u.nud.sg_list[idx].length =
				send_wr->sg_list[idx].length;
			dma_length += send_wr->sg_list[idx].length;
		} /* eof idx */
		wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;

		/* unsolicited ack circumvention */
		if (send_wr->opcode == IB_WR_RDMA_READ) {
			/* on RDMA read, switch on and reset counters */
			qp->message_count = qp->packet_count = 0;
			qp->unsol_ack_circ = 1;
		} else
			/* else estimate #packets */
			qp->packet_count += (dma_length >> qp->mtu_shift) + 1;

		break;

	default:
		ehca_gen_err("Invalid qptype=%x", qp->qp_type);
		return -EINVAL;
	}

	if (ehca_debug_level) {
		ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
		ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
	}
	return 0;
}

/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
static inline void map_ib_wc_status(u32 cqe_status,
				    enum ib_wc_status *wc_status)
{
	if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
		switch (cqe_status & 0x3F) {
		case 0x01:
		case 0x21:
			*wc_status = IB_WC_LOC_LEN_ERR;
			break;
		case 0x02:
		case 0x22:
			*wc_status = IB_WC_LOC_QP_OP_ERR;
			break;
		case 0x03:
		case 0x23:
			*wc_status = IB_WC_LOC_EEC_OP_ERR;
			break;
		case 0x04:
		case 0x24:
			*wc_status = IB_WC_LOC_PROT_ERR;
			break;
		case 0x05:
		case 0x25:
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		case 0x06:
			*wc_status = IB_WC_MW_BIND_ERR;
			break;
		case 0x07: /* remote error - look into bits 20:24 */
			switch ((cqe_status
				 & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
			case 0x0:
				/*
				 * PSN Sequence Error!
				 * couldn't find a matching status!
				 */
				*wc_status = IB_WC_GENERAL_ERR;
				break;
			case 0x1:
				*wc_status = IB_WC_REM_INV_REQ_ERR;
				break;
			case 0x2:
				*wc_status = IB_WC_REM_ACCESS_ERR;
				break;
			case 0x3:
				*wc_status = IB_WC_REM_OP_ERR;
				break;
			case 0x4:
				*wc_status = IB_WC_REM_INV_RD_REQ_ERR;
				break;
			}
			break;
		case 0x08:
			*wc_status = IB_WC_RETRY_EXC_ERR;
			break;
		case 0x09:
			*wc_status = IB_WC_RNR_RETRY_EXC_ERR;
			break;
		case 0x0A:
		case 0x2D:
			*wc_status = IB_WC_REM_ABORT_ERR;
			break;
		case 0x0B:
		case 0x2E:
			*wc_status = IB_WC_INV_EECN_ERR;
			break;
		case 0x0C:
		case 0x2F:
			*wc_status = IB_WC_INV_EEC_STATE_ERR;
			break;
		case 0x0D:
			*wc_status = IB_WC_BAD_RESP_ERR;
			break;
		case 0x10:
			/* WQE purged */
			*wc_status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			*wc_status = IB_WC_FATAL_ERR;
		}
	} else
		*wc_status = IB_WC_SUCCESS;
}
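
/*
 * post_one_send() - reserve the next free WQE slot in the send queue and
 * fill it via ehca_write_swqe().  On queue overflow or a build error the
 * queue offset is rolled back and *bad_send_wr (if provided) is set to the
 * offending work request.  @hidden is passed through for driver-internal
 * WQEs that must not generate a completion.
 */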
static inline int post_one_send(struct ehca_qp *my_qp,
				struct ib_send_wr *cur_send_wr,
				struct ib_send_wr **bad_send_wr,
				int hidden)
{
	struct ehca_wqe *wqe_p;
	int ret;
	u64 start_offset = my_qp->ipz_squeue.current_q_offset;

	/* get pointer next to free WQE */
	wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
	if (unlikely(!wqe_p)) {
		/* too many posted work requests: queue overflow */
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -ENOMEM;
	}
	/* write a SEND WQE into the QUEUE */
	ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
	/*
	 * if something failed,
	 * reset the free entry pointer to the start value
	 */
	if (unlikely(ret)) {
		my_qp->ipz_squeue.current_q_offset = start_offset;
		if (bad_send_wr)
			*bad_send_wr = cur_send_wr;
		ehca_err(my_qp->ib_qp.device, "Could not write WQE "
			 "qp_num=%x", my_qp->ib_qp.qp_num);
		return -EINVAL;
	}

	return 0;
}
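
/*
 * ehca_post_send() - the eHCA implementation of the post_send verb.
 * Takes the send-queue lock, optionally posts a hidden empty RDMA READ for
 * ack circumvention on RC connections, then posts the caller's chain of
 * work requests.  If at least one WQE was posted successfully, a later
 * failure in the chain is not reported as an error; the adapter is
 * notified of the whole batch once via hipz_update_sqa().
 */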
int ehca_post_send(struct ib_qp *qp,
		   struct ib_send_wr *send_wr,
		   struct ib_send_wr **bad_send_wr)
{
	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
	struct ib_send_wr *cur_send_wr;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_s, flags);

	/* Send an empty extra RDMA read if:
	 *  1) there has been an RDMA read on this connection before
	 *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
	 *  3) we can be sure that any previous extra RDMA read has been
	 *     processed so we don't overflow the SQ
	 */
	if (unlikely(my_qp->unsol_ack_circ &&
		     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
		     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
		/* insert an empty RDMA READ to fix up the remote QP state */
		struct ib_send_wr circ_wr;
		memset(&circ_wr, 0, sizeof(circ_wr));
		circ_wr.opcode = IB_WR_RDMA_READ;
		post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
		wqe_cnt++;
		ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
		my_qp->message_count = my_qp->packet_count = 0;
	}

	/* loop processes list of send reqs */
	for (cur_send_wr = send_wr; cur_send_wr != NULL;
	     cur_send_wr = cur_send_wr->next) {
		ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
		if (unlikely(ret)) {
			/* if one or more WQEs were successful, don't fail */
			if (wqe_cnt)
				ret = 0;
			goto post_send_exit0;
		}
		wqe_cnt++;
		ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, qp->qp_num, wqe_cnt);
	} /* eof for cur_send_wr */

post_send_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_sqa(my_qp, wqe_cnt);
	my_qp->message_count += wqe_cnt;
	spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
	return ret;
}
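
/*
 * internal_post_recv() - common receive-posting path shared by
 * ehca_post_recv() and ehca_post_srq_recv().  Rejects QPs without a
 * receive queue, then, under the receive-queue lock, writes one receive
 * WQE per work request and notifies the adapter once via
 * hipz_update_rqa().  Errors are only reported if no WQE was posted
 * before the failure.
 */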
static int internal_post_recv(struct ehca_qp *my_qp,
			      struct ib_device *dev,
			      struct ib_recv_wr *recv_wr,
			      struct ib_recv_wr **bad_recv_wr)
{
	struct ib_recv_wr *cur_recv_wr;
	struct ehca_wqe *wqe_p;
	int wqe_cnt = 0;
	int ret = 0;
	unsigned long flags;

	if (unlikely(!HAS_RQ(my_qp))) {
		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
		return -ENODEV;
	}

	/* LOCK the QUEUE */
	spin_lock_irqsave(&my_qp->spinlock_r, flags);

	/* loop processes list of recv reqs */
	for (cur_recv_wr = recv_wr; cur_recv_wr != NULL;
	     cur_recv_wr = cur_recv_wr->next) {
		u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
		/* get pointer next to free WQE */
		wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
		if (unlikely(!wqe_p)) {
			/* too many posted work requests: queue overflow */
			if (bad_recv_wr)
				*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -ENOMEM;
				ehca_err(dev, "Too many posted WQEs "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		/* write a RECV WQE into the QUEUE */
		ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, cur_recv_wr);
		/*
		 * if something failed,
		 * reset the free entry pointer to the start value
		 */
		if (unlikely(ret)) {
			my_qp->ipz_rqueue.current_q_offset = start_offset;
			*bad_recv_wr = cur_recv_wr;
			if (wqe_cnt == 0) {
				ret = -EINVAL;
				ehca_err(dev, "Could not write WQE "
					 "qp_num=%x", my_qp->real_qp_num);
			}
			goto post_recv_exit0;
		}
		wqe_cnt++;
		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
			 my_qp, my_qp->real_qp_num, wqe_cnt);
	} /* eof for cur_recv_wr */

post_recv_exit0:
	iosync(); /* serialize GAL register access */
	hipz_update_rqa(my_qp, wqe_cnt);
	spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
	return ret;
}
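
/*
 * ehca_post_recv() and ehca_post_srq_recv() are thin wrappers that resolve
 * the ehca_qp behind an ib_qp or ib_srq and hand off to internal_post_recv().
 */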
int ehca_post_recv(struct ib_qp *qp,
		   struct ib_recv_wr *recv_wr,
		   struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
				  qp->device, recv_wr, bad_recv_wr);
}

int ehca_post_srq_recv(struct ib_srq *srq,
		       struct ib_recv_wr *recv_wr,
		       struct ib_recv_wr **bad_recv_wr)
{
	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
				  srq->device, recv_wr, bad_recv_wr);
}

/*
 * ib_wc_opcode table converts ehca wc opcode to ib
 * Since we use zero to indicate invalid opcode, the actual ib opcode must
 * be decremented!!!
 */
static const u8 ib_wc_opcode[255] = {
	[0x01] = IB_WC_RECV+1,
	[0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
	[0x04] = IB_WC_BIND_MW+1,
	[0x08] = IB_WC_FETCH_ADD+1,
	[0x10] = IB_WC_COMP_SWAP+1,
	[0x20] = IB_WC_RDMA_WRITE+1,
	[0x40] = IB_WC_RDMA_READ+1,
	[0x80] = IB_WC_SEND+1
};
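
/*
 * Example of the convention above: hardware optype 0x80 maps to
 * IB_WC_SEND+1 and ehca_poll_cq_one() subtracts one again; an optype
 * without a table entry yields 0, so wc->opcode becomes -1 and the CQE
 * is rejected as invalid.
 */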

/* internal function to poll one entry of cq */
static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
{
	int ret = 0;
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	struct ehca_cqe *cqe;
	struct ehca_qp *my_qp;
	int cqe_count = 0;

poll_cq_one_read_cqe:
	cqe = (struct ehca_cqe *)
		ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
	if (!cqe) {
		ret = -EAGAIN;
		ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
			 "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
		goto poll_cq_one_exit0;
	}

	/* prevents loads being reordered across this point */
	rmb();

	cqe_count++;
	if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
		struct ehca_qp *qp;
		int purgeflag;
		unsigned long flags;

		qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
		if (!qp) {
			ehca_err(cq->device, "cq_num=%x qp_num=%x "
				 "could not find qp -> ignore cqe",
				 my_cq->cq_number, cqe->local_qp_number);
			ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
				 my_cq->cq_number, cqe->local_qp_number);
			/* ignore this purged cqe */
			goto poll_cq_one_read_cqe;
		}
		spin_lock_irqsave(&qp->spinlock_s, flags);
		purgeflag = qp->sqerr_purgeflag;
		spin_unlock_irqrestore(&qp->spinlock_s, flags);

		if (purgeflag) {
			ehca_dbg(cq->device,
				 "Got CQE with purged bit qp_num=%x src_qp=%x",
				 cqe->local_qp_number, cqe->remote_qp_number);
			if (ehca_debug_level)
				ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
					 cqe->local_qp_number,
					 cqe->remote_qp_number);
			/*
			 * ignore this to avoid double cqes of bad wqe
			 * that caused sqe and turn off purge flag
			 */
			qp->sqerr_purgeflag = 0;
			goto poll_cq_one_read_cqe;
		}
	}

	/* tracing cqe */
	if (unlikely(ehca_debug_level)) {
		ehca_dbg(cq->device,
			 "Received COMPLETION ehca_cq=%p cq_num=%x -----",
			 my_cq, my_cq->cq_number);
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x -------------------------",
			 my_cq, my_cq->cq_number);
	}

	/* we got a completion! */
	wc->wr_id = cqe->work_request_id;

	/* eval ib_wc_opcode */
	wc->opcode = ib_wc_opcode[cqe->optype]-1;
	if (unlikely(wc->opcode == -1)) {
		ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
			 "ehca_cq=%p cq_num=%x",
			 cqe->optype, cqe->status, my_cq, my_cq->cq_number);
		/* dump cqe for other infos */
		ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
			 my_cq, my_cq->cq_number);
		/* update also queue adder to throw away this entry!!! */
		goto poll_cq_one_exit0;
	}

	/* eval ib_wc_status */
	if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
		/* complete with errors */
		map_ib_wc_status(cqe->status, &wc->status);
		wc->vendor_err = wc->status;
	} else
		wc->status = IB_WC_SUCCESS;

	read_lock(&ehca_qp_idr_lock);
	my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
	wc->qp = &my_qp->ib_qp;
	read_unlock(&ehca_qp_idr_lock);

	wc->byte_len = cqe->nr_bytes_transferred;
	wc->pkey_index = cqe->pkey_index;
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
	wc->wc_flags = cqe->w_completion_flags;
	wc->imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;

	if (unlikely(wc->status != IB_WC_SUCCESS))
		ehca_dbg(cq->device,
			 "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
			 "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
			 "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
			 cqe->status, cqe->local_qp_number,
			 cqe->remote_qp_number, cqe->work_request_id, cqe);

poll_cq_one_exit0:
	if (cqe_count > 0)
		hipz_update_feca(my_cq, cqe_count);

	return ret;
}
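
/*
 * ehca_poll_cq() - the eHCA implementation of the poll_cq verb: drains up
 * to @num_entries completions from the CQ under the CQ spinlock, stopping
 * early when the queue runs dry (-EAGAIN from ehca_poll_cq_one()).
 * Returns the number of entries written to @wc, or a negative errno on
 * invalid input.
 */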
int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int nr;
	struct ib_wc *current_wc = wc;
	int ret = 0;
	unsigned long flags;

	if (num_entries < 1) {
		ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
			 "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
		ret = -EINVAL;
		goto poll_cq_exit0;
	}

	spin_lock_irqsave(&my_cq->spinlock, flags);
	for (nr = 0; nr < num_entries; nr++) {
		ret = ehca_poll_cq_one(cq, current_wc);
		if (ret)
			break;
		current_wc++;
	} /* eof for nr */
	spin_unlock_irqrestore(&my_cq->spinlock, flags);
	if (ret == -EAGAIN || !ret)
		ret = nr;

poll_cq_exit0:
	return ret;
}
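
/*
 * ehca_req_notify_cq() - the eHCA implementation of the req_notify_cq
 * verb: arms the CQ for solicited-only or next-completion events via
 * hipz_set_cqx_n0()/hipz_set_cqx_n1().  With IB_CQ_REPORT_MISSED_EVENTS
 * it additionally returns a nonzero value if a valid CQE is already
 * waiting in the queue.
 */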
int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
{
	struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
	int ret = 0;

	switch (notify_flags & IB_CQ_SOLICITED_MASK) {
	case IB_CQ_SOLICITED:
		hipz_set_cqx_n0(my_cq, 1);
		break;
	case IB_CQ_NEXT_COMP:
		hipz_set_cqx_n1(my_cq, 1);
		break;
	default:
		return -EINVAL;
	}

	if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
		unsigned long spl_flags;
		spin_lock_irqsave(&my_cq->spinlock, spl_flags);
		ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
		spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
	}

	return ret;
}