drivers/infiniband/hw/ipath/ipath_qp.c

/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						      BITS_PER_PAGE, off)
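
/*
 * Illustrative sketch (not part of the original source): how the QPN
 * <-> bitmap mapping composes.  Assuming 4 KiB pages, BITS_PER_PAGE is
 * 32768, so QPN 40000 lands in map[40000 / BITS_PER_PAGE] == map[1] at
 * bit offset (40000 & BITS_PER_PAGE_MASK) == 7232, and mk_qpn()
 * recovers the QPN from the (map, offset) pair:
 *
 *	u32 qpn = 40000;
 *	struct qpn_map *map = &qpt->map[qpn / BITS_PER_PAGE];
 *	u32 off = qpn & BITS_PER_PAGE_MASK;
 *	// mk_qpn(qpt, map, off) == 1 * 32768 + 7232 == 40000
 */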

/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
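
/*
 * Decoding sketch (mirrors ipath_get_credit() below; not new driver
 * code): a 5-bit credit code extracted from an AETH indexes this
 * table, except that the invalid sentinel IPATH_AETH_CREDIT_INVAL
 * means "unlimited credits" and must not be used as an index:
 *
 *	u32 code = (aeth >> IPATH_AETH_CREDIT_SHIFT) &
 *		IPATH_AETH_CREDIT_MASK;
 *	u32 credits = (code == IPATH_AETH_CREDIT_INVAL) ?
 *		~0u : credit_table[code];	// ~0u: illustrative only
 */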

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}

static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;
	int fnd = 0;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			fnd = 1;
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);

	if (!fnd)
		return;

	free_qpn(qpt, qp->ibqp.qp_num);

	wait_event(qp->wait, !atomic_read(&qp->refcount));
}

/**
 * ipath_free_all_qps - remove all QPs from the table
 * @qpt: the QP table to empty
 */
void ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp, *nqp;
	u32 n;

	for (n = 0; n < qpt->max; n++) {
		spin_lock_irqsave(&qpt->lock, flags);
		qp = qpt->table[n];
		qpt->table[n] = NULL;
		spin_unlock_irqrestore(&qpt->lock, flags);

		while (qp) {
			nqp = qp->next;
			free_qpn(qpt, qp->ibqp.qp_num);
			if (!atomic_dec_and_test(&qp->refcount) ||
			    !ipath_destroy_qp(&qp->ibqp))
				ipath_dbg("QP memory leak!\n");
			qp = nqp;
		}
	}

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++) {
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	}
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
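
/*
 * Caller pattern (sketch, not new driver code): the reference taken
 * above pairs with the wait_event() in ipath_free_qp(), so a typical
 * caller drops the reference and wakes any waiter when done:
 *
 *	struct ipath_qp *qp = ipath_lookup_qpn(qpt, qpn);
 *
 *	if (qp) {
 *		... process the packet for this QP ...
 *		if (atomic_dec_and_test(&qp->refcount))
 *			wake_up(&qp->wait);
 *	}
 */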

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 */
static void ipath_reset_qp(struct ipath_qp *qp)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	qp->s_busy = 0;
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (qp->ibqp.qp_type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_wrid_valid = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	qp->s_wait_credit = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
	qp->r_reuse_sge = 0;
}

/**
 * ipath_error_qp - put a QP into an error state
 * @qp: the QP to put into an error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 */

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	ipath_dbg("QP%d/%d in error state\n",
		  qp->ibqp.qp_num, qp->remote_qpn);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	wc.vendor_err = 0;
	wc.byte_len = 0;
	wc.imm_data = 0;
	wc.qp = &qp->ibqp;
	wc.src_qp = 0;
	wc.wc_flags = 0;
	wc.pkey_index = 0;
	wc.slid = 0;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = 0;
	if (qp->r_wrid_valid) {
		qp->r_wrid_valid = 0;
		wc.wr_id = qp->r_wr_id;
		wc.opcode = IB_WC_RECV;
		wc.status = err;
		/* A receive completion belongs on the receive CQ. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

		wc.wr_id = wqe->wr.wr_id;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->s_hdrwords = 0;
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		wc.opcode = IB_WC_RECV;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

	return ret;
}

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	unsigned long flags;
	int lastwqe = 0;
	int ret;

	spin_lock_irqsave(&qp->s_lock, flags);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;

	/*
	 * Note: the chips support a maximum MTU of 4096, but the driver
	 * hasn't implemented this feature yet, so don't allow Path MTU
	 * values greater than 2048.
	 */
	if (attr_mask & IB_QP_PATH_MTU)
		if (attr->path_mtu > IB_MTU_2048)
			goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		ipath_reset_qp(qp);
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV)
		qp->remote_ah_attr = attr->ah_attr;

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	qp->state = new_state;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irqrestore(&qp->s_lock, flags);
	ret = -EINVAL;

bail:
	return ret;
}

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = 0;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
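
/*
 * Worked example (illustrative, not part of the original source): with
 * 5 RWQEs available, the binary search above settles on index 4, the
 * largest entry not exceeding the count (credit_table[4] == 4,
 * credit_table[5] == 6), so the advertised credit rounds down and the
 * AETH carries code 4 in its credit field.
 */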

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: unused by InfiniPath
 *
 * Returns the queue pair on success, otherwise returns an errno.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	struct ib_qp *ret;

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	if (init_attr->cap.max_send_sge +
	    init_attr->cap.max_recv_sge +
	    init_attr->cap.max_send_wr +
	    init_attr->cap.max_recv_wr == 0) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			sz += sizeof(*qp->r_sg_list) *
				srq->rq.max_sge;
		} else
			sz += sizeof(*qp->r_sg_list) *
				init_attr->cap.max_recv_sge;
		qp = kmalloc(sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long) qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			goto bail_rwq;
		}
		qp->ip = NULL;
		ipath_reset_qp(qp);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		int err;

		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_rwq;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_rwq;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	kfree(qp->ip);
bail_rwq:
	vfree(qp->r_rq.wq);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	qp->state = IB_QPS_ERR;
	spin_unlock_irqrestore(&qp->s_lock, flags);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	/* Stop the sending tasklet. */
	tasklet_kill(&qp->s_task);

	/* Make sure the QP isn't on the timeout list. */
	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/*
	 * Make sure that the QP is not in the QPN table so receive
	 * interrupts will discard packets for this QP.  XXX Also remove QP
	 * from multicast table.
	 */
	if (atomic_read(&qp->refcount) != 0)
		ipath_free_qp(&dev->qp_table, qp);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_sqerror_qp - put a QP's send queue into an error state
 * @qp: QP whose send queue will be put into an error state
 * @wc: the WC responsible for putting the QP in this state
 *
 * Flushes the send work queue.
 * The QP s_lock should be held and interrupts disabled.
 */

void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);

	ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
		  qp->ibqp.qp_num, qp->remote_qpn, wc->status);

	spin_lock(&dev->pending_lock);
	/* XXX What if it's already removed by the timeout code? */
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
	if (++qp->s_last >= qp->s_size)
		qp->s_last = 0;

	wc->status = IB_WC_WR_FLUSH_ERR;

	while (qp->s_last != qp->s_head) {
		wqe = get_swqe_ptr(qp, qp->s_last);
		wc->wr_id = wqe->wr.wr_id;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
	}
	qp->s_cur = qp->s_tail = qp->s_head;
	qp->state = IB_QPS_SQE;
}

/**
 * ipath_get_credit - process the credit field of an incoming AETH
 * @qp: the qp whose send credit state to update
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if (qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		tasklet_hi_schedule(&qp->s_task);
}
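
/*
 * Worked example (illustrative, not part of the original source): if
 * an AETH arrives with MSN 100 and credit code 7, credit_table[7] is
 * 12, so the new limit becomes (100 + 12) & IPATH_MSN_MASK == 112 and
 * sends with SSN up to the new s_lsn + 1 may proceed; 24-bit
 * wraparound is handled by ipath_cmp24().
 */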