/* drivers/infiniband/hw/mthca/mthca_qp.c */
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <asm/io.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};

enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};

enum {
	MTHCA_QP_ST_RC  = 0x0,
	MTHCA_QP_ST_UC  = 0x1,
	MTHCA_QP_ST_RD  = 0x2,
	MTHCA_QP_ST_UD  = 0x3,
	MTHCA_QP_ST_MLX = 0x7
};

enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};

enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}

static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
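/*
 * Note on the two helpers above: a QP's work queues live either in a
 * single physically contiguous ("direct") buffer or in a list of
 * pages.  In the direct case a WQE is found by plain offset
 * arithmetic; in the page-list case the upper bits of the byte offset
 * select the page and the low bits the offset within it.  Illustrative
 * example: with rq.wqe_shift == 6 (64-byte WQEs), receive WQE n starts
 * at byte offset n << 6 from the start of the queue buffer.
 */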
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}

void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}
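/*
 * The refcount dance above keeps the QP alive while its event handler
 * runs: the lookup and the increment happen together under
 * qp_table.lock, and mthca_free_qp() below waits on qp->wait until the
 * count drops back, so the handler can never race with QP destruction.
 */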
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}

static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}

static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}

static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
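/*
 * Note the access_flags &= IB_ACCESS_REMOTE_WRITE masking above:
 * incoming RDMA reads and remote atomics consume responder resources,
 * so when the QP has none (dest_rd_atomic == 0) only remote writes can
 * remain enabled.
 */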
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}

static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);
	ib_ah_attr->port_num      = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;
	u8 status;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state                = to_ib_qp_state(mthca_state);
	qp_attr->qp_state        = qp->state;
	qp_attr->path_mtu        = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state  =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey            = be32_to_cpu(context->qkey);
	qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num   = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);

	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout       = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt     = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry     = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout   = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap            = qp_attr->cap;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len-1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	u8 status;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY            &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event, &status);
	if (err)
		goto out_mailbox;
	if (status) {
		mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
			   cur_state, new_state, status);
		err = -EINVAL;
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}

static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}

static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
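/*
 * Worked example of the sizing above (illustrative numbers, assuming
 * the usual 16-byte next, remote-address and data segments): a
 * 128-byte send WQE (sq.wqe_shift == 7) on an RC QP leaves
 * 128 - 16 (next seg) - 16 (raddr seg) = 96 bytes of data space,
 * i.e. room for 96 / 16 = 6 scatter/gather entries.
 */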
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
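/*
 * Resulting buffer layout: all receive WQEs come first, and the send
 * WQEs start at send_wqe_offset, which is the end of the RQ area
 * rounded up to the SQ WQE stride.  Both strides are powers of two of
 * at least 64 bytes (the loops above start at shift 6), so a WQE is
 * always located by shifting rather than multiplication.
 */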
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}

static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}

static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
					      qp->rq.wqe_shift) | 1);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
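/*
 * Example of the max_gs calculation above (illustrative numbers): a
 * request for 60 bytes of inline data needs 60 + 4 header bytes,
 * rounded up to the 16-byte inline chunk size = 64 bytes, i.e. the
 * space of 64 / 16 = 4 gather entries, so sq.max_gs is raised to at
 * least 4 even if fewer SGEs were requested.
 */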
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}

static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
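/*
 * Taking the two CQ locks in increasing CQN order gives every caller
 * the same global lock ordering, which is what prevents an AB-BA
 * deadlock when one QP uses CQs (a, b) for send/receive and another
 * uses (b, a).  spin_lock_nested() only informs lockdep that holding
 * two CQ locks at once here is intentional.
 */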
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}

static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
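/*
 * "MLX" QPs back the special QP0/QP1 ports: the hardware sends their
 * packets raw, so the LRH/BTH/DETH headers are built here in software,
 * packed into a per-WQE slot of header_buf, and referenced by the
 * first data segment of the send WQE.  A remote QKey with the high bit
 * set means "use the QKey from the QP context instead", which is why
 * sqp->qkey is substituted above.
 */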
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
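/*
 * The overflow check above is optimistic: the first head - tail
 * computation runs unlocked and may race with completion processing,
 * but a stale tail can only make the queue look fuller than it is.
 * Only when the queue appears full is the count recomputed under the
 * CQ lock, which serializes against the code advancing wq->tail and
 * gives a reliable answer on the slow path.
 */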
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}

static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}

static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
}

static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
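/*
 * Posting on Tavor works by linked-list patching: each new WQE is
 * written with nda_op/ee_nds cleared, and only then is the *previous*
 * WQE updated to point at it (nda_op) and marked valid (ee_nds), with
 * a wmb() in between so the HCA never follows a link to a half-written
 * descriptor.  Only the link update for the first WQE of a batch sets
 * MTHCA_NEXT_DBD, since the single doorbell written at "out:"
 * announces the whole chain.
 */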
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
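/*
 * The receive doorbell carries the WQE count in its low byte (note
 * the qp->qpn << 8 | nreq encoding above), so a long posting run is
 * flushed every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests: inside the
 * loop a full batch is rung with a count of zero and nreq restarts
 * from zero, and the doorbell at "out:" then announces whatever
 * remainder is left.
 */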
1925 int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1926 struct ib_send_wr **bad_wr)
1928 struct mthca_dev *dev = to_mdev(ibqp->device);
1929 struct mthca_qp *qp = to_mqp(ibqp);
1930 u32 dbhi;
1931 void *wqe;
1932 void *prev_wqe;
1933 unsigned long flags;
1934 int err = 0;
1935 int nreq;
1936 int i;
1937 int size;
1939 * f0 and size0 are only used if nreq != 0, and they will
1940 * always be initialized the first time through the main loop
1941 * before nreq is incremented. So nreq cannot become non-zero
1942 * without initializing f0 and size0, and they are in fact
1943 * never used uninitialized.
1945 int uninitialized_var(size0);
1946 u32 uninitialized_var(f0);
1947 int ind;
1948 u8 op0 = 0;
1950 spin_lock_irqsave(&qp->sq.lock, flags);
1952 /* XXX check that state is OK to post send */
1954 ind = qp->sq.head & (qp->sq.max - 1);
1956 for (nreq = 0; wr; ++nreq, wr = wr->next) {
1957 if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
1958 nreq = 0;
1960 dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
1961 ((qp->sq.head & 0xffff) << 8) | f0 | op0;
1963 qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
1966 * Make sure that descriptors are written before
1967 * doorbell record.
1969 wmb();
1970 *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);
1973 * Make sure doorbell record is written before we
1974 * write MMIO send doorbell.
1976 wmb();
1978 mthca_write64(dbhi, (qp->qpn << 8) | size0,
1979 dev->kar + MTHCA_SEND_DOORBELL,
1980 MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
1983 if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1984 mthca_err(dev, "SQ %06x full (%u head, %u tail,"
1985 " %d max, %d nreq)\n", qp->qpn,
1986 qp->sq.head, qp->sq.tail,
1987 qp->sq.max, nreq);
1988 err = -ENOMEM;
1989 *bad_wr = wr;
1990 goto out;
1993 wqe = get_send_wqe(qp, ind);
1994 prev_wqe = qp->sq.last;
1995 qp->sq.last = wqe;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0) |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;
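
		/*
		 * Each transport needs different extra segments after the
		 * control segment: RC atomics take a remote-address plus an
		 * atomic segment, RDMA operations a remote-address segment,
		 * UD an address-vector segment, and MLX (special QPs) an
		 * inline packet header built by build_mlx_header().
		 */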
		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, wr);
			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}
		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}
		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}
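
		/*
		 * Send wrids are stored after the rq.max receive entries in
		 * the QP's single wrid array.
		 */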
		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}
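
		/*
		 * Now link the new WQE in: nda_op points the previous WQE at
		 * this one, and the wmb() makes sure the whole WQE is written
		 * before ee_nds makes it visible to the HCA.
		 */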
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}
out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);
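
	/*
	 * Mem-free receive queues need no WQE chaining or MMIO doorbell:
	 * posting just writes the WQEs and then updates the doorbell
	 * record with the new head index.
	 */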
	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}
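
		/*
		 * Terminate a scatter list shorter than max_gs with an
		 * invalid-lkey sentinel segment.
		 */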
		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);
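
	/*
	 * Report whether this WQE was counted in a doorbell (DBD bit), and
	 * follow its link: the next WQE's address lives in nda_op and its
	 * size in the low six bits of ee_nds.  A zero size means the chain
	 * ends here.
	 */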
	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
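
	/*
	 * QP numbers are 24 bits wide, so (1 << 24) - 1 is the QPN mask;
	 * everything below sqp_start + MTHCA_MAX_PORTS * 2 stays reserved
	 * for firmware and the special QPs.
	 */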
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}
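
	/*
	 * Tell the HCA where the special QPs live: the SMI QPs for the two
	 * ports at sqp_start and sqp_start + 1, the GSI QPs right after.
	 */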
	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}