/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST  = 0,
	MTHCA_QP_STATE_INIT = 1,
	MTHCA_QP_STATE_RTR  = 2,
	MTHCA_QP_STATE_RTS  = 3,
	MTHCA_QP_STATE_SQE  = 4,
	MTHCA_QP_STATE_SQD  = 5,
	MTHCA_QP_STATE_ERR  = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     rq_size_stride;		/* Reserved on Tavor */
	u8     sq_size_stride;		/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 next_send_psn;
	__be32 snd_wqe_base_l;		/* Next send WQE on Tavor */
	__be32 snd_db_index;		/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 rnr_nextrecvpsn;
	__be32 rcv_wqe_base_l;		/* Next recv WQE on Tavor */
	__be32 rcv_db_index;		/* (debugging only entries) */
	__be16 rq_wqe_counter;		/* reserved on Tavor */
	__be16 sq_wqe_counter;		/* reserved on Tavor */
} __attribute__((packed));
struct mthca_qp_param {
	__be32 opt_param_mask;
	struct mthca_qp_context context;
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
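/*
 * The four QPNs starting at qp_table.sqp_start are reserved for the
 * special QPs (QP0 and QP1 on each of the two ports); the first two of
 * those are the QP0s.
 */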
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
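/*
 * WQEs live either in one physically contiguous ("direct") buffer or in
 * a list of pages; these helpers turn a WQE index into a kernel virtual
 * address in either layout.  Send WQEs start at send_wqe_offset, after
 * the receive queue.
 */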
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->last_comp = wq->max - 1;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
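/*
 * state_table[cur][new] describes each legal QP state transition: the
 * firmware transition opcode to use plus, per transport, the attribute
 * bits the caller must supply (req_param) and may supply (opt_param).
 */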
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR] = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC),
				[RC]  = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = IB_QP_SQ_PSN,
				[RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY | IB_QP_SQ_PSN |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD] = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[UC]  = (IB_QP_AV | IB_QP_CUR_STATE |
					 IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_MAX_QP_RD_ATOMIC |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_CUR_STATE | IB_QP_ALT_PATH |
					 IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[UC]  = IB_QP_CUR_STATE,
				[RC]  = (IB_QP_CUR_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
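/*
 * mthca_modify_qp() validates the requested transition against
 * state_table, builds a mthca_qp_param (opt_param_mask plus the hardware
 * QP context) in a command mailbox, and then executes the MODIFY_QP
 * firmware command for that transition.
 */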
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	    attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		return -EINVAL;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags      = cpu_to_be32((to_mthca_state(new_state) << 28) |
					     (to_mthca_st(qp->transport) << 16));
	qp_context->flags     |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = !!attr->ah_attr.static_rate;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SRE           |
					     MTHCA_QP_BIT_SWE           |
					     MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_rd_atomic ?
						       ffs(attr->max_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_dest_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_dest_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_dest_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_dest_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		mthca_wq_init(&qp->rq);

		if (mthca_is_memfree(dev)) {
			*qp->rq.db = 0;
			*qp->sq.db = 0;
		}
	}

	return err;
}

/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;
	case UD:
		if (mthca_is_memfree(dev))
			size += sizeof (struct mthca_arbel_ud_seg);
		else
			size += sizeof (struct mthca_tavor_ud_seg);
		break;
	default:
		/* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);
	}

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err) {
		kfree(qp->wrid);
		return err;
	}

	return 0;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
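/*
 * On mem-free (Arbel) HCAs the QP context, extended QP context and RDB
 * entries live in ICM tables in host memory, so the relevant table
 * chunks have to be mapped (and later unmapped) around the lifetime of
 * each QP.
 */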
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return ret;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
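/*
 * Common initialization for regular and special QPs: reference counting,
 * work queue setup, ICM mapping and WQE buffer allocation.  On mem-free
 * HCAs the WQE chains are also pre-initialized here (nda_op links and
 * invalid-lkey scatter entries).
 */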
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_qp *qp)
{
	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  > dev->limits.max_wqes ||
	    cap->max_recv_wr  > dev->limits.max_wqes ||
	    cap->max_send_sge > dev->limits.max_sg   ||
	    cap->max_recv_sge > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
	    qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
		return -EINVAL;

	return 0;
}
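/*
 * Allocate a regular (non-special) QP: pick a QPN from the allocator, do
 * the common initialization and hash the QP into qp_table.qp so that
 * completion and async event handling can find it by QPN.
 */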
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
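/*
 * Special QPs (QP0/QP1) use the MLX transport: their UD headers are
 * built in software, so a DMA-coherent header buffer with one
 * MTHCA_UD_HEADER_SIZE slot per send WQE is allocated here.
 */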
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err = 0;

	err = mthca_set_qp_size(dev, cap, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	}

	mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
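/*
 * Ring overflow check: head and tail are free-running counters, so the
 * queue is full when head - tail reaches max.  If the fast check fails,
 * the CQ lock is taken to synchronize with completion processing (which
 * advances the tail) before rechecking.
 */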
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
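/*
 * Tavor posting model: each WQE is written with its "next" segment
 * cleared, then the previous WQE's nda_op/ee_nds words are updated to
 * point at it, and finally one MMIO doorbell covering the whole chain is
 * written to MTHCA_SEND_DOORBELL / MTHCA_RECEIVE_DOORBELL.
 */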
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
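/*
 * The Arbel (mem-free) posting functions differ from the Tavor ones
 * mainly in indexing the queue by the free-running head counter and in
 * updating a doorbell record in memory (qp->sq.db / qp->rq.db) before
 * ringing any MMIO doorbell, with memory barriers ordering the writes.
 */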
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24)                  |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
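/*
 * mthca_free_err_wqe() is used while flushing a QP in the error state:
 * it reports whether the WQE at @index owned a doorbell (DBD bit) and,
 * unless the WQE ends its chain, computes the next-WQE word so the CQ
 * code can keep walking the chain.
 */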
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return 0;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}