/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include <rdma/uverbs_ioctl.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};

enum {
	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __packed;
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
} __packed;
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __packed;
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
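/*
 * The table above maps IB work-request opcodes to the hardware opcodes
 * that the post-send paths below OR into the low bits of a WQE's nda_op
 * word when linking a new WQE onto the send queue.
 */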
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event %d for bogus QP %08x\n",
			   event_type, qpn);
		return;
	}

	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:      return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:     return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:      return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:      return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:      return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:      return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:      return IB_QPS_ERR;
	default:                      return -1;
	}
}
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:  return IB_MIG_ARMED;
	case 1:  return IB_MIG_REARM;
	case 3:  return IB_MIG_MIGRATED;
	default: return -1;
	}
}
static int to_ib_qp_access_flags(int mthca_flags)
{
	int ib_flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}
static void to_rdma_ah_attr(struct mthca_dev *dev,
			    struct rdma_ah_attr *ah_attr,
			    struct mthca_qp_path *path)
{
	u8 port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	memset(ah_attr, 0, sizeof(*ah_attr));

	if (port_num == 0 || port_num > dev->limits.num_ports)
		return;
	ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num);
	rdma_ah_set_port_num(ah_attr, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(path->rlid));
	rdma_ah_set_sl(ah_attr, be32_to_cpu(path->sl_tclass_flowlabel) >> 28);
	rdma_ah_set_path_bits(ah_attr, path->g_mylmc & 0x7f);
	rdma_ah_set_static_rate(ah_attr,
				mthca_rate_to_ib(dev,
						 path->static_rate & 0xf,
						 port_num));
	if (path->g_mylmc & (1 << 7)) {
		u32 tc_fl = be32_to_cpu(path->sl_tclass_flowlabel);

		rdma_ah_set_grh(ah_attr, NULL,
				tc_fl & 0xfffff,
				path->mgid_index &
				(dev->limits.gid_table_len - 1),
				path->hop_limit,
				(tc_fl >> 20) & 0xff);
		rdma_ah_set_dgid_raw(ah_attr, path->rgid);
	}
}
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state                = to_ib_qp_state(mthca_state);
	qp_attr->qp_state        = qp->state;
	qp_attr->path_mtu        = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state  =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey            = be32_to_cpu(context->qkey);
	qp_attr->rq_psn          = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn          = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->transport == RC || qp->transport == UC) {
		to_rdma_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_rdma_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num =
			rdma_ah_get_port_num(&qp_attr->alt_ah_attr);
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout     = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt   = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry   = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state        = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap         = qp_attr->cap;
	qp_init_attr->sq_sig_type = qp->sq_policy;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mthca_path_set(struct mthca_dev *dev, const struct rdma_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = rdma_ah_get_path_bits(ah) & 0x7f;
	path->rlid        = cpu_to_be16(rdma_ah_get_dlid(ah));
	path->static_rate = mthca_get_rate(dev, rdma_ah_get_static_rate(ah),
					   port);

	if (rdma_ah_get_ah_flags(ah) & IB_AH_GRH) {
		const struct ib_global_route *grh = rdma_ah_read_grh(ah);

		if (grh->sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  grh->sgid_index,
				  dev->limits.gid_table_len - 1);
			return -1;
		}

		path->g_mylmc   |= 1 << 7;
		path->mgid_index = grh->sgid_index;
		path->hop_limit  = grh->hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((rdma_ah_get_sl(ah) << 28) |
				    (grh->traffic_class << 20) |
				    (grh->flow_label));
		memcpy(path->rgid, grh->dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(rdma_ah_get_sl(ah) <<
							28);

	return 0;
}
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state,
			     enum ib_qp_state new_state,
			     struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mthca_ucontext, ibucontext);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags  = cpu_to_be32((to_mthca_state(new_state) << 28) |
					 (to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}
	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page = cpu_to_be32(context->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;

		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);

		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   rdma_ah_get_port_num(&attr->alt_ah_attr)))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}
	qp_context->pd = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(qp->sqp, attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->rq.db = 0;
			*qp->sq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	return err;
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
		return -EOPNOTSUPP;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state,
				udata);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp,
			       struct ib_udata *udata)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (udata)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64),
				 GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp,
				 struct ib_udata *udata)
{
	int ret;
	int i;
	struct mthca_next_seg *next;

	qp->refcount = 1;
	init_waitqueue_head(&qp->wait);
	mutex_init(&qp->mutex);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_reset(&qp->sq);
	mthca_wq_reset(&qp->rq);

	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp, udata);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (udata)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	} else {
		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = htonl((((i + 1) % qp->rq.max) <<
					      qp->rq.wqe_shift) | 1);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
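/*
 * Note on the max_send_sge calculation below: userspace QPs may post
 * inline data, which is written in MTHCA_INLINE_CHUNK_SIZE pieces with
 * MTHCA_INLINE_HEADER_SIZE of per-WQE overhead, so the gather limit is
 * raised to whatever is needed for cap->max_inline_data to fit in the
 * equivalent number of data segments.
 */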
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp,
		   struct ib_udata *udata)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp, udata);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
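/*
 * When the send and receive CQs differ, both locks are always taken in
 * a fixed order (lower CQN first), so two QPs sharing the same pair of
 * CQs cannot deadlock against each other.
 */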
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    u32 port,
		    struct mthca_qp *qp,
		    struct ib_udata *udata)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	qp->transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->sqp->header_buf_size = qp->sq.max * MTHCA_UD_HEADER_SIZE;
	qp->sqp->header_buf =
		dma_alloc_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
				   &qp->sqp->header_dma, GFP_KERNEL);
	if (!qp->sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	qp->port      = port;
	qp->qpn       = mqpn;
	qp->transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp, udata);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

err_out:
	dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
			  qp->sqp->header_buf, qp->sqp->header_dma);

	return err;
}
static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int c;

	spin_lock_irq(&dev->qp_table.lock);
	c = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return c;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev, qp->sqp->header_buf_size,
				  qp->sqp->header_buf, qp->sqp->header_dma);
	}

	mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_qp *qp, int ind,
			    const struct ib_ud_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	struct mthca_sqp *sqp = qp->sqp;
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->ah)), 0, 0, 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!qp->ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	switch (wr->wr.opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->wr.ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !qp->ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
	if (!qp->ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, qp->port, sqp->pkey_index,
				   &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, qp->port, wr->pkey_index,
				   &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(qp->ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(qp->ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
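/*
 * The completion path advances wq->tail, so if the quick head/tail
 * check below suggests the work queue is full, the check is repeated
 * under the CQ lock before an overflow is reported.
 */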
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   const struct ib_atomic_wr *wr)
{
	if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->swap);
		aseg->compare  = cpu_to_be64(wr->compare_add);
	} else {
		aseg->swap_add = cpu_to_be64(wr->compare_add);
		aseg->compare  = 0;
	}
}
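/*
 * UD address vectors: on Tavor the WQE carries the lkey and DMA address
 * of the address vector (the HCA fetches it from memory), while on
 * Arbel (mem-free) the address vector is copied inline into the WQE.
 */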
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     const struct ib_ud_wr *wr)
{
	useg->lkey    = cpu_to_be32(to_mah(wr->ah)->key);
	useg->av_addr = cpu_to_be64(to_mah(wr->ah)->avdma);
	useg->dqpn    = cpu_to_be32(wr->remote_qpn);
	useg->qkey    = cpu_to_be32(wr->remote_qkey);
}
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     const struct ib_ud_wr *wr)
{
	memcpy(useg->av, to_mah(wr->ah)->av, MTHCA_AV_SIZE);
	useg->dqpn = cpu_to_be32(wr->remote_qpn);
	useg->qkey = cpu_to_be32(wr->remote_qkey);
}
int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int size0;
	u32 f0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(
				dev, qp, ind, ud_wr(wr),
				wqe - sizeof(struct mthca_next_seg), wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int size0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int size0;
	u32 f0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
					      atomic_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, atomic_wr(wr));
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
					      rdma_wr(wr)->rkey);
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, ud_wr(wr));
			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(
				dev, qp, ind, ud_wr(wr),
				wqe - sizeof(struct mthca_next_seg), wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}
);