/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72 /* largest UD header possible */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,
	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,
	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	u32 port_pkey;
	u8  rnr_retry;
	u8  g_mylmc;
	u16 rlid;
	u8  ackto;
	u8  mgid_index;
	u8  static_rate;
	u8  hop_limit;
	u32 sl_tclass_flowlabel;
	u8  rgid[16];
} __attribute__((packed));

struct mthca_qp_context {
	u32 flags;
	u32 tavor_sched_queue;	/* Reserved on Arbel */
	u8  mtu_msgmax;
	u8  rq_size_stride;	/* Reserved on Tavor */
	u8  sq_size_stride;	/* Reserved on Tavor */
	u8  rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	u32 usr_page;
	u32 local_qpn;
	u32 remote_qpn;
	u32 reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	u32 rdd;
	u32 pd;
	u32 wqe_base;
	u32 wqe_lkey;
	u32 params1;
	u32 reserved2;
	u32 next_send_psn;
	u32 cqn_snd;
	u32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	u32 snd_db_index;	/* (debugging only entries) */
	u32 last_acked_psn;
	u32 ssn;
	u32 params2;
	u32 rnr_nextrecvpsn;
	u32 ra_buff_indx;
	u32 cqn_rcv;
	u32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	u32 rcv_db_index;	/* (debugging only entries) */
	u32 qkey;
	u32 srqn;
	u32 rmsn;
	u16 rq_wqe_counter;	/* reserved on Tavor */
	u16 sq_wqe_counter;	/* reserved on Tavor */
	u32 reserved3[18];
} __attribute__((packed));

struct mthca_qp_param {
	u32 opt_param_mask;
	u32 reserved1;
	struct mthca_qp_context context;
	u32 reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};

enum {
	MTHCA_NEXT_DBD       = 1 << 7,
	MTHCA_NEXT_FENCE     = 1 << 6,
	MTHCA_NEXT_CQ_UPDATE = 1 << 3,
	MTHCA_NEXT_EVENT_GEN = 1 << 2,
	MTHCA_NEXT_SOLICIT   = 1 << 1,

	MTHCA_MLX_VL15       = 1 << 17,
	MTHCA_MLX_SLR        = 1 << 16
};

enum {
	MTHCA_INVAL_LKEY = 0x100
};
struct mthca_next_seg {
	u32 nda_op;	/* [31:6] next WQE [4:0] next opcode */
	u32 ee_nds;	/* [31:8] next EE  [7] DBD [6] F [5:0] next WQE size */
	u32 flags;	/* [3] CQ [2] Event [1] Solicit */
	u32 imm;	/* immediate data */
};

struct mthca_tavor_ud_seg {
	u32 reserved1;
	u32 lkey;
	u64 av_addr;
	u32 reserved2[4];
	u32 dqpn;
	u32 qkey;
	u32 reserved3[2];
};

struct mthca_arbel_ud_seg {
	u32 av[8];
	u32 dqpn;
	u32 qkey;
	u32 reserved[2];
};

struct mthca_bind_seg {
	u32 flags;	/* [31] Atomic [30] rem write [29] rem read */
	u32 reserved;
	u32 new_rkey;
	u32 lkey;
	u64 addr;
	u64 length;
};

struct mthca_raddr_seg {
	u64 raddr;
	u32 rkey;
	u32 reserved;
};

struct mthca_atomic_seg {
	u64 swap_add;
	u64 compare;
};

struct mthca_data_seg {
	u32 byte_count;
	u32 lkey;
	u64 addr;
};

struct mthca_mlx_seg {
	u32 nda_op;
	u32 nds;
	u32 flags;	/* [17] VL15 [16] SLR [14:12] static rate
			   [11:8] SL [3] C [2] E */
	u16 rlid;
	u16 vcrc;
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}

static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}

static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
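/*
 * Worked example for the WQE addressing above (hypothetical values): if
 * rq.wqe_shift == 7 (128-byte receive WQEs) and PAGE_SIZE == 4096, then
 * receive WQE n == 40 starts at byte offset 40 << 7 == 5120.  In a
 * direct buffer that is simply direct.buf + 5120; in an indirect buffer
 * it is page_list[5120 >> PAGE_SHIFT].buf, i.e. page 1, at offset
 * 5120 & (PAGE_SIZE - 1) == 1024 within that page.
 */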
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}

enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };

static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY),
				[RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[RC]  = (IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[RC]  = (IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[RC]  = (IB_QP_TIMEOUT | IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
					 IB_QP_SQ_PSN | IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[RC]  = (IB_QP_ACCESS_FLAGS | IB_QP_ALT_PATH |
					 IB_QP_PATH_MIG_STATE | IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_MIN_RNR_TIMER | IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[RC]  = (IB_QP_AV | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC |
					 IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_CUR_STATE |
					 IB_QP_ALT_PATH | IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX | IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[RC]  = (IB_QP_CUR_STATE | IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.enable_1x = 1;
	param.enable_4x = 1;
	param.vl_cap    = dev->limits.vl_cap;
	param.mtu_cap   = dev->limits.mtu_cap;
	param.gid_cap   = dev->limits.gid_table_len;
	param.pkey_cap  = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	void *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}

	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param | IB_QP_STATE));
		return -EINVAL;
	}

	mailbox = kmalloc(sizeof (*qp_param) + MTHCA_CMD_MAILBOX_EXTRA, GFP_KERNEL);
	if (!mailbox)
		return -ENOMEM;
	qp_param = MAILBOX_ALIGN(mailbox);
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;

	if (mthca_is_memfree(dev)) {
		qp_context->rq_size_stride =
			((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
		qp_context->sq_size_stride =
			((ffs(qp->sq.max) - 1) << 3) | (qp->sq.wqe_shift - 4);
	}

	/* leave arbel_sched_queue as 0 */

	qp_context->usr_page  = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->pri_path.rnr_retry = attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		qp_context->pri_path.g_mylmc     = attr->ah_attr.src_path_bits & 0x7f;
		qp_context->pri_path.rlid        = cpu_to_be16(attr->ah_attr.dlid);
		qp_context->pri_path.static_rate = (!!attr->ah_attr.static_rate) << 3;
		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
			qp_context->pri_path.g_mylmc |= 1 << 7;
			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
			qp_context->pri_path.hop_limit  = attr->ah_attr.grh.hop_limit;
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32((attr->ah_attr.sl << 28)                |
					    (attr->ah_attr.grh.traffic_class << 20) |
					    (attr->ah_attr.grh.flow_label));
			memcpy(qp_context->pri_path.rgid,
			       attr->ah_attr.grh.dgid.raw, 16);
		} else {
			qp_context->pri_path.sl_tclass_flowlabel =
				cpu_to_be32(attr->ah_attr.sl << 28);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SRE           |
					   MTHCA_QP_BIT_SWE           |
					   MTHCA_QP_BIT_SAE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		qp_context->params1 |= cpu_to_be32(min(attr->max_dest_rd_atomic ?
						       ffs(attr->max_dest_rd_atomic) - 1 : 0,
						       7) << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_ACCESS_FLAGS) {
		/*
		 * Only enable RDMA/atomics if we have responder
		 * resources set to a non-zero value.
		 */
		if (qp->resp_depth) {
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);
		}

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);

		qp->atomic_rd_en = attr->qp_access_flags;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		u8 rra_max;

		if (qp->resp_depth && !attr->max_rd_atomic) {
			/*
			 * Lowering our responder resources to zero.
			 * Turn off RDMA/atomics as responder.
			 * (RWE/RRE/RAE in params2 already zero)
			 */
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		if (!qp->resp_depth && attr->max_rd_atomic) {
			/*
			 * Increasing our responder resources from
			 * zero.  Turn on RDMA/atomics as appropriate.
			 */
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_WRITE ?
					    MTHCA_QP_BIT_RWE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_READ ?
					    MTHCA_QP_BIT_RRE : 0);
			qp_context->params2 |=
				cpu_to_be32(qp->atomic_rd_en & IB_ACCESS_REMOTE_ATOMIC ?
					    MTHCA_QP_BIT_RAE : 0);

			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
								MTHCA_QP_OPTPAR_RRE |
								MTHCA_QP_OPTPAR_RAE);
		}

		for (rra_max = 0;
		     1 << rra_max < attr->max_rd_atomic &&
			     rra_max < dev->qp_table.rdb_shift;
		     ++rra_max)
			; /* nothing */

		qp_context->params2      |= cpu_to_be32(rra_max << 21);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);

		qp->resp_depth = attr->max_rd_atomic;
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, qp_param, 0, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err)
		qp->state = new_state;

	kfree(mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we are moving QP0 to RTR, bring the IB link up; if we
	 * are moving QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	return err;
}
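/*
 * Illustrative sketch, not used by the driver itself: how a kernel
 * consumer that owns an ib_qp might walk a UD QP through
 * RESET->INIT->RTR->RTS with ib_modify_qp(), supplying the attribute
 * sets the state table above expects for the UD transport.  The port,
 * pkey index, qkey and initial send PSN are assumed to come from the
 * caller.
 */
static __attribute__((unused)) int example_ud_qp_to_rts(struct ib_qp *qp,
							 u8 port, u16 pkey_index,
							 u32 qkey, u32 sq_psn)
{
	struct ib_qp_attr attr;
	int err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state   = IB_QPS_INIT;
	attr.pkey_index = pkey_index;
	attr.port_num   = port;
	attr.qkey       = qkey;
	err = ib_modify_qp(qp, &attr,
			   IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_QKEY);
	if (err)
		return err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_RTR;
	err = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (err)
		return err;

	memset(&attr, 0, sizeof attr);
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn   = sq_psn;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}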
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;
	int shift;
	int i;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = sizeof (struct mthca_next_seg) +
		qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;
	case UD:
		if (mthca_is_memfree(dev))
			size += sizeof (struct mthca_arbel_ud_seg);
		else
			size += sizeof (struct mthca_tavor_ud_seg);
		break;
	default:
		/* bind seg is as big as atomic + raddr segs */
		size += sizeof (struct mthca_bind_seg);
	}

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);
	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
		qp->is_direct = 1;
		npages = 1;
		shift = get_order(size) + PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
				  size, shift);

		qp->queue.direct.buf = pci_alloc_consistent(dev->pdev, size, &t);
		if (!qp->queue.direct.buf)
			goto err_out;

		pci_unmap_addr_set(&qp->queue.direct, mapping, t);

		memset(qp->queue.direct.buf, 0, size);

		while (t & ((1 << shift) - 1)) {
			--shift;
			npages *= 2;
		}

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out_free;

		for (i = 0; i < npages; ++i)
			dma_list[i] = t + i * (1 << shift);
	} else {
		qp->is_direct = 0;
		npages = size / PAGE_SIZE;
		shift = PAGE_SHIFT;

		if (0)
			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);

		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
		if (!dma_list)
			goto err_out;

		qp->queue.page_list = kmalloc(npages *
					      sizeof *qp->queue.page_list,
					      GFP_KERNEL);
		if (!qp->queue.page_list)
			goto err_out;

		for (i = 0; i < npages; ++i) {
			qp->queue.page_list[i].buf =
				pci_alloc_consistent(dev->pdev, PAGE_SIZE, &t);
			if (!qp->queue.page_list[i].buf)
				goto err_out_free;

			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);

			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
			dma_list[i] = t;
		}
	}

	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
				  npages, 0, size,
				  MTHCA_MPT_FLAG_LOCAL_WRITE |
				  MTHCA_MPT_FLAG_LOCAL_READ,
				  &qp->mr);
	if (err)
		goto err_out_free;

	kfree(dma_list);
	return 0;

 err_out_free:
	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else
		for (i = 0; i < npages; ++i) {
			if (qp->queue.page_list[i].buf)
				pci_free_consistent(dev->pdev, PAGE_SIZE,
						    qp->queue.page_list[i].buf,
						    pci_unmap_addr(&qp->queue.page_list[i],
								   mapping));
		}

 err_out:
	kfree(qp->wrid);
	kfree(dma_list);
	return err;
}
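/*
 * Worked example for the sizing logic above (hypothetical values): with
 * rq.max_gs == 4, a receive WQE needs sizeof(struct mthca_next_seg) +
 * 4 * sizeof(struct mthca_data_seg) == 16 + 64 == 80 bytes, so
 * rq.wqe_shift ends up as 7 (a 128-byte stride, the next power of two).
 * If rq.max == 64 and sq.wqe_shift is also 7, the send queue then
 * starts at send_wqe_offset == ALIGN(64 << 7, 128) == 8192 bytes into
 * the WQE buffer.
 */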
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int ret = 0;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;

		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0) {
			ret = -ENOMEM;
			goto err_rdb;
		}

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			ret = -ENOMEM;
			goto err_rq_db;
		}
	}

	return 0;

err_rq_db:
	mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);

err_rdb:
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}

static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
		mthca_table_put(dev, dev->qp_table.rdb_table,
				qp->qpn << dev->qp_table.rdb_shift);
		mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
		mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
	}
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
	wq->last      = NULL;
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_alloc_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_free_memfree(dev, qp);
		return ret;
	}

	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	return 0;
}
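/*
 * Example of the receive ring linking above (hypothetical sizes): with
 * rq.max == 8 and rq.wqe_shift == 7, each WQE's nda_op holds the byte
 * offset of the next WQE, so WQE 2 points at offset 3 << 7 == 384 and
 * the last WQE (i == 7) wraps back to offset 0 thanks to the
 * "& (qp->rq.max - 1)" mask.
 */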
static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; 1 << i < qp->rq.max; ++i)
		; /* nothing */

	qp->rq.max = 1 << i;

	for (i = 0; 1 << i < qp->sq.max; ++i)
		; /* nothing */

	qp->sq.max = 1 << i;
}
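/*
 * Example of the rounding above (hypothetical values): a requested
 * rq.max of 100 leaves the loop with i == 7 (1 << 6 == 64 < 100 but
 * 1 << 7 == 128 >= 100), so rq.max is rounded up to 128.  A value that
 * is already a power of two, e.g. 64, is left unchanged.
 */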
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct mthca_qp *qp)
{
	int err;

	mthca_align_qp_size(dev, qp);

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	int err = 0;
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;

	mthca_align_qp_size(dev, &sqp->qp);

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port         = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	int size;
	int i;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);

	mthca_free_mr(dev, &qp->mr);

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	if (qp->is_direct) {
		pci_free_consistent(dev->pdev, size,
				    qp->queue.direct.buf,
				    pci_unmap_addr(&qp->queue.direct, mapping));
	} else {
		for (i = 0; i < size / PAGE_SIZE; ++i) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    qp->queue.page_list[i].buf,
					    pci_unmap_addr(&qp->queue.page_list[i],
							   mapping));
		}
	}

	kfree(qp->wrid);

	mthca_free_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	}

	mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;

	ib_ud_header_init(256, /* assume a MAD */
			  sqp->ud_header.grh_present,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid == 0xffff ?
				   MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == 0xffff)
		sqp->ud_header.lrh.source_lid = 0xffff;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index,
				   &sqp->ud_header.bth.pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index,
				   &sqp->ud_header.bth.pkey);
	cpu_to_be16s(&sqp->ud_header.bth.pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
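/*
 * Example of the overflow test above (hypothetical counters): head and
 * tail are free-running, so with wq->max == 16, wq->head == 20 and
 * wq->tail == 6 there are cur == 14 WQEs outstanding.  Posting
 * nreq == 1 still fits (14 + 1 < 16), while nreq == 2 reports the queue
 * full (14 + 2 >= 16).  The re-check under the CQ lock picks up any
 * completions that advanced wq->tail concurrently.
 */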
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
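/*
 * Illustrative sketch, not part of the driver: how a consumer might post
 * a single signalled send work request through the verbs layer, which
 * lands in mthca_tavor_post_send() (or the Arbel variant below) on mthca
 * hardware.  The DMA address, length and lkey are assumed to come from a
 * memory region the caller has already registered.
 */
static __attribute__((unused)) int example_post_one_send(struct ib_qp *qp,
							  u64 dma_addr, u32 len,
							  u32 lkey, u64 wr_id)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = len,
		.lkey   = lkey,
	};
	struct ib_send_wr wr = {
		.wr_id      = wr_id,
		.sg_list    = &sge,
		.num_sge    = 1,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	return ib_post_send(qp, &wr, &bad_wr);
}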
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->sq.head, qp->sq.tail,
				  qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += sizeof (struct mthca_raddr_seg) / 16 +
					sizeof (struct mthca_atomic_seg);
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		if (likely(prev_wqe)) {
			((struct mthca_next_seg *) prev_wqe)->nda_op =
				cpu_to_be32(((ind << qp->sq.wqe_shift) +
					     qp->send_wqe_offset) |
					    mthca_opcode[wr->opcode]);
			wmb();
			((struct mthca_next_seg *) prev_wqe)->ee_nds =
				cpu_to_be32(MTHCA_NEXT_DBD | size);
		}

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		u32 doorbell[2];

		doorbell[0] = cpu_to_be32((nreq << 24)                  |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
				  " %d max, %d nreq)\n", qp->qpn,
				  qp->rq.head, qp->rq.tail,
				  qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
		       int index, int *dbd, u32 *new_wqe)
{
	struct mthca_next_seg *next;

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	if (mthca_is_memfree(dev))
		*dbd = 1;
	else
		*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;

	return 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
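/*
 * Worked example for the special QP numbering above (hypothetical
 * reserved_qps): with dev->limits.reserved_qps == 17, sqp_start becomes
 * (17 + 1) & ~1UL == 18, so QP0 for ports 1 and 2 uses QPNs 18 and 19
 * and QP1 uses QPNs 20 and 21; with reserved_qps == 16, sqp_start stays
 * 16.  is_qp0() and is_sqp() above rely on exactly this layout.
 */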
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_alloc_cleanup(&dev->qp_table.alloc);
}