/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/hash.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
#include <rdma/ib_verbs.h>

#include "hfi.h"
#include "qp.h"
#include "trace.h"
#include "verbs_txreq.h"

unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent);
static void iowait_wakeup(struct iowait *wait, int reason);
static void iowait_sdma_drained(struct iowait *wait);
static void qp_pio_drain(struct rvt_qp *qp);

const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
[IB_WR_RDMA_WRITE] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_RDMA_READ] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC,
},

[IB_WR_ATOMIC_CMP_AND_SWP] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_ATOMIC_FETCH_AND_ADD] = {
	.length = sizeof(struct ib_atomic_wr),
	.qpt_support = BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_ATOMIC | RVT_OPERATION_ATOMIC_SGE,
},

[IB_WR_RDMA_WRITE_WITH_IMM] = {
	.length = sizeof(struct ib_rdma_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_SEND_WITH_IMM] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UD) | BIT(IB_QPT_SMI) | BIT(IB_QPT_GSI) |
		       BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
},

[IB_WR_REG_MR] = {
	.length = sizeof(struct ib_reg_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_LOCAL_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_UC) | BIT(IB_QPT_RC),
	.flags = RVT_OPERATION_LOCAL,
},

[IB_WR_SEND_WITH_INV] = {
	.length = sizeof(struct ib_send_wr),
	.qpt_support = BIT(IB_QPT_RC),
},

};
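/*
 * rdmavt consults this table at post-send time: .qpt_support masks out
 * opcodes a given QP type may not use, and .length bounds how much of the
 * work request is copied into the software send WQE.
 */
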
static void flush_tx_list(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	while (!list_empty(&priv->s_iowait.tx_head)) {
		struct sdma_txreq *tx;

		tx = list_first_entry(
			&priv->s_iowait.tx_head,
			struct sdma_txreq,
			list);
		list_del_init(&tx->list);
		hfi1_put_txreq(
			container_of(tx, struct verbs_txreq, txreq));
	}
}

static void flush_iowait(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	unsigned long flags;
	seqlock_t *lock = priv->s_iowait.lock;

	if (!lock)
		return;
	write_seqlock_irqsave(lock, flags);
	if (!list_empty(&priv->s_iowait.list)) {
		list_del_init(&priv->s_iowait.list);
		priv->s_iowait.lock = NULL;
		rvt_put_qp(qp);
	}
	write_sequnlock_irqrestore(lock, flags);
}

static inline int opa_mtu_enum_to_int(int mtu)
{
	switch (mtu) {
	case OPA_MTU_8192:  return 8192;
	case OPA_MTU_10240: return 10240;
	default:            return -1;
	}
}

/**
 * This function is what we would push to the core layer if we wanted to be a
 * "first class citizen".  Instead we hide this here and rely on Verbs ULPs
 * to blindly pass the MTU enum value from the PathRecord to us.
 */
static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
	int val;

	/* Constraining 10KB packets to 8KB packets */
	if (mtu == (enum ib_mtu)OPA_MTU_10240)
		mtu = OPA_MTU_8192;
	val = opa_mtu_enum_to_int((int)mtu);
	if (val > 0)
		return val;
	return ib_mtu_enum_to_int(mtu);
}
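/*
 * For example, a PathRecord MTU of OPA_MTU_10240 is clamped to OPA_MTU_8192
 * above and yields 8192, while a standard value such as IB_MTU_4096 falls
 * through to ib_mtu_enum_to_int() and yields 4096.
 */
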
int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
			 int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_ibdev *dev = to_idev(ibqp->device);
	struct hfi1_devdata *dd = dd_from_dev(dev);
	u8 sc;

	if (attr_mask & IB_QP_AV) {
		sc = ah_to_sc(ibqp->device, &attr->ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		sc = ah_to_sc(ibqp->device, &attr->alt_ah_attr);
		if (sc == 0xf)
			return -EINVAL;

		if (!qp_to_sdma_engine(qp, sc) &&
		    dd->flags & HFI1_HAS_SEND_DMA)
			return -EINVAL;

		if (!qp_to_send_context(qp, sc))
			return -EINVAL;
	}

	return 0;
}

/**
 * qp_set_16b - Set the hdr_type based on whether the slid or the
 * dlid in the connection is extended. Only applicable for RC and UC
 * QPs. UD QPs determine this on the fly from the ah in the wqe
 */
static inline void qp_set_16b(struct rvt_qp *qp)
{
	struct hfi1_pportdata *ppd;
	struct hfi1_ibport *ibp;
	struct hfi1_qp_priv *priv = qp->priv;

	/* Update ah_attr to account for extended LIDs */
	hfi1_update_ah_attr(qp->ibqp.device, &qp->remote_ah_attr);

	/* Create 32 bit LIDs */
	hfi1_make_opa_lid(&qp->remote_ah_attr);

	if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH))
		return;

	ibp = to_iport(qp->ibqp.device, qp->port_num);
	ppd = ppd_from_ibp(ibp);
	priv->hdr_type = hfi1_get_hdr_type(ppd->lid, &qp->remote_ah_attr);
}

void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct hfi1_qp_priv *priv = qp->priv;

	if (attr_mask & IB_QP_AV) {
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}

	if (attr_mask & IB_QP_PATH_MIG_STATE &&
	    attr->path_mig_state == IB_MIG_MIGRATED &&
	    qp->s_mig_state == IB_MIG_ARMED) {
		qp->s_flags |= RVT_S_AHG_CLEAR;
		priv->s_sc = ah_to_sc(ibqp->device, &qp->remote_ah_attr);
		priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
		priv->s_sendcontext = qp_to_send_context(qp, priv->s_sc);
		qp_set_16b(qp);
	}
}

/**
 * hfi1_check_send_wqe - validate wqe
 * @qp - The qp
 * @wqe - The built wqe
 *
 * validate wqe.  This is called
 * prior to inserting the wqe into
 * the ring but after the wqe has been
 * setup.
 *
 * Returns 0 on success, -EINVAL on failure
 */
int hfi1_check_send_wqe(struct rvt_qp *qp,
			struct rvt_swqe *wqe)
{
	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct rvt_ah *ah;

	switch (qp->ibqp.qp_type) {
	case IB_QPT_RC:
	case IB_QPT_UC:
		if (wqe->length > 0x80000000U)
			return -EINVAL;
		break;
	case IB_QPT_SMI:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		break;
	case IB_QPT_GSI:
	case IB_QPT_UD:
		ah = ibah_to_rvtah(wqe->ud_wr.ah);
		if (wqe->length > (1 << ah->log_pmtu))
			return -EINVAL;
		if (ibp->sl_to_sc[rdma_ah_get_sl(&ah->attr)] == 0xf)
			return -EINVAL;
	default:
		break;
	}
	return wqe->length <= piothreshold;
}

/**
 * _hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress w/o regard to the s_flags.
 *
 * It is only used in the post send, which doesn't hold
 * the s_lock.
 */
void _hfi1_schedule_send(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
			priv->s_sde ?
			priv->s_sde->cpu :
			cpumask_first(cpumask_of_node(dd->node)));
}

static void qp_pio_drain(struct rvt_qp *qp)
{
	struct hfi1_ibdev *dev;
	struct hfi1_qp_priv *priv = qp->priv;

	if (!priv->s_sendcontext)
		return;
	dev = to_idev(qp->ibqp.device);
	while (iowait_pio_pending(&priv->s_iowait)) {
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 1);
		write_sequnlock_irq(&dev->iowait_lock);
		iowait_pio_drain(&priv->s_iowait);
		write_seqlock_irq(&dev->iowait_lock);
		hfi1_sc_wantpiobuf_intr(priv->s_sendcontext, 0);
		write_sequnlock_irq(&dev->iowait_lock);
	}
}

/**
 * hfi1_schedule_send - schedule progress
 * @qp: the QP
 *
 * This schedules qp progress and caller should hold
 * the s_lock.
 */
void hfi1_schedule_send(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	if (hfi1_send_ok(qp))
		_hfi1_schedule_send(qp);
}

void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & flag) {
		qp->s_flags &= ~flag;
		trace_hfi1_qpwakeup(qp, flag);
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);
}

static int iowait_sleep(
	struct sdma_engine *sde,
	struct iowait *wait,
	struct sdma_txreq *stx,
	uint seq,
	bool pkts_sent)
{
	struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);
	struct rvt_qp *qp;
	struct hfi1_qp_priv *priv;
	unsigned long flags;
	int ret = 0;
	struct hfi1_ibdev *dev;

	qp = tx->qp;
	priv = qp->priv;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
		/*
		 * If we couldn't queue the DMA request, save the info
		 * and try again later rather than destroying the
		 * buffer and undoing the side effects of the copy.
		 */
		/* Make a common routine? */
		dev = &sde->dd->verbs_dev;
		list_add_tail(&stx->list, &wait->tx_head);
		write_seqlock(&dev->iowait_lock);
		if (sdma_progress(sde, seq, stx))
			goto eagain;
		if (list_empty(&priv->s_iowait.list)) {
			struct hfi1_ibport *ibp =
				to_iport(qp->ibqp.device, qp->port_num);

			ibp->rvp.n_dmawait++;
			qp->s_flags |= RVT_S_WAIT_DMA_DESC;
			iowait_queue(pkts_sent, &priv->s_iowait,
				     &sde->dmawait);
			priv->s_iowait.lock = &dev->iowait_lock;
			trace_hfi1_qpsleep(qp, RVT_S_WAIT_DMA_DESC);
			rvt_get_qp(qp);
		}
		write_sequnlock(&dev->iowait_lock);
		qp->s_flags &= ~RVT_S_BUSY;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EBUSY;
	} else {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		hfi1_put_txreq(tx);
	}
	return ret;
eagain:
	write_sequnlock(&dev->iowait_lock);
	spin_unlock_irqrestore(&qp->s_lock, flags);
	list_del_init(&stx->list);
	return -EAGAIN;
}
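/*
 * Return values above: -EBUSY means the QP has been parked on the SDMA
 * engine's wait list and will be rescheduled from iowait_wakeup(); -EAGAIN
 * means the engine made progress while iowait_lock was held, so the caller
 * retries the submission immediately.
 */
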
static void iowait_wakeup(struct iowait *wait, int reason)
{
	struct rvt_qp *qp = iowait_to_qp(wait);

	WARN_ON(reason != SDMA_AVAIL_REASON);
	hfi1_qp_wakeup(qp, RVT_S_WAIT_DMA_DESC);
}

static void iowait_sdma_drained(struct iowait *wait)
{
	struct rvt_qp *qp = iowait_to_qp(wait);
	unsigned long flags;

	/*
	 * This happens when the send engine notes
	 * a QP in the error state and cannot
	 * do the flush work until that QP's
	 * sdma work has finished.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (qp->s_flags & RVT_S_WAIT_DMA) {
		qp->s_flags &= ~RVT_S_WAIT_DMA;
		hfi1_schedule_send(qp);
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
}

/**
 * qp_to_sdma_engine - map a qp to a send engine
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send engine for the qp or NULL for SMI type qp.
 */
struct sdma_engine *qp_to_sdma_engine(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct sdma_engine *sde;

	if (!(dd->flags & HFI1_HAS_SEND_DMA))
		return NULL;
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		return NULL;
	default:
		break;
	}
	sde = sdma_select_engine_sc(dd, qp->ibqp.qp_num >> dd->qos_shift, sc5);
	return sde;
}
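/*
 * SMI QPs never use SDMA; their traffic goes out on the VL15 send context
 * chosen in qp_to_send_context() below, so NULL is a valid answer here
 * rather than an error.
 */
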
/*
 * qp_to_send_context - map a qp to a send context
 * @qp: the QP
 * @sc5: the 5 bit sc
 *
 * Return:
 * A send context for the qp
 */
struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5)
{
	struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);

	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
		/* SMA packets to VL15 */
		return dd->vld[15].sc;
	default:
		break;
	}

	return pio_select_send_context_sc(dd, qp->ibqp.qp_num >> dd->qos_shift,
					  sc5);
}

static const char * const qp_type_str[] = {
	"SMI", "GSI", "RC", "UC", "UD",
};

static int qp_idle(struct rvt_qp *qp)
{
	return
		qp->s_last == qp->s_acked &&
		qp->s_acked == qp->s_cur &&
		qp->s_cur == qp->s_tail &&
		qp->s_tail == qp->s_head;
}
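/*
 * A QP is reported idle only when every send-side ring index coincides
 * (last == acked == cur == tail == head), i.e. nothing is queued, in
 * flight, or waiting for acknowledgment.
 */
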
/**
 * qp_iter_print - print the qp information to seq_file
 * @s: the seq_file to emit the qp information on
 * @iter: the iterator for the qp hash list
 */
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter)
{
	struct rvt_swqe *wqe;
	struct rvt_qp *qp = iter->qp;
	struct hfi1_qp_priv *priv = qp->priv;
	struct sdma_engine *sde;
	struct send_context *send_context;
	struct rvt_ack_entry *e = NULL;
	struct rvt_srq *srq = qp->ibqp.srq ?
		ibsrq_to_rvtsrq(qp->ibqp.srq) : NULL;

	sde = qp_to_sdma_engine(qp, priv->s_sc);
	wqe = rvt_get_swqe_ptr(qp, qp->s_last);
	send_context = qp_to_send_context(qp, priv->s_sc);
	if (qp->s_ack_queue)
		e = &qp->s_ack_queue[qp->s_tail_ack_queue];
	seq_printf(s,
		   "N %d %s QP %x R %u %s %u %u f=%x %u %u %u %u %u %u SPSN %x %x %x %x %x RPSN %x S(%u %u %u %u %u %u %u) R(%u %u %u) RQP %x LID %x SL %u MTU %u %u %u %u %u SDE %p,%u SC %p,%u SCQ %u %u PID %d OS %x %x E %x %x %x RNR %d %s %d\n",
		   iter->n,
		   qp_idle(qp) ? "I" : "B",
		   qp->ibqp.qp_num,
		   atomic_read(&qp->refcount),
		   qp_type_str[qp->ibqp.qp_type],
		   qp->state,
		   wqe ? wqe->wr.opcode : 0,
		   qp->s_flags,
		   iowait_sdma_pending(&priv->s_iowait),
		   iowait_pio_pending(&priv->s_iowait),
		   !list_empty(&priv->s_iowait.list),
		   qp->timeout,
		   wqe ? wqe->ssn : 0,
		   qp->s_lsn,
		   qp->s_last_psn,
		   qp->s_psn, qp->s_next_psn,
		   qp->s_sending_psn, qp->s_sending_hpsn,
		   qp->r_psn,
		   qp->s_last, qp->s_acked, qp->s_cur,
		   qp->s_tail, qp->s_head, qp->s_size,
		   qp->s_avail,
		   /* ack_queue ring pointers, size */
		   qp->s_tail_ack_queue, qp->r_head_ack_queue,
		   rvt_max_atomic(&to_idev(qp->ibqp.device)->rdi),
		   qp->remote_qpn,
		   rdma_ah_get_dlid(&qp->remote_ah_attr),
		   rdma_ah_get_sl(&qp->remote_ah_attr),
		   qp->pmtu,
		   qp->s_retry,
		   qp->s_retry_cnt,
		   qp->s_rnr_retry_cnt,
		   qp->s_rnr_retry,
		   sde,
		   sde ? sde->this_idx : 0,
		   send_context,
		   send_context ? send_context->sw_index : 0,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->head,
		   ibcq_to_rvtcq(qp->ibqp.send_cq)->queue->tail,
		   qp->pid,
		   qp->s_state,
		   qp->s_ack_state,
		   /* ack queue information */
		   e ? e->opcode : 0,
		   e ? e->psn : 0,
		   e ? e->lpsn : 0,
		   qp->r_min_rnr_timer,
		   srq ? "SRQ" : "RQ",
		   srq ? srq->rq.size : qp->r_rq.size
		);
}

void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv;

	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
	if (!priv)
		return ERR_PTR(-ENOMEM);

	priv->owner = qp;

	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
				   rdi->dparms.node);
	if (!priv->s_ahg) {
		kfree(priv);
		return ERR_PTR(-ENOMEM);
	}
	iowait_init(
		&priv->s_iowait,
		1,
		_hfi1_do_send,
		iowait_sleep,
		iowait_wakeup,
		iowait_sdma_drained);
	return priv;
}

void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	kfree(priv->s_ahg);
	kfree(priv);
}

unsigned free_all_qps(struct rvt_dev_info *rdi)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	int n;
	unsigned qp_inuse = 0;

	for (n = 0; n < dd->num_pports; n++) {
		struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;

		rcu_read_lock();
		if (rcu_dereference(ibp->rvp.qp[0]))
			qp_inuse++;
		if (rcu_dereference(ibp->rvp.qp[1]))
			qp_inuse++;
		rcu_read_unlock();
	}

	return qp_inuse;
}

void flush_qp_waiters(struct rvt_qp *qp)
{
	lockdep_assert_held(&qp->s_lock);
	flush_iowait(qp);
}

void stop_send_queue(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	cancel_work_sync(&priv->s_iowait.iowork);
}

void quiesce_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;

	iowait_sdma_drain(&priv->s_iowait);
	qp_pio_drain(qp);
	flush_tx_list(qp);
}
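/*
 * The drain order above matters: outstanding SDMA descriptors first, then
 * PIO credits, and finally any txreqs still parked on the QP's iowait
 * tx_head list.
 */
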
void notify_qp_reset(struct rvt_qp *qp)
{
	qp->r_adefered = 0;
	clear_ahg(qp);
}

/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void hfi1_migrate_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;
	qp->s_flags |= RVT_S_AHG_CLEAR;
	priv->s_sc = ah_to_sc(qp->ibqp.device, &qp->remote_ah_attr);
	priv->s_sde = qp_to_sdma_engine(qp, priv->s_sc);
	qp_set_16b(qp);

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}

int mtu_to_path_mtu(u32 mtu)
{
	return mtu_to_enum(mtu, OPA_MTU_8192);
}

u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
{
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	struct hfi1_ibport *ibp;
	u8 sc, vl;
	u32 mtu;

	ibp = &dd->pport[qp->port_num - 1].ibport_data;
	sc = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
	vl = sc_to_vlt(dd, sc);

	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu);
	if (vl < PER_VL_SEND_CONTEXTS)
		mtu = min_t(u32, mtu, dd->vld[vl].mtu);
	return mtu;
}
int get_pmtu_from_attr(struct rvt_dev_info *rdi, struct rvt_qp *qp,
		       struct ib_qp_attr *attr)
{
	int mtu, pidx = qp->port_num - 1;
	struct hfi1_ibdev *verbs_dev = container_of(rdi,
						    struct hfi1_ibdev,
						    rdi);
	struct hfi1_devdata *dd = container_of(verbs_dev,
					       struct hfi1_devdata,
					       verbs_dev);
	mtu = verbs_mtu_enum_to_int(qp->ibqp.device, attr->path_mtu);
	if (mtu == -1)
		return -1; /* values less than 0 are error */

	if (mtu > dd->pport[pidx].ibmtu)
		return mtu_to_enum(dd->pport[pidx].ibmtu, IB_MTU_2048);
	else
		return attr->path_mtu;
}

void notify_error_qp(struct rvt_qp *qp)
{
	struct hfi1_qp_priv *priv = qp->priv;
	seqlock_t *lock = priv->s_iowait.lock;

	if (lock) {
		write_seqlock(lock);
		if (!list_empty(&priv->s_iowait.list) &&
		    !(qp->s_flags & RVT_S_BUSY)) {
			qp->s_flags &= ~RVT_S_ANY_WAIT_IO;
			list_del_init(&priv->s_iowait.list);
			priv->s_iowait.lock = NULL;
			rvt_put_qp(qp);
		}
		write_sequnlock(lock);
	}

	if (!(qp->s_flags & RVT_S_BUSY)) {
		if (qp->s_rdma_mr) {
			rvt_put_mr(qp->s_rdma_mr);
			qp->s_rdma_mr = NULL;
		}
		flush_tx_list(qp);
	}
}

/**
 * hfi1_qp_iter_cb - callback for iterator
 * @qp - the qp
 * @v - the sl in low bits of v
 *
 * This is called from the iterator callback to work
 * on an individual qp.
 */
static void hfi1_qp_iter_cb(struct rvt_qp *qp, u64 v)
{
	int lastwqe;
	struct ib_event ev;
	struct hfi1_ibport *ibp =
		to_iport(qp->ibqp.device, qp->port_num);
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	u8 sl = (u8)v;

	if (qp->port_num != ppd->port ||
	    (qp->ibqp.qp_type != IB_QPT_UC &&
	     qp->ibqp.qp_type != IB_QPT_RC) ||
	    rdma_ah_get_sl(&qp->remote_ah_attr) != sl ||
	    !(ib_rvt_state_ops[qp->state] & RVT_POST_SEND_OK))
		return;

	spin_lock_irq(&qp->r_lock);
	spin_lock(&qp->s_hlock);
	spin_lock(&qp->s_lock);
	lastwqe = rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
	spin_unlock(&qp->s_lock);
	spin_unlock(&qp->s_hlock);
	spin_unlock_irq(&qp->r_lock);
	if (lastwqe) {
		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
}

/**
 * hfi1_error_port_qps - put a port's RC/UC qps into error state
 * @ibp: the ibport.
 * @sl: the service level.
 *
 * This function places all RC/UC qps with a given service level into error
 * state. It is generally called to force upper-layer apps to abandon stale
 * qps after an sl->sc mapping change.
 */
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl)
{
	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
	struct hfi1_ibdev *dev = &ppd->dd->verbs_dev;

	rvt_qp_iter(&dev->rdi, sl, hfi1_qp_iter_cb);
}