/*
 * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>
#include <rdma/ib_smi.h>

#include "qib.h"
#include "qib_mad.h"
/*
 * Switch to alternate path.
 * The QP s_lock should be held and interrupts disabled.
 */
void qib_migrate_qp(struct rvt_qp *qp)
{
	struct ib_event ev;

	qp->s_mig_state = IB_MIG_MIGRATED;
	qp->remote_ah_attr = qp->alt_ah_attr;
	qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
	qp->s_pkey_index = qp->s_alt_pkey_index;

	ev.device = qp->ibqp.device;
	ev.element.qp = &qp->ibqp;
	ev.event = IB_EVENT_PATH_MIG;
	qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
}
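/*
 * Illustrative only: a consumer that supplied an event handler at QP
 * creation sees the migration above as an IB_EVENT_PATH_MIG event.
 * A minimal, hypothetical handler (not part of this driver) might be:
 *
 *	static void my_qp_event(struct ib_event *ev, void *ctx)
 *	{
 *		if (ev->event == IB_EVENT_PATH_MIG)
 *			pr_info("QP %u migrated to alternate path\n",
 *				ev->element.qp->qp_num);
 *	}
 */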
static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
{
	if (!index) {
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		return ppd->guid;
	}
	return ibp->guids[index - 1];
}
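/*
 * For example, get_sguid(ibp, 0) returns the port GUID (ppd->guid), while
 * get_sguid(ibp, 1) returns ibp->guids[0], the first additional GUID in the
 * port's GUID table; hence the "index - 1" above.
 */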
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
	return (gid->global.interface_id == id &&
		(gid->global.subnet_prefix == gid_prefix ||
		 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}
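/*
 * In other words, a GID is accepted when its interface ID equals the
 * expected GUID and its subnet prefix is either the port's configured
 * prefix or the default prefix; e.g. a peer still using the default
 * subnet prefix (fe80::) passes as long as the interface ID matches.
 */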
/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the qib_migrate_qp() call.
 */
int qib_ruc_check_hdr(struct qib_ibport *ibp, struct ib_header *hdr,
		      int has_grh, struct rvt_qp *qp, u32 bth0)
{
	__be64 guid;
	unsigned long flags;

	if (qp->s_mig_state == IB_MIG_ARMED && (bth0 & IB_BTH_MIG_REQ)) {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->alt_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_alt_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
		if ((be16_to_cpu(hdr->lrh[3]) !=
		     rdma_ah_get_dlid(&qp->alt_ah_attr)) ||
		    ppd_from_ibp(ibp)->port !=
		    rdma_ah_get_port_num(&qp->alt_ah_attr))
			goto err;
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_migrate_qp(qp);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else {
		if (!has_grh) {
			if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			    IB_AH_GRH)
				goto err;
		} else {
			const struct ib_global_route *grh;

			if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
			      IB_AH_GRH))
				goto err;
			grh = rdma_ah_read_grh(&qp->remote_ah_attr);
			guid = get_sguid(ibp, grh->sgid_index);
			if (!gid_ok(&hdr->u.l.grh.dgid,
				    ibp->rvp.gid_prefix, guid))
				goto err;
			if (!gid_ok(&hdr->u.l.grh.sgid,
				    grh->dgid.global.subnet_prefix,
				    grh->dgid.global.interface_id))
				goto err;
		}
		if (!qib_pkey_ok((u16)bth0,
				 qib_get_pkey(ibp, qp->s_pkey_index))) {
			qib_bad_pkey(ibp,
				     (u16)bth0,
				     (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF,
				     0, qp->ibqp.qp_num,
				     hdr->lrh[3], hdr->lrh[1]);
			goto err;
		}
		/* Validate the SLID. See Ch. 9.6.1.5 */
		if (be16_to_cpu(hdr->lrh[3]) !=
		    rdma_ah_get_dlid(&qp->remote_ah_attr) ||
		    ppd_from_ibp(ibp)->port != qp->port_num)
			goto err;
		if (qp->s_mig_state == IB_MIG_REARM &&
		    !(bth0 & IB_BTH_MIG_REQ))
			qp->s_mig_state = IB_MIG_ARMED;
	}

	return 0;

err:
	return 1;
}
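/*
 * Rough usage sketch (simplified from the RC/UC receive paths, not a
 * verbatim copy): the receive handler validates the header before touching
 * QP state and drops the packet when the check fails:
 *
 *	if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp,
 *			      be32_to_cpu(ohdr->bth[0])))
 *		return;
 */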
/**
 * qib_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 qib_make_grh(struct qib_ibport *ibp, struct ib_grh *hdr,
		 const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
			    (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
			    (grh->flow_label << IB_GRH_FLOW_SHIFT));
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = IB_GRH_NEXT_HDR;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
	if (!grh->sgid_index)
		hdr->sgid.global.interface_id = ppd_from_ibp(ibp)->guid;
	else if (grh->sgid_index < QIB_GUIDS_PER_PORT)
		hdr->sgid.global.interface_id = ibp->guids[grh->sgid_index - 1];
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
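/*
 * Worked example with illustrative numbers: for hwords = 5 (LRH + BTH),
 * nwords = 256 payload words and SIZE_OF_CRC = 1 (the ICRC word), the GRH
 * payload length is (5 - 2 + 256 + 1) * 4 = 1040 bytes; the "- 2" drops the
 * two LRH words, which the GRH payload length does not cover.
 */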
void qib_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
			 u32 bth0, u32 bth2)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;

	/* Construct the header. */
	extra_bytes = -qp->s_cur_size & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = QIB_LRH_BTH;
	if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
		qp->s_hdrwords +=
			qib_make_grh(ibp, &priv->s_hdr->u.l.grh,
				     rdma_ah_read_grh(&qp->remote_ah_attr),
				     qp->s_hdrwords, nwords);
		lrh0 = QIB_LRH_GRH;
	}
	lrh0 |= ibp->sl_to_vl[rdma_ah_get_sl(&qp->remote_ah_attr)] << 12 |
		rdma_ah_get_sl(&qp->remote_ah_attr) << 4;
	priv->s_hdr->lrh[0] = cpu_to_be16(lrh0);
	priv->s_hdr->lrh[1] =
		cpu_to_be16(rdma_ah_get_dlid(&qp->remote_ah_attr));
	priv->s_hdr->lrh[2] =
		cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
	priv->s_hdr->lrh[3] =
		cpu_to_be16(ppd_from_ibp(ibp)->lid |
			    rdma_ah_get_path_bits(&qp->remote_ah_attr));
	bth0 |= qib_get_pkey(ibp, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	if (qp->s_mig_state == IB_MIG_MIGRATED)
		bth0 |= IB_BTH_MIG_REQ;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);
	this_cpu_inc(ibp->pmastats->n_unicast_xmit);
}
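/*
 * Padding arithmetic, for illustration: "-qp->s_cur_size & 3" is the number
 * of bytes needed to round the payload up to a 4-byte boundary, so
 * s_cur_size = 13 gives extra_bytes = 3 and nwords = 4. The pad count is
 * then placed in the BTH PadCnt field by "extra_bytes << 20".
 */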
void _qib_do_send(struct work_struct *work)
{
	struct qib_qp_priv *priv = container_of(work, struct qib_qp_priv,
						s_work);
	struct rvt_qp *qp = priv->owner;

	qib_do_send(qp);
}
/**
 * qib_do_send - perform a send on a QP
 * @qp: pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void qib_do_send(struct rvt_qp *qp)
{
	struct qib_qp_priv *priv = qp->priv;
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	int (*make_req)(struct rvt_qp *qp, unsigned long *flags);
	unsigned long flags;

	if ((qp->ibqp.qp_type == IB_QPT_RC ||
	     qp->ibqp.qp_type == IB_QPT_UC) &&
	    (rdma_ah_get_dlid(&qp->remote_ah_attr) &
	     ~((1 << ppd->lmc) - 1)) == ppd->lid) {
		rvt_ruc_loopback(qp);
		return;
	}

	if (qp->ibqp.qp_type == IB_QPT_RC)
		make_req = qib_make_rc_req;
	else if (qp->ibqp.qp_type == IB_QPT_UC)
		make_req = qib_make_uc_req;
	else
		make_req = qib_make_ud_req;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Return if we are already busy processing a work request. */
	if (!qib_send_ok(qp)) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		return;
	}

	qp->s_flags |= RVT_S_BUSY;

	do {
		/* Check for a constructed packet to be sent. */
		if (qp->s_hdrwords != 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			/*
			 * If the packet cannot be sent now, return and
			 * the send tasklet will be woken up later.
			 */
			if (qib_verbs_send(qp, priv->s_hdr, qp->s_hdrwords,
					   qp->s_cur_sge, qp->s_cur_size))
				return;
			/* Record that s_hdr is empty. */
			qp->s_hdrwords = 0;
			spin_lock_irqsave(&qp->s_lock, flags);
		}
	} while (make_req(qp, &flags));

	spin_unlock_irqrestore(&qp->s_lock, flags);
}
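/*
 * Rough sketch of how this path is reached (not verbatim driver code): the
 * post-send and interrupt paths schedule the QP's send work item
 * (priv->s_work) on the driver's send workqueue; the work item runs
 * _qib_do_send(), which in turn calls qib_do_send() above.
 */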