/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/slab.h>

#include "ipoib.h"
int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
		       union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_attr *qp_attr = NULL;
	int ret;
	u16 pkey_index;

	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		ret = -ENXIO;
		goto out;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	if (set_qkey) {
		ret = -ENOMEM;
		qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
		if (!qp_attr)
			goto out;

		/* set correct QKey for QP */
		qp_attr->qkey = qkey;
		ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
		if (ret) {
			ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
			goto out;
		}
	}

	/* attach QP to multicast group */
	ret = ib_attach_mcast(priv->qp, mgid, mlid);
	if (ret)
		ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);

out:
	kfree(qp_attr);
	return ret;
}
int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
		       union ib_gid *mgid, u16 mlid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;

	ret = ib_detach_mcast(priv->qp, mgid, mlid);

	return ret;
}
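
/*
 * Move the UD QP through the INIT -> RTR -> RTS state sequence so it can
 * post receives and sends.  Attributes that are not valid for a given
 * transition (e.g. the port number after INIT) are dropped from the
 * attribute mask along the way; on any failure the QP is forced back to
 * RESET.
 */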
int ipoib_init_qp(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	int ret;
	struct ib_qp_attr qp_attr;
	int attr_mask;

	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
		return -1;

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qkey = 0;
	qp_attr.port_num = priv->port;
	qp_attr.pkey_index = priv->pkey_index;
	attr_mask =
	    IB_QP_QKEY |
	    IB_QP_PORT |
	    IB_QP_PKEY_INDEX |
	    IB_QP_STATE;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	/* Can't set this in a INIT->RTR transition */
	attr_mask &= ~IB_QP_PORT;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	attr_mask |= IB_QP_SQ_PSN;
	attr_mask &= ~IB_QP_PKEY_INDEX;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
		goto out_fail;
	}

	return 0;

out_fail:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	return ret;
}
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);
	struct ib_qp_init_attr init_attr = {
		.cap = {
			.max_send_wr  = ipoib_sendq_size,
			.max_recv_wr  = ipoib_recvq_size,
			.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
					      MAX_SKB_FRAGS + 1),
			.max_recv_sge = IPOIB_UD_RX_SG
		},
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD
	};
	struct ib_cq_init_attr cq_attr = {};
	int ret, size, req_vec;
	int i;

	size = ipoib_recvq_size + 1;
	ret = ipoib_cm_dev_init(dev);
	if (!ret) {
		size += ipoib_sendq_size;
		if (ipoib_cm_has_srq(dev))
			size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
		else
			size += ipoib_recvq_size * ipoib_max_conn_qp;
	} else if (ret != -EOPNOTSUPP)
		return ret;

	/* Spread the two CQs of each port across the completion vectors */
	req_vec = (priv->port - 1) * 2;

	cq_attr.cqe = size;
	cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
				     priv, &cq_attr);
	if (IS_ERR(priv->recv_cq)) {
		pr_warn("%s: failed to create receive CQ\n", ca->name);
		goto out_cm_dev_cleanup;
	}

	cq_attr.cqe = ipoib_sendq_size;
	cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
	priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
				     priv, &cq_attr);
	if (IS_ERR(priv->send_cq)) {
		pr_warn("%s: failed to create send CQ\n", ca->name);
		goto out_free_recv_cq;
	}

	if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
		goto out_free_send_cq;

	init_attr.send_cq = priv->send_cq;
	init_attr.recv_cq = priv->recv_cq;

	if (priv->hca_caps & IB_DEVICE_UD_TSO)
		init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	if (priv->hca_caps & IB_DEVICE_BLOCK_MULTICAST_LOOPBACK)
		init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
		init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;

	priv->qp = ib_create_qp(priv->pd, &init_attr);
	if (IS_ERR(priv->qp)) {
		pr_warn("%s: failed to create QP\n", ca->name);
		goto out_free_send_cq;
	}

	if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
		goto out_free_send_cq;

	for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
		priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;

	priv->tx_wr.wr.opcode		= IB_WR_SEND;
	priv->tx_wr.wr.sg_list		= priv->tx_sge;
	priv->tx_wr.wr.send_flags	= IB_SEND_SIGNALED;

	priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;

	priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
	priv->rx_wr.num_sge = 1;

	priv->rx_wr.next = NULL;
	priv->rx_wr.sg_list = priv->rx_sge;

	if (init_attr.cap.max_send_sge > 1)
		dev->features |= NETIF_F_SG;

	priv->max_send_sge = init_attr.cap.max_send_sge;

	return 0;

out_free_send_cq:
	ib_destroy_cq(priv->send_cq);

out_free_recv_cq:
	ib_destroy_cq(priv->recv_cq);

out_cm_dev_cleanup:
	ipoib_cm_dev_cleanup(dev);

	return -ENODEV;
}
void ipoib_transport_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	if (priv->qp) {
		if (ib_destroy_qp(priv->qp))
			ipoib_warn(priv, "ib_qp_destroy failed\n");

		priv->qp = NULL;
	}

	ib_destroy_cq(priv->send_cq);
	ib_destroy_cq(priv->recv_cq);
}
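
/*
 * Asynchronous event handler registered with the IB core.  Events for
 * this port are translated into the appropriate flush work (light,
 * normal or heavy) on the IPoIB workqueue.
 */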
void ipoib_event(struct ib_event_handler *handler,
		 struct ib_event *record)
{
	struct ipoib_dev_priv *priv =
		container_of(handler, struct ipoib_dev_priv, event_handler);

	if (record->element.port_num != priv->port)
		return;

	ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
		  dev_name(&record->device->dev), record->element.port_num);

	if (record->event == IB_EVENT_CLIENT_REREGISTER) {
		queue_work(ipoib_workqueue, &priv->flush_light);
	} else if (record->event == IB_EVENT_PORT_ERR ||
		   record->event == IB_EVENT_PORT_ACTIVE ||
		   record->event == IB_EVENT_LID_CHANGE) {
		queue_work(ipoib_workqueue, &priv->flush_normal);
	} else if (record->event == IB_EVENT_PKEY_CHANGE) {
		queue_work(ipoib_workqueue, &priv->flush_heavy);
	} else if (record->event == IB_EVENT_GID_CHANGE &&
		   !test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
		queue_work(ipoib_workqueue, &priv->flush_light);
	}
}