/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include <linux/ip.h>
#include <linux/tcp.h>

#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
                 "Enable data path debug tracing if > 0");
#endif
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
{
        struct ipoib_ah *ah;
        struct ib_ah *vah;

        ah = kmalloc(sizeof *ah, GFP_KERNEL);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        ah->dev       = dev;
        ah->last_send = 0;
        kref_init(&ah->ref);

        vah = ib_create_ah(pd, attr);
        if (IS_ERR(vah)) {
                kfree(ah);
                ah = (struct ipoib_ah *)vah;
        } else {
                ah->ah = vah;
                ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
        }

        return ah;
}
void ipoib_free_ah(struct kref *kref)
{
        struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
        struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        list_add_tail(&ah->list, &priv->dead_ahs);
        spin_unlock_irqrestore(&priv->lock, flags);
}
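
/*
 * Editor's note: in this UD receive path the primary mapping
 * (mapping[0]) spans the full IPOIB_UD_BUF_SIZE(max_ib_mtu) region, so
 * tearing a buffer down only requires unmapping that one entry.
 */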
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
                                  u64 mapping[IPOIB_UD_RX_SG])
{
        ib_dma_unmap_single(priv->ca, mapping[0],
                            IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
                            DMA_FROM_DEVICE);
}
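
/*
 * Editor's note: receive work requests are tagged with IPOIB_OP_RECV in
 * wr_id so the completion paths (ipoib_poll() and ipoib_drain_cq()
 * below) can tell RX completions apart from connected-mode and send
 * completions that share the same CQ polling loops.
 */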
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_recv_wr *bad_wr;
        int ret;

        priv->rx_wr.wr_id    = id | IPOIB_OP_RECV;
        priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
        priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];

        ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }

        return ret;
}
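
/*
 * Editor's note: the skb allocated below must cover the 40-byte GRH
 * that a UD receive deposits ahead of the payload; reserving
 * sizeof(struct ipoib_pseudo_header) up front keeps the IP header
 * aligned once the GRH is pulled off in ipoib_ib_handle_rx_wc().
 */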
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
        int buf_size;
        u64 *mapping;

        buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);

        skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
        if (unlikely(!skb))
                return NULL;

        /*
         * the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, that is
         * 64 bytes aligned
         */
        skb_reserve(skb, sizeof(struct ipoib_pseudo_header));

        mapping = priv->rx_ring[id].mapping;
        mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
                                       DMA_FROM_DEVICE);
        if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
                goto error;

        priv->rx_ring[id].skb = skb;
        return skb;
error:
        dev_kfree_skb_any(skb);
        return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
                if (ipoib_ib_post_receive(dev, i)) {
                        ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
                        return -EIO;
                }
        }

        return 0;
}
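
/*
 * Editor's note on the recycle strategy below: the handler saves the
 * old DMA mapping, tries to allocate a replacement skb for the ring
 * slot first, and only then unmaps and passes the received skb up the
 * stack; on allocation failure the old buffer is reposted unchanged and
 * the packet is counted as dropped.
 */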
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
        u64 mapping[IPOIB_UD_RX_SG];
        union ib_gid *dgid;
        union ib_gid *sgid;

        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);
                return;
        }

        skb = priv->rx_ring[wr_id].skb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
                ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
        }

        memcpy(mapping, priv->rx_ring[wr_id].mapping,
               IPOIB_UD_RX_SG * sizeof *mapping);

        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
        if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_ud_dma_unmap_rx(priv, mapping);

        skb_put(skb, wc->byte_len);

        /* First byte of dgid signals multicast when 0xff */
        dgid = &((struct ib_grh *)skb->data)->dgid;

        if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
                skb->pkt_type = PACKET_HOST;
        else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
                skb->pkt_type = PACKET_BROADCAST;
        else
                skb->pkt_type = PACKET_MULTICAST;

        sgid = &((struct ib_grh *)skb->data)->sgid;

        /*
         * Drop packets that this interface sent, i.e. multicast packets
         * that the HCA has replicated.
         */
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
                int need_repost = 1;

                if ((wc->wc_flags & IB_WC_GRH) &&
                    sgid->global.interface_id != priv->local_gid.global.interface_id)
                        need_repost = 0;

                if (need_repost) {
                        dev_kfree_skb_any(skb);
                        goto repost;
                }
        }

        skb_pull(skb, IB_GRH_BYTES);

        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
        skb_add_pseudo_hdr(skb);

        ++dev->stats.rx_packets;
        dev->stats.rx_bytes += skb->len;

        if ((dev->features & NETIF_F_RXCSUM) &&
            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        napi_gro_receive(&priv->napi, skb);

repost:
        if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
                ipoib_warn(priv, "ipoib_ib_post_receive failed "
                           "for buf %d\n", wr_id);
}
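
/*
 * Editor's note: tx_req->mapping is laid out with the linear head (if
 * any) at index 0 and the page fragments following it; the "off" local
 * below accounts for whether slot 0 is occupied, which is also why the
 * error unwind indexes with "i - !off".
 */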
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
                                               DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
                        return -EIO;

                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                   skb_frag_page(frag),
                                                   frag->page_offset, skb_frag_size(frag),
                                                   DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
        }
        return 0;

partial_error:
        for (; i > 0; --i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

                ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
        }

        if (off)
                ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);

        return -EIO;
}
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
                        struct ipoib_tx_buf *tx_req)
{
        struct sk_buff *skb = tx_req->skb;
        u64 *mapping = tx_req->mapping;
        int i;
        int off;

        if (skb_headlen(skb)) {
                ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
                                    DMA_TO_DEVICE);
                off = 1;
        } else
                off = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                ib_dma_unmap_page(priv->ca, mapping[i + off],
                                  skb_frag_size(frag), DMA_TO_DEVICE);
        }
}
/*
 * As a result of a completion error the QP can be transitioned to the
 * SQE state. This function checks whether the (send) QP is in the SQE
 * state and, if so, moves it back to RTS so that it is functional again.
 */
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
        struct ipoib_qp_state_validate *qp_work =
                container_of(work, struct ipoib_qp_state_validate, work);

        struct ipoib_dev_priv *priv = qp_work->priv;
        struct ib_qp_attr qp_attr;
        struct ib_qp_init_attr query_init_attr;
        int ret;

        ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
        if (ret) {
                ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
                           __func__, ret);
                goto free_res;
        }
        pr_info("%s: QP: 0x%x is in state: %d\n",
                __func__, priv->qp->qp_num, qp_attr.qp_state);

        /* currently support only in SQE->RTS transition*/
        if (qp_attr.qp_state == IB_QPS_SQE) {
                qp_attr.qp_state = IB_QPS_RTS;

                ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
                if (ret) {
                        pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
                                ret, priv->qp->qp_num);
                        goto free_res;
                }
                pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
                        __func__, priv->qp->qp_num);
        } else {
                pr_warn("QP (%d) will stay in state: %d\n",
                        priv->qp->qp_num, qp_attr.qp_state);
        }

free_res:
        kfree(qp_work);
}
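
/*
 * Editor's note: the TX queue is stopped in ipoib_send() when
 * tx_outstanding reaches ipoib_sendq_size, and the test below wakes it
 * once completions have drained the ring back to half full, giving the
 * queue some hysteresis.
 */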
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;

        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &priv->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv, tx_req);

        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += tx_req->skb->len;

        dev_kfree_skb_any(tx_req->skb);

        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_qp_state_validate *qp_work;
                ipoib_warn(priv, "failed send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
                qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
                if (!qp_work)
                        return;

                INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
                qp_work->priv = priv;
                queue_work(priv->wq, &qp_work->work);
        }
}
static int poll_tx(struct ipoib_dev_priv *priv)
{
        int n, i;

        n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
        for (i = 0; i < n; ++i)
                ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

        return n == MAX_SEND_CQE;
}
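
/*
 * Editor's note: poll_tx() returns nonzero exactly when it filled its
 * whole send_wc array (n == MAX_SEND_CQE), i.e. when the send CQ may
 * still hold more completions, so callers loop on it until it returns 0.
 */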
int ipoib_poll(struct napi_struct *napi, int budget)
{
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
        struct net_device *dev = priv->dev;
        int done;
        int t;
        int n, i;

        done = 0;

poll_more:
        while (done < budget) {
                int max = (budget - done);

                t = min(IPOIB_NUM_WC, max);
                n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);

                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;

                        if (wc->wr_id & IPOIB_OP_RECV) {
                                ++done;
                                if (wc->wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
                        } else
                                ipoib_cm_handle_tx_wc(priv->dev, wc);
                }

                if (n != t)
                        break;
        }

        if (done < budget) {
                napi_complete(napi);
                if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    napi_reschedule(napi))
                        goto poll_more;
        }

        return done;
}
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
        struct net_device *dev = dev_ptr;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        napi_schedule(&priv->napi);
}
static void drain_tx_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        netif_tx_lock(dev);
        while (poll_tx(priv))
                ; /* nothing */

        if (netif_queue_stopped(dev))
                mod_timer(&priv->poll_timer, jiffies + 1);

        netif_tx_unlock(dev);
}
void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev_ptr);

        mod_timer(&priv->poll_timer, jiffies);
}
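
/*
 * Editor's note: for GSO skbs the caller passes the TCP/IP headers
 * separately through "head"/"hlen" and the work request is posted as
 * IB_WR_LSO so the HCA replicates the headers per segment; otherwise a
 * plain IB_WR_SEND is used.
 */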
static inline int post_send(struct ipoib_dev_priv *priv,
                            unsigned int wr_id,
                            struct ib_ah *address, u32 qpn,
                            struct ipoib_tx_buf *tx_req,
                            void *head, int hlen)
{
        struct ib_send_wr *bad_wr;
        struct sk_buff *skb = tx_req->skb;

        ipoib_build_sge(priv, tx_req);

        priv->tx_wr.wr.wr_id    = wr_id;
        priv->tx_wr.remote_qpn  = qpn;
        priv->tx_wr.ah          = address;

        if (head) {
                priv->tx_wr.mss         = skb_shinfo(skb)->gso_size;
                priv->tx_wr.header      = head;
                priv->tx_wr.hlen        = hlen;
                priv->tx_wr.wr.opcode   = IB_WR_LSO;
        } else
                priv->tx_wr.wr.opcode   = IB_WR_SEND;

        return ib_post_send(priv->qp, &priv->tx_wr.wr, &bad_wr);
}
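
/*
 * Editor's note on flow control in ipoib_send(): tx_outstanding is
 * incremented before posting; when it reaches ipoib_sendq_size the
 * netif queue is stopped and a send-CQ notification is armed, and the
 * wake-up happens in ipoib_ib_handle_tx_wc() at the half-full mark.
 */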
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
                struct ipoib_ah *address, u32 qpn)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_tx_buf *tx_req;
        int hlen, rc;
        void *phead;
        unsigned usable_sge = priv->max_send_sge - !!skb_headlen(skb);

        if (skb_is_gso(skb)) {
                hlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
                phead = skb->data;
                if (unlikely(!skb_pull(skb, hlen))) {
                        ipoib_warn(priv, "linear data too small\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        } else {
                if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                        ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                                   skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
                        return;
                }
                phead = NULL;
                hlen  = 0;
        }
        if (skb_shinfo(skb)->nr_frags > usable_sge) {
                if (skb_linearize(skb) < 0) {
                        ipoib_warn(priv, "skb could not be linearized\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
                /* Does skb_linearize return ok without reducing nr_frags? */
                if (skb_shinfo(skb)->nr_frags > usable_sge) {
                        ipoib_warn(priv, "too many frags after skb linearize\n");
                        ++dev->stats.tx_dropped;
                        ++dev->stats.tx_errors;
                        dev_kfree_skb_any(skb);
                        return;
                }
        }

        ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
                       skb->len, address, qpn);

        /*
         * We put the skb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
        tx_req->skb = skb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                ++dev->stats.tx_errors;
                dev_kfree_skb_any(skb);
                return;
        }

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                netif_stop_queue(dev);
        }

        skb_orphan(skb);
        skb_dst_drop(skb);

        rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                       address->ah, qpn, tx_req, phead, hlen);
        if (unlikely(rc)) {
                ipoib_warn(priv, "post_send failed, error %d\n", rc);
                ++dev->stats.tx_errors;
                --priv->tx_outstanding;
                ipoib_dma_unmap_tx(priv, tx_req);
                dev_kfree_skb_any(skb);
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } else {
                netif_trans_update(dev);

                address->last_send = priv->tx_head;
                ++priv->tx_head;
        }

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (poll_tx(priv))
                        ; /* nothing */
}
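
/*
 * Editor's note: the signed comparison below treats tx_tail and
 * last_send as a wrapping sequence space; an AH is only destroyed once
 * every send posted while it was live has completed.
 */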
static void __ipoib_reap_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah, *tah;
        LIST_HEAD(remove_list);
        unsigned long flags;

        netif_tx_lock_bh(dev);
        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
                if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
                        list_del(&ah->list);
                        ib_destroy_ah(ah->ah);
                        kfree(ah);
                }

        spin_unlock_irqrestore(&priv->lock, flags);
        netif_tx_unlock_bh(dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
        struct net_device *dev = priv->dev;

        __ipoib_reap_ah(dev);

        if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
                queue_delayed_work(priv->wq, &priv->ah_reap_task,
                                   round_jiffies_relative(HZ));
}
static void ipoib_flush_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        cancel_delayed_work(&priv->ah_reap_task);
        flush_workqueue(priv->wq);
        ipoib_reap_ah(&priv->ah_reap_task.work);
}
static void ipoib_stop_ah(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        set_bit(IPOIB_STOP_REAPER, &priv->flags);
        ipoib_flush_ah(dev);
}
static void ipoib_ib_tx_timer_func(unsigned long ctx)
{
        drain_tx_cq((struct net_device *)ctx);
}
int ipoib_ib_dev_open(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
                           (!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
                return -1;
        }

        ret = ipoib_init_qp(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
                return -1;
        }

        ret = ipoib_ib_post_receives(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
                goto dev_stop;
        }

        ret = ipoib_cm_dev_open(dev);
        if (ret) {
                ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
                goto dev_stop;
        }

        clear_bit(IPOIB_STOP_REAPER, &priv->flags);
        queue_delayed_work(priv->wq, &priv->ah_reap_task,
                           round_jiffies_relative(HZ));

        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);

        return 0;
dev_stop:
        if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_enable(&priv->napi);
        ipoib_ib_dev_stop(dev);
        return -1;
}
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        if (!(priv->pkey & 0x7fff) ||
            ib_find_pkey(priv->ca, priv->port, priv->pkey,
                         &priv->pkey_index))
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        else
                set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
int ipoib_ib_dev_up(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_pkey_dev_check_presence(dev);

        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
                ipoib_dbg(priv, "PKEY is not assigned.\n");
                return 0;
        }

        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);

        return ipoib_mcast_start_thread(dev);
}
int ipoib_ib_dev_down(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "downing ib_dev\n");

        clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
        netif_carrier_off(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        ipoib_flush_paths(dev);

        return 0;
}
static int recvs_pending(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int pending = 0;
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (priv->rx_ring[i].skb)
                        ++pending;

        return pending;
}
void ipoib_drain_cq(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;

        /*
         * We call completion handling routines that expect to be
         * called from the BH-disabled NAPI poll context, so disable
         * BHs here too.
         */
        local_bh_disable();

        do {
                n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
                         * errors to avoid passing packets up the
                         * stack after bringing the device down.
                         */
                        if (priv->ibwc[i].status == IB_WC_SUCCESS)
                                priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;

                        if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
                                if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
                        } else
                                ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);

        while (poll_tx(priv))
                ; /* nothing */

        local_bh_enable();
}
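
/*
 * Editor's note on the shutdown sequence below: the QP is first moved
 * to the error state so outstanding work requests complete as flushes,
 * the code then waits up to five seconds for them to drain, and only if
 * that times out does it assume the hardware is wedged and reclaim the
 * rings by hand.
 */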
int ipoib_ib_dev_stop(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_attr qp_attr;
        unsigned long begin;
        struct ipoib_tx_buf *tx_req;
        int i;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                napi_disable(&priv->napi);

        ipoib_cm_dev_stop(dev);

        /*
         * Move our QP to the error state and then reinitialize when
         * all work requests have completed or have been flushed.
         */
        qp_attr.qp_state = IB_QPS_ERR;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

        /* Wait for all sends and receives to complete */
        begin = jiffies;

        while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
                                   priv->tx_head - priv->tx_tail, recvs_pending(dev));

                        /*
                         * assume the HW is wedged and just free up
                         * all our pending work requests.
                         */
                        while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                                tx_req = &priv->tx_ring[priv->tx_tail &
                                                        (ipoib_sendq_size - 1)];
                                ipoib_dma_unmap_tx(priv, tx_req);
                                dev_kfree_skb_any(tx_req->skb);
                                ++priv->tx_tail;
                                --priv->tx_outstanding;
                        }

                        for (i = 0; i < ipoib_recvq_size; ++i) {
                                struct ipoib_rx_buf *rx_req;

                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
                                ipoib_ud_dma_unmap_rx(priv,
                                                      priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }

                        goto timeout;
                }

                ipoib_drain_cq(dev);

                msleep(1);
        }

        ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
        del_timer_sync(&priv->poll_timer);
        qp_attr.qp_state = IB_QPS_RESET;
        if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
                ipoib_warn(priv, "Failed to modify QP to RESET state\n");

        ipoib_flush_ah(dev);

        ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);

        return 0;
}
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        priv->ca = ca;
        priv->port = port;
        priv->qp = NULL;

        if (ipoib_transport_dev_init(dev, ca)) {
                printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
                return -ENODEV;
        }

        setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func,
                    (unsigned long) dev);

        if (dev->flags & IFF_UP) {
                if (ipoib_ib_dev_open(dev)) {
                        ipoib_transport_dev_cleanup(dev);
                        return -ENODEV;
                }
        }

        return 0;
}
/*
 * Takes whatever value is in pkey index 0 and updates priv->pkey;
 * returns 0 if the pkey value was changed.
 */
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
        int result;
        u16 prev_pkey;

        prev_pkey = priv->pkey;
        result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
        if (result) {
                ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
                           priv->port, result);
                return result;
        }

        priv->pkey |= 0x8000;

        if (prev_pkey != priv->pkey) {
                ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
                          prev_pkey, priv->pkey);
                /*
                 * Update the pkey in the broadcast address, while making sure to set
                 * the full membership bit, so that we join the right broadcast group.
                 */
                priv->dev->broadcast[8] = priv->pkey >> 8;
                priv->dev->broadcast[9] = priv->pkey & 0xff;
                return 0;
        }

        return 1;
}
/*
 * returns 0 if pkey value was found in a different slot.
 */
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
        u16 old_index = priv->pkey_index;

        priv->pkey_index = 0;
        ipoib_pkey_dev_check_presence(priv->dev);

        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
            (old_index == priv->pkey_index))
                return 1;
        return 0;
}
/*
 * returns true if the device address of the ipoib interface has changed and the
 * new address is a valid one (i.e. in the gid table), returns false otherwise.
 */
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
        union ib_gid search_gid;
        union ib_gid gid0;
        union ib_gid *netdev_gid;
        int err;
        u16 index;
        u8 port;
        bool ret = false;

        netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
        if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL))
                return false;

        netif_addr_lock_bh(priv->dev);

        /* The subnet prefix may have changed, update it now so we won't have
         * to do it later
         */
        priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
        netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
        search_gid.global.subnet_prefix = gid0.global.subnet_prefix;

        search_gid.global.interface_id = priv->local_gid.global.interface_id;

        netif_addr_unlock_bh(priv->dev);

        err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB,
                          priv->dev, &port, &index);

        netif_addr_lock_bh(priv->dev);

        if (search_gid.global.interface_id !=
            priv->local_gid.global.interface_id)
                /* There was a change while we were looking up the gid, bail
                 * here and let the next work sort this out
                 */
                goto out;

        /* The next section of code needs some background:
         * Per IB spec the port GUID can't change if the HCA is powered on.
         * port GUID is the basis for GID at index 0 which is the basis for
         * the default device address of an ipoib interface.
         *
         * so it seems the flow should be:
         * if user_changed_dev_addr && gid in gid tbl
         *      set bit dev_addr_set
         *      return true
         * else
         *      return false
         *
         * The issue is that there are devices that don't follow the spec,
         * they change the port GUID when the HCA is powered, so in order
         * not to break userspace applications, we need to check if the
         * user wanted to control the device address and we assume that
         * if he sets the device address back to be based on GID index 0,
         * he no longer wishes to control it.
         *
         * If the user doesn't control the device address,
         * IPOIB_FLAG_DEV_ADDR_SET is set and ib_find_gid failed it means
         * the port GUID has changed and GID at index 0 has changed
         * so we need to change priv->local_gid and priv->dev->dev_addr
         * to reflect the new GID.
         */
        if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
                if (!err && port == priv->port) {
                        set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
                        if (index == 0)
                                clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
                                          &priv->flags);
                        else
                                set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
                        ret = true;
                } else {
                        ret = false;
                }
        } else {
                if (!err && port == priv->port) {
                        ret = true;
                } else {
                        if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
                                memcpy(&priv->local_gid, &gid0,
                                       sizeof(priv->local_gid));
                                memcpy(priv->dev->dev_addr + 4, &gid0,
                                       sizeof(priv->local_gid));
                                ret = true;
                        }
                }
        }

out:
        netif_addr_unlock_bh(priv->dev);

        return ret;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
                                 enum ipoib_flush_level level,
                                 int nesting)
{
        struct ipoib_dev_priv *cpriv;
        struct net_device *dev = priv->dev;
        int result;

        down_read_nested(&priv->vlan_rwsem, nesting);

        /*
         * Flush any child interfaces too -- they might be up even if
         * the parent is down.
         */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                __ipoib_ib_dev_flush(cpriv, level, nesting + 1);

        up_read(&priv->vlan_rwsem);

        if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
            level != IPOIB_FLUSH_HEAVY) {
                /* Make sure the dev_addr is set even if not flushing */
                if (level == IPOIB_FLUSH_LIGHT)
                        ipoib_dev_addr_changed_valid(priv);
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
                return;
        }

        if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                /* interface is down. update pkey and leave. */
                if (level == IPOIB_FLUSH_HEAVY) {
                        if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
                                update_parent_pkey(priv);
                        else
                                update_child_pkey(priv);
                } else if (level == IPOIB_FLUSH_LIGHT)
                        ipoib_dev_addr_changed_valid(priv);
                ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
                return;
        }

        if (level == IPOIB_FLUSH_HEAVY) {
                /* child devices chase their origin pkey value, while non-child
                 * (parent) devices should always take what is present at pkey index 0
                 */
                if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
                        result = update_child_pkey(priv);
                        if (result) {
                                /* restart QP only if P_Key index is changed */
                                ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
                                return;
                        }
                } else {
                        result = update_parent_pkey(priv);
                        /* restart QP only if P_Key value changed */
                        if (result) {
                                ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
                                return;
                        }
                }
        }

        if (level == IPOIB_FLUSH_LIGHT) {
                int oper_up;

                ipoib_mark_paths_invalid(dev);
                /* Set IPoIB operation as down to prevent races between:
                 * the flush flow which leaves MCG and on the fly joins
                 * which can happen during that time. mcast restart task
                 * should deal with join requests we missed.
                 */
                oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_mcast_dev_flush(dev);
                if (oper_up)
                        set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
                ipoib_flush_ah(dev);
        }

        if (level >= IPOIB_FLUSH_NORMAL)
                ipoib_ib_dev_down(dev);

        if (level == IPOIB_FLUSH_HEAVY) {
                if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
                        ipoib_ib_dev_stop(dev);
                if (ipoib_ib_dev_open(dev) != 0)
                        return;
                if (netif_queue_stopped(dev))
                        netif_start_queue(dev);
        }

        /*
         * The device could have been brought down between the start and when
         * we get here, don't bring it back up if it's not configured up
         */
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
                if (level >= IPOIB_FLUSH_NORMAL)
                        ipoib_ib_dev_up(dev);
                if (ipoib_dev_addr_changed_valid(priv))
                        ipoib_mcast_restart_task(&priv->restart_task);
        }
}
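
/*
 * Editor's note: the three wrappers below map the flush work items onto
 * __ipoib_ib_dev_flush() levels; LIGHT revalidates paths, addresses and
 * multicast state, NORMAL additionally downs the IB device, and HEAVY
 * restarts the QP (e.g. on a P_Key change).
 */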
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_light);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}

void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_normal);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}

void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, flush_heavy);

        __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg(priv, "cleaning up ib_dev\n");
        /*
         * We must make sure there are no more (path) completions
         * that may wish to touch priv fields that are no longer valid
         */
        ipoib_flush_paths(dev);

        ipoib_mcast_stop_thread(dev);
        ipoib_mcast_dev_flush(dev);

        /*
         * All of our ah references aren't free until after
         * ipoib_mcast_dev_flush(), ipoib_flush_paths, and
         * the neighbor garbage collection is stopped and reaped.
         * That should all be done now, so make a final ah flush.
         */
        ipoib_stop_ah(dev);

        ipoib_transport_dev_cleanup(dev);
}
);