/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
                 "Enable multicast debug tracing if > 0");
#endif

static DEFINE_MUTEX(mcast_mutex);

struct ipoib_mcast_iter {
        struct net_device *dev;
        union ib_gid       mgid;
        unsigned long      created;
        unsigned int       queuelen;
        unsigned int       complete;
        unsigned int       send_only;
};

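/*
 * Free a multicast group entry: drop the address handle references
 * held by neighbours on the group's neigh_list, release the group's
 * own AH, and drop any packets still waiting on pkt_queue, accounting
 * them as tx_dropped.
 */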
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tmp;
        int tx_dropped = 0;

        ipoib_dbg_mcast(netdev_priv(dev), "deleting multicast group %pI6\n",
                        mcast->mcmember.mgid.raw);

        spin_lock_irq(&priv->lock);

        list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
                /*
                 * It's safe to call ipoib_put_ah() inside priv->lock
                 * here, because we know that mcast->ah will always
                 * hold one more reference, so ipoib_put_ah() will
                 * never do more than decrement the ref count.
                 */
                if (neigh->ah)
                        ipoib_put_ah(neigh->ah);
                ipoib_neigh_free(dev, neigh);
        }

        spin_unlock_irq(&priv->lock);

        if (mcast->ah)
                ipoib_put_ah(mcast->ah);

        while (!skb_queue_empty(&mcast->pkt_queue)) {
                ++tx_dropped;
                dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
        }

        netif_tx_lock_bh(dev);
        dev->stats.tx_dropped += tx_dropped;
        netif_tx_unlock_bh(dev);

        kfree(mcast);
}

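/*
 * Allocate and initialize an ipoib_mcast entry.  Uses GFP_KERNEL when
 * the caller can sleep, GFP_ATOMIC otherwise; returns NULL on
 * allocation failure.
 */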
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
                                             int can_sleep)
{
        struct ipoib_mcast *mcast;

        mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
        if (!mcast)
                return NULL;

        mcast->dev = dev;
        mcast->created = jiffies;

        mcast->backoff = 1;

        INIT_LIST_HEAD(&mcast->list);
        INIT_LIST_HEAD(&mcast->neigh_list);
        skb_queue_head_init(&mcast->pkt_queue);

        return mcast;
}

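/* Look up a group by MGID in the device's red-black tree; NULL if not found. */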
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node *n = priv->multicast_tree.rb_node;

        while (n) {
                struct ipoib_mcast *mcast;
                int ret;

                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                ret = memcmp(mgid, mcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        return mcast;
        }

        return NULL;
}

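/*
 * Insert a group into the multicast tree, keyed by MGID.  Returns
 * -EEXIST if an entry with the same MGID is already present.  Callers
 * hold priv->lock.
 */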
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

        while (*n) {
                struct ipoib_mcast *tmcast;
                int ret;

                pn = *n;
                tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

                ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
                             sizeof (union ib_gid));
                if (ret < 0)
                        n = &pn->rb_left;
                else if (ret > 0)
                        n = &pn->rb_right;
                else
                        return -EEXIST;
        }

        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

        return 0;
}

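/*
 * Complete a successful join: record the member data, cache the Q_Key
 * if this is the broadcast group, attach the QP to the group (unless
 * it is send-only), build an address handle from the member record,
 * and finally transmit any packets that were queued while the join
 * was in progress.
 */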
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
                                   struct ib_sa_mcmember_rec *mcmember)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_ah *ah;
        int ret;
        int set_qkey = 0;

        mcast->mcmember = *mcmember;

        /* Set the cached Q_Key before we attach if it's the broadcast group */
        if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                    sizeof (union ib_gid))) {
                spin_lock_irq(&priv->lock);
                if (!priv->broadcast) {
                        spin_unlock_irq(&priv->lock);
                        return -EAGAIN;
                }
                priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
                spin_unlock_irq(&priv->lock);
                priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
                set_qkey = 1;
        }

        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                        ipoib_warn(priv, "multicast group %pI6 already attached\n",
                                   mcast->mcmember.mgid.raw);

                        return 0;
                }

                ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
                                         &mcast->mcmember.mgid, set_qkey);
                if (ret < 0) {
                        ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
                                   mcast->mcmember.mgid.raw);

                        clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
                        return ret;
                }
        }

        {
                struct ib_ah_attr av = {
                        .dlid          = be16_to_cpu(mcast->mcmember.mlid),
                        .port_num      = priv->port,
                        .sl            = mcast->mcmember.sl,
                        .ah_flags      = IB_AH_GRH,
                        .static_rate   = mcast->mcmember.rate,
                        .grh           = {
                                .flow_label    = be32_to_cpu(mcast->mcmember.flow_label),
                                .hop_limit     = mcast->mcmember.hop_limit,
                                .sgid_index    = 0,
                                .traffic_class = mcast->mcmember.traffic_class
                        }
                };
                av.grh.dgid = mcast->mcmember.mgid;

                ah = ipoib_create_ah(dev, priv->pd, &av);
                if (!ah) {
                        ipoib_warn(priv, "ib_address_create failed\n");
                } else {
                        spin_lock_irq(&priv->lock);
                        mcast->ah = ah;
                        spin_unlock_irq(&priv->lock);

                        ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
                                        mcast->mcmember.mgid.raw,
                                        mcast->ah->ah,
                                        be16_to_cpu(mcast->mcmember.mlid),
                                        mcast->mcmember.sl);
                }
        }

        /* actually send any queued packets */
        netif_tx_lock_bh(dev);
        while (!skb_queue_empty(&mcast->pkt_queue)) {
                struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
                netif_tx_unlock_bh(dev);

                skb->dev = dev;

                if (!skb_dst(skb) || !skb_dst(skb)->neighbour) {
                        /* put pseudoheader back on for next time */
                        skb_push(skb, sizeof (struct ipoib_pseudoheader));
                }

                if (dev_queue_xmit(skb))
                        ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
                netif_tx_lock_bh(dev);
        }
        netif_tx_unlock_bh(dev);

        return 0;
}

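/*
 * Completion handler for a send-only join.  On success, finish the
 * join; on failure, drop everything queued on the group and clear the
 * busy flag so a later send retries the join.
 */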
static int
ipoib_mcast_sendonly_join_complete(int status,
                                   struct ib_sa_multicast *multicast)
{
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;

        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
                return 0;

        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);

        if (status) {
                if (mcast->logcount++ < 20)
                        ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);

                /* Flush out any queued packets */
                netif_tx_lock_bh(dev);
                while (!skb_queue_empty(&mcast->pkt_queue)) {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
                }
                netif_tx_unlock_bh(dev);

                /* Clear the busy flag so we try again */
                status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
                                            &mcast->flags);
        }
        return status;
}

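/*
 * Start an asynchronous join for a send-only group.  The SA request
 * is issued with GFP_ATOMIC since this runs in the transmit path with
 * priv->lock held; IPOIB_MCAST_FLAG_BUSY guards against starting a
 * second join while one is outstanding.
 */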
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sa_mcmember_rec rec = {
#if 0                           /* Some SMs don't support send-only yet */
                .join_state = 4
#else
                .join_state = 1
#endif
        };
        int ret = 0;

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
                ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
                return -ENODEV;
        }

        if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
                ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
                return -EBUSY;
        }

        rec.mgid     = mcast->mcmember.mgid;
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);

        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
                                         priv->port, &rec,
                                         IB_SA_MCMEMBER_REC_MGID        |
                                         IB_SA_MCMEMBER_REC_PORT_GID    |
                                         IB_SA_MCMEMBER_REC_PKEY        |
                                         IB_SA_MCMEMBER_REC_JOIN_STATE,
                                         GFP_ATOMIC,
                                         ipoib_mcast_sendonly_join_complete,
                                         mcast);
        if (IS_ERR(mcast->mc)) {
                ret = PTR_ERR(mcast->mc);
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
                ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
                           ret);
        } else {
                ipoib_dbg_mcast(priv, "no multicast record for %pI6, starting join\n",
                                mcast->mcmember.mgid.raw);
        }

        return ret;
}

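/* Deferred work item that turns the net_device carrier on under rtnl_lock. */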
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   carrier_on_task);

        /*
         * Take rtnl_lock to avoid racing with ipoib_stop() and
         * turning the carrier back on while a device is being
         * removed.
         */
        rtnl_lock();
        netif_carrier_on(priv->dev);
        rtnl_unlock();
}

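/*
 * Completion handler for a full (non-send-only) join.  On success the
 * join task is rescheduled immediately to pick up the next group, and
 * carrier-on is deferred to the workqueue for the broadcast group; on
 * failure the backoff is doubled (capped at IPOIB_MAX_BACKOFF_SECONDS)
 * and the join task is rescheduled after the backoff delay.
 */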
static int ipoib_mcast_join_complete(int status,
                                     struct ib_sa_multicast *multicast)
{
        struct ipoib_mcast *mcast = multicast->context;
        struct net_device *dev = mcast->dev;
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "join completion for %pI6 (status %d)\n",
                        mcast->mcmember.mgid.raw, status);

        /* We trap for port events ourselves. */
        if (status == -ENETRESET)
                return 0;

        if (!status)
                status = ipoib_mcast_join_finish(mcast, &multicast->rec);

        if (!status) {
                mcast->backoff = 1;
                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->mcast_task, 0);
                mutex_unlock(&mcast_mutex);

                /*
                 * Defer carrier on work to ipoib_workqueue to avoid a
                 * deadlock on rtnl_lock here.
                 */
                if (mcast == priv->broadcast)
                        queue_work(ipoib_workqueue, &priv->carrier_on_task);

                return 0;
        }

        if (mcast->logcount++ < 20) {
                if (status == -ETIMEDOUT || status == -EAGAIN) {
                        ipoib_dbg_mcast(priv, "multicast join failed for %pI6, status %d\n",
                                        mcast->mcmember.mgid.raw, status);
                } else {
                        ipoib_warn(priv, "multicast join failed for %pI6, status %d\n",
                                   mcast->mcmember.mgid.raw, status);
                }
        }

        mcast->backoff *= 2;
        if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

        /* Clear the busy flag so we try again */
        status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

        mutex_lock(&mcast_mutex);
        spin_lock_irq(&priv->lock);
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
                                   mcast->backoff * HZ);
        spin_unlock_irq(&priv->lock);
        mutex_unlock(&mcast_mutex);

        return status;
}

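/*
 * Issue an asynchronous SA join for a group.  When 'create' is set
 * the request also carries Q_Key, MTU, rate, SL, flow label, hop
 * limit and traffic class copied from the broadcast group, so the SM
 * will create the group if it does not exist yet.
 */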
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                             int create)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_sa_mcmember_rec rec = {
                .join_state = 1
        };
        ib_sa_comp_mask comp_mask;
        int ret = 0;

        ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);

        rec.mgid     = mcast->mcmember.mgid;
        rec.port_gid = priv->local_gid;
        rec.pkey     = cpu_to_be16(priv->pkey);

        comp_mask =
                IB_SA_MCMEMBER_REC_MGID         |
                IB_SA_MCMEMBER_REC_PORT_GID     |
                IB_SA_MCMEMBER_REC_PKEY         |
                IB_SA_MCMEMBER_REC_JOIN_STATE;

        if (create) {
                comp_mask |=
                        IB_SA_MCMEMBER_REC_QKEY                 |
                        IB_SA_MCMEMBER_REC_MTU_SELECTOR         |
                        IB_SA_MCMEMBER_REC_MTU                  |
                        IB_SA_MCMEMBER_REC_TRAFFIC_CLASS        |
                        IB_SA_MCMEMBER_REC_RATE_SELECTOR        |
                        IB_SA_MCMEMBER_REC_RATE                 |
                        IB_SA_MCMEMBER_REC_SL                   |
                        IB_SA_MCMEMBER_REC_FLOW_LABEL           |
                        IB_SA_MCMEMBER_REC_HOP_LIMIT;

                rec.qkey          = priv->broadcast->mcmember.qkey;
                rec.mtu_selector  = IB_SA_EQ;
                rec.mtu           = priv->broadcast->mcmember.mtu;
                rec.traffic_class = priv->broadcast->mcmember.traffic_class;
                rec.rate_selector = IB_SA_EQ;
                rec.rate          = priv->broadcast->mcmember.rate;
                rec.sl            = priv->broadcast->mcmember.sl;
                rec.flow_label    = priv->broadcast->mcmember.flow_label;
                rec.hop_limit     = priv->broadcast->mcmember.hop_limit;
        }

        set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
        mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
                                         &rec, comp_mask, GFP_KERNEL,
                                         ipoib_mcast_join_complete, mcast);
        if (IS_ERR(mcast->mc)) {
                clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
                ret = PTR_ERR(mcast->mc);
                ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

                mcast->backoff *= 2;
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

                mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->mcast_task,
                                           mcast->backoff * HZ);
                mutex_unlock(&mcast_mutex);
        }
}

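/*
 * Delayed work that walks the multicast list and joins groups one at
 * a time: first the broadcast group, then each unjoined, non-busy,
 * non-send-only group.  Once everything is joined, the multicast MTU
 * is derived from the broadcast group and IPOIB_MCAST_RUN is cleared.
 */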
void ipoib_mcast_join_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, mcast_task.work);
        struct net_device *dev = priv->dev;

        if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
                return;

        if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
                ipoib_warn(priv, "ib_query_gid() failed\n");
        else
                memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

        {
                struct ib_port_attr attr;

                if (!ib_query_port(priv->ca, priv->port, &attr))
                        priv->local_lid = attr.lid;
                else
                        ipoib_warn(priv, "ib_query_port failed\n");
        }

        if (!priv->broadcast) {
                struct ipoib_mcast *broadcast;

                if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        return;

                broadcast = ipoib_mcast_alloc(dev, 1);
                if (!broadcast) {
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
                        mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                                queue_delayed_work(ipoib_workqueue,
                                                   &priv->mcast_task, HZ);
                        mutex_unlock(&mcast_mutex);
                        return;
                }

                spin_lock_irq(&priv->lock);
                memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
                       sizeof (union ib_gid));
                priv->broadcast = broadcast;

                __ipoib_mcast_add(dev, priv->broadcast);
                spin_unlock_irq(&priv->lock);
        }

        if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
                if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
                        ipoib_mcast_join(dev, priv->broadcast, 0);
                return;
        }

        while (1) {
                struct ipoib_mcast *mcast = NULL;

                spin_lock_irq(&priv->lock);
                list_for_each_entry(mcast, &priv->multicast_list, list) {
                        if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
                            && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                                /* Found the next unjoined group */
                                break;
                        }
                }
                spin_unlock_irq(&priv->lock);

                if (&mcast->list == &priv->multicast_list) {
                        /* All done */
                        break;
                }

                ipoib_mcast_join(dev, mcast, 1);
                return;
        }

        priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));

        if (!ipoib_cm_admin_enabled(dev)) {
                rtnl_lock();
                dev_set_mtu(dev, min(priv->mcast_mtu, priv->admin_mtu));
                rtnl_unlock();
        }

        ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

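/* Kick off the multicast join task if it is not already running. */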
int ipoib_mcast_start_thread(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "starting multicast thread\n");

        mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
        mutex_unlock(&mcast_mutex);

        return 0;
}

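/*
 * Stop the multicast join task; if 'flush' is set, also wait for any
 * work already running on ipoib_workqueue to finish.
 */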
int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);

        ipoib_dbg_mcast(priv, "stopping multicast thread\n");

        mutex_lock(&mcast_mutex);
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
        mutex_unlock(&mcast_mutex);

        if (flush)
                flush_workqueue(ipoib_workqueue);

        return 0;
}

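/*
 * Leave a group: free the SA multicast handle if a join was in
 * progress, and detach the QP if it was attached.
 */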
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret = 0;

        if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                ib_sa_free_multicast(mcast->mc);

        if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
                ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
                                mcast->mcmember.mgid.raw);

                /* Remove ourselves from the multicast group */
                ret = ib_detach_mcast(priv->qp, &mcast->mcmember.mgid,
                                      be16_to_cpu(mcast->mcmember.mlid));
                if (ret)
                        ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
        }

        return 0;
}

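/*
 * Transmit path entry point for multicast.  If the group already has
 * an address handle the skb is sent immediately; otherwise it is
 * queued (up to IPOIB_MAX_MCAST_QUEUE packets) and a send-only join
 * is started for groups we have not joined as full members.
 */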
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_mcast *mcast;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)         ||
            !priv->broadcast                                    ||
            !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
                ++dev->stats.tx_dropped;
                dev_kfree_skb_any(skb);
                goto unlock;
        }

        mcast = __ipoib_mcast_find(dev, mgid);
        if (!mcast) {
                /* Let's create a new send only group now */
                ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
                                mgid);

                mcast = ipoib_mcast_alloc(dev, 0);
                if (!mcast) {
                        ipoib_warn(priv, "unable to allocate memory for "
                                   "multicast structure\n");
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                        goto out;
                }

                set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
                memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
                __ipoib_mcast_add(dev, mcast);
                list_add_tail(&mcast->list, &priv->multicast_list);
        }

        if (!mcast->ah) {
                if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
                        skb_queue_tail(&mcast->pkt_queue, skb);
                else {
                        ++dev->stats.tx_dropped;
                        dev_kfree_skb_any(skb);
                }

                if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
                        ipoib_dbg_mcast(priv, "no address vector, "
                                        "but multicast join already started\n");
                else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
                        ipoib_mcast_sendonly_join(mcast);

                /*
                 * If lookup completes between here and out:, we don't
                 * want to send the packet twice.
                 */
                mcast = NULL;
        }

out:
        if (mcast && mcast->ah) {
                if (skb_dst(skb)                &&
                    skb_dst(skb)->neighbour     &&
                    !*to_ipoib_neigh(skb_dst(skb)->neighbour)) {
                        struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb_dst(skb)->neighbour,
                                                                      skb->dev);

                        if (neigh) {
                                kref_get(&mcast->ah->ref);
                                neigh->ah       = mcast->ah;
                                list_add_tail(&neigh->list, &mcast->neigh_list);
                        }
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
                return;
        }

unlock:
        spin_unlock_irqrestore(&priv->lock, flags);
}

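/*
 * Remove every group (including the broadcast group) from the device:
 * unlink them under priv->lock, then leave and free each one outside
 * the lock.
 */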
void ipoib_mcast_dev_flush(struct net_device *dev)
{
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        LIST_HEAD(remove_list);
        struct ipoib_mcast *mcast, *tmcast;
        unsigned long flags;

        ipoib_dbg_mcast(priv, "flushing multicast list\n");

        spin_lock_irqsave(&priv->lock, flags);

        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                list_del(&mcast->list);
                rb_erase(&mcast->rb_node, &priv->multicast_tree);
                list_add_tail(&mcast->list, &remove_list);
        }

        if (priv->broadcast) {
                rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
                list_add_tail(&priv->broadcast->list, &remove_list);
                priv->broadcast = NULL;
        }

        spin_unlock_irqrestore(&priv->lock, flags);

        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(dev, mcast);
                ipoib_mcast_free(mcast);
        }
}

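/*
 * Sanity-check a hardware multicast address against the device's
 * broadcast address: the length, the reserved QPN/prefix/scope bytes,
 * and the signature/P_Key bytes must all match.
 */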
static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
                                     const u8 *broadcast)
{
        if (addrlen != INFINIBAND_ALEN)
                return 0;
        /* reserved QPN, prefix, scope */
        if (memcmp(addr, broadcast, 6))
                return 0;
        /* signature lower, pkey */
        if (memcmp(addr + 7, broadcast + 7, 3))
                return 0;
        return 1;
}

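/*
 * Resynchronize our multicast list with the hardware address list the
 * networking core maintains for the device: add groups that appeared,
 * move stale or superseded send-only entries to a removal list, and
 * leave/free the removed entries after dropping the locks.
 */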
void ipoib_mcast_restart_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv =
                container_of(work, struct ipoib_dev_priv, restart_task);
        struct net_device *dev = priv->dev;
        struct dev_mc_list *mclist;
        struct ipoib_mcast *mcast, *tmcast;
        LIST_HEAD(remove_list);
        unsigned long flags;
        struct ib_sa_mcmember_rec rec;

        ipoib_dbg_mcast(priv, "restarting multicast task\n");

        ipoib_mcast_stop_thread(dev, 0);

        local_irq_save(flags);
        netif_addr_lock(dev);
        spin_lock(&priv->lock);

        /*
         * Unfortunately, the networking core only gives us a list of all of
         * the multicast hardware addresses. We need to figure out which ones
         * are new and which ones have been removed
         */

        /* Clear out the found flag */
        list_for_each_entry(mcast, &priv->multicast_list, list)
                clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

        /* Mark all of the entries that are found or don't exist */
        for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
                union ib_gid mgid;

                if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
                                               mclist->dmi_addrlen,
                                               dev->broadcast))
                        continue;

                memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

                mcast = __ipoib_mcast_find(dev, &mgid);
                if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        struct ipoib_mcast *nmcast;

                        /* ignore group which is directly joined by userspace */
                        if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
                            !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
                                ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
                                                mgid.raw);
                                continue;
                        }

                        /* Not found or send-only group, let's add a new entry */
                        ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
                                        mgid.raw);

                        nmcast = ipoib_mcast_alloc(dev, 0);
                        if (!nmcast) {
                                ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
                                continue;
                        }

                        set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

                        nmcast->mcmember.mgid = mgid;

                        if (mcast) {
                                /* Destroy the send only entry */
                                list_move_tail(&mcast->list, &remove_list);

                                rb_replace_node(&mcast->rb_node,
                                                &nmcast->rb_node,
                                                &priv->multicast_tree);
                        } else
                                __ipoib_mcast_add(dev, nmcast);

                        list_add_tail(&nmcast->list, &priv->multicast_list);
                }

                if (mcast)
                        set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
        }

        /* Remove all of the entries that don't exist anymore */
        list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
                if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
                    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
                        ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
                                        mcast->mcmember.mgid.raw);

                        rb_erase(&mcast->rb_node, &priv->multicast_tree);

                        /* Move to the remove list */
                        list_move_tail(&mcast->list, &remove_list);
                }
        }

        spin_unlock(&priv->lock);
        netif_addr_unlock(dev);
        local_irq_restore(flags);

        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
                ipoib_mcast_leave(mcast->dev, mcast);
                ipoib_mcast_free(mcast);
        }

        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_mcast_start_thread(dev);
}

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

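/*
 * Allocate an iterator over the multicast tree, positioned at the
 * first group; returns NULL if allocation fails or the tree is empty.
 */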
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
        struct ipoib_mcast_iter *iter;

        iter = kmalloc(sizeof *iter, GFP_KERNEL);
        if (!iter)
                return NULL;

        iter->dev = dev;
        memset(iter->mgid.raw, 0, 16);

        if (ipoib_mcast_iter_next(iter)) {
                kfree(iter);
                return NULL;
        }

        return iter;
}

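/*
 * Advance the iterator to the next group whose MGID sorts after the
 * current one and snapshot its state; returns nonzero at the end.
 */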
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
        struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
        struct rb_node *n;
        struct ipoib_mcast *mcast;
        int ret = 1;

        spin_lock_irq(&priv->lock);

        n = rb_first(&priv->multicast_tree);

        while (n) {
                mcast = rb_entry(n, struct ipoib_mcast, rb_node);

                if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
                           sizeof (union ib_gid)) < 0) {
                        iter->mgid = mcast->mcmember.mgid;
                        iter->created = mcast->created;
                        iter->queuelen = skb_queue_len(&mcast->pkt_queue);
                        iter->complete = !!mcast->ah;
                        iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

                        ret = 0;

                        break;
                }

                n = rb_next(n);
        }

        spin_unlock_irq(&priv->lock);

        return ret;
}

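/* Copy out the state captured by the last ipoib_mcast_iter_next() call. */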
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
                           union ib_gid *mgid,
                           unsigned long *created,
                           unsigned int *queuelen,
                           unsigned int *complete,
                           unsigned int *send_only)
{
        *mgid      = iter->mgid;
        *created   = iter->created;
        *queuelen  = iter->queuelen;
        *complete  = iter->complete;
        *send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */