drivers/infiniband/ulp/ipoib/ipoib_multicast.c
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
 */

#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>

#include <net/dst.h>

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

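/*
 * mcast_mutex serializes scheduling and cancellation of mcast_task
 * against the IPOIB_MCAST_RUN flag checks made before each
 * queue_delayed_work() call below.
 */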
static DEFINE_MUTEX(mcast_mutex);

/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;
	struct ib_sa_multicast *mc;
	struct ipoib_ah *ah;

	struct rb_node rb_node;
	struct list_head list;

	unsigned long created;
	unsigned long backoff;

	unsigned long flags;
	unsigned char logcount;

	struct list_head neigh_list;

	struct sk_buff_head pkt_queue;

	struct net_device *dev;
};

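/*
 * Snapshot state for walking the multicast group rb-tree; consumed by
 * the debug code when CONFIG_INFINIBAND_IPOIB_DEBUG is enabled (see
 * the ipoib_mcast_iter_* helpers at the bottom of this file).
 */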
struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid mgid;
	unsigned long created;
	unsigned int queuelen;
	unsigned int complete;
	unsigned int send_only;
};

static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	int tx_dropped = 0;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that mcast->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		ipoib_neigh_free(dev, neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	while (!skb_queue_empty(&mcast->pkt_queue)) {
		++tx_dropped;
		dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
	}

	spin_lock_irqsave(&priv->tx_lock, flags);
	priv->stats.tx_dropped += tx_dropped;
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	kfree(mcast);
}

static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
					     int can_sleep)
{
	struct ipoib_mcast *mcast;

	mcast = kzalloc(sizeof *mcast, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
	if (!mcast)
		return NULL;

	mcast->dev = dev;
	mcast->created = jiffies;
	mcast->backoff = 1;

	INIT_LIST_HEAD(&mcast->list);
	INIT_LIST_HEAD(&mcast->neigh_list);
	skb_queue_head_init(&mcast->pkt_queue);

	return mcast;
}

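/*
 * The multicast groups for a device live in an rb-tree keyed by raw
 * memcmp() order of the MGID.  Callers of the __-prefixed helpers
 * below must already hold priv->lock.
 */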
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->multicast_tree.rb_node;

	while (n) {
		struct ipoib_mcast *mcast;
		int ret;

		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		ret = memcmp(mgid, mcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return mcast;
	}

	return NULL;
}

static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;

	while (*n) {
		struct ipoib_mcast *tmcast;
		int ret;

		pn = *n;
		tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);

		ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &priv->multicast_tree);

	return 0;
}

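/*
 * Called once an SA join has succeeded: record the member parameters,
 * cache the broadcast Q_Key, attach the QP to the group (unless it is
 * send-only), build an address handle, and finally push out any
 * packets that were queued while the join was in progress.
 */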
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah;
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.static_rate = mcast->mcmember.rate,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};
		av.grh.dgid = mcast->mcmember.mgid;

		ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			spin_lock_irq(&priv->lock);
			mcast->ah = ah;
			spin_unlock_irq(&priv->lock);

			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	spin_lock_irq(&priv->tx_lock);
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
		spin_unlock_irq(&priv->tx_lock);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
		spin_lock_irq(&priv->tx_lock);
	}
	spin_unlock_irq(&priv->tx_lock);

	return 0;
}

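/*
 * Completion callback for a send-only join, invoked from the ib_sa
 * multicast layer.  On failure we drop whatever was queued for the
 * group and clear the BUSY flag so that the next transmit retries
 * the join.
 */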
static int
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (status) {
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
					IPOIB_GID_FMT ", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid), status);

		/* Flush out any queued packets */
		spin_lock_irq(&priv->tx_lock);
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
		}
		spin_unlock_irq(&priv->tx_lock);

		/* Clear the busy flag so we try again */
		status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY,
					    &mcast->flags);
	}
	return status;
}

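/*
 * Kick off an asynchronous send-only join.  Note the join_state in
 * the record below: 4 would request a true send-only membership, but
 * some subnet managers don't support that yet, so a full membership
 * (join_state 1) is requested instead.
 */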
static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
#if 0	/* Some SMs don't support send-only yet */
		.join_state = 4
#else
		.join_state = 1
#endif
	};
	int ret = 0;

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
		return -ENODEV;
	}

	if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
		return -EBUSY;
	}

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca,
					 priv->port, &rec,
					 IB_SA_MCMEMBER_REC_MGID |
					 IB_SA_MCMEMBER_REC_PORT_GID |
					 IB_SA_MCMEMBER_REC_PKEY |
					 IB_SA_MCMEMBER_REC_JOIN_STATE,
					 GFP_ATOMIC,
					 ipoib_mcast_sendonly_join_complete,
					 mcast);
	if (IS_ERR(mcast->mc)) {
		ret = PTR_ERR(mcast->mc);
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ipoib_warn(priv, "ib_sa_join_multicast failed (ret = %d)\n",
			   ret);
	} else {
		ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
				", starting join\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));
	}

	return ret;
}

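/*
 * Completion callback for a normal (non-send-only) join.  On success
 * the backoff is reset and the join task is requeued to pick up the
 * next group; on failure the task is retried with exponential backoff
 * capped at IPOIB_MAX_BACKOFF_SECONDS.
 */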
static int ipoib_mcast_join_complete(int status,
				     struct ib_sa_multicast *multicast)
{
	struct ipoib_mcast *mcast = multicast->context;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
			" (status %d)\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid), status);

	/* We trap for port events ourselves. */
	if (status == -ENETRESET)
		return 0;

	if (!status)
		status = ipoib_mcast_join_finish(mcast, &multicast->rec);

	if (!status) {
		mcast->backoff = 1;
		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task, 0);
		mutex_unlock(&mcast_mutex);

		if (mcast == priv->broadcast)
			netif_carrier_on(dev);

		return 0;
	}

	if (mcast->logcount++ < 20) {
		if (status == -ETIMEDOUT) {
			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
					", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					status);
		} else {
			ipoib_warn(priv, "multicast join failed for "
				   IPOIB_GID_FMT ", status %d\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid),
				   status);
		}
	}

	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	/* Clear the busy flag so we try again */
	status = test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);

	mutex_lock(&mcast_mutex);
	spin_lock_irq(&priv->lock);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
				   mcast->backoff * HZ);
	spin_unlock_irq(&priv->lock);
	mutex_unlock(&mcast_mutex);

	return status;
}

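/*
 * Issue an SA join for one group.  With create set, the request also
 * carries the Q_Key, MTU, rate and the other parameters copied from
 * the broadcast group, which (assuming standard SA semantics) allows
 * the subnet manager to create the group if it doesn't exist yet.
 */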
static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
			     int create)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	ib_sa_comp_mask comp_mask;
	int ret = 0;

	ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey = cpu_to_be16(priv->pkey);

	comp_mask =
		IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID |
		IB_SA_MCMEMBER_REC_PKEY |
		IB_SA_MCMEMBER_REC_JOIN_STATE;

	if (create) {
		comp_mask |=
			IB_SA_MCMEMBER_REC_QKEY |
			IB_SA_MCMEMBER_REC_MTU_SELECTOR |
			IB_SA_MCMEMBER_REC_MTU |
			IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
			IB_SA_MCMEMBER_REC_RATE_SELECTOR |
			IB_SA_MCMEMBER_REC_RATE |
			IB_SA_MCMEMBER_REC_SL |
			IB_SA_MCMEMBER_REC_FLOW_LABEL |
			IB_SA_MCMEMBER_REC_HOP_LIMIT;

		rec.qkey = priv->broadcast->mcmember.qkey;
		rec.mtu_selector = IB_SA_EQ;
		rec.mtu = priv->broadcast->mcmember.mtu;
		rec.traffic_class = priv->broadcast->mcmember.traffic_class;
		rec.rate_selector = IB_SA_EQ;
		rec.rate = priv->broadcast->mcmember.rate;
		rec.sl = priv->broadcast->mcmember.sl;
		rec.flow_label = priv->broadcast->mcmember.flow_label;
		rec.hop_limit = priv->broadcast->mcmember.hop_limit;
	}

	set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	mcast->mc = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	if (IS_ERR(mcast->mc)) {
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
		ret = PTR_ERR(mcast->mc);
		ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);

		mcast->backoff *= 2;
		if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
			mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

		mutex_lock(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_delayed_work(ipoib_workqueue,
					   &priv->mcast_task,
					   mcast->backoff * HZ);
		mutex_unlock(&mcast_mutex);
	}
}

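/*
 * Workqueue handler that drives all multicast joins for a device:
 * refresh the local GID and LID, join (and if necessary allocate) the
 * broadcast group first, then walk multicast_list joining one group
 * at a time.  The task is requeued from the join completion handler
 * until every group is attached.
 */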
void ipoib_mcast_join_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, mcast_task.work);
	struct net_device *dev = priv->dev;

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_gid_entry_get() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr))
			priv->local_lid = attr.lid;
		else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		struct ipoib_mcast *broadcast;

		broadcast = ipoib_mcast_alloc(dev, 1);
		if (!broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			mutex_lock(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			mutex_unlock(&mcast_mutex);
			return;
		}

		spin_lock_irq(&priv->lock);
		memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));
		priv->broadcast = broadcast;

		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags))
			ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;

	if (!ipoib_cm_admin_enabled(dev))
		dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
}

int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	mutex_lock(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_delayed_work(ipoib_workqueue, &priv->mcast_task, 0);
	mutex_unlock(&mcast_mutex);

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	return 0;
}

int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	spin_lock_irq(&priv->lock);
	clear_bit(IPOIB_MCAST_STARTED, &priv->flags);
	spin_unlock_irq(&priv->lock);

	mutex_lock(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	mutex_unlock(&mcast_mutex);

	if (flush)
		flush_workqueue(ipoib_workqueue);

	return 0;
}

static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
		ib_sa_free_multicast(mcast->mc);

	if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
		ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
				IPOIB_GID_ARG(mcast->mcmember.mgid));

		/* Remove ourselves from the multicast group */
		ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret)
			ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);
	}

	return 0;
}

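/*
 * Transmit path entry point for multicast.  If the group already has
 * an address handle the packet is sent immediately; otherwise up to
 * IPOIB_MAX_MCAST_QUEUE packets are queued on the group while the
 * join completes, and anything beyond that is dropped.
 */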
void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	if (!test_bit(IPOIB_MCAST_STARTED, &priv->flags) ||
	    !priv->broadcast ||
	    !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		goto unlock;
	}

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_RAW_ARG(mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		memcpy(mcast->mcmember.mgid.raw, mgid, sizeof (union ib_gid));
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		if (skb->dst &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = ipoib_neigh_alloc(skb->dst->neighbour);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah = mcast->ah;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

unlock:
	spin_unlock(&priv->lock);
}

void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		list_del(&mcast->list);
		rb_erase(&mcast->rb_node, &priv->multicast_tree);
		list_add_tail(&mcast->list, &remove_list);
	}

	if (priv->broadcast) {
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		list_add_tail(&priv->broadcast->list, &remove_list);
		priv->broadcast = NULL;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}

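/*
 * Resynchronize our group list with the net core's dev->mc_list.  Each
 * hardware multicast address is mapped to its MGID; bytes 4 and 5 of
 * the GID carry the P_Key, which the core doesn't know about, so they
 * are patched in here before the rb-tree lookup.
 */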
void ipoib_mcast_restart_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, restart_task);
	struct net_device *dev = priv->dev;
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	ipoib_mcast_stop_thread(dev, 0);

	local_irq_save(flags);
	netif_tx_lock(dev);
	spin_lock(&priv->lock);

	/*
	 * Unfortunately, the networking core only gives us a list of
	 * all of the multicast hardware addresses.  We need to figure
	 * out which ones are new and which ones have been removed.
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_move_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries that no longer exist */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_move_tail(&mcast->list, &remove_list);
		}
	}

	spin_unlock(&priv->lock);
	netif_tx_unlock(dev);
	local_irq_restore(flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}

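/*
 * The helpers below back the multicast group list exposed for
 * debugging.  The iterator re-looks up the next MGID under priv->lock
 * on every call instead of holding a tree position, so the walk stays
 * safe even if groups come and go between calls.
 */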
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
	struct ipoib_mcast_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->mgid.raw, 0, 16);

	if (ipoib_mcast_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}

int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_mcast *mcast;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->multicast_tree);

	while (n) {
		mcast = rb_entry(n, struct ipoib_mcast, rb_node);

		if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->mgid = mcast->mcmember.mgid;
			iter->created = mcast->created;
			iter->queuelen = skb_queue_len(&mcast->pkt_queue);
			iter->complete = !!mcast->ah;
			iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));

			ret = 0;

			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}

void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid = iter->mgid;
	*created = iter->created;
	*queuelen = iter->queuelen;
	*complete = iter->complete;
	*send_only = iter->send_only;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */