/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#include <linux/module.h>

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>

#include <linux/if_arp.h>	/* For ARPHRD_xxx */

#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
#include <linux/pci.h>

#define DRV_VERSION "1.0.0"

const char ipoib_driver_version[] = DRV_VERSION;
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;

module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;

module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
	struct net_device *dev;
	struct ipoib_path  path;
};

static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

struct workqueue_struct *ipoib_workqueue;

struct ib_sa_client ipoib_sa_client;
static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);

static struct ib_client ipoib_client = {
	.name   = "ipoib",
	.add    = ipoib_add_one,
	.remove = ipoib_remove_one,
	.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	netif_carrier_off(dev);

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	priv->sm_fullmember_sendonly_support = false;

	if (ipoib_ib_dev_open(dev)) {
		if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
			return 0;
		goto err_disable;
	}

	if (ipoib_ib_dev_up(dev))
		goto err_stop;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	netif_start_queue(dev);

	return 0;

err_stop:
	ipoib_ib_dev_stop(dev);

err_disable:
	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up_read(&priv->vlan_rwsem);
	}

	return 0;
}
static void ipoib_uninit(struct net_device *dev)
{
	ipoib_dev_cleanup(dev);
}

static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

	return features;
}

static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* dev->mtu > 2K ==> connected mode */
	if (ipoib_cm_admin_enabled(dev)) {
		if (new_mtu > ipoib_cm_max_mtu(dev))
			return -EINVAL;

		if (new_mtu > priv->mcast_mtu)
			ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
				   priv->mcast_mtu);

		dev->mtu = new_mtu;
		return 0;
	}

	if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
		return -EINVAL;

	priv->admin_mtu = new_mtu;

	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	return 0;
}
/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
					struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct in_device *in_dev;
	struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
	struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
	__be32 ret_addr;

	switch (addr->sa_family) {
	case AF_INET:
		in_dev = in_dev_get(dev);
		if (!in_dev)
			return false;

		ret_addr = inet_confirm_addr(net, in_dev, 0,
					     addr_in->sin_addr.s_addr,
					     RT_SCOPE_HOST);
		in_dev_put(in_dev);
		if (ret_addr)
			return true;

		break;
	case AF_INET6:
		if (IS_ENABLED(CONFIG_IPV6) &&
		    ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
			return true;

		break;
	}
	return false;
}
/**
 * Find the master net_device on top of the given net_device.
 * @dev: base IPoIB net_device
 *
 * Returns the master net_device with a reference held, or the same net_device
 * if no master exists.
 */
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
	struct net_device *master;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (master)
		dev_hold(master);
	rcu_read_unlock();

	if (master)
		return master;

	dev_hold(dev);
	return dev;
}
struct ipoib_walk_data {
	const struct sockaddr *addr;
	struct net_device *result;
};

static int ipoib_upper_walk(struct net_device *upper, void *_data)
{
	struct ipoib_walk_data *data = _data;
	int ret = 0;

	if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
		dev_hold(upper);
		data->result = upper;
		ret = 1;
	}

	return ret;
}
/**
 * Find a net_device matching the given address, which is an upper device of
 * the given net_device.
 * @addr: IP address to look for.
 * @dev: base IPoIB net_device
 *
 * If found, returns the net_device with a reference held. Otherwise return
 * NULL.
 */
static struct net_device *ipoib_get_net_dev_match_addr(
		const struct sockaddr *addr, struct net_device *dev)
{
	struct ipoib_walk_data data = {
		.addr = addr,
	};

	rcu_read_lock();
	if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
		dev_hold(dev);
		data.result = dev;
		goto out;
	}

	netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &data);
out:
	rcu_read_unlock();
	return data.result;
}
/* returns the number of IPoIB netdevs on top of a given ipoib device
 * matching a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
				     const union ib_gid *gid,
				     u16 pkey_index,
				     const struct sockaddr *addr,
				     int nesting,
				     struct net_device **found_net_dev)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *net_dev = NULL;
	int matches = 0;

	if (priv->pkey_index == pkey_index &&
	    (!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
		if (!addr) {
			net_dev = ipoib_get_master_net_dev(priv->dev);
		} else {
			/* Verify the net_device matches the IP address, as
			 * IPoIB child devices currently share a GID. */
			net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
		}
		if (net_dev) {
			if (!*found_net_dev)
				*found_net_dev = net_dev;
			else
				dev_put(net_dev);
			++matches;
		}
	}

	/* Check child interfaces */
	down_read_nested(&priv->vlan_rwsem, nesting);
	list_for_each_entry(child_priv, &priv->child_intfs, list) {
		matches += ipoib_match_gid_pkey_addr(child_priv, gid,
						     pkey_index, addr,
						     nesting + 1,
						     found_net_dev);
		if (matches > 1)
			break;
	}
	up_read(&priv->vlan_rwsem);

	return matches;
}
/* Returns the number of matching net_devs found (between 0 and 2). Also
 * return the matching net_device in the @net_dev parameter, holding a
 * reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u8 port,
					 u16 pkey_index,
					 const union ib_gid *gid,
					 const struct sockaddr *addr,
					 struct net_device **net_dev)
{
	struct ipoib_dev_priv *priv;
	int matches = 0;

	*net_dev = NULL;

	list_for_each_entry(priv, dev_list, list) {
		if (priv->port != port)
			continue;

		matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
						     addr, 0, net_dev);
		if (matches > 1)
			break;
	}

	return matches;
}
static struct net_device *ipoib_get_net_dev_by_params(
		struct ib_device *dev, u8 port, u16 pkey,
		const union ib_gid *gid, const struct sockaddr *addr,
		void *client_data)
{
	struct net_device *net_dev;
	struct list_head *dev_list = client_data;
	u16 pkey_index;
	int matches;
	int ret;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
	if (ret)
		return NULL;

	if (!dev_list)
		return NULL;

	/* See if we can find a unique device matching the L2 parameters */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, NULL, &net_dev);

	switch (matches) {
	case 0:
		return NULL;
	case 1:
		return net_dev;
	}

	dev_put(net_dev);

	/* Couldn't find a unique device with L2 parameters only. Use L3
	 * address to uniquely match the net device */
	matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
						gid, addr, &net_dev);
	switch (matches) {
	case 0:
		return NULL;
	default:
		dev_warn_ratelimited(&dev->dev,
				     "duplicate IP address detected\n");
		/* Fall through */
	case 1:
		return net_dev;
	}
}
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return 0;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		ipoib_flush_paths(dev);
		return 0;
	}

	return -EINVAL;
}
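
/*
 * Path records are cached in a per-device red-black tree keyed by the
 * 16-byte destination GID (compared with memcmp()). The lookup below
 * assumes the caller already holds priv->lock.
 */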
struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node *n = priv->path_tree.rb_node;
	struct ipoib_path *path;
	int ret;

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		ret = memcmp(gid, path->pathrec.dgid.raw,
			     sizeof (union ib_gid));

		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			return path;
	}

	return NULL;
}
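
/*
 * Insert a freshly created path into both the rb-tree and the flat
 * path_list used by bulk operations such as ipoib_flush_paths();
 * returns -EEXIST if a path with the same destination GID is already
 * cached. Like __path_find(), must be called with priv->lock held.
 */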
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct rb_node **n = &priv->path_tree.rb_node;
	struct rb_node *pn = NULL;
	struct ipoib_path *tpath;
	int ret;

	while (*n) {
		pn = *n;
		tpath = rb_entry(pn, struct ipoib_path, rb_node);

		ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
			     sizeof (union ib_gid));
		if (ret < 0)
			n = &pn->rb_left;
		else if (ret > 0)
			n = &pn->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&path->rb_node, pn, n);
	rb_insert_color(&path->rb_node, &priv->path_tree);

	list_add_tail(&path->list, &priv->path_list);

	return 0;
}
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	ipoib_dbg(netdev_priv(dev), "path_free\n");

	/* remove all neigh connected to this path */
	ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG

struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
	struct ipoib_path_iter *iter;

	iter = kmalloc(sizeof *iter, GFP_KERNEL);
	if (!iter)
		return NULL;

	iter->dev = dev;
	memset(iter->path.pathrec.dgid.raw, 0, 16);

	if (ipoib_path_iter_next(iter)) {
		kfree(iter);
		return NULL;
	}

	return iter;
}
int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
	struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
	struct rb_node *n;
	struct ipoib_path *path;
	int ret = 1;

	spin_lock_irq(&priv->lock);

	n = rb_first(&priv->path_tree);

	while (n) {
		path = rb_entry(n, struct ipoib_path, rb_node);

		if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
			   sizeof (union ib_gid)) < 0) {
			iter->path = *path;
			ret = 0;
			break;
		}

		n = rb_next(n);
	}

	spin_unlock_irq(&priv->lock);

	return ret;
}
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
			  struct ipoib_path *path)
{
	*path = iter->path;
}

#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;

	spin_lock_irq(&priv->lock);

	list_for_each_entry_safe(path, tp, &priv->path_list, list) {
		ipoib_dbg(priv, "mark path LID 0x%04x GID %pI6 invalid\n",
			  be16_to_cpu(path->pathrec.dlid),
			  path->pathrec.dgid.raw);
		path->valid = 0;
	}

	spin_unlock_irq(&priv->lock);
}
struct classport_info_context {
	struct ipoib_dev_priv	*priv;
	struct completion	done;
	struct ib_sa_query	*sa_query;
};

static void classport_info_query_cb(int status, struct ib_class_port_info *rec,
				    void *context)
{
	struct classport_info_context *cb_ctx = context;
	struct ipoib_dev_priv *priv;

	WARN_ON(!context);

	priv = cb_ctx->priv;

	if (status || !rec) {
		pr_debug("device: %s failed query classport_info status: %d\n",
			 priv->dev->name, status);
		/* keeps the default, will try next mcast_restart */
		priv->sm_fullmember_sendonly_support = false;
		goto out;
	}

	if (ib_get_cpi_capmask2(rec) &
	    IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT) {
		pr_debug("device: %s enabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = true;
	} else {
		pr_debug("device: %s disabled fullmember-sendonly for sendonly MCG\n",
			 priv->dev->name);
		priv->sm_fullmember_sendonly_support = false;
	}

out:
	complete(&cb_ctx->done);
}
int ipoib_check_sm_sendonly_fullmember_support(struct ipoib_dev_priv *priv)
{
	struct classport_info_context *callback_context;
	int ret;

	callback_context = kmalloc(sizeof(*callback_context), GFP_KERNEL);
	if (!callback_context)
		return -ENOMEM;

	callback_context->priv = priv;
	init_completion(&callback_context->done);

	ret = ib_sa_classport_info_rec_query(&ipoib_sa_client,
					     priv->ca, priv->port, 3000,
					     GFP_KERNEL,
					     classport_info_query_cb,
					     callback_context,
					     &callback_context->sa_query);
	if (ret < 0) {
		pr_info("%s failed to send ib_sa_classport_info query, ret: %d\n",
			priv->dev->name, ret);
		kfree(callback_context);
		return ret;
	}

	/* waiting for the callback to finish before returning */
	wait_for_completion(&callback_context->done);
	kfree(callback_context);

	return ret;
}
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	list_splice_init(&priv->path_list, &remove_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		wait_for_completion(&path->done);
		path_free(dev, path);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
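
/*
 * Completion handler for an SA path record query. On success it builds
 * an address handle from the returned path, hands it to every neighbour
 * waiting on this path, and requeues any packets that were parked on
 * the path/neighbour queues while the lookup was in flight.
 */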
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_ah *old_ah = NULL;
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (!status)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
			  be16_to_cpu(pathrec->dlid), pathrec->dgid.raw);
	else
		ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
			  status, path->pathrec.dgid.raw);

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av;

		if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
			ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	if (!IS_ERR_OR_NULL(ah)) {
		path->pathrec = *pathrec;

		old_ah   = path->ah;
		path->ah = ah;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
			if (neigh->ah) {
				WARN_ON(neigh->ah != old_ah);
				/*
				 * Dropping the ah reference inside
				 * priv->lock is safe here, because we
				 * will hold one more reference from
				 * the original value of path->ah (ie
				 * old_ah).
				 */
				ipoib_put_ah(neigh->ah);
			}
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			if (ipoib_cm_enabled(dev, neigh->daddr)) {
				if (!ipoib_cm_get(neigh))
					ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
									       path,
									       neigh));
				if (!ipoib_cm_get(neigh)) {
					ipoib_neigh_free(neigh);
					continue;
				}
			}

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
		path->valid = 1;
	}

	path->query = NULL;
	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (IS_ERR_OR_NULL(ah))
		ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);

	if (old_ah)
		ipoib_put_ah(old_ah);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	if (!priv->broadcast)
		return NULL;

	path = kzalloc(sizeof *path, GFP_ATOMIC);
	if (!path)
		return NULL;

	path->dev = dev;

	skb_queue_head_init(&path->queue);

	INIT_LIST_HEAD(&path->neigh_list);

	memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
	path->pathrec.sgid	    = priv->local_gid;
	path->pathrec.pkey	    = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;

	return path;
}
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for %pI6\n",
		  path->pathrec.dgid.raw);

	init_completion(&path->done);

	path->query_id =
		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID		|
				   IB_SA_PATH_REC_SGID		|
				   IB_SA_PATH_REC_NUMB_PATH	|
				   IB_SA_PATH_REC_TRAFFIC_CLASS |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
		path->query = NULL;
		complete(&path->done);
		return path->query_id;
	}

	return 0;
}
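
/*
 * Transmit slow path: called when ipoib_start_xmit() finds no neighbour
 * entry for a unicast destination. Allocates the neighbour, attaches it
 * to a (possibly freshly created) path, and either sends the skb right
 * away if the path already has an address handle, or queues it until
 * the path record query completes.
 */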
static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
			   struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	neigh = ipoib_neigh_alloc(daddr, dev);
	if (!neigh) {
		spin_unlock_irqrestore(&priv->lock, flags);
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	path = __path_find(dev, daddr + 4);
	if (!path) {
		path = path_rec_create(dev, daddr + 4);
		if (!path)
			goto err_path;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->ah) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		if (ipoib_cm_enabled(dev, neigh->daddr)) {
			if (!ipoib_cm_get(neigh))
				ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
			if (!ipoib_cm_get(neigh)) {
				ipoib_neigh_free(neigh);
				goto err_drop;
			}
			if (skb_queue_len(&neigh->queue) <
			    IPOIB_MAX_PATH_REC_QUEUE) {
				/* put pseudoheader back on for next time */
				skb_push(skb, IPOIB_PSEUDO_LEN);
				__skb_queue_tail(&neigh->queue, skb);
			} else {
				ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
					   skb_queue_len(&neigh->queue));
				goto err_drop;
			}
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(daddr));
			ipoib_neigh_put(neigh);
			return;
		}
	} else {
		neigh->ah = NULL;

		if (!path->query && path_rec_start(dev, path))
			goto err_path;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)
			__skb_queue_tail(&neigh->queue, skb);
		else
			goto err_drop;
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
	return;

err_path:
	ipoib_neigh_free(neigh);
err_drop:
	++dev->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_neigh_put(neigh);
}
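
/*
 * Unicast ARP and RARP always go through a fresh path lookup (see the
 * unicast switch in ipoib_start_xmit()) rather than a cached neighbour,
 * since the hardware address carried in an ARP packet can be newer than
 * any neighbour state we have for the destination.
 */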
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudo_header *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	path = __path_find(dev, phdr->hwaddr + 4);
	if (!path || !path->valid) {
		int new_path = 0;

		if (!path) {
			path = path_rec_create(dev, phdr->hwaddr + 4);
			new_path = 1;
		}
		if (path) {
			if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
				/* put pseudoheader back on for next time */
				skb_push(skb, IPOIB_PSEUDO_LEN);
				__skb_queue_tail(&path->queue, skb);
			} else {
				++dev->stats.tx_dropped;
				dev_kfree_skb_any(skb);
			}

			if (!path->query && path_rec_start(dev, path)) {
				spin_unlock_irqrestore(&priv->lock, flags);
				if (new_path)
					path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}

	if (path->ah) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, IPOIB_PSEUDO_LEN);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
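
/*
 * Main transmit entry point. The pseudoheader prepended by
 * ipoib_hard_header() is stripped here and only used to pick the
 * destination; it never goes on the wire.
 */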
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;
	unsigned long flags;

	phdr = (struct ipoib_pseudo_header *) skb->data;
	skb_pull(skb, sizeof(*phdr));
	header = (struct ipoib_header *) skb->data;

	if (unlikely(phdr->hwaddr[4] == 0xff)) {
		/* multicast, arrange "if" according to probability */
		if ((header->proto != htons(ETH_P_IP)) &&
		    (header->proto != htons(ETH_P_IPV6)) &&
		    (header->proto != htons(ETH_P_ARP)) &&
		    (header->proto != htons(ETH_P_RARP)) &&
		    (header->proto != htons(ETH_P_TIPC))) {
			/* ethertype not supported by IPoIB */
			++dev->stats.tx_dropped;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
		/* Add in the P_Key for multicast */
		phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
		phdr->hwaddr[9] = priv->pkey & 0xff;

		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (likely(neigh))
			goto send_using_neigh;
		ipoib_mcast_send(dev, phdr->hwaddr, skb);
		return NETDEV_TX_OK;
	}

	/* unicast, arrange "switch" according to probability */
	switch (header->proto) {
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_TIPC):
		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
		if (unlikely(!neigh)) {
			neigh_add_path(skb, phdr->hwaddr, dev);
			return NETDEV_TX_OK;
		}
		break;
	case htons(ETH_P_ARP):
	case htons(ETH_P_RARP):
		/* for unicast ARP and RARP should always perform path find */
		unicast_arp_send(skb, dev, phdr);
		return NETDEV_TX_OK;
	default:
		/* ethertype not supported by IPoIB */
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

send_using_neigh:
	/* note we now hold a ref to neigh */
	if (ipoib_cm_get(neigh)) {
		if (ipoib_cm_up(neigh)) {
			ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
			goto unref;
		}
	} else if (neigh->ah) {
		ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(phdr->hwaddr));
		goto unref;
	}

	if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof(*phdr));
		spin_lock_irqsave(&priv->lock, flags);
		__skb_queue_tail(&neigh->queue, skb);
		spin_unlock_irqrestore(&priv->lock, flags);
	} else {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

unref:
	ipoib_neigh_put(neigh);

	return NETDEV_TX_OK;
}
static void ipoib_timeout(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
		   jiffies_to_msecs(jiffies - dev_trans_start(dev)));
	ipoib_warn(priv, "queue stopped %d, tx_head %u, tx_tail %u\n",
		   netif_queue_stopped(dev),
		   priv->tx_head, priv->tx_tail);
	/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
			     struct net_device *dev,
			     unsigned short type,
			     const void *daddr, const void *saddr, unsigned len)
{
	struct ipoib_pseudo_header *phdr;
	struct ipoib_header *header;

	header = (struct ipoib_header *) skb_push(skb, sizeof *header);

	header->proto = htons(type);
	header->reserved = 0;

	/*
	 * we don't rely on dst_entry structure, always stuff the
	 * destination address into skb hard header so we can figure out where
	 * to send the packet later.
	 */
	phdr = (struct ipoib_pseudo_header *) skb_push(skb, sizeof(*phdr));
	memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);

	return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
		ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
		return;
	}

	queue_work(priv->wq, &priv->restart_task);
}
static int ipoib_get_iflink(const struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* parent interface */
	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
		return dev->ifindex;

	/* child/vlan interface */
	return priv->parent->ifindex;
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one can not connect to
	 * same remote port (GUID) using the same remote QPN via two
	 * different subnets.
	 */
	/* qpn octets[1:4) & port GUID octets[12:20) */
	u32 *d32 = (u32 *) daddr;
	u32 hv;

	hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
	return hv & htbl->mask;
}
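
/*
 * Lock-free neighbour lookup under rcu_read_lock_bh(); on success the
 * entry is returned with an extra reference held on behalf of the
 * caller, otherwise NULL.
 */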
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh = NULL;
	u32 hash_val;

	rcu_read_lock_bh();

	htbl = rcu_dereference_bh(ntbl->htbl);

	if (!htbl)
		goto out_unlock;

	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
	     neigh != NULL;
	     neigh = rcu_dereference_bh(neigh->hnext)) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				goto out_unlock;
			}

			if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
				neigh->alive = jiffies;
			goto out_unlock;
		}
	}

out_unlock:
	rcu_read_unlock_bh();
	return neigh;
}
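
/*
 * Neighbour garbage collection: entries idle for two ARP GC intervals
 * are unhooked from the hash under priv->lock and freed via call_rcu()
 * once all readers are done.
 */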
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long neigh_obsolete;
	unsigned long dt;
	unsigned long flags;
	int i;
	LIST_HEAD(remove_list);

	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		return;

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	/* neigh is obsolete if it was idle for two GC periods */
	dt = 2 * arp_tbl.gc_interval;
	neigh_obsolete = jiffies - dt;
	/* handle possible race condition */
	if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* was the neigh idle for two GC periods */
			if (time_after(neigh_obsolete, neigh->alive)) {
				ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);

				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from path/mc list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	ipoib_mcast_remove_list(&remove_list);
}
static void ipoib_reap_neigh(struct work_struct *work)
{
	struct ipoib_dev_priv *priv =
		container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);

	__ipoib_reap_neigh(priv);

	if (!test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
		queue_delayed_work(priv->wq, &priv->neigh_reap_task,
				   arp_tbl.gc_interval);
}
static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
					    struct net_device *dev)
{
	struct ipoib_neigh *neigh;

	neigh = kzalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh)
		return NULL;

	neigh->dev = dev;
	memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
	skb_queue_head_init(&neigh->queue);
	INIT_LIST_HEAD(&neigh->list);
	ipoib_cm_set(neigh, NULL);
	/* one ref on behalf of the caller */
	atomic_set(&neigh->refcnt, 1);

	return neigh;
}
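
/*
 * Look up or create a neighbour entry. Called with priv->lock held;
 * the hash is re-searched first because another thread may have added
 * the entry (or a table resize may have occurred) since the caller's
 * lock-free lookup failed.
 */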
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
				      struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh *neigh;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl) {
		neigh = NULL;
		goto out_unlock;
	}

	/* need to add a new neigh, but maybe some other thread succeeded?
	 * recalc hash, maybe hash resize took place so we do a search
	 */
	hash_val = ipoib_addr_hash(htbl, daddr);
	for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
					       lockdep_is_held(&priv->lock));
	     neigh != NULL;
	     neigh = rcu_dereference_protected(neigh->hnext,
					       lockdep_is_held(&priv->lock))) {
		if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
			/* found, take one ref on behalf of the caller */
			if (!atomic_inc_not_zero(&neigh->refcnt)) {
				/* deleted */
				neigh = NULL;
				break;
			}
			neigh->alive = jiffies;
			goto out_unlock;
		}
	}

	neigh = ipoib_neigh_ctor(daddr, dev);
	if (!neigh)
		goto out_unlock;

	/* one ref on behalf of the hash table */
	atomic_inc(&neigh->refcnt);
	neigh->alive = jiffies;
	/* put in hash */
	rcu_assign_pointer(neigh->hnext,
			   rcu_dereference_protected(htbl->buckets[hash_val],
						     lockdep_is_held(&priv->lock)));
	rcu_assign_pointer(htbl->buckets[hash_val], neigh);
	atomic_inc(&ntbl->entries);

out_unlock:

	return neigh;
}
void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;

	if (neigh->ah)
		ipoib_put_ah(neigh->ah);
	while ((skb = __skb_dequeue(&neigh->queue))) {
		++dev->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}
	if (ipoib_cm_get(neigh))
		ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
	ipoib_dbg(netdev_priv(dev),
		  "neigh free for %06x %pI6\n",
		  IPOIB_QPN(neigh->daddr),
		  neigh->daddr + 4);
	kfree(neigh);
	if (atomic_dec_and_test(&priv->ntbl.entries)) {
		if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
			complete(&priv->ntbl.flushed);
	}
}
static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
	/* Called as a result of removal from hash table */
	struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
	/* note TX context may hold another ref */
	ipoib_neigh_put(neigh);
}
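
/*
 * Unlink a neighbour from its hash bucket. The actual kfree happens in
 * ipoib_neigh_dtor() once the last reference is dropped after the RCU
 * grace period. Caller must hold priv->lock.
 */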
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
	struct net_device *dev = neigh->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **np;
	struct ipoib_neigh *n;
	u32 hash_val;

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		return;

	hash_val = ipoib_addr_hash(htbl, neigh->daddr);
	np = &htbl->buckets[hash_val];
	for (n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock));
	     n != NULL;
	     n = rcu_dereference_protected(*np,
					   lockdep_is_held(&priv->lock))) {
		if (n == neigh) {
			/* found */
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from parent list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			return;
		} else {
			np = &n->hnext;
		}
	}
}
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	struct ipoib_neigh __rcu **buckets;
	u32 size;

	clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
	htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
	if (!htbl)
		return -ENOMEM;
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	size = roundup_pow_of_two(arp_tbl.gc_thresh3);
	buckets = kzalloc(size * sizeof(*buckets), GFP_KERNEL);
	if (!buckets) {
		kfree(htbl);
		return -ENOMEM;
	}
	htbl->size = size;
	htbl->mask = (size - 1);
	htbl->buckets = buckets;
	RCU_INIT_POINTER(ntbl->htbl, htbl);
	htbl->ntbl = ntbl;
	atomic_set(&ntbl->entries, 0);

	/* start garbage collection */
	clear_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	queue_delayed_work(priv->wq, &priv->neigh_reap_task,
			   arp_tbl.gc_interval);

	return 0;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
	struct ipoib_neigh_hash *htbl = container_of(head,
						     struct ipoib_neigh_hash,
						     rcu);
	struct ipoib_neigh __rcu **buckets = htbl->buckets;
	struct ipoib_neigh_table *ntbl = htbl->ntbl;

	kfree(buckets);
	kfree(htbl);
	complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i;

	/* remove all neigh connected to a given path or mcast */
	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));

	if (!htbl)
		goto out_unlock;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			/* delete neighs belonging to this parent */
			if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) {
				rcu_assign_pointer(*np,
						   rcu_dereference_protected(neigh->hnext,
									     lockdep_is_held(&priv->lock)));
				/* remove from parent list */
				list_del(&neigh->list);
				call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
			} else {
				np = &neigh->hnext;
			}
		}
	}
out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
}
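
/*
 * Tear down the whole neighbour table: every entry is queued for RCU
 * reclaim and, if any entries existed, we block until the last
 * reference drop signals ntbl.flushed.
 */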
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
	struct ipoib_neigh_table *ntbl = &priv->ntbl;
	struct ipoib_neigh_hash *htbl;
	unsigned long flags;
	int i, wait_flushed = 0;

	init_completion(&priv->ntbl.flushed);

	spin_lock_irqsave(&priv->lock, flags);

	htbl = rcu_dereference_protected(ntbl->htbl,
					 lockdep_is_held(&priv->lock));
	if (!htbl)
		goto out_unlock;

	wait_flushed = atomic_read(&priv->ntbl.entries);
	if (!wait_flushed)
		goto free_htbl;

	for (i = 0; i < htbl->size; i++) {
		struct ipoib_neigh *neigh;
		struct ipoib_neigh __rcu **np = &htbl->buckets[i];

		while ((neigh = rcu_dereference_protected(*np,
							  lockdep_is_held(&priv->lock))) != NULL) {
			rcu_assign_pointer(*np,
					   rcu_dereference_protected(neigh->hnext,
								     lockdep_is_held(&priv->lock)));
			/* remove from path/mc list */
			list_del(&neigh->list);
			call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
		}
	}

free_htbl:
	rcu_assign_pointer(ntbl->htbl, NULL);
	call_rcu(&htbl->rcu, neigh_hash_free_rcu);

out_unlock:
	spin_unlock_irqrestore(&priv->lock, flags);
	if (wait_flushed)
		wait_for_completion(&priv->ntbl.flushed);
}
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int stopped;

	ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n");
	init_completion(&priv->ntbl.deleted);
	set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);

	/* Stop GC; if called at init failure we need to cancel the work */
	stopped = test_and_set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	if (!stopped)
		cancel_delayed_work(&priv->neigh_reap_task);

	ipoib_flush_neighs(priv);

	wait_for_completion(&priv->ntbl.deleted);
}
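
/*
 * Per-port init. Note the RX ring is kzalloc'ed while the TX ring uses
 * vzalloc(): the TX ring can be considerably larger, so vmalloc avoids
 * depending on a high-order contiguous allocation.
 */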
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */
	priv->rx_ring = kzalloc(ipoib_recvq_size * sizeof *priv->rx_ring,
				GFP_KERNEL);
	if (!priv->rx_ring)
		goto out;

	priv->tx_ring = vzalloc(ipoib_sendq_size * sizeof *priv->tx_ring);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, ipoib_sendq_size);
		goto out_rx_ring_cleanup;
	}

	/* priv->tx_head, tx_tail & tx_outstanding are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	/*
	 * Must be after ipoib_ib_dev_init so we can allocate a per
	 * device wq there and use it here
	 */
	if (ipoib_neigh_hash_init(priv) < 0)
		goto out_dev_uninit;

	return 0;

out_dev_uninit:
	ipoib_ib_dev_cleanup(dev);

out_tx_ring_cleanup:
	vfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
void ipoib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
	LIST_HEAD(head);

	ASSERT_RTNL();

	ipoib_delete_debug_files(dev);

	/* Delete any child interfaces first */
	list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
		/* Stop GC on child */
		set_bit(IPOIB_STOP_NEIGH_GC, &cpriv->flags);
		cancel_delayed_work(&cpriv->neigh_reap_task);
		unregister_netdevice_queue(cpriv->dev, &head);
	}
	unregister_netdevice_many(&head);

	/*
	 * Must be before ipoib_ib_dev_cleanup or we delete an in use
	 * work queue
	 */
	ipoib_neigh_hash_uninit(dev);

	ipoib_ib_dev_cleanup(dev);

	kfree(priv->rx_ring);
	vfree(priv->tx_ring);

	priv->rx_ring = NULL;
	priv->tx_ring = NULL;
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}

static int ipoib_get_vf_config(struct net_device *dev, int vf,
			       struct ifla_vf_info *ivf)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int err;

	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
	if (err)
		return err;

	ivf->vf = vf;

	return 0;
}

static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
		return -EINVAL;

	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}

static int ipoib_get_vf_stats(struct net_device *dev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}
static const struct header_ops ipoib_header_ops = {
	.create	= ipoib_hard_header,
};

static const struct net_device_ops ipoib_netdev_ops_pf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
	.ndo_set_vf_link_state	 = ipoib_set_vf_link_state,
	.ndo_get_vf_config	 = ipoib_get_vf_config,
	.ndo_get_vf_stats	 = ipoib_get_vf_stats,
	.ndo_set_vf_guid	 = ipoib_set_vf_guid,
	.ndo_set_mac_address	 = ipoib_set_mac,
};

static const struct net_device_ops ipoib_netdev_ops_vf = {
	.ndo_uninit		 = ipoib_uninit,
	.ndo_open		 = ipoib_open,
	.ndo_stop		 = ipoib_stop,
	.ndo_change_mtu		 = ipoib_change_mtu,
	.ndo_fix_features	 = ipoib_fix_features,
	.ndo_start_xmit		 = ipoib_start_xmit,
	.ndo_tx_timeout		 = ipoib_timeout,
	.ndo_set_rx_mode	 = ipoib_set_mcast_list,
	.ndo_get_iflink		 = ipoib_get_iflink,
};
void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (priv->hca_caps & IB_DEVICE_VIRTUAL_FUNCTION)
		dev->netdev_ops	= &ipoib_netdev_ops_vf;
	else
		dev->netdev_ops	= &ipoib_netdev_ops_pf;

	dev->header_ops		 = &ipoib_header_ops;

	ipoib_set_ethtool_ops(dev);

	netif_napi_add(dev, &priv->napi, ipoib_poll, NAPI_POLL_WEIGHT);

	dev->watchdog_timeo	 = HZ;

	dev->flags		|= IFF_BROADCAST | IFF_MULTICAST;

	dev->hard_header_len	 = IPOIB_HARD_LEN;
	dev->addr_len		 = INFINIBAND_ALEN;
	dev->type		 = ARPHRD_INFINIBAND;
	dev->tx_queue_len	 = ipoib_sendq_size * 2;
	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
				    NETIF_F_HIGHDMA);
	netif_keep_dst(dev);

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	priv->dev = dev;

	spin_lock_init(&priv->lock);

	init_rwsem(&priv->vlan_rwsem);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	INIT_DELAYED_WORK(&priv->mcast_task,   ipoib_mcast_join_task);
	INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
	INIT_WORK(&priv->flush_light,   ipoib_ib_dev_flush_light);
	INIT_WORK(&priv->flush_normal,   ipoib_ib_dev_flush_normal);
	INIT_WORK(&priv->flush_heavy,   ipoib_ib_dev_flush_heavy);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
	INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
	INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
{
	struct net_device *dev;

	dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
			   NET_NAME_UNKNOWN, ipoib_setup);
	if (!dev)
		return NULL;

	return netdev_priv(dev);
}
static ssize_t show_pkey(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
static ssize_t show_umcast(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%d\n", test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
	struct ipoib_dev_priv *priv = netdev_priv(ndev);

	if (umcast_val > 0) {
		set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
		ipoib_warn(priv, "ignoring multicast groups joined directly "
				 "by userspace\n");
	} else
		clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
static ssize_t set_umcast(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t count)
{
	unsigned long umcast_val = simple_strtoul(buf, NULL, 0);

	ipoib_set_umcast(to_net_dev(dev), umcast_val);

	return count;
}
static DEVICE_ATTR(umcast, S_IWUSR | S_IRUGO, show_umcast, set_umcast);
int ipoib_add_umcast_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_umcast);
}
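
/*
 * Rewrite the GUID portion of the device address (bytes 4 onward) under
 * the addr lock, then propagate the new base GUID to all child
 * interfaces so parent and children stay consistent.
 */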
static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
	struct ipoib_dev_priv *child_priv;
	struct net_device *netdev = priv->dev;

	netif_addr_lock_bh(netdev);

	memcpy(&priv->local_gid.global.interface_id,
	       &gid->global.interface_id,
	       sizeof(gid->global.interface_id));
	memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
	clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	netif_addr_unlock_bh(netdev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		down_read(&priv->vlan_rwsem);
		list_for_each_entry(child_priv, &priv->child_intfs, list)
			set_base_guid(child_priv, gid);
		up_read(&priv->vlan_rwsem);
	}
}
static int ipoib_check_lladdr(struct net_device *dev,
			      struct sockaddr_storage *ss)
{
	union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
	int ret = 0;

	netif_addr_lock_bh(dev);

	/* Make sure the QPN, reserved and subnet prefix match the current
	 * lladdr, it also makes sure the lladdr is unicast.
	 */
	if (memcmp(dev->dev_addr, ss->__data,
		   4 + sizeof(gid->global.subnet_prefix)) ||
	    gid->global.interface_id == 0)
		ret = -EINVAL;

	netif_addr_unlock_bh(dev);

	return ret;
}
static int ipoib_set_mac(struct net_device *dev, void *addr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sockaddr_storage *ss = addr;
	int ret;

	if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
		return -EBUSY;

	ret = ipoib_check_lladdr(dev, ss);
	if (ret)
		return ret;

	set_base_guid(priv, (union ib_gid *)(ss->__data + 4));

	queue_work(ipoib_workqueue, &priv->flush_light);

	return 0;
}
static ssize_t create_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
		return -EINVAL;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	pkey |= 0x8000;

	ret = ipoib_vlan_add(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(create_child, S_IWUSR, NULL, create_child);
static ssize_t delete_child(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int pkey;
	int ret;

	if (sscanf(buf, "%i", &pkey) != 1)
		return -EINVAL;

	if (pkey < 0 || pkey > 0xffff)
		return -EINVAL;

	ret = ipoib_vlan_delete(to_net_dev(dev), pkey);

	return ret ? ret : count;
}
static DEVICE_ATTR(delete_child, S_IWUSR, NULL, delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_pkey);
}
int ipoib_set_dev_features(struct ipoib_dev_priv *priv, struct ib_device *hca)
{
	priv->hca_caps = hca->attrs.device_cap_flags;

	if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
		priv->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

		if (priv->hca_caps & IB_DEVICE_UD_TSO)
			priv->dev->hw_features |= NETIF_F_TSO;

		priv->dev->features |= priv->dev->hw_features;
	}

	return 0;
}
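
/*
 * Bring up one IPoIB interface for a single HCA port: query the port,
 * P_Key and GID, derive the MTU and hardware address, then register
 * the netdev and its sysfs attributes. Failures unwind through the
 * labels at the bottom.
 */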
static struct net_device *ipoib_add_port(const char *format,
					 struct ib_device *hca, u8 port)
{
	struct ipoib_dev_priv *priv;
	struct ib_port_attr attr;
	int result = -ENOMEM;

	priv = ipoib_intf_alloc(format);
	if (!priv)
		goto alloc_mem_failed;

	SET_NETDEV_DEV(priv->dev, hca->dma_device);
	priv->dev->dev_id = port - 1;

	result = ib_query_port(hca, port, &attr);
	if (!result)
		priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
	else {
		printk(KERN_WARNING "%s: ib_query_port %d failed\n",
		       hca->name, port);
		goto device_init_failed;
	}

	/* MTU will be reset when mcast join happens */
	priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
	priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
	priv->dev->max_mtu = IPOIB_CM_MTU;

	priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);

	result = ib_query_pkey(hca, port, 0, &priv->pkey);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	result = ipoib_set_dev_features(priv, hca);
	if (result)
		goto device_init_failed;

	/*
	 * Set the full membership bit, so that we join the right
	 * broadcast group, etc.
	 */
	priv->pkey |= 0x8000;

	priv->dev->broadcast[8] = priv->pkey >> 8;
	priv->dev->broadcast[9] = priv->pkey & 0xff;

	result = ib_query_gid(hca, port, 0, &priv->local_gid, NULL);
	if (result) {
		printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
	set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);

	result = ipoib_dev_init(priv->dev, hca, port);
	if (result < 0) {
		printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
		       hca->name, port, result);
		goto device_init_failed;
	}

	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
	result = ib_register_event_handler(&priv->event_handler);
	if (result < 0) {
		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
		       "port %d (ret = %d)\n",
		       hca->name, port, result);
		goto event_failed;
	}

	result = register_netdev(priv->dev);
	if (result) {
		printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
		       hca->name, port, result);
		goto register_failed;
	}

	ipoib_create_debug_files(priv->dev);

	if (ipoib_cm_add_mode_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_pkey_attr(priv->dev))
		goto sysfs_failed;
	if (ipoib_add_umcast_attr(priv->dev))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_create_child))
		goto sysfs_failed;
	if (device_create_file(&priv->dev->dev, &dev_attr_delete_child))
		goto sysfs_failed;

	return priv->dev;

sysfs_failed:
	ipoib_delete_debug_files(priv->dev);
	unregister_netdev(priv->dev);

register_failed:
	ib_unregister_event_handler(&priv->event_handler);
	flush_workqueue(ipoib_workqueue);
	/* Stop GC if started before flush */
	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
	cancel_delayed_work(&priv->neigh_reap_task);
	flush_workqueue(priv->wq);

event_failed:
	ipoib_dev_cleanup(priv->dev);

device_init_failed:
	free_netdev(priv->dev);

alloc_mem_failed:
	return ERR_PTR(result);
}
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int p;
	int count = 0;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		if (!rdma_protocol_ib(device, p))
			continue;
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
			count++;
		}
	}

	if (!count) {
		kfree(dev_list);
		return;
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
	struct ipoib_dev_priv *priv, *tmp;
	struct list_head *dev_list = client_data;

	if (!dev_list)
		return;

	list_for_each_entry_safe(priv, tmp, dev_list, list) {
		ib_unregister_event_handler(&priv->event_handler);
		flush_workqueue(ipoib_workqueue);

		/* mark interface in the middle of destruction */
		set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags);

		rtnl_lock();
		dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP);
		rtnl_unlock();

		/* Stop GC */
		set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
		cancel_delayed_work(&priv->neigh_reap_task);
		flush_workqueue(priv->wq);

		unregister_netdev(priv->dev);
		free_netdev(priv->dev);
	}

	kfree(dev_list);
}
static int __init ipoib_init_module(void)
{
	int ret;

	ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
	ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);

	ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
	ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
	ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
	ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
#endif

	/*
	 * When copying small received packets, we only copy from the
	 * linear data part of the SKB, so we rely on this condition.
	 */
	BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);

	ret = ipoib_register_debugfs();
	if (ret)
		return ret;

	/*
	 * We create a global workqueue here that is used for all flush
	 * operations.  However, if you attempt to flush a workqueue
	 * from a task on that same workqueue, it deadlocks the system.
	 * We want to be able to flush the tasks associated with a
	 * specific net device, so we also create a workqueue for each
	 * netdevice.  We queue up the tasks for that device only on
	 * its private workqueue, and we only queue up flush events
	 * on our global flush workqueue.  This avoids the deadlocks.
	 */
	ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush",
						  WQ_MEM_RECLAIM);
	if (!ipoib_workqueue) {
		ret = -ENOMEM;
		goto err_fs;
	}

	ib_sa_register_client(&ipoib_sa_client);

	ret = ib_register_client(&ipoib_client);
	if (ret)
		goto err_sa;

	ret = ipoib_netlink_init();
	if (ret)
		goto err_client;

	return 0;

err_client:
	ib_unregister_client(&ipoib_client);

err_sa:
	ib_sa_unregister_client(&ipoib_sa_client);
	destroy_workqueue(ipoib_workqueue);

err_fs:
	ipoib_unregister_debugfs();

	return ret;
}
static void __exit ipoib_cleanup_module(void)
{
	ipoib_netlink_fini();
	ib_unregister_client(&ipoib_client);
	ib_sa_unregister_client(&ipoib_sa_client);
	ipoib_unregister_debugfs();
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);