/*
 * Copyright (c) 2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/random.h>

#include <rdma/ib_cache.h>

static void mcast_add_one(struct ib_device *device);
static void mcast_remove_one(struct ib_device *device, void *client_data);

static struct ib_client mcast_client = {
	.name   = "ib_multicast",
	.add    = mcast_add_one,
	.remove = mcast_remove_one
};

static struct ib_sa_client	sa_client;
static struct workqueue_struct	*mcast_wq;
static union ib_gid mgid0;

struct mcast_device;

struct mcast_port {
	struct mcast_device	*dev;
	spinlock_t		lock;
	struct rb_root		table;
	atomic_t		refcount;
	struct completion	comp;
	u8			port_num;
};

struct mcast_device {
	struct ib_device	*device;
	struct ib_event_handler	event_handler;
	int			start_port;
	int			end_port;
	struct mcast_port	port[0];
};

enum mcast_state {
	MCAST_JOINING,
	MCAST_MEMBER,
	MCAST_ERROR
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_BUSY,
	MCAST_GROUP_ERROR,
	MCAST_PKEY_EVENT
};

enum {
	MCAST_INVALID_PKEY_INDEX = 0xFFFF
};

struct mcast_group {
	struct ib_sa_mcmember_rec rec;
	struct rb_node		node;
	struct mcast_port	*port;
	spinlock_t		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	struct list_head	active_list;
	struct mcast_member	*last_join;
	int			members[NUM_JOIN_MEMBERSHIP_TYPES];
	atomic_t		refcount;
	enum mcast_group_state	state;
	struct ib_sa_query	*query;
	u16			pkey_index;
	u8			leave_state;
	int			retries;
};

struct mcast_member {
	struct ib_sa_multicast	multicast;
	struct ib_sa_client	*client;
	struct mcast_group	*group;
	struct list_head	list;
	enum mcast_state	state;
	atomic_t		refcount;
	struct completion	comp;
};

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context);
static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context);

static struct mcast_group *mcast_find(struct mcast_port *port,
				      union ib_gid *mgid)
{
	struct rb_node *node = port->table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

static struct mcast_group *mcast_insert(struct mcast_port *port,
					struct mcast_group *group,
					int allow_duplicates)
{
	struct rb_node **link = &port->table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else if (allow_duplicates)
			link = &(*link)->rb_left;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &port->table);
	return NULL;
}

static void deref_port(struct mcast_port *port)
{
	if (atomic_dec_and_test(&port->refcount))
		complete(&port->comp);
}

static void release_group(struct mcast_group *group)
{
	struct mcast_port *port = group->port;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	if (atomic_dec_and_test(&group->refcount)) {
		rb_erase(&group->node, &port->table);
		spin_unlock_irqrestore(&port->lock, flags);
		kfree(group);
		deref_port(port);
	} else
		spin_unlock_irqrestore(&port->lock, flags);
}

static void deref_member(struct mcast_member *member)
{
	if (atomic_dec_and_test(&member->refcount))
		complete(&member->comp);
}

static void queue_join(struct mcast_member *member)
{
	struct mcast_group *group = member->group;
	unsigned long flags;

	spin_lock_irqsave(&group->lock, flags);
	list_add_tail(&member->list, &group->pending_list);
	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		atomic_inc(&group->refcount);
		queue_work(mcast_wq, &group->work);
	}
	spin_unlock_irqrestore(&group->lock, flags);
}

/*
 * A multicast group has four types of members: full member, non member,
 * sendonly non member and sendonly full member.
 * We need to keep track of the number of members of each
 * type based on their join state. Adjust the number of members that belong to
 * the specified join states.
 */
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
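
/*
 * Illustrative sketch (not part of the driver logic): join_state is a
 * bitmask, so a single join can count toward several of the membership
 * types listed above.  Assuming bit 0 is full member and bit 1 is non
 * member:
 *
 *	adjust_membership(group, 0x3, 1);
 *	// group->members[0] and group->members[1] each increase by one
 *	adjust_membership(group, 0x1, -1);
 *	// only the full-member count drops back
 */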

/*
 * If a multicast group has zero members left for a particular join state, but
 * the group is still a member with the SA, we need to leave that join state.
 * Determine which join states we still belong to, but that do not have any
 * active members.
 */
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < NUM_JOIN_MEMBERSHIP_TYPES; i++)
		if (!group->members[i])
			leave_state |= (0x1 << i);

	return leave_state & group->rec.join_state;
}
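
/*
 * Sketch of the computation above (illustrative only, values assumed): if
 * the group is joined with join_state 0x3 but every full member has left,
 *
 *	group->rec.join_state = 0x3;	// full member | non member
 *	group->members[0] = 0;		// no full members remain
 *	group->members[1] = 2;
 *
 * then get_leave_state(group) returns 0x1, so only the full-member join
 * state is dropped at the SA while the non-member state is kept.
 */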

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 selector, u8 src_value, u8 dst_value)
{
	int err;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static int cmp_rec(struct ib_sa_mcmember_rec *src,
		   struct ib_sa_mcmember_rec *dst, ib_sa_comp_mask comp_mask)
{
	/* MGID must already match */

	if (comp_mask & IB_SA_MCMEMBER_REC_PORT_GID &&
	    memcmp(&src->port_gid, &dst->port_gid, sizeof src->port_gid))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU, dst->mtu_selector,
			   src->mtu, dst->mtu))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->traffic_class != dst->traffic_class)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return -EINVAL;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE, dst->rate_selector,
			   src->rate, dst->rate))
		return -EINVAL;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   dst->packet_life_time_selector,
			   src->packet_life_time, dst->packet_life_time))
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL && src->sl != dst->sl)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    src->flow_label != dst->flow_label)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    src->hop_limit != dst->hop_limit)
		return -EINVAL;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE && src->scope != dst->scope)
		return -EINVAL;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

static int send_join(struct mcast_group *group, struct mcast_member *member)
{
	struct mcast_port *port = group->port;
	int ret;

	group->last_join = member;
	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_MGMT_METHOD_SET,
				       &member->multicast.rec,
				       member->multicast.comp_mask,
				       3000, GFP_KERNEL, join_handler, group,
				       &group->query);
	return (ret > 0) ? 0 : ret;
}

static int send_leave(struct mcast_group *group, u8 leave_state)
{
	struct mcast_port *port = group->port;
	struct ib_sa_mcmember_rec rec;
	int ret;

	rec = group->rec;
	rec.join_state = leave_state;
	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
				       IB_SA_MCMEMBER_REC_MGID |
				       IB_SA_MCMEMBER_REC_PORT_GID |
				       IB_SA_MCMEMBER_REC_JOIN_STATE,
				       3000, GFP_KERNEL, leave_handler,
				       group, &group->query);
	return (ret > 0) ? 0 : ret;
}

static void join_group(struct mcast_group *group, struct mcast_member *member,
		       u8 join_state)
{
	member->state = MCAST_MEMBER;
	adjust_membership(group, join_state, 1);
	group->rec.join_state |= join_state;
	member->multicast.rec = group->rec;
	member->multicast.rec.join_state = join_state;
	list_move(&member->list, &group->active_list);
}

static int fail_join(struct mcast_group *group, struct mcast_member *member,
		     int status)
{
	spin_lock_irq(&group->lock);
	list_del_init(&member->list);
	spin_unlock_irq(&group->lock);
	return member->multicast.callback(status, &member->multicast);
}

static void process_group_error(struct mcast_group *group)
{
	struct mcast_member *member;
	int ret = 0;
	u16 pkey_index;

	if (group->state == MCAST_PKEY_EVENT)
		ret = ib_find_pkey(group->port->dev->device,
				   group->port->port_num,
				   be16_to_cpu(group->rec.pkey), &pkey_index);

	spin_lock_irq(&group->lock);
	if (group->state == MCAST_PKEY_EVENT && !ret &&
	    group->pkey_index == pkey_index)
		goto out;

	while (!list_empty(&group->active_list)) {
		member = list_entry(group->active_list.next,
				    struct mcast_member, list);
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		adjust_membership(group, member->multicast.rec.join_state, -1);
		member->state = MCAST_ERROR;
		spin_unlock_irq(&group->lock);

		ret = member->multicast.callback(-ENETRESET,
						 &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	group->rec.join_state = 0;
out:
	group->state = MCAST_BUSY;
	spin_unlock_irq(&group->lock);
}

static void mcast_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int status, ret;
	u8 join_state;

	group = container_of(work, typeof(*group), work);
retest:
	spin_lock_irq(&group->lock);
	while (!list_empty(&group->pending_list) ||
	       (group->state != MCAST_BUSY)) {

		if (group->state != MCAST_BUSY) {
			spin_unlock_irq(&group->lock);
			process_group_error(group);
			goto retest;
		}

		member = list_entry(group->pending_list.next,
				    struct mcast_member, list);
		multicast = &member->multicast;
		join_state = multicast->rec.join_state;
		atomic_inc(&member->refcount);

		if (join_state == (group->rec.join_state & join_state)) {
			status = cmp_rec(&group->rec, &multicast->rec,
					 multicast->comp_mask);
			if (!status)
				join_group(group, member, join_state);
			else
				list_del_init(&member->list);
			spin_unlock_irq(&group->lock);
			ret = multicast->callback(status, multicast);
		} else {
			spin_unlock_irq(&group->lock);
			status = send_join(group, member);
			if (!status) {
				deref_member(member);
				return;
			}
			ret = fail_join(group, member, status);
		}

		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
		spin_lock_irq(&group->lock);
	}

	join_state = get_leave_state(group);
	if (join_state) {
		group->rec.join_state &= ~join_state;
		spin_unlock_irq(&group->lock);
		if (send_leave(group, join_state))
			goto retest;
	} else {
		group->state = MCAST_IDLE;
		spin_unlock_irq(&group->lock);
		release_group(group);
	}
}

/*
 * Fail a join request if it is still active - at the head of the pending queue.
 */
static void process_join_error(struct mcast_group *group, int status)
{
	struct mcast_member *member;
	int ret;

	spin_lock_irq(&group->lock);
	member = list_entry(group->pending_list.next,
			    struct mcast_member, list);
	if (group->last_join == member) {
		atomic_inc(&member->refcount);
		list_del_init(&member->list);
		spin_unlock_irq(&group->lock);
		ret = member->multicast.callback(status, &member->multicast);
		deref_member(member);
		if (ret)
			ib_sa_free_multicast(&member->multicast);
	} else
		spin_unlock_irq(&group->lock);
}

static void join_handler(int status, struct ib_sa_mcmember_rec *rec,
			 void *context)
{
	struct mcast_group *group = context;
	u16 pkey_index = MCAST_INVALID_PKEY_INDEX;

	if (status)
		process_join_error(group, status);
	else {
		int mgids_changed, is_mgid0;

		if (ib_find_pkey(group->port->dev->device,
				 group->port->port_num, be16_to_cpu(rec->pkey),
				 &pkey_index))
			pkey_index = MCAST_INVALID_PKEY_INDEX;

		spin_lock_irq(&group->port->lock);
		if (group->state == MCAST_BUSY &&
		    group->pkey_index == MCAST_INVALID_PKEY_INDEX)
			group->pkey_index = pkey_index;
		mgids_changed = memcmp(&rec->mgid, &group->rec.mgid,
				       sizeof(group->rec.mgid));
		group->rec = *rec;
		if (mgids_changed) {
			rb_erase(&group->node, &group->port->table);
			is_mgid0 = !memcmp(&mgid0, &group->rec.mgid,
					   sizeof(mgid0));
			mcast_insert(group->port, group, is_mgid0);
		}
		spin_unlock_irq(&group->port->lock);
	}
	mcast_work_handler(&group->work);
}

static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
			  void *context)
{
	struct mcast_group *group = context;

	if (status && group->retries > 0 &&
	    !send_leave(group, group->leave_state))
		group->retries--;
	else
		mcast_work_handler(&group->work);
}

static struct mcast_group *acquire_group(struct mcast_port *port,
					 union ib_gid *mgid, gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	unsigned long flags;
	int is_mgid0;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		spin_lock_irqsave(&port->lock, flags);
		group = mcast_find(port, mgid);
		if (group)
			goto found;
		spin_unlock_irqrestore(&port->lock, flags);
	}

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return NULL;

	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->active_list);
	INIT_WORK(&group->work, mcast_work_handler);
	spin_lock_init(&group->lock);

	spin_lock_irqsave(&port->lock, flags);
	cur_group = mcast_insert(port, group, is_mgid0);
	if (cur_group) {
		kfree(group);
		group = cur_group;
	} else
		atomic_inc(&port->refcount);
found:
	atomic_inc(&group->refcount);
	spin_unlock_irqrestore(&port->lock, flags);
	return group;
}

/*
 * We serialize all join requests to a single group to make our lives much
 * easier.  Otherwise, two users could try to join the same group
 * simultaneously, with different configurations, one could leave while the
 * join is in progress, etc., which makes locking around error recovery
 * difficult.
 */
struct ib_sa_multicast *
ib_sa_join_multicast(struct ib_sa_client *client,
		     struct ib_device *device, u8 port_num,
		     struct ib_sa_mcmember_rec *rec,
		     ib_sa_comp_mask comp_mask, gfp_t gfp_mask,
		     int (*callback)(int status,
				     struct ib_sa_multicast *multicast),
		     void *context)
{
	struct mcast_device *dev;
	struct mcast_member *member;
	struct ib_sa_multicast *multicast;
	int ret;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return ERR_PTR(-ENODEV);

	member = kmalloc(sizeof *member, gfp_mask);
	if (!member)
		return ERR_PTR(-ENOMEM);

	ib_sa_client_get(client);
	member->client = client;
	member->multicast.rec = *rec;
	member->multicast.comp_mask = comp_mask;
	member->multicast.callback = callback;
	member->multicast.context = context;
	init_completion(&member->comp);
	atomic_set(&member->refcount, 1);
	member->state = MCAST_JOINING;

	member->group = acquire_group(&dev->port[port_num - dev->start_port],
				      &rec->mgid, gfp_mask);
	if (!member->group) {
		ret = -ENOMEM;
		goto err;
	}

	/*
	 * The user will get the multicast structure in their callback.  They
	 * could then free the multicast structure before we can return from
	 * this routine.  So we save the pointer to return before queuing
	 * any callback.
	 */
	multicast = &member->multicast;
	queue_join(member);
	return multicast;

err:
	ib_sa_client_put(client);
	kfree(member);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_sa_join_multicast);
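
/*
 * Usage sketch (illustrative only, not part of this file): a consumer that
 * has registered its own struct ib_sa_client joins a group and later frees
 * the membership.  The names my_sa_client, my_callback and my_context are
 * placeholders, not existing kernel symbols.
 *
 *	static int my_callback(int status, struct ib_sa_multicast *multicast)
 *	{
 *		if (!status)
 *			pr_info("joined, mlid 0x%x\n",
 *				be16_to_cpu(multicast->rec.mlid));
 *		// a non-zero return asks the core to free the multicast itself
 *		return 0;
 *	}
 *
 *	multicast = ib_sa_join_multicast(&my_sa_client, device, port_num,
 *					 &rec, comp_mask, GFP_KERNEL,
 *					 my_callback, my_context);
 *	if (IS_ERR(multicast))
 *		return PTR_ERR(multicast);
 *	...
 *	ib_sa_free_multicast(multicast);
 */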

void ib_sa_free_multicast(struct ib_sa_multicast *multicast)
{
	struct mcast_member *member;
	struct mcast_group *group;

	member = container_of(multicast, struct mcast_member, multicast);
	group = member->group;

	spin_lock_irq(&group->lock);
	if (member->state == MCAST_MEMBER)
		adjust_membership(group, multicast->rec.join_state, -1);

	list_del_init(&member->list);

	if (group->state == MCAST_IDLE) {
		group->state = MCAST_BUSY;
		spin_unlock_irq(&group->lock);
		/* Continue to hold reference on group until callback */
		queue_work(mcast_wq, &group->work);
	} else {
		spin_unlock_irq(&group->lock);
		release_group(group);
	}

	deref_member(member);
	wait_for_completion(&member->comp);
	ib_sa_client_put(member->client);
	kfree(member);
}
EXPORT_SYMBOL(ib_sa_free_multicast);

int ib_sa_get_mcmember_rec(struct ib_device *device, u8 port_num,
			   union ib_gid *mgid, struct ib_sa_mcmember_rec *rec)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	struct mcast_group *group;
	unsigned long flags;
	int ret = 0;

	dev = ib_get_client_data(device, &mcast_client);
	if (!dev)
		return -ENODEV;

	port = &dev->port[port_num - dev->start_port];
	spin_lock_irqsave(&port->lock, flags);
	group = mcast_find(port, mgid);
	if (group)
		*rec = group->rec;
	else
		ret = -EADDRNOTAVAIL;
	spin_unlock_irqrestore(&port->lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_sa_get_mcmember_rec);

/**
 * ib_init_ah_from_mcmember - Initialize AH attribute from multicast
 * member record and gid of the device.
 * @device:	RDMA device
 * @port_num:	Port of the rdma device to consider
 * @rec:	Multicast member record to use
 * @ndev:	Optional netdevice, applicable only for RoCE
 * @gid_type:	GID type to consider
 * @ah_attr:	AH attribute to fill in on successful completion
 *
 * ib_init_ah_from_mcmember() initializes AH attribute based on multicast
 * member record and other device properties. On success the caller is
 * responsible for calling rdma_destroy_ah_attr on the ah_attr. Returns 0 on
 * success or appropriate error code.
 */
int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
			     struct ib_sa_mcmember_rec *rec,
			     struct net_device *ndev,
			     enum ib_gid_type gid_type,
			     struct rdma_ah_attr *ah_attr)
{
	const struct ib_gid_attr *sgid_attr;

	/* GID table is not based on the netdevice for IB link layer,
	 * so ignore ndev during search.
	 */
	if (rdma_protocol_ib(device, port_num))
		ndev = NULL;
	else if (!rdma_protocol_roce(device, port_num))
		return -EINVAL;

	sgid_attr = rdma_find_gid_by_port(device, &rec->port_gid,
					  gid_type, port_num, ndev);
	if (IS_ERR(sgid_attr))
		return PTR_ERR(sgid_attr);

	memset(ah_attr, 0, sizeof(*ah_attr));
	ah_attr->type = rdma_ah_find_type(device, port_num);

	rdma_ah_set_dlid(ah_attr, be16_to_cpu(rec->mlid));
	rdma_ah_set_sl(ah_attr, rec->sl);
	rdma_ah_set_port_num(ah_attr, port_num);
	rdma_ah_set_static_rate(ah_attr, rec->rate);
	rdma_move_grh_sgid_attr(ah_attr, &rec->mgid,
				be32_to_cpu(rec->flow_label),
				rec->hop_limit, rec->traffic_class,
				sgid_attr);
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_mcmember);
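
/*
 * Usage sketch (illustrative only): a consumer can combine the two exported
 * helpers above to build an address handle for a group it has joined.  Error
 * handling is abbreviated and the surrounding variables are assumed to exist.
 *
 *	struct ib_sa_mcmember_rec rec;
 *	struct rdma_ah_attr ah_attr;
 *
 *	if (!ib_sa_get_mcmember_rec(device, port_num, &mgid, &rec) &&
 *	    !ib_init_ah_from_mcmember(device, port_num, &rec, NULL,
 *				      IB_GID_TYPE_IB, &ah_attr)) {
 *		// use ah_attr (e.g. to create an AH), then release it
 *		rdma_destroy_ah_attr(&ah_attr);
 *	}
 */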

static void mcast_groups_event(struct mcast_port *port,
			       enum mcast_group_state state)
{
	struct mcast_group *group;
	struct rb_node *node;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	for (node = rb_first(&port->table); node; node = rb_next(node)) {
		group = rb_entry(node, struct mcast_group, node);
		spin_lock(&group->lock);
		if (group->state == MCAST_IDLE) {
			atomic_inc(&group->refcount);
			queue_work(mcast_wq, &group->work);
		}
		if (group->state != MCAST_GROUP_ERROR)
			group->state = state;
		spin_unlock(&group->lock);
	}
	spin_unlock_irqrestore(&port->lock, flags);
}

static void mcast_event_handler(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct mcast_device *dev;
	int index;

	dev = container_of(handler, struct mcast_device, event_handler);
	if (!rdma_cap_ib_mcast(dev->device, event->element.port_num))
		return;

	index = event->element.port_num - dev->start_port;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
		mcast_groups_event(&dev->port[index], MCAST_GROUP_ERROR);
		break;
	case IB_EVENT_PKEY_CHANGE:
		mcast_groups_event(&dev->port[index], MCAST_PKEY_EVENT);
		break;
	default:
		break;
	}
}

static void mcast_add_one(struct ib_device *device)
{
	struct mcast_device *dev;
	struct mcast_port *port;
	int i;
	int count = 0;

	dev = kmalloc(struct_size(dev, port, device->phys_port_cnt),
		      GFP_KERNEL);
	if (!dev)
		return;

	dev->start_port = rdma_start_port(device);
	dev->end_port = rdma_end_port(device);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (!rdma_cap_ib_mcast(device, dev->start_port + i))
			continue;
		port = &dev->port[i];
		port->dev = dev;
		port->port_num = dev->start_port + i;
		spin_lock_init(&port->lock);
		port->table = RB_ROOT;
		init_completion(&port->comp);
		atomic_set(&port->refcount, 1);
		++count;
	}

	if (!count) {
		kfree(dev);
		return;
	}

	dev->device = device;
	ib_set_client_data(device, &mcast_client, dev);

	INIT_IB_EVENT_HANDLER(&dev->event_handler, device, mcast_event_handler);
	ib_register_event_handler(&dev->event_handler);
}

static void mcast_remove_one(struct ib_device *device, void *client_data)
{
	struct mcast_device *dev = client_data;
	struct mcast_port *port;
	int i;

	if (!dev)
		return;

	ib_unregister_event_handler(&dev->event_handler);
	flush_workqueue(mcast_wq);

	for (i = 0; i <= dev->end_port - dev->start_port; i++) {
		if (rdma_cap_ib_mcast(device, dev->start_port + i)) {
			port = &dev->port[i];
			deref_port(port);
			wait_for_completion(&port->comp);
		}
	}

	kfree(dev);
}

int mcast_init(void)
{
	int ret;

	mcast_wq = alloc_ordered_workqueue("ib_mcast", WQ_MEM_RECLAIM);
	if (!mcast_wq)
		return -ENOMEM;

	ib_sa_register_client(&sa_client);

	ret = ib_register_client(&mcast_client);
	if (ret)
		goto err;
	return 0;

err:
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
	return ret;
}

void mcast_cleanup(void)
{
	ib_unregister_client(&mcast_client);
	ib_sa_unregister_client(&sa_client);
	destroy_workqueue(mcast_wq);
}