/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"

#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err("  %16s: " format, (group)->name, ## arg)

static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;

enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state	state;
	uint8_t			join_state;
	int			num_pend_reqs;
	struct list_head	pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};

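/*
 * Drop a reference that is never expected to be the last one; if it does
 * hit zero, the accounting above (queued requests, scheduled work, SA
 * membership) has been violated, so warn instead of freeing anything.
 */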
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)

static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}

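/*
 * Look up a group by MGID in the per-port red-black tree. Returns the
 * matching mcast_group or NULL; callers serialize on ctx->mcg_table_lock.
 */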
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}

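/*
 * Insert a group into the rb-tree keyed by MGID. Returns NULL on success,
 * or the already-existing group if one with the same MGID is found.
 */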
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}

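/*
 * Forward a MAD to the real SA on the wire through the master's GSI QP,
 * using the cached SM address handle for this port.
 */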
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;

	spin_lock(&dev->sm_lock);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock(&dev->sm_lock);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock(&dev->sm_lock);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, mad);
}

static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}

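/*
 * Re-issue a VF's join request to the SA: the request MAD is copied, the
 * port GID and TID are rewritten to the PF's values, and a timeout is
 * armed so an unanswered request is eventually cleaned up.
 */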
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}

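/*
 * Build a GetResp MAD from the group's current record and return it to
 * the requesting VF, restoring the join_state bits and port_gid the VF
 * asked for so the reply looks as if it came straight from the SA.
 */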
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}

static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;

	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}

static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) != (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}

/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled synchronously */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	} else {
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
	}
	return 0;
}

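/*
 * Per-group membership accounting: members[i] counts how many functions
 * currently have bit i of the join-state bits set.
 */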
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}

static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}

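/*
 * Runs when the SA did not answer a join/leave within MAD_TIMEOUT_MS:
 * drop the stale pending request (for joins), reset the group to
 * MCAST_IDLE and reschedule the work handler for whatever is still queued.
 */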
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}

static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}

static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}

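/*
 * Main per-group state machine. Consumes a pending SA response if one is
 * ready, then drains queued join/leave requests while the group is idle,
 * and finally sends a leave to the SA when no function needs the group
 * any more. Each unit of work accounted in 'rc' releases one reference.
 */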
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}

		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
						group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}

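/*
 * A join sent with the zero MGID is resolved by the SA. Find the mgid0
 * group that issued the request with this TID and either rekey it to the
 * MGID the SA assigned (inserting it into the rb-tree), or tear it down
 * if the response did not provide a usable MGID or we raced with an
 * existing group.
 */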
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf);

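/*
 * Find a group by MGID, optionally creating it if it does not exist yet.
 * A newly created zero-MGID group is parked on mcg_mgid0_list until the
 * SA response reveals its real MGID. Returns the group with its refcount
 * elevated, or an ERR_PTR. Called with ctx->mcg_table_lock held.
 */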
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
			be64_to_cpu(group->rec.mgid.global.subnet_prefix),
			be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}

static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}

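/*
 * Called for MCMemberRecord MADs arriving from the wire (the SA). SA
 * responses are matched to their group and handed to the work handler;
 * requests are passed through to the guest. Returns 1 if the MAD was
 * consumed here.
 */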
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

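/*
 * Called for MCMemberRecord MADs sent by a VF. Join (Set) and leave
 * (Delete) requests are queued on the group and handled by the work
 * handler; everything else is passed through. Returns 1 if the MAD was
 * consumed here.
 */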
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}

static ssize_t sysfs_show_group(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
				get_state_string(group->state),
				be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
				be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
			group->rec.scope_join_state & 0xf,
			group->members[2], group->members[1], group->members[0],
			atomic_read(&group->refcount),
			pending_str,
			state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
			"%4x %4x %2x %2x)\n",
			be16_to_cpu(group->rec.pkey),
			be32_to_cpu(group->rec.qkey),
			(group->rec.mtusel_mtu & 0xc0) >> 6,
			group->rec.mtusel_mtu & 0x3f,
			group->rec.tclass,
			(group->rec.ratesel_rate & 0xc0) >> 6,
			group->rec.ratesel_rate & 0x3f,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
			be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
			group->rec.proxy_join);

	return len;
}

int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}

static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}

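/*
 * Tear down all groups on a port: queue leaves for every VF, wait up to
 * MAD_TIMEOUT_MS (plus slack) for the tree to drain, then force-clean
 * whatever is left, warning about groups that still hold references.
 */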
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};

static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}

void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}

static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}

static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}

static int push_deleteing_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stall groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->func = slave;
	req->clean = 1;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}

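/*
 * Detach one VF from every group on the port: drop its pending requests
 * and, if it is still joined, queue a cleanup leave on its behalf.
 */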
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}

int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}