/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"
#define MAX_PEND_REQS_PER_FUNC 4
#define MAD_TIMEOUT_MS	2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, group->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err("  %16s: " format, (group)->name, ## arg)
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;
enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state	state;
	uint8_t			join_state;
	int			num_pend_reqs;
	struct list_head	pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)
static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}
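
/*
 * Groups are kept in a per-port rb-tree (ctx->mcg_table) keyed by the raw
 * MGID bytes. mcast_find() looks a group up; mcast_insert() links a new
 * group into the tree and returns an already-existing group on a key match.
 */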
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}
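
/*
 * Two MAD transports are used below: send_mad_to_wire() pushes an SA MAD out
 * to the subnet through the master's QP1 using the cached SM address handle,
 * while send_mad_to_slave() tunnels a MAD back to a VF, faking a receive
 * completion whose source LID is the SM's LID.
 */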
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;

	spin_lock(&dev->sm_lock);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock(&dev->sm_lock);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock(&dev->sm_lock);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), ctx->port,
				    IB_QPT_GSI, 0, 1, IB_QP1_QKEY, &ah_attr, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
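
/*
 * A VF's join request is forwarded to the SA almost verbatim: only the port
 * GID is rewritten to the physical port's GID (slave 0) and the TID is
 * replaced with one owned by the demux context so the SM's reply can be
 * matched back to this group. A timeout is armed in case the SM never answers.
 */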
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
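
/*
 * Leaves are never forwarded on behalf of a particular VF. The driver builds
 * its own DELETE MCMemberRecord for whatever join-state bits no VF needs any
 * more (see get_leave_state()).
 */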
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
static int send_reply_to_slave(int slave, struct mcast_group *group,
		struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;

	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
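
/*
 * MCMemberRecord MTU, rate and packet-lifetime fields carry a 2-bit selector
 * in the top bits and a 6-bit value in the low bits; check_selector() returns
 * non-zero when the group's value cannot satisfy the requested selector/value
 * pair, and cmp_rec() below turns any such mismatch into a MAD error status.
 */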
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled sync */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	}

	mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
	return 0;
}
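
/*
 * group->members[0..2] count how many VFs currently hold each of the three
 * low JoinState bits of the MCMemberRecord. A bit can only be released
 * toward the SA once its counter drops to zero (see get_leave_state()).
 */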
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}
static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}
static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}
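
/*
 * The work handler drives the group state machine: it first consumes a
 * pending SM response (if any), then serves queued join/leave requests while
 * the group is idle, and finally sends a leave to the SM if no VF needs the
 * group's current join state. 'rc' accumulates how many group references
 * must be dropped once the handler is done.
 */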
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
					group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
							__be64 tid,
							union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf);
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
			be64_to_cpu(group->rec.mgid.global.subnet_prefix),
			be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}
*req
)
869 struct mcast_group
*group
= req
->group
;
871 atomic_inc(&group
->refcount
); /* for the request */
872 atomic_inc(&group
->refcount
); /* for scheduling the work */
873 list_add_tail(&req
->group_list
, &group
->pending_list
);
874 list_add_tail(&req
->func_list
, &group
->func
[req
->func
].pending
);
875 /* calls mlx4_ib_mcg_work_handler */
876 if (!queue_work(group
->demux
->mcg_wq
, &group
->work
))
877 safe_atomic_dec(&group
->refcount
);
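
/*
 * Entry points from the MAD demux/multiplex code: mlx4_ib_mcg_demux_handler()
 * intercepts MCMember responses arriving from the wire, and
 * mlx4_ib_mcg_multiplex_handler() intercepts MCMember requests coming from
 * VFs. Both return 1 when the MAD was consumed here and 0 when it should be
 * passed through unchanged.
 */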
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_warn_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
				       port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
				get_state_string(group->state),
				be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
				be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
			group->rec.scope_join_state & 0xf,
			group->members[2], group->members[1], group->members[0],
			atomic_read(&group->refcount),
			pending_str,
			state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
					f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
			"%4x %4x %2x %2x)\n",
			be16_to_cpu(group->rec.pkey),
			be32_to_cpu(group->rec.qkey),
			(group->rec.mtusel_mtu & 0xc0) >> 6,
			group->rec.mtusel_mtu & 0x3f,
			group->rec.tclass,
			(group->rec.ratesel_rate & 0xc0) >> 6,
			group->rec.ratesel_rate & 0x3f,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
			be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
			group->rec.proxy_join);

	return len;
}
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}
static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}
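
/*
 * Port cleanup first queues leave requests for every VF, then polls for up
 * to MAD_TIMEOUT_MS plus a grace period waiting for the group table to
 * drain, and only then force-cleans whatever groups remain.
 */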
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct mcast_group *group;
	struct rb_node *p;
	unsigned long end;
	int count;
	int i;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};
static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}
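
/*
 * When a single VF is cleaned, its queued requests are dropped and, if the
 * VF still holds any join state, a driver-built DELETE request is queued
 * (build_leave_mad()) so the membership counters are unwound through the
 * normal work-handler path.
 */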
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}
static int push_deleteing_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stall groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}
void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}