/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"
#define MAX_PEND_REQS_PER_FUNC 4
#define MAD_TIMEOUT_MS	2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_debug_group(group, format, arg...) \
	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
		 (group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err(" %16s: " format, (group)->name, ## arg)
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;
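
/*
 * Multicast group (MCG) paravirtualization for SR-IOV: join/leave
 * MCMemberRecord requests from virtual functions are aggregated here on
 * the PF, and a single request per group is proxied to the subnet
 * administrator (SA). Each port keeps its groups in an rb-tree keyed by
 * MGID; the per-group state below tracks which VFs are members and which
 * requests are still waiting for an SA response.
 */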
enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY
};

struct mcast_member {
	enum mcast_state	state;
	uint8_t			join_state;
	int			num_pend_reqs;
	struct list_head	pending;
};

struct ib_sa_mcmember_data {
	union ib_gid		mgid;
	union ib_gid		port_gid;
	__be32			qkey;
	__be16			mlid;
	u8			mtusel_mtu;
	u8			tclass;
	__be16			pkey;
	u8			ratesel_rate;
	u8			lifetmsel_lifetm;
	__be32			sl_flowlabel_hoplimit;
	u8			scope_join_state;
	u8			proxy_join;
	u8			reserved[2];
};

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	 * 1. Each queued request
	 * 2. Each invocation of the worker thread
	 * 3. Membership of the port at the SA
	 */
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)
static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}
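
/*
 * Per-port group lookup: groups live in ctx->mcg_table, an rb-tree ordered
 * by memcmp() of the raw MGID. mcast_insert() returns the already-existing
 * group on a duplicate key instead of inserting a second one.
 */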
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}
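
/*
 * MAD transmit helpers: send_mad_to_wire() forwards a MAD to the real SA
 * through the PF's QP1 using the cached SM address handle, while
 * send_mad_to_slave() injects a MAD back to a VF through the GSI tunnel,
 * faking a work completion that appears to come from the SM's LID.
 */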
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, 0xffff, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
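
/*
 * A VF's join request is forwarded to the SA almost verbatim: only the
 * port GID is rewritten to the PF's GID (slave 0) and the transaction ID
 * is replaced with a fresh demux TID so the SA response can be matched to
 * this group. A MAD_TIMEOUT_MS timeout is armed for the outstanding
 * request.
 */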
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				   msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
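
/*
 * Replies to a VF are synthesized from the group record but rewritten so
 * the VF sees its own view of the membership: its own port GID and only
 * the join_state bits it actually holds.
 */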
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;

	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
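
/*
 * When a join does not change the port's membership at the SA, the request
 * is validated locally against the existing group record: every field
 * covered by the request's comp_mask must be compatible, with selector
 * fields (MTU, rate, packet lifetime) checked via check_selector() above.
 */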
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) != (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled sync */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	}
	mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
	return 0;
}
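
/*
 * members[0..2] count how many VFs hold each of the three JoinState bits
 * (full / non / send-only-non member). get_leave_state() reports which of
 * the bits currently held at the SA no longer have any local users and can
 * therefore be dropped.
 */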
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}

static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}

static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}

static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}
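
/*
 * Runs when the SA has not answered a proxied join/leave within
 * MAD_TIMEOUT_MS: the oldest pending request is dropped, the group is
 * forced back to MCAST_IDLE and the work handler is re-queued so any
 * remaining requests get processed.
 */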
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}
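
/*
 * Main state machine, running on the per-port single-threaded mcg_wq:
 * first consume an SA response if one is ready (MCAST_RESP_READY), then
 * drain pending VF requests while the group is idle, and finally send a
 * leave to the SA for any join_state bits no VF uses anymore. 'rc' counts
 * how many group references to drop on the way out.
 */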
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}
		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
						group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}
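
/*
 * Joins sent with a zero MGID ask the SA to allocate one. Such groups are
 * parked on ctx->mcg_mgid0_list until the SA response arrives; they are
 * then matched by TID, renamed to the SA-assigned MGID and moved into the
 * rb-tree (or discarded if the response carries no MGID or races with an
 * already-existing group).
 */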
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
						       __be64 tid,
						       union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group;
	struct mcast_req *req;
	struct list_head *pos;
	struct list_head *n;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_safe(pos, n, &ctx->mcg_mgid0_list) {
		group = list_entry(pos, struct mcast_group, mgid0_list);
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
					be64_to_cpu(group->rec.mgid.global.subnet_prefix),
					be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf);
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
		be64_to_cpu(group->rec.mgid.global.subnet_prefix),
		be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}
static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}
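
/*
 * Called for MCMemberRecord MADs arriving from the wire (SA responses).
 * Responses are matched to a group by MGID, or by TID for MGID-0 joins,
 * and handed to the work handler; anything that is not a response is left
 * for the regular tunnel path.
 */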
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
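
/*
 * Called for MCMemberRecord MADs arriving from a VF. Only SET (join) and
 * DELETE (leave) are proxied: the request is queued on the group, bounded
 * by MAX_PEND_REQS_PER_FUNC per VF, and the work handler decides whether
 * the SA needs to be contacted at all.
 */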
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}
		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
					port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			 port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
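
/*
 * sysfs view of one group: the SA-visible scope/join_state, the per-bit
 * member counts, the refcount, whether a request is pending, the group
 * state, followed by the member VFs and a dump of the cached
 * MCMemberRecord fields.
 */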
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
			get_state_string(group->state),
			be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
			be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
		       group->rec.scope_join_state & 0xf,
		       group->members[2], group->members[1], group->members[0],
		       atomic_read(&group->refcount),
		       pending_str,
		       state_str);

	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
				       f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
		       "%4x %4x %2x %2x)\n",
		       be16_to_cpu(group->rec.pkey),
		       be32_to_cpu(group->rec.qkey),
		       (group->rec.mtusel_mtu & 0xc0) >> 6,
		       group->rec.mtusel_mtu & 0x3f,
		       group->rec.tclass,
		       (group->rec.ratesel_rate & 0xc0) >> 6,
		       group->rec.ratesel_rate & 0x3f,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
		       (be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
		       be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
		       group->rec.proxy_join);

	return len;
}
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}
static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;
		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};
static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	mutex_lock(&ctx->mcg_table_lock);
	ctx->flushing = 1;
	mutex_unlock(&ctx->mcg_table_lock);

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}
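
/*
 * VF teardown path (see clean_vf_mcast() below): drop any requests the VF
 * still has queued, and if it is currently joined, queue a "clean" leave
 * request on its behalf so the SA-side membership is eventually released.
 */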
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}
static int push_deleteing_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stall groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->func = slave;
	req->group = group;
	req->clean = 1;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleteing_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}