/*
 * Copyright (c) 2012 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_sa.h>

#include <linux/mlx4/cmd.h>
#include <linux/rbtree.h>
#include <linux/delay.h>

#include "mlx4_ib.h"	/* mlx4_ib_dev, mlx4_ib_demux_ctx, proxy send helpers */
#define MAX_VFS			80
#define MAX_PEND_REQS_PER_FUNC	4
#define MAD_TIMEOUT_MS		2000

#define mcg_warn(fmt, arg...)	pr_warn("MCG WARNING: " fmt, ##arg)
#define mcg_error(fmt, arg...)	pr_err(fmt, ##arg)
#define mcg_warn_group(group, format, arg...) \
	pr_warn("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
	(group)->name, (group)->demux->port, ## arg)

#define mcg_debug_group(group, format, arg...) \
	pr_debug("%s-%d: %16s (port %d): WARNING: " format, __func__, __LINE__,\
		 (group)->name, (group)->demux->port, ## arg)

#define mcg_error_group(group, format, arg...) \
	pr_err("  %16s: " format, (group)->name, ## arg)
static union ib_gid mgid0;

static struct workqueue_struct *clean_wq;
enum mcast_state {
	MCAST_NOT_MEMBER = 0,
	MCAST_MEMBER,
};

enum mcast_group_state {
	MCAST_IDLE,
	MCAST_JOIN_SENT,
	MCAST_LEAVE_SENT,
	MCAST_RESP_READY,
};

struct mcast_member {
	enum mcast_state state;
	u8		join_state;
	int		num_pend_reqs;
	struct list_head pending;
};

struct ib_sa_mcmember_data {
	union ib_gid	mgid;
	union ib_gid	port_gid;
	__be32		qkey;
	__be16		mlid;
	u8		mtusel_mtu;
	u8		tclass;
	__be16		pkey;
	u8		ratesel_rate;
	u8		lifetmsel_lifetm;
	__be32		sl_flowlabel_hoplimit;
	u8		scope_join_state;
	u8		proxy_join;
	u8		reserved[2];
} __packed __aligned(4);

struct mcast_group {
	struct ib_sa_mcmember_data rec;
	struct rb_node		node;
	struct list_head	mgid0_list;
	struct mlx4_ib_demux_ctx *demux;
	struct mcast_member	func[MAX_VFS];
	struct mutex		lock;
	struct work_struct	work;
	struct list_head	pending_list;
	int			members[3];
	enum mcast_group_state	state;
	enum mcast_group_state	prev_state;
	struct ib_sa_mad	response_sa_mad;
	__be64			last_req_tid;

	char			name[33]; /* MGID string */
	struct device_attribute	dentry;

	/* refcount is the reference count for the following:
	   1. Each queued request
	   2. Each invocation of the worker thread
	   3. Membership of the port at the SA
	*/
	atomic_t		refcount;

	/* delayed work to clean pending SM request */
	struct delayed_work	timeout_work;
	struct list_head	cleanup_list;
};

struct mcast_req {
	int			func;
	struct ib_sa_mad	sa_mad;
	struct list_head	group_list;
	struct list_head	func_list;
	struct mcast_group	*group;
	int			clean;
};
#define safe_atomic_dec(ref) \
	do {\
		if (atomic_dec_and_test(ref)) \
			mcg_warn_group(group, "did not expect to reach zero\n"); \
	} while (0)
static const char *get_state_string(enum mcast_group_state state)
{
	switch (state) {
	case MCAST_IDLE:
		return "MCAST_IDLE";
	case MCAST_JOIN_SENT:
		return "MCAST_JOIN_SENT";
	case MCAST_LEAVE_SENT:
		return "MCAST_LEAVE_SENT";
	case MCAST_RESP_READY:
		return "MCAST_RESP_READY";
	}
	return "Invalid State";
}
static struct mcast_group *mcast_find(struct mlx4_ib_demux_ctx *ctx,
				      union ib_gid *mgid)
{
	struct rb_node *node = ctx->mcg_table.rb_node;
	struct mcast_group *group;
	int ret;

	while (node) {
		group = rb_entry(node, struct mcast_group, node);
		ret = memcmp(mgid->raw, group->rec.mgid.raw, sizeof *mgid);
		if (!ret)
			return group;

		if (ret < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
static struct mcast_group *mcast_insert(struct mlx4_ib_demux_ctx *ctx,
					struct mcast_group *group)
{
	struct rb_node **link = &ctx->mcg_table.rb_node;
	struct rb_node *parent = NULL;
	struct mcast_group *cur_group;
	int ret;

	while (*link) {
		parent = *link;
		cur_group = rb_entry(parent, struct mcast_group, node);

		ret = memcmp(group->rec.mgid.raw, cur_group->rec.mgid.raw,
			     sizeof group->rec.mgid);
		if (ret < 0)
			link = &(*link)->rb_left;
		else if (ret > 0)
			link = &(*link)->rb_right;
		else
			return cur_group;
	}
	rb_link_node(&group->node, parent, link);
	rb_insert_color(&group->node, &ctx->mcg_table);
	return NULL;
}
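/*
 * Forward a (rewritten) SA MAD to the fabric through the PF's QP1, using the
 * cached address handle of the SM for this port.
 */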
static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_ah_attr ah_attr;
	unsigned long flags;

	spin_lock_irqsave(&dev->sm_lock, flags);
	if (!dev->sm_ah[ctx->port - 1]) {
		/* port is not yet Active, sm_ah not ready */
		spin_unlock_irqrestore(&dev->sm_lock, flags);
		return -EAGAIN;
	}
	mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);
	spin_unlock_irqrestore(&dev->sm_lock, flags);
	return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev),
				    ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY,
				    &ah_attr, NULL, 0xffff, mad);
}
static int send_mad_to_slave(int slave, struct mlx4_ib_demux_ctx *ctx,
			     struct ib_mad *mad)
{
	struct mlx4_ib_dev *dev = ctx->dev;
	struct ib_mad_agent *agent = dev->send_agent[ctx->port - 1][1];
	struct ib_wc wc;
	struct ib_ah_attr ah_attr;

	/* Our agent might not yet be registered when mads start to arrive */
	if (!agent)
		return -EAGAIN;

	ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr);

	if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
		return -EINVAL;
	wc.sl = 0;
	wc.dlid_path_bits = 0;
	wc.port_num = ctx->port;
	wc.slid = ah_attr.dlid;  /* opensm lid */
	wc.src_qp = 1;
	return mlx4_ib_send_to_slave(dev, slave, ctx->port, IB_QPT_GSI, &wc, NULL, mad);
}
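/*
 * Build a join request on behalf of a VF: reuse the MAD the VF sent, but
 * substitute the PF's port GID and a locally generated TID so the response
 * can later be matched back to this group.
 */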
static int send_join_to_wire(struct mcast_group *group, struct ib_sa_mad *sa_mad)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_mad_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	/* we rely on a mad request as arrived from a VF */
	memcpy(&mad, sa_mad, sizeof mad);

	/* fix port GID to be the real one (slave 0) */
	sa_mad_data->port_gid.global.interface_id = group->demux->guid_cache[0];

	/* assign our own TID */
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
static int send_leave_to_wire(struct mcast_group *group, u8 join_state)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_SA_METHOD_DELETE;
	mad.mad_hdr.status = cpu_to_be16(0);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = mlx4_ib_get_new_demux_tid(group->demux);
	group->last_req_tid = mad.mad_hdr.tid; /* keep it for later validation */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = 0x0;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = IB_SA_MCMEMBER_REC_MGID |
		IB_SA_MCMEMBER_REC_PORT_GID | IB_SA_MCMEMBER_REC_JOIN_STATE;

	*sa_data = group->rec;
	sa_data->scope_join_state = join_state;

	ret = send_mad_to_wire(group->demux, (struct ib_mad *)&mad);
	if (ret)
		group->state = MCAST_IDLE;

	/* set timeout handler */
	if (!ret) {
		/* calls mlx4_ib_mcg_timeout_handler */
		queue_delayed_work(group->demux->mcg_wq, &group->timeout_work,
				msecs_to_jiffies(MAD_TIMEOUT_MS));
	}

	return ret;
}
static int send_reply_to_slave(int slave, struct mcast_group *group,
			       struct ib_sa_mad *req_sa_mad, u16 status)
{
	struct ib_sa_mad mad;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)&mad.data;
	struct ib_sa_mcmember_data *req_sa_data = (struct ib_sa_mcmember_data *)&req_sa_mad->data;
	int ret;

	memset(&mad, 0, sizeof mad);
	mad.mad_hdr.base_version = 1;
	mad.mad_hdr.mgmt_class = IB_MGMT_CLASS_SUBN_ADM;
	mad.mad_hdr.class_version = 2;
	mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
	mad.mad_hdr.status = cpu_to_be16(status);
	mad.mad_hdr.class_specific = cpu_to_be16(0);
	mad.mad_hdr.tid = req_sa_mad->mad_hdr.tid;
	*(u8 *)&mad.mad_hdr.tid = 0; /* resetting tid to 0 */
	mad.mad_hdr.attr_id = cpu_to_be16(IB_SA_ATTR_MC_MEMBER_REC);
	mad.mad_hdr.attr_mod = cpu_to_be32(0);
	mad.sa_hdr.sm_key = req_sa_mad->sa_hdr.sm_key;
	mad.sa_hdr.attr_offset = cpu_to_be16(7);
	mad.sa_hdr.comp_mask = 0; /* ignored on responses, see IBTA spec */

	*sa_data = group->rec;

	/* reconstruct VF's requested join_state and port_gid */
	sa_data->scope_join_state &= 0xf0;
	sa_data->scope_join_state |= (group->func[slave].join_state & 0x0f);
	memcpy(&sa_data->port_gid, &req_sa_data->port_gid, sizeof req_sa_data->port_gid);

	ret = send_mad_to_slave(slave, group->demux, (struct ib_mad *)&mad);
	return ret;
}
static int check_selector(ib_sa_comp_mask comp_mask,
			  ib_sa_comp_mask selector_mask,
			  ib_sa_comp_mask value_mask,
			  u8 src_value, u8 dst_value)
{
	int err;
	u8 selector = dst_value >> 6;
	dst_value &= 0x3f;
	src_value &= 0x3f;

	if (!(comp_mask & selector_mask) || !(comp_mask & value_mask))
		return 0;

	switch (selector) {
	case IB_SA_GT:
		err = (src_value <= dst_value);
		break;
	case IB_SA_LT:
		err = (src_value >= dst_value);
		break;
	case IB_SA_EQ:
		err = (src_value != dst_value);
		break;
	default:
		err = 0;
		break;
	}

	return err;
}
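/*
 * Compare the parameters of an incoming join request against the record of an
 * already-established group; any mismatch in a field the requester marked in
 * comp_mask rejects the request.
 */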
static u16 cmp_rec(struct ib_sa_mcmember_data *src,
		   struct ib_sa_mcmember_data *dst, ib_sa_comp_mask comp_mask)
{
	/* src is group record, dst is request record */
	/* MGID must already match */
	/* Port_GID we always replace to our Port_GID, so it is a match */

#define MAD_STATUS_REQ_INVALID 0x0200
	if (comp_mask & IB_SA_MCMEMBER_REC_QKEY && src->qkey != dst->qkey)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_MLID && src->mlid != dst->mlid)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_MTU_SELECTOR,
			   IB_SA_MCMEMBER_REC_MTU,
			   src->mtusel_mtu, dst->mtusel_mtu))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_TRAFFIC_CLASS &&
	    src->tclass != dst->tclass)
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_PKEY && src->pkey != dst->pkey)
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask, IB_SA_MCMEMBER_REC_RATE_SELECTOR,
			   IB_SA_MCMEMBER_REC_RATE,
			   src->ratesel_rate, dst->ratesel_rate))
		return MAD_STATUS_REQ_INVALID;
	if (check_selector(comp_mask,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR,
			   IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME,
			   src->lifetmsel_lifetm, dst->lifetmsel_lifetm))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0xf0000000) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0xf0000000))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_FLOW_LABEL &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x0fffff00) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x0fffff00))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_HOP_LIMIT &&
	    (be32_to_cpu(src->sl_flowlabel_hoplimit) & 0x000000ff) !=
	    (be32_to_cpu(dst->sl_flowlabel_hoplimit) & 0x000000ff))
		return MAD_STATUS_REQ_INVALID;
	if (comp_mask & IB_SA_MCMEMBER_REC_SCOPE &&
	    (src->scope_join_state & 0xf0) !=
	    (dst->scope_join_state & 0xf0))
		return MAD_STATUS_REQ_INVALID;

	/* join_state checked separately, proxy_join ignored */

	return 0;
}
/* release group, return 1 if this was last release and group is destroyed
 * timeout work is canceled sync */
static int release_group(struct mcast_group *group, int from_timeout_handler)
{
	struct mlx4_ib_demux_ctx *ctx = group->demux;
	int nzgroup;

	mutex_lock(&ctx->mcg_table_lock);
	mutex_lock(&group->lock);
	if (atomic_dec_and_test(&group->refcount)) {
		if (!from_timeout_handler) {
			if (group->state != MCAST_IDLE &&
			    !cancel_delayed_work(&group->timeout_work)) {
				atomic_inc(&group->refcount);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return 0;
			}
		}

		nzgroup = memcmp(&group->rec.mgid, &mgid0, sizeof mgid0);
		if (nzgroup)
			del_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
		if (!list_empty(&group->pending_list))
			mcg_warn_group(group, "releasing a group with non empty pending list\n");
		if (nzgroup)
			rb_erase(&group->node, &ctx->mcg_table);
		list_del_init(&group->mgid0_list);
		mutex_unlock(&group->lock);
		mutex_unlock(&ctx->mcg_table_lock);
		kfree(group);
		return 1;
	}
	mutex_unlock(&group->lock);
	mutex_unlock(&ctx->mcg_table_lock);
	return 0;
}
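/*
 * members[i] counts how many VFs currently hold bit i of the MCMemberRecord
 * join_state; the aggregate determines which join states the physical port
 * must keep registered with the SA.
 */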
static void adjust_membership(struct mcast_group *group, u8 join_state, int inc)
{
	int i;

	for (i = 0; i < 3; i++, join_state >>= 1)
		if (join_state & 0x1)
			group->members[i] += inc;
}
static u8 get_leave_state(struct mcast_group *group)
{
	u8 leave_state = 0;
	int i;

	for (i = 0; i < 3; i++)
		if (!group->members[i])
			leave_state |= (1 << i);

	return leave_state & (group->rec.scope_join_state & 7);
}
static int join_group(struct mcast_group *group, int slave, u8 join_mask)
{
	int ret = 0;
	u8 join_state;

	/* remove bits that slave is already member of, and adjust */
	join_state = join_mask & (~group->func[slave].join_state);
	adjust_membership(group, join_state, 1);
	group->func[slave].join_state |= join_state;
	if (group->func[slave].state != MCAST_MEMBER && join_state) {
		group->func[slave].state = MCAST_MEMBER;
		ret = 1;
	}
	return ret;
}
static int leave_group(struct mcast_group *group, int slave, u8 leave_state)
{
	int ret = 0;

	adjust_membership(group, leave_state, -1);
	group->func[slave].join_state &= ~leave_state;
	if (!group->func[slave].join_state) {
		group->func[slave].state = MCAST_NOT_MEMBER;
		ret = 1;
	}
	return ret;
}
static int check_leave(struct mcast_group *group, int slave, u8 leave_mask)
{
	if (group->func[slave].state != MCAST_MEMBER)
		return MAD_STATUS_REQ_INVALID;

	/* make sure we're not deleting unset bits */
	if (~group->func[slave].join_state & leave_mask)
		return MAD_STATUS_REQ_INVALID;

	if (!leave_mask)
		return MAD_STATUS_REQ_INVALID;

	return 0;
}
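/*
 * Runs when no SA response arrived within MAD_TIMEOUT_MS of a join/leave sent
 * to the wire: drop the request at the head of the pending list (join case),
 * roll the group back to MCAST_IDLE and reschedule the worker to process
 * whatever is still pending.
 */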
static void mlx4_ib_mcg_timeout_handler(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mcast_group *group;
	struct mcast_req *req = NULL;

	group = container_of(delay, typeof(*group), timeout_work);

	mutex_lock(&group->lock);
	if (group->state == MCAST_JOIN_SENT) {
		if (!list_empty(&group->pending_list)) {
			req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
			list_del(&req->group_list);
			list_del(&req->func_list);
			--group->func[req->func].num_pend_reqs;
			mutex_unlock(&group->lock);
			kfree(req);
			if (memcmp(&group->rec.mgid, &mgid0, sizeof mgid0)) {
				if (release_group(group, 1))
					return;
			} else {
				kfree(group);
				return;
			}
			mutex_lock(&group->lock);
		} else
			mcg_warn_group(group, "DRIVER BUG\n");
	} else if (group->state == MCAST_LEAVE_SENT) {
		if (group->rec.scope_join_state & 7)
			group->rec.scope_join_state &= 0xf8;
		group->state = MCAST_IDLE;
		mutex_unlock(&group->lock);
		if (release_group(group, 1))
			return;
		mutex_lock(&group->lock);
	} else
		mcg_warn_group(group, "invalid state %s\n", get_state_string(group->state));
	group->state = MCAST_IDLE;
	atomic_inc(&group->refcount);
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);

	mutex_unlock(&group->lock);
}
static int handle_leave_req(struct mcast_group *group, u8 leave_mask,
			    struct mcast_req *req)
{
	u16 status;

	if (req->clean)
		leave_mask = group->func[req->func].join_state;

	status = check_leave(group, req->func, leave_mask);
	if (!status)
		leave_group(group, req->func, leave_mask);

	if (!req->clean)
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
	--group->func[req->func].num_pend_reqs;
	list_del(&req->group_list);
	list_del(&req->func_list);
	kfree(req);
	return 1;
}
static int handle_join_req(struct mcast_group *group, u8 join_mask,
			   struct mcast_req *req)
{
	u8 group_join_state = group->rec.scope_join_state & 7;
	int ref = 0;
	u16 status;
	struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;

	if (join_mask == (group_join_state & join_mask)) {
		/* port's membership need not change */
		status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask);
		if (!status)
			join_group(group, req->func, join_mask);

		--group->func[req->func].num_pend_reqs;
		send_reply_to_slave(req->func, group, &req->sa_mad, status);
		list_del(&req->group_list);
		list_del(&req->func_list);
		kfree(req);
		++ref;
	} else {
		/* port's membership needs to be updated */
		group->prev_state = group->state;
		if (send_join_to_wire(group, &req->sa_mad)) {
			--group->func[req->func].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			ref = 1;
			group->state = group->prev_state;
		} else
			group->state = MCAST_JOIN_SENT;
	}

	return ref;
}
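/*
 * Per-group worker: first consume a pending SA response (MCAST_RESP_READY),
 * then, while the group is MCAST_IDLE, serve queued VF join/leave requests,
 * and finally issue a leave to the SA for any join states no VF uses anymore.
 * Every unit of work accounted in 'rc' drops one group reference at the end.
 */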
static void mlx4_ib_mcg_work_handler(struct work_struct *work)
{
	struct mcast_group *group;
	struct mcast_req *req = NULL;
	struct ib_sa_mcmember_data *sa_data;
	u8 req_join_state;
	int rc = 1; /* release_count - this is for the scheduled work */
	u16 status;
	u8 method;

	group = container_of(work, typeof(*group), work);

	mutex_lock(&group->lock);

	/* First, let's see if a response from SM is waiting regarding this group.
	 * If so, we need to update the group's REC. If this is a bad response, we
	 * may need to send a bad response to a VF waiting for it. If VF is waiting
	 * and this is a good response, the VF will be answered later in this func. */
	if (group->state == MCAST_RESP_READY) {
		/* cancels mlx4_ib_mcg_timeout_handler */
		cancel_delayed_work(&group->timeout_work);
		status = be16_to_cpu(group->response_sa_mad.mad_hdr.status);
		method = group->response_sa_mad.mad_hdr.method;
		if (group->last_req_tid != group->response_sa_mad.mad_hdr.tid) {
			mcg_warn_group(group, "Got MAD response to existing MGID but wrong TID, dropping. Resp TID=%llx, group TID=%llx\n",
				be64_to_cpu(group->response_sa_mad.mad_hdr.tid),
				be64_to_cpu(group->last_req_tid));
			group->state = group->prev_state;
			goto process_requests;
		}

		if (status) {
			if (!list_empty(&group->pending_list))
				req = list_first_entry(&group->pending_list,
						struct mcast_req, group_list);
			if (method == IB_MGMT_METHOD_GET_RESP) {
				if (req) {
					send_reply_to_slave(req->func, group, &req->sa_mad, status);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					++rc;
				} else
					mcg_warn_group(group, "no request for failed join\n");
			} else if (method == IB_SA_METHOD_DELETE_RESP && group->demux->flushing)
				++rc;
		} else {
			u8 resp_join_state;
			u8 cur_join_state;

			resp_join_state = ((struct ib_sa_mcmember_data *)
					group->response_sa_mad.data)->scope_join_state & 7;
			cur_join_state = group->rec.scope_join_state & 7;

			if (method == IB_MGMT_METHOD_GET_RESP) {
				/* successful join */
				if (!cur_join_state && resp_join_state)
					--rc;
			} else if (!resp_join_state)
				++rc;
			memcpy(&group->rec, group->response_sa_mad.data, sizeof group->rec);
		}
		group->state = MCAST_IDLE;
	}

process_requests:
	/* We should now go over pending join/leave requests, as long as we are idle. */
	while (!list_empty(&group->pending_list) && group->state == MCAST_IDLE) {
		req = list_first_entry(&group->pending_list, struct mcast_req,
				       group_list);
		sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data;
		req_join_state = sa_data->scope_join_state & 0x7;

		/* For a leave request, we will immediately answer the VF, and
		 * update our internal counters. The actual leave will be sent
		 * to SM later, if at all needed. We dequeue the request now. */
		if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE)
			rc += handle_leave_req(group, req_join_state, req);
		else
			rc += handle_join_req(group, req_join_state, req);
	}

	/* Handle leaves */
	if (group->state == MCAST_IDLE) {
		req_join_state = get_leave_state(group);
		if (req_join_state) {
			group->rec.scope_join_state &= ~req_join_state;
			group->prev_state = group->state;
			if (send_leave_to_wire(group, req_join_state)) {
				group->state = group->prev_state;
				++rc;
			} else
				group->state = MCAST_LEAVE_SENT;
		}
	}

	if (!list_empty(&group->pending_list) && group->state == MCAST_IDLE)
		goto process_requests;
	mutex_unlock(&group->lock);

	while (rc--)
		release_group(group, 0);
}
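/*
 * A join sent with MGID 0 asks the SM to allocate the MGID. Such groups are
 * parked on mcg_mgid0_list until the response arrives; this helper matches
 * the response by TID, fills in the SM-assigned MGID and moves the group
 * into the rb-tree (or destroys it on a zero MGID / race with the SM).
 */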
static struct mcast_group *search_relocate_mgid0_group(struct mlx4_ib_demux_ctx *ctx,
							__be64 tid,
							union ib_gid *new_mgid)
{
	struct mcast_group *group = NULL, *cur_group, *n;
	struct mcast_req *req;

	mutex_lock(&ctx->mcg_table_lock);
	list_for_each_entry_safe(group, n, &ctx->mcg_mgid0_list, mgid0_list) {
		mutex_lock(&group->lock);
		if (group->last_req_tid == tid) {
			if (memcmp(new_mgid, &mgid0, sizeof mgid0)) {
				group->rec.mgid = *new_mgid;
				sprintf(group->name, "%016llx%016llx",
						be64_to_cpu(group->rec.mgid.global.subnet_prefix),
						be64_to_cpu(group->rec.mgid.global.interface_id));
				list_del_init(&group->mgid0_list);
				cur_group = mcast_insert(ctx, group);
				if (cur_group) {
					/* A race between our code and SM. Silently cleaning the new one */
					req = list_first_entry(&group->pending_list,
							       struct mcast_req, group_list);
					--group->func[req->func].num_pend_reqs;
					list_del(&req->group_list);
					list_del(&req->func_list);
					kfree(req);
					mutex_unlock(&group->lock);
					mutex_unlock(&ctx->mcg_table_lock);
					release_group(group, 0);
					return NULL;
				}

				atomic_inc(&group->refcount);
				add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				return group;
			} else {
				struct mcast_req *tmp1, *tmp2;

				list_del(&group->mgid0_list);
				if (!list_empty(&group->pending_list) && group->state != MCAST_IDLE)
					cancel_delayed_work_sync(&group->timeout_work);

				list_for_each_entry_safe(tmp1, tmp2, &group->pending_list, group_list) {
					list_del(&tmp1->group_list);
					kfree(tmp1);
				}
				mutex_unlock(&group->lock);
				mutex_unlock(&ctx->mcg_table_lock);
				kfree(group);
				return NULL;
			}
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);

	return NULL;
}
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf);
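/*
 * Look up a group by MGID and take a reference on it; optionally allocate and
 * initialize a new group when it does not exist yet. An all-zero MGID is
 * special-cased onto mcg_mgid0_list until the SM assigns a real MGID.
 */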
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
					 union ib_gid *mgid, int create,
					 gfp_t gfp_mask)
{
	struct mcast_group *group, *cur_group;
	int is_mgid0;
	int i;

	is_mgid0 = !memcmp(&mgid0, mgid, sizeof mgid0);
	if (!is_mgid0) {
		group = mcast_find(ctx, mgid);
		if (group)
			goto found;
	}

	if (!create)
		return ERR_PTR(-ENOENT);

	group = kzalloc(sizeof *group, gfp_mask);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->demux = ctx;
	group->rec.mgid = *mgid;
	INIT_LIST_HEAD(&group->pending_list);
	INIT_LIST_HEAD(&group->mgid0_list);
	for (i = 0; i < MAX_VFS; ++i)
		INIT_LIST_HEAD(&group->func[i].pending);
	INIT_WORK(&group->work, mlx4_ib_mcg_work_handler);
	INIT_DELAYED_WORK(&group->timeout_work, mlx4_ib_mcg_timeout_handler);
	mutex_init(&group->lock);
	sprintf(group->name, "%016llx%016llx",
			be64_to_cpu(group->rec.mgid.global.subnet_prefix),
			be64_to_cpu(group->rec.mgid.global.interface_id));
	sysfs_attr_init(&group->dentry.attr);
	group->dentry.show = sysfs_show_group;
	group->dentry.store = NULL;
	group->dentry.attr.name = group->name;
	group->dentry.attr.mode = 0400;
	group->state = MCAST_IDLE;

	if (is_mgid0) {
		list_add(&group->mgid0_list, &ctx->mcg_mgid0_list);
		goto found;
	}

	cur_group = mcast_insert(ctx, group);
	if (cur_group) {
		mcg_warn("group just showed up %s - confused\n", cur_group->name);
		kfree(group);
		return ERR_PTR(-EINVAL);
	}

	add_sysfs_port_mcg_attr(ctx->dev, ctx->port, &group->dentry.attr);

found:
	atomic_inc(&group->refcount);
	return group;
}
static void queue_req(struct mcast_req *req)
{
	struct mcast_group *group = req->group;

	atomic_inc(&group->refcount); /* for the request */
	atomic_inc(&group->refcount); /* for scheduling the work */
	list_add_tail(&req->group_list, &group->pending_list);
	list_add_tail(&req->func_list, &group->func[req->func].pending);
	/* calls mlx4_ib_mcg_work_handler */
	if (!queue_work(group->demux->mcg_wq, &group->work))
		safe_atomic_dec(&group->refcount);
}
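/*
 * Demux path: SA responses arriving from the wire. Responses that belong to a
 * group the PF is proxying are recorded on the group and handed to the worker;
 * other MCMember methods are passed through to the guest unchanged.
 */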
int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
			      struct ib_sa_mad *mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;

	switch (mad->mad_hdr.method) {
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
				__be64 tid = mad->mad_hdr.tid;
				*(u8 *)(&tid) = (u8)slave; /* in group we kept the modified TID */
				group = search_relocate_mgid0_group(ctx, tid, &rec->mgid);
			} else
				group = NULL;
		}

		if (!group)
			return 1; /* consumed */

		mutex_lock(&group->lock);
		group->response_sa_mad = *mad;
		group->prev_state = group->state;
		group->state = MCAST_RESP_READY;
		/* calls mlx4_ib_mcg_work_handler */
		atomic_inc(&group->refcount);
		if (!queue_work(ctx->mcg_wq, &group->work))
			safe_atomic_dec(&group->refcount);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_MGMT_METHOD_SET:
	case IB_SA_METHOD_GET_TABLE:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE:
		return 0; /* not consumed, pass-through to guest over tunnel */
	default:
		mcg_warn("In demux, port %d: unexpected MCMember method: 0x%x, dropping\n",
			port, mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
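/*
 * Multiplex path: MCMember requests sent by a VF. Joins and leaves are queued
 * on the group (bounded per VF by MAX_PEND_REQS_PER_FUNC) and serviced by the
 * worker; query-type methods are passed through.
 */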
int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
				  int slave, struct ib_sa_mad *sa_mad)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_sa_mcmember_data *rec = (struct ib_sa_mcmember_data *)sa_mad->data;
	struct mlx4_ib_demux_ctx *ctx = &dev->sriov.demux[port - 1];
	struct mcast_group *group;
	struct mcast_req *req;
	int may_create = 0;

	if (ctx->flushing)
		return -EAGAIN;

	switch (sa_mad->mad_hdr.method) {
	case IB_MGMT_METHOD_SET:
		may_create = 1;
		/* fall through */
	case IB_SA_METHOD_DELETE:
		req = kzalloc(sizeof *req, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		req->func = slave;
		req->sa_mad = *sa_mad;

		mutex_lock(&ctx->mcg_table_lock);
		group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
		mutex_unlock(&ctx->mcg_table_lock);
		if (IS_ERR(group)) {
			kfree(req);
			return PTR_ERR(group);
		}

		mutex_lock(&group->lock);
		if (group->func[slave].num_pend_reqs > MAX_PEND_REQS_PER_FUNC) {
			mutex_unlock(&group->lock);
			mcg_debug_group(group, "Port %d, Func %d has too many pending requests (%d), dropping\n",
					port, slave, MAX_PEND_REQS_PER_FUNC);
			release_group(group, 0);
			kfree(req);
			return -ENOMEM;
		}
		++group->func[slave].num_pend_reqs;
		req->group = group;
		queue_req(req);
		mutex_unlock(&group->lock);
		release_group(group, 0);
		return 1; /* consumed */
	case IB_SA_METHOD_GET_TABLE:
	case IB_MGMT_METHOD_GET_RESP:
	case IB_SA_METHOD_GET_TABLE_RESP:
	case IB_SA_METHOD_DELETE_RESP:
		return 0; /* not consumed, pass-through */
	default:
		mcg_warn("In multiplex, port %d, func %d: unexpected MCMember method: 0x%x, dropping\n",
			port, slave, sa_mad->mad_hdr.method);
		return 1; /* consumed */
	}
}
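/*
 * sysfs dump of one group: scope/join state, per-join-state member counts,
 * refcount, pending request and state machine state, the per-VF join states,
 * and finally the raw fields of the cached MCMemberRecord.
 */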
static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct mcast_group *group =
		container_of(attr, struct mcast_group, dentry);
	struct mcast_req *req = NULL;
	char pending_str[40];
	char state_str[40];
	ssize_t len = 0;
	int f;

	if (group->state == MCAST_IDLE)
		sprintf(state_str, "%s", get_state_string(group->state));
	else
		sprintf(state_str, "%s(TID=0x%llx)",
				get_state_string(group->state),
				be64_to_cpu(group->last_req_tid));
	if (list_empty(&group->pending_list)) {
		sprintf(pending_str, "No");
	} else {
		req = list_first_entry(&group->pending_list, struct mcast_req, group_list);
		sprintf(pending_str, "Yes(TID=0x%llx)",
				be64_to_cpu(req->sa_mad.mad_hdr.tid));
	}
	len += sprintf(buf + len, "%1d [%02d,%02d,%02d] %4d %4s %5s ",
			group->rec.scope_join_state & 0xf,
			group->members[2], group->members[1], group->members[0],
			atomic_read(&group->refcount),
			pending_str,
			state_str);
	for (f = 0; f < MAX_VFS; ++f)
		if (group->func[f].state == MCAST_MEMBER)
			len += sprintf(buf + len, "%d[%1x] ",
					f, group->func[f].join_state);

	len += sprintf(buf + len, "\t\t(%4hx %4x %2x %2x %2x %2x %2x "
			"%4x %4x %2x %2x)\n",
			be16_to_cpu(group->rec.pkey),
			be32_to_cpu(group->rec.qkey),
			(group->rec.mtusel_mtu & 0xc0) >> 6,
			group->rec.mtusel_mtu & 0x3f,
			group->rec.tclass,
			(group->rec.ratesel_rate & 0xc0) >> 6,
			group->rec.ratesel_rate & 0x3f,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0xf0000000) >> 28,
			(be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x0fffff00) >> 8,
			be32_to_cpu(group->rec.sl_flowlabel_hoplimit) & 0x000000ff,
			group->rec.proxy_join);

	return len;
}
int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
{
	char name[20];

	atomic_set(&ctx->tid, 0);
	sprintf(name, "mlx4_ib_mcg%d", ctx->port);
	ctx->mcg_wq = create_singlethread_workqueue(name);
	if (!ctx->mcg_wq)
		return -ENOMEM;

	mutex_init(&ctx->mcg_table_lock);
	ctx->mcg_table = RB_ROOT;
	INIT_LIST_HEAD(&ctx->mcg_mgid0_list);
	ctx->flushing = 0;

	return 0;
}
static void force_clean_group(struct mcast_group *group)
{
	struct mcast_req *req, *tmp;

	list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) {
		list_del(&req->group_list);
		kfree(req);
	}
	del_sysfs_port_mcg_attr(group->demux->dev, group->demux->port, &group->dentry.attr);
	rb_erase(&group->node, &group->demux->mcg_table);
	kfree(group);
}
static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	int i;
	struct rb_node *p;
	struct mcast_group *group;
	unsigned long end;
	int count;

	for (i = 0; i < MAX_VFS; ++i)
		clean_vf_mcast(ctx, i);

	end = jiffies + msecs_to_jiffies(MAD_TIMEOUT_MS + 3000);
	do {
		count = 0;
		mutex_lock(&ctx->mcg_table_lock);
		for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p))
			++count;
		mutex_unlock(&ctx->mcg_table_lock);
		if (!count)
			break;

		msleep(1);
	} while (time_after(end, jiffies));

	flush_workqueue(ctx->mcg_wq);
	if (destroy_wq)
		destroy_workqueue(ctx->mcg_wq);

	mutex_lock(&ctx->mcg_table_lock);
	while ((p = rb_first(&ctx->mcg_table)) != NULL) {
		group = rb_entry(p, struct mcast_group, node);
		if (atomic_read(&group->refcount))
			mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);

		force_clean_group(group);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
struct clean_work {
	struct work_struct work;
	struct mlx4_ib_demux_ctx *ctx;
	int destroy_wq;
};
static void mcg_clean_task(struct work_struct *work)
{
	struct clean_work *cw = container_of(work, struct clean_work, work);

	_mlx4_ib_mcg_port_cleanup(cw->ctx, cw->destroy_wq);
	cw->ctx->flushing = 0;
	kfree(cw);
}
void mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy_wq)
{
	struct clean_work *work;

	if (ctx->flushing)
		return;

	ctx->flushing = 1;

	if (destroy_wq) {
		_mlx4_ib_mcg_port_cleanup(ctx, destroy_wq);
		ctx->flushing = 0;
		return;
	}

	work = kmalloc(sizeof *work, GFP_KERNEL);
	if (!work) {
		ctx->flushing = 0;
		mcg_warn("failed allocating work for cleanup\n");
		return;
	}

	work->ctx = ctx;
	work->destroy_wq = destroy_wq;
	INIT_WORK(&work->work, mcg_clean_task);
	queue_work(clean_wq, &work->work);
}
static void build_leave_mad(struct mcast_req *req)
{
	struct ib_sa_mad *mad = &req->sa_mad;

	mad->mad_hdr.method = IB_SA_METHOD_DELETE;
}
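/*
 * VF teardown path: drop every request the VF still has queued on this group,
 * then queue one synthetic leave (a "clean" request) so the port gives up any
 * join states that VF was holding.
 */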
static void clear_pending_reqs(struct mcast_group *group, int vf)
{
	struct mcast_req *req, *tmp, *group_first = NULL;
	int clear;
	int pend = 0;

	if (!list_empty(&group->pending_list))
		group_first = list_first_entry(&group->pending_list, struct mcast_req, group_list);

	list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) {
		clear = 1;
		if (group_first == req &&
		    (group->state == MCAST_JOIN_SENT ||
		     group->state == MCAST_LEAVE_SENT)) {
			clear = cancel_delayed_work(&group->timeout_work);
			pend = !clear;
			group->state = MCAST_IDLE;
		}
		if (clear) {
			--group->func[vf].num_pend_reqs;
			list_del(&req->group_list);
			list_del(&req->func_list);
			kfree(req);
			atomic_dec(&group->refcount);
		}
	}

	if (!pend && (!list_empty(&group->func[vf].pending) || group->func[vf].num_pend_reqs)) {
		mcg_warn_group(group, "DRIVER BUG: list_empty %d, num_pend_reqs %d\n",
			       list_empty(&group->func[vf].pending), group->func[vf].num_pend_reqs);
	}
}
static int push_deleting_req(struct mcast_group *group, int slave)
{
	struct mcast_req *req;
	struct mcast_req *pend_req;

	if (!group->func[slave].join_state)
		return 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req) {
		mcg_warn_group(group, "failed allocation - may leave stale groups\n");
		return -ENOMEM;
	}

	if (!list_empty(&group->func[slave].pending)) {
		pend_req = list_entry(group->func[slave].pending.prev, struct mcast_req, group_list);
		if (pend_req->clean) {
			kfree(req);
			return 0;
		}
	}

	req->clean = 1;
	req->func = slave;
	req->group = group;
	++group->func[slave].num_pend_reqs;
	build_leave_mad(req);
	queue_req(req);
	return 0;
}
void clean_vf_mcast(struct mlx4_ib_demux_ctx *ctx, int slave)
{
	struct mcast_group *group;
	struct rb_node *p;

	mutex_lock(&ctx->mcg_table_lock);
	for (p = rb_first(&ctx->mcg_table); p; p = rb_next(p)) {
		group = rb_entry(p, struct mcast_group, node);
		mutex_lock(&group->lock);
		if (atomic_read(&group->refcount)) {
			/* clear pending requests of this VF */
			clear_pending_reqs(group, slave);
			push_deleting_req(group, slave);
		}
		mutex_unlock(&group->lock);
	}
	mutex_unlock(&ctx->mcg_table_lock);
}
int mlx4_ib_mcg_init(void)
{
	clean_wq = create_singlethread_workqueue("mlx4_ib_mcg");
	if (!clean_wq)
		return -ENOMEM;

	return 0;
}

void mlx4_ib_mcg_destroy(void)
{
	destroy_workqueue(clean_wq);
}