/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>
/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is per device that registers with rdmavt
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}
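
/*
 * Illustrative usage sketch (not part of this file): rdmavt is expected to
 * call this once per device during registration, before any multicast
 * attach/detach verb can run, so n_mcast_grps_lock is always initialized:
 *
 *	rvt_driver_mcast_init(rdi);
 */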
/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	mqp->qp = qp;
	rvt_get_qp(qp);

bail:
	return mqp;
}
static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify hfi1_destroy_qp() if it is waiting. */
	rvt_put_qp(qp);

	kfree(mqp);
}
/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 * @lid: the multicast LID (host order)
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid, u16 lid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mcast_addr.mgid = *mgid;
	mcast->mcast_addr.lid = lid;

	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}
static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}
/**
 * rvt_mcast_find - search the global table for the given multicast GID/LID
 * NOTE: It is valid to have 1 MLID with multiple MGIDs. It is not valid
 *       to have 1 MGID with multiple MLIDs.
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 * @lid: the multicast LID portion of the multicast address (host order)
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
				 u16 lid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID/MLID must match */
			if (mcast->mcast_addr.lid == lid) {
				atomic_inc(&mcast->refcount);
				found = mcast;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
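
/*
 * Illustrative usage sketch (not part of this file): a driver's multicast
 * receive path typically looks up the group, replays the packet to every
 * attached QP, and then drops the reference taken by rvt_mcast_find(),
 * waking anyone blocked in rvt_detach_mcast().  process_mcast_pkt() is a
 * hypothetical helper:
 *
 *	struct rvt_mcast *mcast;
 *	struct rvt_mcast_qp *p;
 *
 *	mcast = rvt_mcast_find(ibp, &grh->dgid, dlid);
 *	if (!mcast)
 *		goto drop;
 *	list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *		process_mcast_pkt(p->qp, packet);
 *	if (atomic_dec_return(&mcast->refcount) <= 1)
 *		wake_up(&mcast->wait);
 */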
/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev info
 * @ibp: the IB port structure
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return: zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added. Return EINVAL if the MGID was
 * found, but the MLID did NOT match.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mcast_addr.mgid.raw,
			     tmcast->mcast_addr.mgid.raw,
			     sizeof(mcast->mcast_addr.mgid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* MGID matched; the MLID must match too. */
		if (tmcast->mcast_addr.lid != mcast->mcast_addr.lid) {
			ret = EINVAL;
			goto bail;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid, lid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ret = -ENOMEM;
		goto bail_mqp;
	case EINVAL:
		/* Invalid MGID/MLID pair */
		ret = -EINVAL;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}
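
/*
 * Illustrative usage sketch (not part of this file): consumers do not call
 * rvt_attach_mcast() directly; it is wired up as the attach_mcast verb, so
 * a kernel ULP reaches it through the ib_core API:
 *
 *	ret = ib_attach_mcast(ibqp, &mgid, mlid);
 *	if (ret)
 *		pr_err("mcast attach failed: %d\n", ret);
 */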
/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast gid
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mcast_addr.mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* MGID/MLID must match */
			if (mcast->mcast_addr.lid != lid) {
				spin_unlock_irq(&ibp->lock);
				return -EINVAL;
			}
			break;
		}
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);
	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}
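
/*
 * Illustrative usage sketch (not part of this file): detach mirrors attach
 * through ib_core and must pass the same MGID/MLID pair the QP was attached
 * with, or the rb-tree lookup above fails with -EINVAL:
 *
 *	ret = ib_detach_mcast(ibqp, &mgid, mlid);
 */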
/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: in use count
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;

	return in_use;
}
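
/*
 * Illustrative usage sketch (not part of this file): despite the name, a
 * nonzero return means some port still has mcast groups attached, so a
 * teardown path might sanity-check with it before freeing the device:
 *
 *	if (rvt_mcast_tree_empty(rdi))
 *		pr_err("rdmavt: mcast groups still in use on unregister\n");
 */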