/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"
/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is per device that registers with rdmavt
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
	/*
	 * Anything that needs setup for multicast on a per driver or per rdi
	 * basis should be done in here.
	 */
	spin_lock_init(&rdi->n_mcast_grps_lock);
}
/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
	struct rvt_mcast_qp *mqp;

	mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
	if (!mqp)
		goto bail;

	/* Hold a reference so the QP cannot be destroyed while linked. */
	mqp->qp = qp;
	atomic_inc(&qp->refcount);

bail:
	return mqp;
}
static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
	struct rvt_qp *qp = mqp->qp;

	/* Notify rvt_destroy_qp() if it is waiting. */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);

	kfree(mqp);
}
/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
	struct rvt_mcast *mcast;

	mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
	if (!mcast)
		goto bail;

	mcast->mgid = *mgid;
	INIT_LIST_HEAD(&mcast->qp_list);
	init_waitqueue_head(&mcast->wait);
	atomic_set(&mcast->refcount, 0);

bail:
	return mcast;
}
static void rvt_mcast_free(struct rvt_mcast *mcast)
{
	struct rvt_mcast_qp *p, *tmp;

	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
		rvt_mcast_qp_free(p);

	kfree(mcast);
}
/**
 * rvt_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
	struct rb_node *n;
	unsigned long flags;
	struct rvt_mcast *found = NULL;

	spin_lock_irqsave(&ibp->lock, flags);
	n = ibp->mcast_tree.rb_node;
	while (n) {
		int ret;
		struct rvt_mcast *mcast;

		mcast = rb_entry(n, struct rvt_mcast, rb_node);

		ret = memcmp(mgid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = n->rb_left;
		} else if (ret > 0) {
			n = n->rb_right;
		} else {
			/* Pin the group until the caller drops the count. */
			atomic_inc(&mcast->refcount);
			found = mcast;
			break;
		}
	}
	spin_unlock_irqrestore(&ibp->lock, flags);
	return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
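
/*
 * Illustrative sketch (not part of this file): a driver receive path would
 * typically look up the group for a packet's destination GID, walk the QP
 * list while the reference returned by rvt_mcast_find() is held, and then
 * drop that reference, waking rvt_detach_mcast() if it is waiting. The
 * function name and the per-QP delivery hook are hypothetical.
 */
static void example_mcast_deliver(struct rvt_ibport *ibp, union ib_gid *dgid)
{
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *p;

	mcast = rvt_mcast_find(ibp, dgid);
	if (!mcast)
		return; /* No QPs attached to this GID. */

	rcu_read_lock();
	list_for_each_entry_rcu(p, &mcast->qp_list, list) {
		/* deliver_to_qp(p->qp) would go here (hypothetical). */
	}
	rcu_read_unlock();

	/* Drop the reference taken by rvt_mcast_find(). */
	if (atomic_dec_return(&mcast->refcount) <= 1)
		wake_up(&mcast->wait);
}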
/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev struct
 * @ibp: the IB port on which to insert the GID
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return: zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
			 struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
	struct rb_node **n = &ibp->mcast_tree.rb_node;
	struct rb_node *pn = NULL;
	int ret;

	spin_lock_irq(&ibp->lock);

	while (*n) {
		struct rvt_mcast *tmcast;
		struct rvt_mcast_qp *p;

		pn = *n;
		tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

		ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0) {
			n = &pn->rb_left;
			continue;
		}
		if (ret > 0) {
			n = &pn->rb_right;
			continue;
		}

		/* Search the QP list to see if this is already there. */
		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
			if (p->qp == mqp->qp) {
				ret = ESRCH;
				goto bail;
			}
		}
		if (tmcast->n_attached ==
		    rdi->dparms.props.max_mcast_qp_attach) {
			ret = ENOMEM;
			goto bail;
		}

		tmcast->n_attached++;

		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
		ret = EEXIST;
		goto bail;
	}

	spin_lock(&rdi->n_mcast_grps_lock);
	if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
		spin_unlock(&rdi->n_mcast_grps_lock);
		ret = ENOMEM;
		goto bail;
	}

	rdi->n_mcast_grps_allocated++;
	spin_unlock(&rdi->n_mcast_grps_lock);

	mcast->n_attached++;

	list_add_tail_rcu(&mqp->list, &mcast->qp_list);

	atomic_inc(&mcast->refcount);
	rb_link_node(&mcast->rb_node, pn, n);
	rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

	ret = 0;

bail:
	spin_unlock_irq(&ibp->lock);

	return ret;
}
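
/*
 * Note on the return convention above: rvt_mcast_add() deliberately returns
 * *positive* errno values (ESRCH, EEXIST, ENOMEM) so the caller can tell
 * these internal outcomes apart from the negative errno codes it ultimately
 * reports back to the ib_core layer, as the switch in rvt_attach_mcast()
 * below demonstrates.
 */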
/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast GID
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast;
	struct rvt_mcast_qp *mqp;
	int ret = -ENOMEM;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	/*
	 * Allocate data structures since it's better to do this outside of
	 * spin locks and it will most likely be needed.
	 */
	mcast = rvt_mcast_alloc(gid);
	if (!mcast)
		return -ENOMEM;

	mqp = rvt_mcast_qp_alloc(qp);
	if (!mqp)
		goto bail_mcast;

	switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
	case ESRCH:
		/* Neither was used: OK to attach the same QP twice. */
		ret = 0;
		goto bail_mqp;
	case EEXIST: /* The mcast wasn't used */
		ret = 0;
		goto bail_mcast;
	case ENOMEM:
		/* Exceeded the maximum number of mcast groups. */
		ret = -ENOMEM;
		goto bail_mqp;
	default:
		break;
	}

	return 0;

bail_mqp:
	rvt_mcast_qp_free(mqp);

bail_mcast:
	rvt_mcast_free(mcast);

	return ret;
}
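
/*
 * Illustrative sketch (not part of this file): consumers do not call
 * rvt_attach_mcast() directly; they go through the core verbs API, which
 * dispatches to this driver entry point for rdmavt-backed devices. The
 * wrapper name below is hypothetical; ib_attach_mcast() is the real verb.
 */
static int example_join(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
	/* For rdmavt devices this lands in rvt_attach_mcast(). */
	return ib_attach_mcast(qp, mgid, mlid);
}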
/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast GID
 * @lid: multicast lid
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
	struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
	struct rvt_mcast *mcast = NULL;
	struct rvt_mcast_qp *p, *tmp, *delp = NULL;
	struct rb_node *n;
	int last = 0;
	int ret = 0;

	if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
		return -EINVAL;

	spin_lock_irq(&ibp->lock);

	/* Find the GID in the mcast table. */
	n = ibp->mcast_tree.rb_node;
	while (1) {
		if (!n) {
			spin_unlock_irq(&ibp->lock);
			return -EINVAL;
		}

		mcast = rb_entry(n, struct rvt_mcast, rb_node);
		ret = memcmp(gid->raw, mcast->mgid.raw,
			     sizeof(union ib_gid));
		if (ret < 0)
			n = n->rb_left;
		else if (ret > 0)
			n = n->rb_right;
		else
			break;
	}

	/* Search the QP list. */
	list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
		if (p->qp != qp)
			continue;
		/*
		 * We found it, so remove it, but don't poison the forward
		 * link until we are sure there are no list walkers.
		 */
		list_del_rcu(&p->list);
		mcast->n_attached--;
		delp = p;

		/* If this was the last attached QP, remove the GID too. */
		if (list_empty(&mcast->qp_list)) {
			rb_erase(&mcast->rb_node, &ibp->mcast_tree);
			last = 1;
		}
		break;
	}

	spin_unlock_irq(&ibp->lock);

	/* QP not attached */
	if (!delp)
		return -EINVAL;

	/*
	 * Wait for any list walkers to finish before freeing the
	 * list element.
	 */
	wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
	rvt_mcast_qp_free(delp);

	if (last) {
		atomic_dec(&mcast->refcount);
		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
		rvt_mcast_free(mcast);
		spin_lock_irq(&rdi->n_mcast_grps_lock);
		rdi->n_mcast_grps_allocated--;
		spin_unlock_irq(&rdi->n_mcast_grps_lock);
	}

	return 0;
}
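
/*
 * A note on the teardown ordering above: readers pin the group via
 * mcast->refcount (see rvt_mcast_find()) while they walk qp_list, so
 * rvt_detach_mcast() first waits until its own tree reference is the only
 * one left (refcount <= 1) before freeing the unlinked list element, and,
 * when the group itself is going away, drops that last reference and waits
 * for the count to reach zero before freeing the rvt_mcast.
 */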
/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: in use count
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
	int i;
	int in_use = 0;

	for (i = 0; i < rdi->dparms.nports; i++)
		if (rdi->ports[i]->mcast_tree.rb_node)
			in_use++;
	return in_use;
}
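
/*
 * Illustrative sketch (not part of this file): despite its name,
 * rvt_mcast_tree_empty() returns the number of ports that still have
 * groups attached, so teardown code treats a non-zero result as "busy".
 * The function below is a hypothetical example of such a check.
 */
static void example_check_teardown(struct rvt_dev_info *rdi)
{
	/* Non-zero means at least one port still has attached groups. */
	if (rvt_mcast_tree_empty(rdi))
		pr_err("rdmavt: QPs still attached to multicast groups\n");
}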