/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <rdma/rdma_vt.h>
#include <rdma/rdmavt_qp.h>

#include "mcast.h"

/**
 * rvt_driver_mcast_init - init resources for multicast
 * @rdi: rvt dev struct
 *
 * This is called once per device that registers with rdmavt.
 */
void rvt_driver_mcast_init(struct rvt_dev_info *rdi)
{
        /*
         * Anything that needs setup for multicast on a per driver or per rdi
         * basis should be done in here.
         */
        spin_lock_init(&rdi->n_mcast_grps_lock);
}

/**
 * rvt_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct rvt_mcast_qp *rvt_mcast_qp_alloc(struct rvt_qp *qp)
{
        struct rvt_mcast_qp *mqp;

        mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
                goto bail;

        mqp->qp = qp;
        rvt_get_qp(qp);

bail:
        return mqp;
}

static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
{
        struct rvt_qp *qp = mqp->qp;

        /* Notify hfi1_destroy_qp() if it is waiting. */
        rvt_put_qp(qp);

        kfree(mqp);
}

/**
 * rvt_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct rvt_mcast *rvt_mcast_alloc(union ib_gid *mgid)
{
        struct rvt_mcast *mcast;

        mcast = kzalloc(sizeof(*mcast), GFP_KERNEL);
        if (!mcast)
                goto bail;

        mcast->mgid = *mgid;
        INIT_LIST_HEAD(&mcast->qp_list);
        init_waitqueue_head(&mcast->wait);
        atomic_set(&mcast->refcount, 0);

bail:
        return mcast;
}

static void rvt_mcast_free(struct rvt_mcast *mcast)
{
        struct rvt_mcast_qp *p, *tmp;

        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
                rvt_mcast_qp_free(p);

        kfree(mcast);
}

/**
 * rvt_mcast_find - search the global table for the given multicast GID
 * @ibp: the IB port structure
 * @mgid: the multicast GID to search for
 *
 * The caller is responsible for decrementing the reference count if found.
 *
 * Return: NULL if not found.
 */
struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct rvt_mcast *found = NULL;

        spin_lock_irqsave(&ibp->lock, flags);
        n = ibp->mcast_tree.rb_node;
        while (n) {
                int ret;
                struct rvt_mcast *mcast;

                mcast = rb_entry(n, struct rvt_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = n->rb_left;
                } else if (ret > 0) {
                        n = n->rb_right;
                } else {
                        atomic_inc(&mcast->refcount);
                        found = mcast;
                        break;
                }
        }
        spin_unlock_irqrestore(&ibp->lock, flags);
        return found;
}
EXPORT_SYMBOL(rvt_mcast_find);
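
/*
 * Illustrative usage sketch (not part of rdmavt itself): per the kernel-doc
 * above, a caller of rvt_mcast_find() owns one reference on the returned
 * group and must drop it when done, waking any waiter in rvt_detach_mcast().
 * A driver receive path along the lines of hfi1/qib might look roughly like
 * this (variable names are hypothetical):
 *
 *	mcast = rvt_mcast_find(ibp, &mgid);
 *	if (!mcast)
 *		goto drop;
 *	list_for_each_entry_rcu(p, &mcast->qp_list, list)
 *		... deliver the packet to p->qp ...
 *	if (atomic_dec_return(&mcast->refcount) <= 1)
 *		wake_up(&mcast->wait);
 */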

/**
 * rvt_mcast_add - insert mcast GID into table and attach QP struct
 * @rdi: rvt dev struct
 * @ibp: the IB port structure
 * @mcast: the mcast GID structure to insert
 * @mqp: the QP to attach
 *
 * Return: zero if both were added. Return EEXIST if the GID was already in
 * the table but the QP was added. Return ESRCH if the QP was already
 * attached and neither structure was added. Return ENOMEM if a resource
 * limit (max_mcast_grp or max_mcast_qp_attach) was exceeded.
 */
static int rvt_mcast_add(struct rvt_dev_info *rdi, struct rvt_ibport *ibp,
                         struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
{
        struct rb_node **n = &ibp->mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        int ret;

        spin_lock_irq(&ibp->lock);

        while (*n) {
                struct rvt_mcast *tmcast;
                struct rvt_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct rvt_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* Search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                ret = ESRCH;
                                goto bail;
                        }
                }
                if (tmcast->n_attached ==
                    rdi->dparms.props.max_mcast_qp_attach) {
                        ret = ENOMEM;
                        goto bail;
                }

                tmcast->n_attached++;

                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                ret = EEXIST;
                goto bail;
        }

        spin_lock(&rdi->n_mcast_grps_lock);
        if (rdi->n_mcast_grps_allocated == rdi->dparms.props.max_mcast_grp) {
                spin_unlock(&rdi->n_mcast_grps_lock);
                ret = ENOMEM;
                goto bail;
        }

        rdi->n_mcast_grps_allocated++;
        spin_unlock(&rdi->n_mcast_grps_lock);

        mcast->n_attached++;

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &ibp->mcast_tree);

        ret = 0;

bail:
        spin_unlock_irq(&ibp->lock);

        return ret;
}

/**
 * rvt_attach_mcast - attach a qp to a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast GID
 * @lid: multicast LID
 *
 * Return: 0 on success
 */
int rvt_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
        struct rvt_mcast *mcast;
        struct rvt_mcast_qp *mqp;
        int ret = -ENOMEM;

        if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
                return -EINVAL;

        /*
         * Allocate data structures since it's better to do this outside of
         * spin locks and it will most likely be needed.
         */
        mcast = rvt_mcast_alloc(gid);
        if (!mcast)
                return -ENOMEM;

        mqp = rvt_mcast_qp_alloc(qp);
        if (!mqp)
                goto bail_mcast;

        switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
        case ESRCH:
                /* Neither was used: OK to attach the same QP twice. */
                ret = 0;
                goto bail_mqp;
        case EEXIST: /* The mcast wasn't used */
                ret = 0;
                goto bail_mcast;
        case ENOMEM:
                /* Exceeded the maximum number of mcast groups. */
                ret = -ENOMEM;
                goto bail_mqp;
        default:
                break;
        }

        return 0;

bail_mqp:
        rvt_mcast_qp_free(mqp);

bail_mcast:
        rvt_mcast_free(mcast);

        return ret;
}

/**
 * rvt_detach_mcast - remove a qp from a multicast group
 * @ibqp: Infiniband qp
 * @gid: multicast GID
 * @lid: multicast LID
 *
 * Return: 0 on success
 */
int rvt_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
        struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
        struct rvt_ibport *ibp = rdi->ports[qp->port_num - 1];
        struct rvt_mcast *mcast = NULL;
        struct rvt_mcast_qp *p, *tmp, *delp = NULL;
        struct rb_node *n;
        int last = 0;
        int ret = 0;

        if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
                return -EINVAL;

        spin_lock_irq(&ibp->lock);

        /* Find the GID in the mcast table. */
        n = ibp->mcast_tree.rb_node;
        while (1) {
                if (!n) {
                        spin_unlock_irq(&ibp->lock);
                        return -EINVAL;
                }

                mcast = rb_entry(n, struct rvt_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);
                mcast->n_attached--;
                delp = p;

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &ibp->mcast_tree);
                        last = 1;
                }
                break;
        }

        spin_unlock_irq(&ibp->lock);
        /* QP not attached */
        if (!delp)
                return -EINVAL;

        /*
         * Wait for any list walkers to finish before freeing the
         * list element.
         */
        wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
        rvt_mcast_qp_free(delp);

        if (last) {
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                rvt_mcast_free(mcast);
                spin_lock_irq(&rdi->n_mcast_grps_lock);
                rdi->n_mcast_grps_allocated--;
                spin_unlock_irq(&rdi->n_mcast_grps_lock);
        }

        return 0;
}
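
/*
 * Illustrative note (not part of this file): rvt_attach_mcast() and
 * rvt_detach_mcast() above are the handlers rdmavt supplies for the core
 * attach_mcast/detach_mcast verbs, so a ULP normally reaches them through
 * the generic helpers, e.g. (assuming an already-created UD QP "ibqp" and
 * hypothetical "mgid"/"mlid" values):
 *
 *	ret = ib_attach_mcast(ibqp, &mgid, mlid);
 *	...
 *	ret = ib_detach_mcast(ibqp, &mgid, mlid);
 */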

/**
 * rvt_mcast_tree_empty - determine if any qps are attached to any mcast group
 * @rdi: rvt dev struct
 *
 * Return: in use count
 */
int rvt_mcast_tree_empty(struct rvt_dev_info *rdi)
{
        int i;
        int in_use = 0;

        for (i = 0; i < rdi->dparms.nports; i++)
                if (rdi->ports[i]->mcast_tree.rb_node)
                        in_use++;
        return in_use;
}
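
/*
 * Illustrative caller sketch (not part of this file): the returned count can
 * be folded into "still in use" accounting when a driver tears down, along
 * the lines of rdmavt's QP cleanup path, e.g.:
 *
 *	qp_inuse += rvt_mcast_tree_empty(rdi);
 */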