drivers/infiniband/core/security.c
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey = NULL;
	struct pkey_index_qp_list *tmp_pkey;
	struct ib_device *dev = pp->sec->dev;

	spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
	list_for_each_entry(tmp_pkey,
			    &dev->port_pkey_list[pp->port_num].pkey_list,
			    pkey_index_list) {
		if (tmp_pkey->pkey_index == pp->pkey_index) {
			pkey = tmp_pkey;
			break;
		}
	}
	spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
	return pkey;
}
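/* Fetch the cached pkey value and subnet prefix for pp's port and
 * pkey index.
 */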
static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
				      u16 *pkey,
				      u64 *subnet_prefix)
{
	struct ib_device *dev = pp->sec->dev;
	int ret;

	ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

	return ret;
}
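/* Ask the LSM whether this QP, and every QP sharing its security
 * structure, may use the given pkey on the given subnet.
 */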
static int enforce_qp_pkey_security(u16 pkey,
				    u64 subnet_prefix,
				    struct ib_qp_security *qp_sec)
{
	struct ib_qp_security *shared_qp_sec;
	int ret;

	ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
	if (ret)
		return ret;

	list_for_each_entry(shared_qp_sec,
			    &qp_sec->shared_qp_list,
			    shared_qp_list) {
		ret = security_ib_pkey_access(shared_qp_sec->security,
					      subnet_prefix,
					      pkey);
		if (ret)
			return ret;
	}
	return 0;
}
/* The caller of this function must hold the QP security
 * mutex of the QP owning the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP and the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
				       struct ib_qp_security *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret = 0;

	if (!pps)
		return 0;

	if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->main,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
		if (ret)
			return ret;
	}

	if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
		ret = get_pkey_and_subnet_prefix(&pps->alt,
						 &pkey,
						 &subnet_prefix);
		if (ret)
			return ret;

		ret = enforce_qp_pkey_security(pkey,
					       subnet_prefix,
					       sec);
	}

	return ret;
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
	struct ib_qp_security *shared_qp_sec;
	struct ib_qp_attr attr = {
		.qp_state = IB_QPS_ERR
	};
	struct ib_event event = {
		.event = IB_EVENT_QP_FATAL
	};

	/* If the QP is in the process of being destroyed
	 * the qp pointer in the security structure is
	 * undefined.  It cannot be modified now.
	 */
	if (sec->destroying)
		return;

	ib_modify_qp(sec->qp,
		     &attr,
		     IB_QP_STATE);

	if (sec->qp->event_handler && sec->qp->qp_context) {
		event.element.qp = sec->qp;
		sec->qp->event_handler(&event,
				       sec->qp->qp_context);
	}

	list_for_each_entry(shared_qp_sec,
			    &sec->shared_qp_list,
			    shared_qp_list) {
		struct ib_qp *qp = shared_qp_sec->qp;

		if (qp->event_handler && qp->qp_context) {
			event.element.qp = qp;
			event.device = qp->device;
			qp->event_handler(&event,
					  qp->qp_context);
		}
	}
}
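/* Walk the QPs listed under this pkey index and send any that the LSM
 * no longer allows to use the current pkey value to the error state.
 * QPs that are concurrently being destroyed are skipped by qp_to_error
 * and have their error_complete completion signalled instead.
 */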
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
				  struct ib_device *device,
				  u8 port_num,
				  u64 subnet_prefix)
{
	struct ib_port_pkey *pp, *tmp_pp;
	bool comp;
	LIST_HEAD(to_error_list);
	u16 pkey_val;

	if (!ib_get_cached_pkey(device,
				port_num,
				pkey->pkey_index,
				&pkey_val)) {
		spin_lock(&pkey->qp_list_lock);
		list_for_each_entry(pp, &pkey->qp_list, qp_list) {
			if (atomic_read(&pp->sec->error_list_count))
				continue;

			if (enforce_qp_pkey_security(pkey_val,
						     subnet_prefix,
						     pp->sec)) {
				atomic_inc(&pp->sec->error_list_count);
				list_add(&pp->to_error_list,
					 &to_error_list);
			}
		}
		spin_unlock(&pkey->qp_list_lock);
	}

	list_for_each_entry_safe(pp,
				 tmp_pp,
				 &to_error_list,
				 to_error_list) {
		mutex_lock(&pp->sec->mutex);
		qp_to_error(pp->sec);
		list_del(&pp->to_error_list);
		atomic_dec(&pp->sec->error_list_count);
		comp = pp->sec->destroying;
		mutex_unlock(&pp->sec->mutex);

		if (comp)
			complete(&pp->sec->error_complete);
	}
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *tmp_pkey;
	struct pkey_index_qp_list *pkey;
	struct ib_device *dev;
	u8 port_num = pp->port_num;
	int ret = 0;

	if (pp->state != IB_PORT_PKEY_VALID)
		return 0;

	dev = pp->sec->dev;

	pkey = get_pkey_idx_qp_list(pp);

	if (!pkey) {
		bool found = false;

		pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
		if (!pkey)
			return -ENOMEM;

		spin_lock(&dev->port_pkey_list[port_num].list_lock);
		/* Check for the PKey again.  A racing process may
		 * have created it.
		 */
		list_for_each_entry(tmp_pkey,
				    &dev->port_pkey_list[port_num].pkey_list,
				    pkey_index_list) {
			if (tmp_pkey->pkey_index == pp->pkey_index) {
				kfree(pkey);
				pkey = tmp_pkey;
				found = true;
				break;
			}
		}

		if (!found) {
			pkey->pkey_index = pp->pkey_index;
			spin_lock_init(&pkey->qp_list_lock);
			INIT_LIST_HEAD(&pkey->qp_list);
			list_add(&pkey->pkey_index_list,
				 &dev->port_pkey_list[port_num].pkey_list);
		}
		spin_unlock(&dev->port_pkey_list[port_num].list_lock);
	}

	spin_lock(&pkey->qp_list_lock);
	list_add(&pp->qp_list, &pkey->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	pp->state = IB_PORT_PKEY_LISTED;

	return ret;
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
	struct pkey_index_qp_list *pkey;

	if (pp->state != IB_PORT_PKEY_LISTED)
		return;

	pkey = get_pkey_idx_qp_list(pp);

	spin_lock(&pkey->qp_list_lock);
	list_del(&pp->qp_list);
	spin_unlock(&pkey->qp_list_lock);

	/* The setting may still be valid, i.e. after
	 * a destroy has failed for example.
	 */
	pp->state = IB_PORT_PKEY_VALID;
}
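/* Free the LSM security blob, the cached port/pkey settings and the
 * security structure itself.
 */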
static void destroy_qp_security(struct ib_qp_security *sec)
{
	security_ib_free_security(sec->security);
	kfree(sec->ports_pkeys);
	kfree(sec);
}
/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
					  const struct ib_qp_attr *qp_attr,
					  int qp_attr_mask)
{
	struct ib_ports_pkeys *new_pps;
	struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

	new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
	if (!new_pps)
		return NULL;

	if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
		if (!qp_pps) {
			new_pps->main.port_num = qp_attr->port_num;
			new_pps->main.pkey_index = qp_attr->pkey_index;
		} else {
			new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
						  qp_attr->port_num :
						  qp_pps->main.port_num;

			new_pps->main.pkey_index =
					(qp_attr_mask & IB_QP_PKEY_INDEX) ?
					 qp_attr->pkey_index :
					 qp_pps->main.pkey_index;
		}
		new_pps->main.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->main.port_num = qp_pps->main.port_num;
		new_pps->main.pkey_index = qp_pps->main.pkey_index;
		if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->main.state = IB_PORT_PKEY_VALID;
	}

	if (qp_attr_mask & IB_QP_ALT_PATH) {
		new_pps->alt.port_num = qp_attr->alt_port_num;
		new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
		new_pps->alt.state = IB_PORT_PKEY_VALID;
	} else if (qp_pps) {
		new_pps->alt.port_num = qp_pps->alt.port_num;
		new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
		if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
			new_pps->alt.state = IB_PORT_PKEY_VALID;
	}

	new_pps->main.sec = qp->qp_sec;
	new_pps->alt.sec = qp->qp_sec;
	return new_pps;
}
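/* Set up security for a handle opened on a shared QP: create its
 * security structure, check it against the real QP's current port/pkey
 * settings and, if allowed, link it into the real QP's shared_qp_list.
 */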
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	struct ib_qp *real_qp = qp->real_qp;
	int ret;

	ret = ib_create_qp_security(qp, dev);

	if (ret)
		return ret;

	if (!qp->qp_sec)
		return 0;

	mutex_lock(&real_qp->qp_sec->mutex);
	ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
					  qp->qp_sec);

	if (ret)
		goto ret;

	if (qp != real_qp)
		list_add(&qp->qp_sec->shared_qp_list,
			 &real_qp->qp_sec->shared_qp_list);
ret:
	mutex_unlock(&real_qp->qp_sec->mutex);
	if (ret)
		destroy_qp_security(qp->qp_sec);

	return ret;
}
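/* Unlink a shared QP handle from the real QP's shared_qp_list and free
 * its security structure.
 */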
void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
	struct ib_qp *real_qp = sec->qp->real_qp;

	mutex_lock(&real_qp->qp_sec->mutex);
	list_del(&sec->shared_qp_list);
	mutex_unlock(&real_qp->qp_sec->mutex);

	destroy_qp_security(sec);
}
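/* Allocate and initialize the QP security structure.  Devices with no
 * IB ports get no security context and qp->qp_sec stays NULL.
 */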
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
	u8 i = rdma_start_port(dev);
	bool is_ib = false;
	int ret;

	while (i <= rdma_end_port(dev) && !is_ib)
		is_ib = rdma_protocol_ib(dev, i++);

	/* If this isn't an IB device don't create the security context */
	if (!is_ib)
		return 0;

	qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
	if (!qp->qp_sec)
		return -ENOMEM;

	qp->qp_sec->qp = qp;
	qp->qp_sec->dev = dev;
	mutex_init(&qp->qp_sec->mutex);
	INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
	atomic_set(&qp->qp_sec->error_list_count, 0);
	init_completion(&qp->qp_sec->error_complete);
	ret = security_ib_alloc_security(&qp->qp_sec->security);
	if (ret) {
		kfree(qp->qp_sec);
		qp->qp_sec = NULL;
	}

	return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);
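/* Begin tearing down the QP's security state: delist its port/pkey
 * settings, mark the structure as destroying so the error flow leaves
 * the QP alone, and record how many error-flow completions must be
 * waited for.
 */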
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
	/* Return if not IB */
	if (!sec)
		return;

	mutex_lock(&sec->mutex);

	/* Remove the QP from the lists so it won't get added to
	 * a to_error_list during the destroy process.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_remove(&sec->ports_pkeys->main);
		port_pkey_list_remove(&sec->ports_pkeys->alt);
	}

	/* If the QP is already in one or more of those lists
	 * the destroying flag will ensure the to-error flow
	 * doesn't operate on an undefined QP.
	 */
	sec->destroying = true;

	/* Record the error list count to know how many completions
	 * to wait for.
	 */
	sec->error_comps_pending = atomic_read(&sec->error_list_count);

	mutex_unlock(&sec->mutex);
}
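/* Undo ib_destroy_qp_security_begin when the QP is not destroyed after
 * all: wait out any pending error-flow work, relist the port/pkey
 * settings and re-verify them, sending the QP to error if access is no
 * longer allowed.
 */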
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
	int ret;
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is in progress this
	 * QP security could be marked for an error state
	 * transition.  Wait for this to complete.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	mutex_lock(&sec->mutex);
	sec->destroying = false;

	/* Restore the position in the lists and verify
	 * access is still allowed in case a cache update
	 * occurred while attempting to destroy.
	 *
	 * Because these settings were listed already
	 * and removed during ib_destroy_qp_security_begin
	 * we know the pkey_index_qp_list for the PKey
	 * already exists so port_pkey_list_insert won't fail.
	 */
	if (sec->ports_pkeys) {
		port_pkey_list_insert(&sec->ports_pkeys->main);
		port_pkey_list_insert(&sec->ports_pkeys->alt);
	}

	ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
	if (ret)
		qp_to_error(sec);

	mutex_unlock(&sec->mutex);
}
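/* Finish tearing down security after the QP has been destroyed: wait
 * for any error-flow work that still references this structure, then
 * free it.
 */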
void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
	int i;

	/* Return if not IB */
	if (!sec)
		return;

	/* If a concurrent cache update is occurring we must
	 * wait until this QP security structure is processed
	 * in the QP to error flow before destroying it because
	 * the to_error_list is in use.
	 */
	for (i = 0; i < sec->error_comps_pending; i++)
		wait_for_completion(&sec->error_complete);

	destroy_qp_security(sec);
}
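/* Re-check every QP listed under each pkey index of the port against
 * the LSM after the cached pkey table or subnet prefix has changed.
 */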
void ib_security_cache_change(struct ib_device *device,
			      u8 port_num,
			      u64 subnet_prefix)
{
	struct pkey_index_qp_list *pkey;

	list_for_each_entry(pkey,
			    &device->port_pkey_list[port_num].pkey_list,
			    pkey_index_list) {
		check_pkey_qps(pkey,
			       device,
			       port_num,
			       subnet_prefix);
	}
}
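/* Free every pkey_index_qp_list entry on every port of the device. */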
void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
	struct pkey_index_qp_list *pkey, *tmp_pkey;
	int i;

	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
		spin_lock(&device->port_pkey_list[i].list_lock);
		list_for_each_entry_safe(pkey,
					 tmp_pkey,
					 &device->port_pkey_list[i].pkey_list,
					 pkey_index_list) {
			list_del(&pkey->pkey_index_list);
			kfree(pkey);
		}
		spin_unlock(&device->port_pkey_list[i].list_lock);
	}
}
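/* Security-aware wrapper around the device's modify_qp.  For non-special
 * QPs whose port, pkey index or alternate path is changing, the QP is
 * listed under the new settings and checked against the LSM before the
 * hardware modify; afterwards the new settings are either committed or
 * rolled back depending on whether the modify succeeded.
 */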
int ib_security_modify_qp(struct ib_qp *qp,
			  struct ib_qp_attr *qp_attr,
			  int qp_attr_mask,
			  struct ib_udata *udata)
{
	int ret = 0;
	struct ib_ports_pkeys *tmp_pps;
	struct ib_ports_pkeys *new_pps = NULL;
	struct ib_qp *real_qp = qp->real_qp;
	bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
			   real_qp->qp_type == IB_QPT_GSI ||
			   real_qp->qp_type >= IB_QPT_RESERVED1);
	bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
			   (qp_attr_mask & IB_QP_ALT_PATH));

	WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
		   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
		   !real_qp->qp_sec),
		  "%s: QP security is not initialized for IB QP: %d\n",
		  __func__, real_qp->qp_num);

	/* The port/pkey settings are maintained only for the real QP. Open
	 * handles on the real QP will be in the shared_qp_list. When
	 * enforcing security on the real QP all the shared QPs will be
	 * checked as well.
	 */

	if (pps_change && !special_qp && real_qp->qp_sec) {
		mutex_lock(&real_qp->qp_sec->mutex);
		new_pps = get_new_pps(real_qp,
				      qp_attr,
				      qp_attr_mask);
		if (!new_pps) {
			mutex_unlock(&real_qp->qp_sec->mutex);
			return -ENOMEM;
		}
		/* Add this QP to the lists for the new port
		 * and pkey settings before checking for permission
		 * in case there is a concurrent cache update
		 * occurring.  Walking the list for a cache change
		 * doesn't acquire the security mutex unless it's
		 * sending the QP to error.
		 */
		ret = port_pkey_list_insert(&new_pps->main);

		if (!ret)
			ret = port_pkey_list_insert(&new_pps->alt);

		if (!ret)
			ret = check_qp_port_pkey_settings(new_pps,
							  real_qp->qp_sec);
	}

	if (!ret)
		ret = real_qp->device->modify_qp(real_qp,
						 qp_attr,
						 qp_attr_mask,
						 udata);

	if (new_pps) {
		/* Clean up the lists and free the appropriate
		 * ports_pkeys structure.
		 */
		if (ret) {
			tmp_pps = new_pps;
		} else {
			tmp_pps = real_qp->qp_sec->ports_pkeys;
			real_qp->qp_sec->ports_pkeys = new_pps;
		}

		if (tmp_pps) {
			port_pkey_list_remove(&tmp_pps->main);
			port_pkey_list_remove(&tmp_pps->alt);
		}
		kfree(tmp_pps);
		mutex_unlock(&real_qp->qp_sec->mutex);
	}
	return ret;
}
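/* Resolve pkey_index on port_num to a pkey value and subnet prefix and
 * ask the LSM whether access is allowed.  Non-IB ports are always
 * permitted.
 */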
static int ib_security_pkey_access(struct ib_device *dev,
				   u8 port_num,
				   u16 pkey_index,
				   void *sec)
{
	u64 subnet_prefix;
	u16 pkey;
	int ret;

	if (!rdma_protocol_ib(dev, port_num))
		return 0;

	ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
	if (ret)
		return ret;

	ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);

	if (ret)
		return ret;

	return security_ib_pkey_access(sec, subnet_prefix, pkey);
}
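/* LSM policy-change notifier: re-evaluate whether the agent may manage
 * the subnet and cache the result in smp_allowed.
 */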
static int ib_mad_agent_security_change(struct notifier_block *nb,
					unsigned long event,
					void *data)
{
	struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
							     ag->device->name,
							     ag->port_num);

	return NOTIFY_OK;
}
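/* Allocate the MAD agent's LSM security blob.  For SMI agents also
 * check subnet-management permission and register for LSM policy
 * changes so smp_allowed tracks the current policy.
 */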
int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
				enum ib_qp_type qp_type)
{
	int ret;

	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return 0;

	ret = security_ib_alloc_security(&agent->security);
	if (ret)
		return ret;

	if (qp_type != IB_QPT_SMI)
		return 0;

	ret = security_ib_endport_manage_subnet(agent->security,
						agent->device->name,
						agent->port_num);
	if (ret)
		return ret;

	agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
	ret = register_lsm_notifier(&agent->lsm_nb);
	if (ret)
		return ret;

	agent->smp_allowed = true;
	agent->lsm_nb_reg = true;
	return 0;
}
void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
	if (!rdma_protocol_ib(agent->device, agent->port_num))
		return;

	security_ib_free_security(agent->security);
	if (agent->lsm_nb_reg)
		unregister_lsm_notifier(&agent->lsm_nb);
}
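/* Enforce MAD security for the agent: SMI agents rely on the cached
 * smp_allowed decision, all other agents are checked against the pkey
 * at pkey_index.
 */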
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
		return 0;

	if (map->agent.qp->qp_type == IB_QPT_SMI) {
		if (!map->agent.smp_allowed)
			return -EACCES;
		return 0;
	}

	return ib_security_pkey_access(map->agent.device,
				       map->agent.port_num,
				       pkey_index,
				       map->agent.security);
}