/*
 * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/security.h>
#include <linux/completion.h>
#include <linux/list.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
#include "mad_priv.h"

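/* This file enforces LSM PKey-access decisions for IB QPs and MAD agents.
 * For every device port it keeps a list of pkey_index_qp_list entries, one
 * per PKey table index in use; each entry holds the ib_port_pkey structures
 * of the QPs configured with that index, so the checks can be repeated
 * whenever the cached PKey table or subnet prefix changes.
 */
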
static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey = NULL;
        struct pkey_index_qp_list *tmp_pkey;
        struct ib_device *dev = pp->sec->dev;

        spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
        list_for_each_entry(tmp_pkey,
                            &dev->port_pkey_list[pp->port_num].pkey_list,
                            pkey_index_list) {
                if (tmp_pkey->pkey_index == pp->pkey_index) {
                        pkey = tmp_pkey;
                        break;
                }
        }
        spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
        return pkey;
}

static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
                                      u16 *pkey,
                                      u64 *subnet_prefix)
{
        struct ib_device *dev = pp->sec->dev;
        int ret;

        ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);

        return ret;
}

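/* Ask the LSM whether the given (subnet_prefix, pkey) pair may be used by
 * this QP's security context and by the contexts of every QP that shares it
 * (the entries on its shared_qp_list). A single denial fails the check.
 */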
static int enforce_qp_pkey_security(u16 pkey,
                                    u64 subnet_prefix,
                                    struct ib_qp_security *qp_sec)
{
        struct ib_qp_security *shared_qp_sec;
        int ret;

        ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
        if (ret)
                return ret;

        list_for_each_entry(shared_qp_sec,
                            &qp_sec->shared_qp_list,
                            shared_qp_list) {
                ret = security_ib_pkey_access(shared_qp_sec->security,
                                              subnet_prefix,
                                              pkey);
                if (ret)
                        return ret;
        }
        return 0;
}

/* The caller of this function must hold the QP security
 * mutex of the QP of the security structure in *pps.
 *
 * It takes separate ports_pkeys and security structures
 * because in some cases the pps will be for new settings,
 * or the pps will be for the real QP while the security
 * structure will be for a shared QP.
 */
static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
                                       struct ib_qp_security *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret = 0;

        if (!pps)
                return 0;

        if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->main,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
                if (ret)
                        return ret;
        }

        if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
                ret = get_pkey_and_subnet_prefix(&pps->alt,
                                                 &pkey,
                                                 &subnet_prefix);
                if (ret)
                        return ret;

                ret = enforce_qp_pkey_security(pkey,
                                               subnet_prefix,
                                               sec);
        }

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}

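/* Re-check every QP tracked under one pkey_index_qp_list entry after a
 * cache change. Offending QPs are first collected on a local to_error_list
 * under the qp_list_lock, then each is moved to the error state under its
 * own security mutex; if a QP is being destroyed, qp_to_error() leaves it
 * alone and the destroyer is signalled through error_complete instead.
 */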
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static void port_pkey_list_remove(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *pkey;

        if (pp->state != IB_PORT_PKEY_LISTED)
                return;

        pkey = get_pkey_idx_qp_list(pp);

        spin_lock(&pkey->qp_list_lock);
        list_del(&pp->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        /* The setting may still be valid, e.g. after
         * a destroy has failed.
         */
        pp->state = IB_PORT_PKEY_VALID;
}

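/* Release everything owned by a QP security structure: the LSM blob, the
 * cached port/pkey settings and the structure itself.
 */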
static void destroy_qp_security(struct ib_qp_security *sec)
{
        security_ib_free_security(sec->security);
        kfree(sec->ports_pkeys);
        kfree(sec);
}

/* The caller of this function must hold the QP security
 * mutex.
 */
static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
                                          const struct ib_qp_attr *qp_attr,
                                          int qp_attr_mask)
{
        struct ib_ports_pkeys *new_pps;
        struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;

        new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
        if (!new_pps)
                return NULL;

        if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
                if (!qp_pps) {
                        new_pps->main.port_num = qp_attr->port_num;
                        new_pps->main.pkey_index = qp_attr->pkey_index;
                } else {
                        new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
                                                  qp_attr->port_num :
                                                  qp_pps->main.port_num;

                        new_pps->main.pkey_index =
                                        (qp_attr_mask & IB_QP_PKEY_INDEX) ?
                                         qp_attr->pkey_index :
                                         qp_pps->main.pkey_index;
                }
                new_pps->main.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->main.port_num = qp_pps->main.port_num;
                new_pps->main.pkey_index = qp_pps->main.pkey_index;
                if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->main.state = IB_PORT_PKEY_VALID;
        }

        if (qp_attr_mask & IB_QP_ALT_PATH) {
                new_pps->alt.port_num = qp_attr->alt_port_num;
                new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
                new_pps->alt.state = IB_PORT_PKEY_VALID;
        } else if (qp_pps) {
                new_pps->alt.port_num = qp_pps->alt.port_num;
                new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
                if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
                        new_pps->alt.state = IB_PORT_PKEY_VALID;
        }

        new_pps->main.sec = qp->qp_sec;
        new_pps->alt.sec = qp->qp_sec;
        return new_pps;
}

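/* Opening a shared handle to an existing (real) QP creates a security
 * structure for the handle as well. The handle is checked against the real
 * QP's current port/pkey settings and, on success, linked on the real QP's
 * shared_qp_list so that future checks cover it too.
 */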
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        if (!qp->qp_sec)
                return 0;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}

void ib_close_shared_qp_security(struct ib_qp_security *sec)
{
        struct ib_qp *real_qp = sec->qp->real_qp;

        mutex_lock(&real_qp->qp_sec->mutex);
        list_del(&sec->shared_qp_list);
        mutex_unlock(&real_qp->qp_sec->mutex);

        destroy_qp_security(sec);
}

int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        u8 i = rdma_start_port(dev);
        bool is_ib = false;
        int ret;

        while (i <= rdma_end_port(dev) && !is_ib)
                is_ib = rdma_protocol_ib(dev, i++);

        /* If this isn't an IB device don't create the security context */
        if (!is_ib)
                return 0;

        qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
        if (!qp->qp_sec)
                return -ENOMEM;

        qp->qp_sec->qp = qp;
        qp->qp_sec->dev = dev;
        mutex_init(&qp->qp_sec->mutex);
        INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
        atomic_set(&qp->qp_sec->error_list_count, 0);
        init_completion(&qp->qp_sec->error_complete);
        ret = security_ib_alloc_security(&qp->qp_sec->security);
        if (ret) {
                kfree(qp->qp_sec);
                qp->qp_sec = NULL;
        }

        return ret;
}
EXPORT_SYMBOL(ib_create_qp_security);

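/* QP destruction uses a three-step handshake with the cache-change flow:
 * _begin() unlinks the port/pkey settings and marks the structure as
 * destroying, _abort() re-inserts and re-checks them if the destroy failed,
 * and _end() waits for any in-flight error transitions before freeing the
 * structure.
 */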
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        /* Return if not IB */
        if (!sec)
                return;

        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these settings were already listed
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}

void ib_destroy_qp_security_end(struct ib_qp_security *sec)
{
        int i;

        /* Return if not IB */
        if (!sec)
                return;

        /* If a concurrent cache update is occurring we must
         * wait until this QP security structure is processed
         * in the QP to error flow before destroying it because
         * the to_error_list is in use.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        destroy_qp_security(sec);
}

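/* Re-validate every PKey index tracked for a port after its cached PKey
 * table or subnet prefix has changed.
 */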
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}

void ib_security_destroy_port_pkey_list(struct ib_device *device)
{
        struct pkey_index_qp_list *pkey, *tmp_pkey;
        int i;

        for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
                spin_lock(&device->port_pkey_list[i].list_lock);
                list_for_each_entry_safe(pkey,
                                         tmp_pkey,
                                         &device->port_pkey_list[i].pkey_list,
                                         pkey_index_list) {
                        list_del(&pkey->pkey_index_list);
                        kfree(pkey);
                }
                spin_unlock(&device->port_pkey_list[i].list_lock);
        }
}

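/* Security-aware wrapper around the device's modify_qp. New port/pkey
 * settings are linked into the tracking lists *before* the LSM check so a
 * concurrent cache change cannot miss them; afterwards either the old
 * settings (on success) or the new ones (on failure) are unlinked and
 * freed.
 */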
int ib_security_modify_qp(struct ib_qp *qp,
                          struct ib_qp_attr *qp_attr,
                          int qp_attr_mask,
                          struct ib_udata *udata)
{
        int ret = 0;
        struct ib_ports_pkeys *tmp_pps;
        struct ib_ports_pkeys *new_pps = NULL;
        struct ib_qp *real_qp = qp->real_qp;
        bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
                           real_qp->qp_type == IB_QPT_GSI ||
                           real_qp->qp_type >= IB_QPT_RESERVED1);
        bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
                           (qp_attr_mask & IB_QP_ALT_PATH));

        WARN_ONCE((qp_attr_mask & IB_QP_PORT &&
                   rdma_protocol_ib(real_qp->device, qp_attr->port_num) &&
                   !real_qp->qp_sec),
                  "%s: QP security is not initialized for IB QP: %d\n",
                  __func__, real_qp->qp_num);

        /* The port/pkey settings are maintained only for the real QP. Open
         * handles on the real QP will be in the shared_qp_list. When
         * enforcing security on the real QP all the shared QPs will be
         * checked as well.
         */
        if (pps_change && !special_qp && real_qp->qp_sec) {
                mutex_lock(&real_qp->qp_sec->mutex);
                new_pps = get_new_pps(real_qp,
                                      qp_attr,
                                      qp_attr_mask);
                if (!new_pps) {
                        mutex_unlock(&real_qp->qp_sec->mutex);
                        return -ENOMEM;
                }
                /* Add this QP to the lists for the new port
                 * and pkey settings before checking for permission
                 * in case there is a concurrent cache update
                 * occurring.  Walking the list for a cache change
                 * doesn't acquire the security mutex unless it's
                 * sending the QP to error.
                 */
                ret = port_pkey_list_insert(&new_pps->main);

                if (!ret)
                        ret = port_pkey_list_insert(&new_pps->alt);

                if (!ret)
                        ret = check_qp_port_pkey_settings(new_pps,
                                                          real_qp->qp_sec);
        }

        if (!ret)
                ret = real_qp->device->modify_qp(real_qp,
                                                 qp_attr,
                                                 qp_attr_mask,
                                                 udata);

        if (new_pps) {
                /* Clean up the lists and free the appropriate
                 * ports_pkeys structure.
                 */
                if (ret) {
                        tmp_pps = new_pps;
                } else {
                        tmp_pps = real_qp->qp_sec->ports_pkeys;
                        real_qp->qp_sec->ports_pkeys = new_pps;
                }

                if (tmp_pps) {
                        port_pkey_list_remove(&tmp_pps->main);
                        port_pkey_list_remove(&tmp_pps->alt);
                }
                kfree(tmp_pps);
                mutex_unlock(&real_qp->qp_sec->mutex);
        }

        return ret;
}

static int ib_security_pkey_access(struct ib_device *dev,
                                   u8 port_num,
                                   u16 pkey_index,
                                   void *sec)
{
        u64 subnet_prefix;
        u16 pkey;
        int ret;

        if (!rdma_protocol_ib(dev, port_num))
                return 0;

        ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey);
        if (ret)
                return ret;

        ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix);
        if (ret)
                return ret;

        return security_ib_pkey_access(sec, subnet_prefix, pkey);
}

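/* LSM policy-change notifier: re-evaluate whether this MAD agent is still
 * allowed to manage the subnet and cache the verdict in smp_allowed.
 */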
static int ib_mad_agent_security_change(struct notifier_block *nb,
                                        unsigned long event,
                                        void *data)
{
        struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb);

        if (event != LSM_POLICY_CHANGE)
                return NOTIFY_DONE;

        ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
                                                             ag->device->name,
                                                             ag->port_num);

        return NOTIFY_OK;
}

int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
                                enum ib_qp_type qp_type)
{
        int ret;

        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return 0;

        ret = security_ib_alloc_security(&agent->security);
        if (ret)
                return ret;

        if (qp_type != IB_QPT_SMI)
                return 0;

        ret = security_ib_endport_manage_subnet(agent->security,
                                                agent->device->name,
                                                agent->port_num);
        if (ret)
                return ret;

        agent->lsm_nb.notifier_call = ib_mad_agent_security_change;
        ret = register_lsm_notifier(&agent->lsm_nb);
        if (ret)
                return ret;

        agent->smp_allowed = true;
        agent->lsm_nb_reg = true;
        return 0;
}

void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
{
        if (!rdma_protocol_ib(agent->device, agent->port_num))
                return;

        security_ib_free_security(agent->security);
        if (agent->lsm_nb_reg)
                unregister_lsm_notifier(&agent->lsm_nb);
}

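/* Per-MAD access check: SMI agents rely on the cached smp_allowed verdict,
 * while all other agents are checked against the PKey at the index actually
 * used for the MAD.
 */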
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
{
        if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
                return 0;

        if (map->agent.qp->qp_type == IB_QPT_SMI) {
                if (!map->agent.smp_allowed)
                        return -EACCES;
                return 0;
        }

        return ib_security_pkey_access(map->agent.device,
                                       map->agent.port_num,
                                       pkey_index,
                                       map->agent.security);
}