// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES
 */
#include "iommufd_private.h"
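/* Destroy callback for IOMMUFD_OBJ_VIOMMU objects */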
void iommufd_viommu_destroy(struct iommufd_object *obj)
{
	struct iommufd_viommu *viommu =
		container_of(obj, struct iommufd_viommu, obj);

	if (viommu->ops && viommu->ops->destroy)
		viommu->ops->destroy(viommu);
	refcount_dec(&viommu->hwpt->common.obj.users);
	xa_destroy(&viommu->vdevs);
}
int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_viommu_alloc *cmd = ucmd->cmd;
	struct iommufd_hwpt_paging *hwpt_paging;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	const struct iommu_ops *ops;
	int rc;

	if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
		return -EOPNOTSUPP;

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev))
		return PTR_ERR(idev);

	ops = dev_iommu_ops(idev->dev);
	if (!ops->viommu_alloc) {
		rc = -EOPNOTSUPP;
		goto out_put_idev;
	}

	hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
	if (IS_ERR(hwpt_paging)) {
		rc = PTR_ERR(hwpt_paging);
		goto out_put_idev;
	}

	if (!hwpt_paging->nest_parent) {
		rc = -EINVAL;
		goto out_put_hwpt;
	}

	viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
				   ucmd->ictx, cmd->type);
	if (IS_ERR(viommu)) {
		rc = PTR_ERR(viommu);
		goto out_put_hwpt;
	}

	xa_init(&viommu->vdevs);
	viommu->type = cmd->type;
	viommu->ictx = ucmd->ictx;
	viommu->hwpt = hwpt_paging;
	refcount_inc(&viommu->hwpt->common.obj.users);
	/*
	 * It is the most likely case that a physical IOMMU is unpluggable. A
	 * pluggable IOMMU instance (if exists) is responsible for refcounting
	 * on its own.
	 */
	viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);

	cmd->out_viommu_id = viommu->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &viommu->obj);
	goto out_put_hwpt;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &viommu->obj);
out_put_hwpt:
	iommufd_put_object(ucmd->ictx, &hwpt_paging->common.obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
	return rc;
}
void iommufd_vdevice_destroy(struct iommufd_object *obj)
{
	struct iommufd_vdevice *vdev =
		container_of(obj, struct iommufd_vdevice, obj);
	struct iommufd_viommu *viommu = vdev->viommu;

	/* xa_cmpxchg is okay to fail if alloc failed xa_cmpxchg previously */
	xa_cmpxchg(&viommu->vdevs, vdev->id, vdev, NULL, GFP_KERNEL);
	refcount_dec(&viommu->obj.users);
	put_device(vdev->dev);
}
int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd)
{
	struct iommu_vdevice_alloc *cmd = ucmd->cmd;
	struct iommufd_vdevice *vdev, *curr;
	struct iommufd_viommu *viommu;
	struct iommufd_device *idev;
	u64 virt_id = cmd->virt_id;
	int rc;

	/* virt_id indexes an xarray */
	if (virt_id > ULONG_MAX)
		return -EINVAL;

	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
	if (IS_ERR(viommu))
		return PTR_ERR(viommu);

	idev = iommufd_get_device(ucmd, cmd->dev_id);
	if (IS_ERR(idev)) {
		rc = PTR_ERR(idev);
		goto out_put_viommu;
	}

	if (viommu->iommu_dev != __iommu_get_iommu_dev(idev->dev)) {
		rc = -EINVAL;
		goto out_put_idev;
	}

	vdev = iommufd_object_alloc(ucmd->ictx, vdev, IOMMUFD_OBJ_VDEVICE);
	if (IS_ERR(vdev)) {
		rc = PTR_ERR(vdev);
		goto out_put_idev;
	}

	vdev->id = virt_id;
	vdev->dev = idev->dev;
	get_device(idev->dev);
	vdev->viommu = viommu;
	refcount_inc(&viommu->obj.users);

	curr = xa_cmpxchg(&viommu->vdevs, virt_id, NULL, vdev, GFP_KERNEL);
	if (curr) {
		rc = xa_err(curr) ?: -EEXIST;
		goto out_abort;
	}

	cmd->out_vdevice_id = vdev->obj.id;
	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
	if (rc)
		goto out_abort;
	iommufd_object_finalize(ucmd->ictx, &vdev->obj);
	goto out_put_idev;

out_abort:
	iommufd_object_abort_and_destroy(ucmd->ictx, &vdev->obj);
out_put_idev:
	iommufd_put_object(ucmd->ictx, &idev->obj);
out_put_viommu:
	iommufd_put_object(ucmd->ictx, &viommu->obj);
	return rc;
}