// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, NVIDIA Corporation.
 */

#include <linux/device.h>
#include <linux/kref.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pid.h>
#include <linux/slab.h>

#include "context.h"
#include "dev.h"

static void host1x_memory_context_release(struct device *dev)
{
	/* context device is freed in host1x_memory_context_list_free() */
}

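/*
 * Scan the "iommu-map" DT property of the host1x node and create one context
 * device per entry. Each context device gets its own IOMMU stream ID, which
 * allows buffer mappings of different clients to be isolated from each other.
 */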
int host1x_memory_context_list_init(struct host1x *host1x)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct device_node *node = host1x->dev->of_node;
	struct host1x_memory_context *ctx;
	unsigned int i;
	int err;

	cdl->devs = NULL;
	cdl->len = 0;
	mutex_init(&cdl->lock);

	err = of_property_count_u32_elems(node, "iommu-map");
	if (err < 0)
		return 0;

	/* each four-cell "iommu-map" entry describes one context */
	cdl->len = err / 4;
	cdl->devs = kcalloc(cdl->len, sizeof(*cdl->devs), GFP_KERNEL);
	if (!cdl->devs)
		return -ENOMEM;

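	/*
	 * Register a struct device for each context. of_dma_configure_id()
	 * below uses the loop index as the ID into "iommu-map", so each
	 * device ends up with its own IOMMU mapping.
	 */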
	for (i = 0; i < cdl->len; i++) {
		ctx = &cdl->devs[i];

		ctx->host = host1x;

		device_initialize(&ctx->dev);

		/*
		 * Due to an issue with T194 NVENC, only 38 bits can be used.
		 * Anyway, 256GiB of IOVA ought to be enough for anyone.
		 */
		ctx->dma_mask = DMA_BIT_MASK(38);
		ctx->dev.dma_mask = &ctx->dma_mask;
		ctx->dev.coherent_dma_mask = ctx->dma_mask;
		dev_set_name(&ctx->dev, "host1x-ctx.%d", i);
		ctx->dev.bus = &host1x_context_device_bus_type;
		ctx->dev.parent = host1x->dev;
		ctx->dev.release = host1x_memory_context_release;

		ctx->dev.dma_parms = &ctx->dma_parms;
		dma_set_max_seg_size(&ctx->dev, UINT_MAX);

		err = device_add(&ctx->dev);
		if (err) {
			dev_err(host1x->dev, "could not add context device %d: %d\n", i, err);
			put_device(&ctx->dev);
			goto unreg_devices;
		}

		err = of_dma_configure_id(&ctx->dev, node, true, &i);
		if (err) {
			dev_err(host1x->dev, "IOMMU configuration failed for context device %d: %d\n",
				i, err);
			device_unregister(&ctx->dev);
			goto unreg_devices;
		}

		if (!tegra_dev_iommu_get_stream_id(&ctx->dev, &ctx->stream_id) ||
		    !device_iommu_mapped(&ctx->dev)) {
			dev_err(host1x->dev, "Context device %d has no IOMMU!\n", i);
			device_unregister(&ctx->dev);

			/*
			 * This means that if IOMMU is disabled but context devices
			 * are defined in the device tree, Host1x will fail to probe.
			 * That's probably OK in this time and age.
			 */
			err = -EINVAL;

			goto unreg_devices;
		}
	}

	return 0;

unreg_devices:
	/* the entry that failed was already cleaned up above; unwind the rest */
	while (i--)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->devs = NULL;
	cdl->len = 0;

	return err;
}

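/*
 * Tear down every context device created by host1x_memory_context_list_init()
 * and release the backing array.
 */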
void host1x_memory_context_list_free(struct host1x_memory_context_list *cdl)
{
	unsigned int i;

	for (i = 0; i < cdl->len; i++)
		device_unregister(&cdl->devs[i].dev);

	kfree(cdl->devs);
	cdl->len = 0;
}

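/*
 * Find a context device for a client: reuse the context already owned by
 * @pid if there is one (taking another reference), otherwise hand out a free
 * context that sits behind the same IOMMU as @dev. Returns -EOPNOTSUPP when
 * no context devices exist and -EBUSY when all of them are taken.
 */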
struct host1x_memory_context *host1x_memory_context_alloc(struct host1x *host1x,
							   struct device *dev,
							   struct pid *pid)
{
	struct host1x_memory_context_list *cdl = &host1x->context_list;
	struct host1x_memory_context *free = NULL;
	int i;

	if (!cdl->len)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&cdl->lock);

	for (i = 0; i < cdl->len; i++) {
		struct host1x_memory_context *cd = &cdl->devs[i];

		/* only consider contexts behind the same IOMMU as the client */
		if (cd->dev.iommu->iommu_dev != dev->iommu->iommu_dev)
			continue;

		if (cd->owner == pid) {
			refcount_inc(&cd->ref);
			mutex_unlock(&cdl->lock);
			return cd;
		} else if (!cd->owner && !free) {
			free = cd;
		}
	}

	if (!free) {
		mutex_unlock(&cdl->lock);
		return ERR_PTR(-EBUSY);
	}

	refcount_set(&free->ref, 1);
	free->owner = get_pid(pid);

	mutex_unlock(&cdl->lock);

	return free;
}
EXPORT_SYMBOL_GPL(host1x_memory_context_alloc);

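/* Take an additional reference on an already allocated context. */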
void host1x_memory_context_get(struct host1x_memory_context *cd)
{
	refcount_inc(&cd->ref);
}
EXPORT_SYMBOL_GPL(host1x_memory_context_get);

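/*
 * Drop a reference; once the last one is gone the owning PID is released and
 * the context becomes available to host1x_memory_context_alloc() again.
 */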
void host1x_memory_context_put(struct host1x_memory_context *cd)
{
	struct host1x_memory_context_list *cdl = &cd->host->context_list;

	if (refcount_dec_and_mutex_lock(&cd->ref, &cdl->lock)) {
		put_pid(cd->owner);
		cd->owner = NULL;
		mutex_unlock(&cdl->lock);
	}
}
EXPORT_SYMBOL_GPL(host1x_memory_context_put);
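
/*
 * Example of how a client driver might use this API (a sketch only; the
 * "engine" identifier below is illustrative and not part of this file):
 *
 *	struct host1x_memory_context *ctx;
 *
 *	ctx = host1x_memory_context_alloc(host1x, engine->dev, task_pid(current));
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *
 *	(map buffers against &ctx->dev and program ctx->stream_id into the
 *	 engine before submitting work)
 *
 *	host1x_memory_context_put(ctx);
 */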