// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_tzmem.h"

struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);
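
/*
 * Every buffer handed out by qcom_tzmem_alloc() is tracked in the global
 * qcom_tzmem_chunks radix tree, keyed by its kernel virtual address, so
 * that qcom_tzmem_free() and qcom_tzmem_to_phys() can recover the owning
 * pool from the address alone. The tree is protected by
 * qcom_tzmem_chunks_lock.
 */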

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>
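
/*
 * Assumption based on how qcom_tzmem_init_area() packs the word below:
 * the number of VM permission entries appears to be encoded starting at
 * bit 9 of the size-and-flags SCM argument, and this driver always
 * passes a single entry (the HLOS VM).
 */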
#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT 9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known to not support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc8180x",
	"qcom,sdm670", /* failure in GPU firmware loading */
	"qcom,sdm845", /* reset in rmtfs memory assignment */
	"qcom,sm8150", /* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable();
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = (u64)area->paddr | QCOM_SCM_PERM_RW;
	size_and_flags = area->size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_scm_shm_bridge_create(qcom_tzmem_dev, pfn_and_ns_perm,
					 ipfn_and_s_perm, size_and_flags,
					 QCOM_SCM_VMID_HLOS, handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	if (!qcom_tzmem_using_shm_bridge)
		return;

	qcom_scm_shm_bridge_delete(qcom_tzmem_dev, *handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */
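
/*
 * The allocator below relies on the scope-based cleanup helpers from
 * <linux/cleanup.h>: pointers annotated with __free(kfree) are freed
 * automatically when they go out of scope unless ownership is transferred
 * with no_free_ptr() or return_ptr(), and scoped_guard()/guard() take a
 * lock for the duration of a block or the remainder of the scope.
 */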

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	area = NULL;

	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
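
/*
 * Illustrative sketch, not part of this file: a client driver would
 * typically create a pool along these lines (the policy and sizes are
 * example values only):
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.max_size = SZ_256K,
 *	};
 *
 *	struct qcom_tzmem_pool *pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */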

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called if any of the allocated chunks has not been freed.
 * Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
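
/*
 * Illustrative sketch, not part of this file: in a driver's probe()
 * callback the managed variant ties the pool's lifetime to the device,
 * so no explicit qcom_tzmem_pool_free() is needed on error or unbind:
 *
 *	pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */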

static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}
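
/*
 * Worked example of the policies above: with QCOM_TZMEM_POLICY_MULTIPLIER
 * and increment == 2, a pool currently backed by 64K will try to add a
 * 128K area regardless of the size of the failed request, while
 * QCOM_TZMEM_POLICY_ON_DEMAND adds exactly the requested size and
 * QCOM_TZMEM_POLICY_STATIC never grows.
 */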

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);
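
/*
 * Illustrative sketch, not part of this file: a typical allocation
 * lifecycle, assuming a pool created as in the earlier example:
 *
 *	void *buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	phys_addr_t phys = qcom_tzmem_to_phys(buf);
 *	// ... hand phys to the secure world via an SCM call ...
 *
 *	qcom_tzmem_free(buf);
 */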

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Returns:
 * Physical address mapped from the virtual or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
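
/*
 * Note that qcom_tzmem_to_phys() walks every live chunk under the global
 * spinlock until one of the owning genpools resolves the address, so the
 * lookup is linear in the number of outstanding allocations.
 */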

int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_LICENSE("GPL");