Linux 4.19.133
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

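/*
 * Unmap an iova range from a GPU MMU domain one 4K page at a time.
 * Both iova and size must be page aligned; unaligned requests are
 * rejected with an error message.
 */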
static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
                                 unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        size_t pgsize = SZ_4K;

        if (!IS_ALIGNED(iova | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
                       iova, size, pgsize);
                return;
        }

        while (unmapped < size) {
                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                iova += unmapped_page;
                unmapped += unmapped_page;
        }
}

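/*
 * Map a physically contiguous range into a GPU MMU domain in 4K pages.
 * On failure the pages mapped so far are unrolled again, leaving the
 * domain unchanged.
 */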
static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
                              unsigned long iova, phys_addr_t paddr,
                              size_t size, int prot)
{
        unsigned long orig_iova = iova;
        size_t pgsize = SZ_4K;
        size_t orig_size = size;
        int ret = 0;

        if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
                       iova, &paddr, size, pgsize);
                return -EINVAL;
        }

        while (size) {
                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                etnaviv_domain_unmap(domain, orig_iova, orig_size - size);

        return ret;
}

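/*
 * Map each scatterlist entry at consecutive device addresses starting at
 * iova. If any entry fails to map, the entries mapped so far are torn
 * down again before the error is returned.
 */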
static int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
                             struct sg_table *sgt, unsigned len, int prot)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        unsigned int i, j;
        int ret;

        if (!domain || !sgt)
                return -EINVAL;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                u32 pa = sg_dma_address(sg) - sg->offset;
                size_t bytes = sg_dma_len(sg) + sg->offset;

                VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

                ret = etnaviv_domain_map(domain, da, pa, bytes, prot);
                if (ret)
                        goto fail;

                da += bytes;
        }

        return 0;

fail:
        da = iova;

        for_each_sg(sgt->sgl, sg, i, j) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);
                da += bytes;
        }
        return ret;
}

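/* Unmap a scatterlist previously mapped with etnaviv_iommu_map(). */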
static void etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
                                struct sg_table *sgt, unsigned len)
{
        struct etnaviv_iommu_domain *domain = iommu->domain;
        struct scatterlist *sg;
        unsigned int da = iova;
        int i;

        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                size_t bytes = sg_dma_len(sg) + sg->offset;

                etnaviv_domain_unmap(domain, da, bytes);

                VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

                BUG_ON(!PAGE_ALIGNED(bytes));

                da += bytes;
        }
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        struct etnaviv_gem_object *etnaviv_obj = mapping->object;

        etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
                            etnaviv_obj->sgt, etnaviv_obj->base.size);
        drm_mm_remove_node(&mapping->vram_node);
}

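/*
 * Find free space in the MMU address space for a node of the given size.
 * If the plain allocation fails with -ENOSPC, use the drm_mm scan/evict
 * machinery to reap unpinned mappings until enough contiguous space is
 * available, then retry in eviction mode. Caller must hold mmu->lock.
 */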
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                                   struct drm_mm_node *node, size_t size)
{
        struct etnaviv_vram_mapping *free = NULL;
        enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
        int ret;

        lockdep_assert_held(&mmu->lock);

        while (1) {
                struct etnaviv_vram_mapping *m, *n;
                struct drm_mm_scan scan;
                struct list_head list;
                bool found;

                ret = drm_mm_insert_node_in_range(&mmu->mm, node,
                                                  size, 0, 0, 0, U64_MAX, mode);
                if (ret != -ENOSPC)
                        break;

                /* Try to retire some entries */
                drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode);

                found = 0;
                INIT_LIST_HEAD(&list);
                list_for_each_entry(free, &mmu->mappings, mmu_node) {
                        /* If this vram node has not been used, skip this. */
                        if (!free->vram_node.mm)
                                continue;

                        /*
                         * If the iova is pinned, then it's in-use,
                         * so we must keep its mapping.
                         */
                        if (free->use)
                                continue;

                        list_add(&free->scan_node, &list);
                        if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        /* Nothing found, clean up and fail */
                        list_for_each_entry_safe(m, n, &list, scan_node)
                                BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
                        break;
                }

                /*
                 * drm_mm does not allow any other operations while
                 * scanning, so we have to remove all blocks first.
                 * If drm_mm_scan_remove_block() returns false, we
                 * can leave the block pinned.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node)
                        if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
                                list_del_init(&m->scan_node);

                /*
                 * Unmap the blocks which need to be reaped from the MMU.
                 * Clear the mmu pointer to prevent the mapping_get finding
                 * this mapping.
                 */
                list_for_each_entry_safe(m, n, &list, scan_node) {
                        etnaviv_iommu_remove_mapping(mmu, m);
                        m->mmu = NULL;
                        list_del_init(&m->mmu_node);
                        list_del_init(&m->scan_node);
                }

                mode = DRM_MM_INSERT_EVICT;

                /*
                 * We removed enough mappings so that the new allocation will
                 * succeed, retry the allocation one more time.
                 */
        }

        return ret;
}

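/*
 * Set up a GPU mapping for a GEM object. A single-entry (contiguous)
 * scatterlist on a version 1 MMU is used directly via its offset against
 * memory_base, provided it falls below 2 GiB; otherwise an iova range is
 * allocated and the scatterlist is mapped into it read/write.
 */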
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
        struct etnaviv_vram_mapping *mapping)
{
        struct sg_table *sgt = etnaviv_obj->sgt;
        struct drm_mm_node *node;
        int ret;

        lockdep_assert_held(&etnaviv_obj->lock);

        mutex_lock(&mmu->lock);

        /* v1 MMU can optimize single entry (contiguous) scatterlists */
        if (mmu->version == ETNAVIV_IOMMU_V1 &&
            sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
                u32 iova;

                iova = sg_dma_address(sgt->sgl) - memory_base;
                if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
                        mapping->iova = iova;
                        list_add_tail(&mapping->mmu_node, &mmu->mappings);
                        ret = 0;
                        goto unlock;
                }
        }

        node = &mapping->vram_node;

        ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
        if (ret < 0)
                goto unlock;

        mapping->iova = node->start;
        ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
                                ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

        if (ret < 0) {
                drm_mm_remove_node(node);
                goto unlock;
        }

        list_add_tail(&mapping->mmu_node, &mmu->mappings);
        mmu->flush_seq++;
unlock:
        mutex_unlock(&mmu->lock);

        return ret;
}

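/*
 * Tear down the GPU mapping of a GEM object and drop it from the mapping
 * list. The mapping must no longer be in use.
 */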
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
        struct etnaviv_vram_mapping *mapping)
{
        WARN_ON(mapping->use);

        mutex_lock(&mmu->lock);

        /* If the vram node is on the mm, unmap and remove the node */
        if (mapping->vram_node.mm == &mmu->mm)
                etnaviv_iommu_remove_mapping(mmu, mapping);

        list_del(&mapping->mmu_node);
        mmu->flush_seq++;
        mutex_unlock(&mmu->lock);
}

void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
        drm_mm_takedown(&mmu->mm);
        mmu->domain->ops->free(mmu->domain);
        kfree(mmu);
}

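/*
 * Allocate and initialise the MMU context for a GPU, selecting the v1 or v2
 * page table backend based on the MMU_VERSION minor feature bit.
 */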
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
        enum etnaviv_iommu_version version;
        struct etnaviv_iommu *mmu;

        mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
                mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V1;
        } else {
                mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
                version = ETNAVIV_IOMMU_V2;
        }

        if (!mmu->domain) {
                dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
                kfree(mmu);
                return ERR_PTR(-ENOMEM);
        }

        mmu->gpu = gpu;
        mmu->version = version;
        mutex_init(&mmu->lock);
        INIT_LIST_HEAD(&mmu->mappings);

        drm_mm_init(&mmu->mm, mmu->domain->base, mmu->domain->size);

        return mmu;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                etnaviv_iommuv1_restore(gpu);
        else
                etnaviv_iommuv2_restore(gpu);
}

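/*
 * Resolve a GPU visible address for a suballocated buffer. On a v1 MMU this
 * is just the offset against memory_base; on v2 an iova range is allocated
 * and the buffer is mapped read-only.
 */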
int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                                  struct drm_mm_node *vram_node, size_t size,
                                  u32 *iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V1) {
                *iova = paddr - gpu->memory_base;
                return 0;
        } else {
                int ret;

                mutex_lock(&mmu->lock);
                ret = etnaviv_iommu_find_iova(mmu, vram_node, size);
                if (ret < 0) {
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                ret = etnaviv_domain_map(mmu->domain, vram_node->start, paddr,
                                         size, ETNAVIV_PROT_READ);
                if (ret < 0) {
                        drm_mm_remove_node(vram_node);
                        mutex_unlock(&mmu->lock);
                        return ret;
                }
                mmu->flush_seq++;
                mutex_unlock(&mmu->lock);

                *iova = (u32)vram_node->start;
                return 0;
        }
}

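/* Release a suballocation mapping set up by etnaviv_iommu_get_suballoc_va(). */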
void etnaviv_iommu_put_suballoc_va(struct etnaviv_gpu *gpu,
                                   struct drm_mm_node *vram_node, size_t size,
                                   u32 iova)
{
        struct etnaviv_iommu *mmu = gpu->mmu;

        if (mmu->version == ETNAVIV_IOMMU_V2) {
                mutex_lock(&mmu->lock);
                etnaviv_domain_unmap(mmu->domain, iova, size);
                drm_mm_remove_node(vram_node);
                mutex_unlock(&mmu->lock);
        }
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
        return iommu->domain->ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
        iommu->domain->ops->dump(iommu->domain, buf);
}