drivers/gpu/drm/etnaviv/etnaviv_mmu.c

/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

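/*
 * Fault handler registered with the IOMMU core in etnaviv_iommu_new();
 * it only logs the faulting IOVA and access flags.
 */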
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

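/*
 * Map a scatterlist into the GPU address space: each DMA segment is mapped
 * at consecutive device addresses starting at @iova.  If any iommu_map()
 * call fails, everything mapped so far is unmapped again before returning
 * the error.
 */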
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

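/*
 * Undo the mappings created by etnaviv_iommu_map() for the given
 * scatterlist, walking it segment by segment.
 */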
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

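/*
 * Unmap a GEM object's pages from the GPU address space and return the
 * address range to the drm_mm range allocator.
 */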
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

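/*
 * Find a free range of @size bytes in the GPU address space and store it in
 * @node.  Allocation starts at mmu->last_iova; on -ENOSPC the search is
 * retried from the start of the aperture, and if that also fails, unpinned
 * mappings are reaped via the drm_mm eviction scan to make room.
 */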
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
	struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, size, 0, 0);

		found = 0;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent the mapping_get finding
		 * this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	return ret;
}

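/*
 * Map a GEM object into the GPU address space and record the mapping on the
 * MMU's mapping list.  With the v1 MMU, a single-entry (contiguous)
 * scatterlist that fits below the 2 GiB linear window is used directly as an
 * offset from memory_base, avoiding a page table mapping altogether.
 */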
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}

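/*
 * Remove a GEM object's mapping from the GPU address space.  The caller must
 * have dropped all uses of the mapping (mapping->use == 0).
 */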
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}

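/* Tear down the address space manager and free the IOMMU domain. */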
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

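/*
 * Allocate and initialize the per-GPU MMU context: pick the v1 or v2 page
 * table implementation based on the MMU_VERSION feature bit, set up the
 * drm_mm range allocator over the domain's aperture and register the fault
 * handler.
 */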
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
		    mmu->domain->geometry.aperture_end -
		      mmu->domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

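/* (Re)program the GPU MMU state, dispatching to the v1 or v2 implementation. */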
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

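/*
 * Return the GPU-visible address of a command buffer.  With the v1 MMU,
 * command buffers are addressed by their physical address relative to
 * memory_base; with the v2 MMU they are mapped through the MMU on first use
 * and kept SZ_64K apart (see the GC3000 note below).
 */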
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
				struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		return buf->paddr - gpu->memory_base;
	} else {
		int ret;

		if (buf->vram_node.allocated)
			return (u32)buf->vram_node.start;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
					      buf->size + SZ_64K);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return 0;
		}
		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
				buf->size, IOMMU_READ);
		if (ret < 0) {
			drm_mm_remove_node(&buf->vram_node);
			mutex_unlock(&mmu->lock);
			return 0;
		}
		/*
		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
		 * entries. Make sure to space the command buffers out in a way
		 * that the FE MMU prefetch won't load invalid entries.
		 */
		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		return (u32)buf->vram_node.start;
	}
}

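/*
 * Release the MMU-side mapping of a command buffer created by
 * etnaviv_iommu_get_cmdbuf_va().  Only the v2 MMU maps command buffers, so
 * there is nothing to do for v1.
 */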
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
				 struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
		mutex_lock(&mmu->lock);
		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
		drm_mm_remove_node(&buf->vram_node);
		mutex_unlock(&mmu->lock);
	}
}

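/*
 * The dump helpers forward to the page table implementation behind the
 * iommu_domain ops to size and write out a snapshot of the MMU state.
 */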
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
	return ops->dump_size(iommu->domain);
}

void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);
	ops->dump(iommu->domain, buf);
}