drivers/gpu/host1x/job.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8
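
/*
 * Allocate a job along with all of its fixed-size auxiliary tables
 * (relocations, unpin records, gathers and DMA addresses) from a single
 * allocation. The sum below is computed in 64 bits so that the overflow
 * check is valid before the size is passed to kzalloc().
 */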
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
                                    u32 num_cmdbufs, u32 num_relocs)
{
        struct host1x_job *job = NULL;
        unsigned int num_unpins = num_relocs;
        u64 total;
        void *mem;

        if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                num_unpins += num_cmdbufs;

        /* Check that we're not going to overflow */
        total = sizeof(struct host1x_job) +
                (u64)num_relocs * sizeof(struct host1x_reloc) +
                (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
                (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
                (u64)num_unpins * sizeof(dma_addr_t) +
                (u64)num_unpins * sizeof(u32 *);
        if (total > ULONG_MAX)
                return NULL;

        mem = job = kzalloc(total, GFP_KERNEL);
        if (!job)
                return NULL;

        kref_init(&job->ref);
        job->channel = ch;

        /* Redistribute memory to the structs */
        mem += sizeof(struct host1x_job);
        job->relocs = num_relocs ? mem : NULL;
        mem += num_relocs * sizeof(struct host1x_reloc);
        job->unpins = num_unpins ? mem : NULL;
        mem += num_unpins * sizeof(struct host1x_job_unpin_data);
        job->gathers = num_cmdbufs ? mem : NULL;
        mem += num_cmdbufs * sizeof(struct host1x_job_gather);
        job->addr_phys = num_unpins ? mem : NULL;

        job->reloc_addr_phys = job->addr_phys;
        job->gather_addr_phys = &job->addr_phys[num_relocs];

        return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

struct host1x_job *host1x_job_get(struct host1x_job *job)
{
        kref_get(&job->ref);
        return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
        struct host1x_job *job = container_of(ref, struct host1x_job, ref);

        kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
        kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
                           unsigned int words, unsigned int offset)
{
        struct host1x_job_gather *gather = &job->gathers[job->num_gathers];

        gather->words = words;
        gather->bo = bo;
        gather->offset = offset;

        job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);
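
/*
 * Pin every buffer object the job references: first the relocation targets
 * for the client device, then (unless the firewall copies them anyway) the
 * gather buffers for host1x itself. Each successful pin is recorded in
 * job->unpins so that host1x_job_unpin() can undo it on failure.
 */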
static int pin_job(struct host1x *host, struct host1x_job *job)
{
        struct host1x_client *client = job->client;
        struct device *dev = client->dev;
        struct host1x_job_gather *g;
        struct iommu_domain *domain;
        unsigned int i;
        int err;

        domain = iommu_get_domain_for_dev(dev);
        job->num_unpins = 0;

        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                dma_addr_t phys_addr, *phys;
                struct sg_table *sgt;

                reloc->target.bo = host1x_bo_get(reloc->target.bo);
                if (!reloc->target.bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the client device is not attached to an IOMMU, the
                 * physical address of the buffer object can be used.
                 *
                 * Similarly, when an IOMMU domain is shared between all
                 * host1x clients, the IOVA is already available, so no
                 * need to map the buffer object again.
                 *
                 * XXX Note that this isn't always safe to do because it
                 * relies on an assumption that no cache maintenance is
                 * needed on the buffer objects.
                 */
                if (!domain || client->group)
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(dev, reloc->target.bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto unpin;
                }

                if (sgt) {
                        unsigned long mask = HOST1X_RELOC_READ |
                                             HOST1X_RELOC_WRITE;
                        enum dma_data_direction dir;

                        switch (reloc->flags & mask) {
                        case HOST1X_RELOC_READ:
                                dir = DMA_TO_DEVICE;
                                break;

                        case HOST1X_RELOC_WRITE:
                                dir = DMA_FROM_DEVICE;
                                break;

                        case HOST1X_RELOC_READ | HOST1X_RELOC_WRITE:
                                dir = DMA_BIDIRECTIONAL;
                                break;

                        default:
                                err = -EINVAL;
                                goto unpin;
                        }

                        err = dma_map_sgtable(dev, sgt, dir, 0);
                        if (err)
                                goto unpin;

                        job->unpins[job->num_unpins].dev = dev;
                        job->unpins[job->num_unpins].dir = dir;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->unpins[job->num_unpins].bo = reloc->target.bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        /*
         * The contents of the gather buffers are copied later, so there is
         * no need to hold and pin them here.
         */
        if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                return 0;

        for (i = 0; i < job->num_gathers; i++) {
                size_t gather_size = 0;
                struct scatterlist *sg;
                struct sg_table *sgt;
                dma_addr_t phys_addr;
                unsigned long shift;
                struct iova *alloc;
                dma_addr_t *phys;
                unsigned int j;

                g = &job->gathers[i];
                g->bo = host1x_bo_get(g->bo);
                if (!g->bo) {
                        err = -EINVAL;
                        goto unpin;
                }

                /*
                 * If the host1x is not attached to an IOMMU, there is no need
                 * to map the buffer object for the host1x, since the physical
                 * address can simply be used.
                 */
                if (!iommu_get_domain_for_dev(host->dev))
                        phys = &phys_addr;
                else
                        phys = NULL;

                sgt = host1x_bo_pin(host->dev, g->bo, phys);
                if (IS_ERR(sgt)) {
                        err = PTR_ERR(sgt);
                        goto put;
                }

                if (host->domain) {
                        for_each_sgtable_sg(sgt, sg, j)
                                gather_size += sg->length;

                        gather_size = iova_align(&host->iova, gather_size);

                        shift = iova_shift(&host->iova);
                        alloc = alloc_iova(&host->iova, gather_size >> shift,
                                           host->iova_end >> shift, true);
                        if (!alloc) {
                                err = -ENOMEM;
                                goto put;
                        }

                        /*
                         * iommu_map_sgtable() returns the number of bytes
                         * mapped, so zero indicates failure here.
                         */
                        err = iommu_map_sgtable(host->domain,
                                        iova_dma_addr(&host->iova, alloc),
                                        sgt, IOMMU_READ);
                        if (err == 0) {
                                __free_iova(&host->iova, alloc);
                                err = -EINVAL;
                                goto put;
                        }

                        job->unpins[job->num_unpins].size = gather_size;
                        phys_addr = iova_dma_addr(&host->iova, alloc);
                } else if (sgt) {
                        err = dma_map_sgtable(host->dev, sgt, DMA_TO_DEVICE, 0);
                        if (err)
                                goto put;

                        job->unpins[job->num_unpins].dir = DMA_TO_DEVICE;
                        job->unpins[job->num_unpins].dev = host->dev;
                        phys_addr = sg_dma_address(sgt->sgl);
                }

                job->addr_phys[job->num_unpins] = phys_addr;
                job->gather_addr_phys[i] = phys_addr;

                job->unpins[job->num_unpins].bo = g->bo;
                job->unpins[job->num_unpins].sgt = sgt;
                job->num_unpins++;
        }

        return 0;

put:
        host1x_bo_put(g->bo);
unpin:
        host1x_job_unpin(job);
        return err;
}
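
/*
 * Patch every relocation that points into the given gather: the target
 * buffer's DMA address (resolved in pin_job()) is shifted and written over
 * the placeholder word in the command buffer. With the firewall enabled,
 * the patch is applied to the firewall's copy rather than to the original
 * buffer object.
 */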
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
        void *cmdbuf_addr = NULL;
        struct host1x_bo *cmdbuf = g->bo;
        unsigned int i;

        /* pin & patch the relocs for one gather */
        for (i = 0; i < job->num_relocs; i++) {
                struct host1x_reloc *reloc = &job->relocs[i];
                u32 reloc_addr = (job->reloc_addr_phys[i] +
                                  reloc->target.offset) >> reloc->shift;
                u32 *target;

                /* skip all other gathers */
                if (cmdbuf != reloc->cmdbuf.bo)
                        continue;

                if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
                        target = (u32 *)job->gather_copy_mapped +
                                        reloc->cmdbuf.offset / sizeof(u32) +
                                        g->offset / sizeof(u32);
                        goto patch_reloc;
                }

                if (!cmdbuf_addr) {
                        cmdbuf_addr = host1x_bo_mmap(cmdbuf);

                        if (unlikely(!cmdbuf_addr)) {
                                pr_err("Could not map cmdbuf for relocation\n");
                                return -ENOMEM;
                        }
                }

                target = cmdbuf_addr + reloc->cmdbuf.offset;
patch_reloc:
                *target = reloc_addr;
        }

        if (cmdbuf_addr)
                host1x_bo_munmap(cmdbuf, cmdbuf_addr);

        return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
                        unsigned int offset)
{
        offset *= sizeof(u32);

        if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
                return false;

        /* relocation shift value validation isn't implemented yet */
        if (reloc->shift)
                return false;

        return true;
}
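
/*
 * Parser state for the software firewall: the current position within a
 * gather (cmdbuf, offset, words), the class/register/mask/count decoded
 * from the most recent opcode, and the relocation entries still expected
 * to match upcoming address-register writes.
 */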
struct host1x_firewall {
        struct host1x_job *job;
        struct device *dev;

        unsigned int num_relocs;
        struct host1x_reloc *reloc;

        struct host1x_bo *cmdbuf;
        unsigned int offset;

        u32 words;
        u32 class;
        u32 reg;
        u32 mask;
        u32 count;
};

static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
        if (!fw->job->is_addr_reg)
                return 0;

        if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
                if (!fw->num_relocs)
                        return -EINVAL;

                if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
                        return -EINVAL;

                fw->num_relocs--;
                fw->reloc++;
        }

        return 0;
}

static int check_class(struct host1x_firewall *fw, u32 class)
{
        if (!fw->job->is_valid_class) {
                if (fw->class != class)
                        return -EINVAL;
        } else {
                if (!fw->job->is_valid_class(fw->class))
                        return -EINVAL;
        }

        return 0;
}

static int check_mask(struct host1x_firewall *fw)
{
        u32 mask = fw->mask;
        u32 reg = fw->reg;
        int ret;

        while (mask) {
                if (fw->words == 0)
                        return -EINVAL;

                if (mask & 1) {
                        ret = check_register(fw, reg);
                        if (ret < 0)
                                return ret;

                        fw->words--;
                        fw->offset++;
                }

                mask >>= 1;
                reg++;
        }

        return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        u32 reg = fw->reg;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, reg);
                if (ret < 0)
                        return ret;

                reg++;
                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
        u32 count = fw->count;
        int ret;

        while (count) {
                if (fw->words == 0)
                        return -EINVAL;

                ret = check_register(fw, fw->reg);
                if (ret < 0)
                        return ret;

                fw->words--;
                fw->offset++;
                count--;
        }

        return 0;
}
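
/*
 * Walk one gather in the firewall copy, decoding the opcode in the top
 * four bits of each command word (set-class, incrementing, non-incrementing
 * and masked register writes) and validating every register access through
 * check_register(). Unrecognized opcodes fail the job.
 */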
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
        u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
                (g->offset / sizeof(u32));
        u32 job_class = fw->class;
        int err = 0;

        fw->words = g->words;
        fw->cmdbuf = g->bo;
        fw->offset = 0;

        while (fw->words && !err) {
                u32 word = cmdbuf_base[fw->offset];
                u32 opcode = (word & 0xf0000000) >> 28;

                fw->mask = 0;
                fw->reg = 0;
                fw->count = 0;
                fw->words--;
                fw->offset++;

                switch (opcode) {
                case 0:
                        fw->class = word >> 6 & 0x3ff;
                        fw->mask = word & 0x3f;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_class(fw, job_class);
                        if (!err)
                                err = check_mask(fw);
                        if (err)
                                goto out;
                        break;

                case 1:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_incr(fw);
                        if (err)
                                goto out;
                        break;

                case 2:
                        fw->reg = word >> 16 & 0xfff;
                        fw->count = word & 0xffff;
                        err = check_nonincr(fw);
                        if (err)
                                goto out;
                        break;

                case 3:
                        fw->mask = word & 0xffff;
                        fw->reg = word >> 16 & 0xfff;
                        err = check_mask(fw);
                        if (err)
                                goto out;
                        break;

                case 4:
                case 14:
                        break;

                default:
                        err = -EINVAL;
                        break;
                }
        }

out:
        return err;
}
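
/*
 * With the firewall enabled, all gathers are copied into one contiguous
 * write-combined allocation and validated there, so the hardware never
 * executes from buffers that userspace could still modify after
 * validation.
 */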
static inline int copy_gathers(struct device *host, struct host1x_job *job,
                               struct device *dev)
{
        struct host1x_firewall fw;
        size_t size = 0;
        size_t offset = 0;
        unsigned int i;

        fw.job = job;
        fw.dev = dev;
        fw.reloc = job->relocs;
        fw.num_relocs = job->num_relocs;
        fw.class = job->class;

        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];

                size += g->words * sizeof(u32);
        }

        /*
         * Try a non-blocking allocation from the higher-priority pools
         * first, since waiting for an allocation here is a major
         * performance hit.
         */
        job->gather_copy_mapped = dma_alloc_wc(host, size, &job->gather_copy,
                                               GFP_NOWAIT);

        /* the higher-priority allocation failed, try the generic blocking one */
        if (!job->gather_copy_mapped)
                job->gather_copy_mapped = dma_alloc_wc(host, size,
                                                       &job->gather_copy,
                                                       GFP_KERNEL);
        if (!job->gather_copy_mapped)
                return -ENOMEM;

        job->gather_copy_size = size;

        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];
                void *gather;

                /* Copy the gather */
                gather = host1x_bo_mmap(g->bo);
                memcpy(job->gather_copy_mapped + offset, gather + g->offset,
                       g->words * sizeof(u32));
                host1x_bo_munmap(g->bo, gather);

                /* Store the location in the buffer */
                g->base = job->gather_copy;
                g->offset = offset;

                /* Validate the job */
                if (validate(&fw, g))
                        return -EINVAL;

                offset += g->words * sizeof(u32);
        }

        /* No relocs should remain at this point */
        if (fw.num_relocs)
                return -EINVAL;

        return 0;
}
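
/*
 * Resolve a job for submission: pin all referenced buffers, copy and
 * validate the gathers when the firewall is enabled, deduplicate gathers
 * that share a buffer object, and patch in the relocation addresses. On
 * failure, everything pinned so far is released again.
 */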
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
        int err;
        unsigned int i, j;
        struct host1x *host = dev_get_drvdata(dev->parent);

        /* pin memory */
        err = pin_job(host, job);
        if (err)
                goto out;

        if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
                err = copy_gathers(host->dev, job, dev);
                if (err)
                        goto out;
        }

        /* patch gathers */
        for (i = 0; i < job->num_gathers; i++) {
                struct host1x_job_gather *g = &job->gathers[i];

                /* process each gather mem only once */
                if (g->handled)
                        continue;

                /* copy_gathers() sets gathers base if firewall is enabled */
                if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
                        g->base = job->gather_addr_phys[i];

                for (j = i + 1; j < job->num_gathers; j++) {
                        if (job->gathers[j].bo == g->bo) {
                                job->gathers[j].handled = true;
                                job->gathers[j].base = g->base;
                        }
                }

                err = do_relocs(job, g);
                if (err)
                        break;
        }

out:
        if (err)
                host1x_job_unpin(job);
        wmb();

        return err;
}
EXPORT_SYMBOL(host1x_job_pin);
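
/*
 * Undo everything pin_job() recorded: tear down IOMMU mappings and IOVA
 * reservations for gathers, unmap the DMA scatterlists, unpin and release
 * the buffer objects, and free the firewall's gather copy if one was
 * allocated.
 */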
void host1x_job_unpin(struct host1x_job *job)
{
        struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
        unsigned int i;

        for (i = 0; i < job->num_unpins; i++) {
                struct host1x_job_unpin_data *unpin = &job->unpins[i];
                struct device *dev = unpin->dev ?: host->dev;
                struct sg_table *sgt = unpin->sgt;

                if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
                    unpin->size && host->domain) {
                        iommu_unmap(host->domain, job->addr_phys[i],
                                    unpin->size);
                        free_iova(&host->iova,
                                  iova_pfn(&host->iova, job->addr_phys[i]));
                }

                if (unpin->dev && sgt)
                        dma_unmap_sgtable(unpin->dev, sgt, unpin->dir, 0);

                host1x_bo_unpin(dev, unpin->bo, sgt);
                host1x_bo_put(unpin->bo);
        }

        job->num_unpins = 0;

        if (job->gather_copy_size)
                dma_free_wc(host->dev, job->gather_copy_size,
                            job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
        dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
        dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
        dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
        dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
        dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
        dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}