/*
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

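/*
 * Allocate a job and all of its fixed-size arrays with a single kzalloc():
 * the memory following struct host1x_job is carved up into the reloc,
 * unpin, waitchk, gather and address tables below.
 */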
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    u32 num_waitchks)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocarray = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->waitchk = num_waitchks ? mem : NULL;
	mem += num_waitchks * sizeof(struct host1x_waitchk);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

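/* Take a reference on a job so it stays alive until host1x_job_put(). */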
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

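/* Record one command buffer range (gather) for the job to execute. */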
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   u32 words, u32 offset)
{
	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];

	cur_gather->words = words;
	cur_gather->bo = bo;
	cur_gather->offset = offset;
	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

/*
 * NULL an already satisfied WAIT_SYNCPT host method, by patching its
 * args in the command stream. The method data is changed to reference
 * a reserved (never given out or incr) HOST1X_SYNCPT_RESERVED syncpt
 * with a matching threshold value of 0, so is guaranteed to be popped
 * by the host HW.
 */
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
				       struct host1x_bo *h, u32 offset)
{
	void *patch_addr = NULL;

	/* patch the wait */
	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
	if (patch_addr) {
		host1x_syncpt_patch_wait(sp,
					 patch_addr + (offset & ~PAGE_MASK));
		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
	} else
		pr_err("Could not map cmdbuf for wait check\n");
}

/*
 * Check driver supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct host1x_job *job, struct host1x *host,
		       struct host1x_bo *patch)
{
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct host1x_waitchk *wait = &job->waitchk[i];
		struct host1x_syncpt *sp =
			host1x_syncpt_get(host, wait->syncpt_id);

		/* validate syncpt id */
		if (wait->syncpt_id > host1x_syncpt_nb_pts(host))
			continue;

		/* skip all other gathers */
		if (patch != wait->bo)
			continue;

		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
					       wait->syncpt_id, wait->thresh,
					       host1x_syncpt_read_min(sp));

		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
			dev_dbg(host->dev,
				"drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id, sp->name, wait->thresh,
				host1x_syncpt_read_min(sp));

			host1x_syncpt_patch_offset(sp, patch, wait->offset);
		}

		wait->bo = NULL;
	}

	return 0;
}

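/*
 * Pin the backing memory of every relocation target and gather buffer,
 * recording each pinned BO in job->unpins so that host1x_job_unpin() can
 * release everything on failure or when the job completes.
 */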
static unsigned int pin_job(struct host1x_job *job)
{
	unsigned int i;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo)
			goto unpin;

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
		if (!phys_addr)
			goto unpin;

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo)
			goto unpin;

		phys_addr = host1x_bo_pin(g->bo, &sgt);
		if (!phys_addr)
			goto unpin;

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return job->num_unpins;

unpin:
	host1x_job_unpin(job);
	return 0;
}

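/*
 * Patch one command buffer's relocations: each reloc word is rewritten
 * with the device address of its pinned target BO, mapping the cmdbuf
 * one page at a time.
 */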
static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
{
	int i = 0;
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	return true;
}

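/*
 * State for the software "firewall" that scans gathers for register
 * writes before they reach the hardware.
 */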
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

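/*
 * A write to an address register must be covered by the next relocation
 * entry; consume it, or reject the job if it is missing or mismatched.
 */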
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	return 0;
}

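/*
 * The three checkers below validate the register writes carried by the
 * mask, incrementing and non-incrementing opcode payloads, respectively.
 */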
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

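/*
 * Walk the opcodes of one gather (from the firewall's copy) and check
 * every register write against the job's relocation table.
 */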
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	int err = 0;

	if (!fw->job->is_addr_reg)
		return 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;
		case 4:
		case 5:
		case 14:
			break;
		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

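/*
 * Copy all gathers into one contiguous write-combined buffer so that the
 * firewall validates the exact words the hardware will later fetch.
 */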
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.class = 0;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size += g->words * sizeof(u32);
	}

	job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
							 &job->gather_copy,
							 GFP_KERNEL);
	if (!job->gather_copy_mapped) {
		job->gather_copy_mapped = NULL;
		return -ENOMEM;
	}

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs should remain at this point */
	if (fw.num_relocs)
		return -EINVAL;

	return 0;
}

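/*
 * Pin all memory referenced by the job, patch relocations and wait
 * checks, and, with the firewall enabled, copy and validate the gathers.
 */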
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);
	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));

	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
	for (i = 0; i < job->num_waitchk; i++) {
		u32 syncpt_id = job->waitchk[i].syncpt_id;
		if (syncpt_id < host1x_syncpt_nb_pts(host))
			set_bit(syncpt_id, waitchk_mask);
	}

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
		host1x_syncpt_load(host->syncpt + i);

	/* pin memory */
	err = pin_job(job);
	if (!err)
		goto out;

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++)
			if (job->gathers[j].bo == g->bo)
				job->gathers[j].handled = true;

		err = do_relocs(job, g->bo);
		if (err)
			break;

		err = do_waitchks(job, host, g->bo);
		if (err)
			break;
	}

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && !err) {
		err = copy_gathers(job, dev);
		if (err) {
			host1x_job_unpin(job);
			return err;
		}
	}

out:
	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

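/*
 * Release every BO pinned by pin_job() and free the firewall's gather
 * copy, if one was allocated.
 */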
void host1x_job_unpin(struct host1x_job *job)
{
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];
		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}
	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_writecombine(job->channel->dev, job->gather_copy_size,
				      job->gather_copy_mapped,
				      job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}