/*
 * Tegra host1x Command DMA
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/cacheflush.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/host1x.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <trace/events/host1x.h>

#include "cdma.h"
#include "channel.h"
#include "dev.h"
#include "debug.h"
#include "job.h"
/*
 * The push buffer is a circular array of words to be fetched by command DMA.
 * Note that it works slightly differently to the sync queue; fence == pos
 * means that the push buffer is full, not empty.
 */

#define HOST1X_PUSHBUFFER_SLOTS 512
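
/*
 * Worked example (illustrative, derived from the arithmetic used below): with
 * HOST1X_PUSHBUFFER_SLOTS = 512 and each slot holding two 32-bit words, the
 * buffer spans 512 * 8 = 4096 bytes. pb->pos is the offset of the next slot to
 * write and pb->fence the offset at which writing must stop; pushes advance
 * pos, pops advance fence. The buffer starts with pos = 0 and
 * fence = size - 8, so one slot is always kept unused and pos == fence can
 * unambiguously mean "full" rather than "empty".
 */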
/*
 * Clean up push buffer resources
 */
static void host1x_pushbuffer_destroy(struct push_buffer *pb)
{
        struct host1x_cdma *cdma = pb_to_cdma(pb);
        struct host1x *host1x = cdma_to_host1x(cdma);

        if (host1x->domain) {
                iommu_unmap(host1x->domain, pb->dma, pb->alloc_size);
                free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
        }

        dma_free_wc(host1x->dev, pb->alloc_size, pb->mapped, pb->phys);
}
/*
 * Init push buffer resources
 */
static int host1x_pushbuffer_init(struct push_buffer *pb)
{
        struct host1x_cdma *cdma = pb_to_cdma(pb);
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct iova *alloc;
        u32 size;
        int err;

        pb->mapped = NULL;
        pb->phys = 0;
        pb->size = HOST1X_PUSHBUFFER_SLOTS * 8;

        size = pb->size + 4;

        /* initialize buffer pointers */
        pb->fence = pb->size - 8;
        pb->pos = 0;

        if (host1x->domain) {
                unsigned long shift;

                size = iova_align(&host1x->iova, size);

                pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
                                          GFP_KERNEL);
                if (!pb->mapped)
                        return -ENOMEM;

                shift = iova_shift(&host1x->iova);
                alloc = alloc_iova(&host1x->iova, size >> shift,
                                   host1x->iova_end >> shift, true);
                if (!alloc) {
                        err = -ENOMEM;
                        goto iommu_free_mem;
                }

                pb->dma = iova_dma_addr(&host1x->iova, alloc);
                err = iommu_map(host1x->domain, pb->dma, pb->phys, size,
                                IOMMU_READ);
                if (err)
                        goto iommu_free_iova;
        } else {
                pb->mapped = dma_alloc_wc(host1x->dev, size, &pb->phys,
                                          GFP_KERNEL);
                if (!pb->mapped)
                        return -ENOMEM;

                pb->dma = pb->phys;
        }

        pb->alloc_size = size;

        host1x_hw_pushbuffer_init(host1x, pb);

        return 0;

iommu_free_iova:
        __free_iova(&host1x->iova, alloc);
iommu_free_mem:
        /* pb->alloc_size is not set yet on this path, so free by size */
        dma_free_wc(host1x->dev, size, pb->mapped, pb->phys);

        return err;
}
/*
 * Push two words to the push buffer
 * Caller must ensure push buffer is not full
 */
static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
{
        u32 *p = (u32 *)((void *)pb->mapped + pb->pos);

        WARN_ON(pb->pos == pb->fence);
        *(p++) = op1;
        *(p++) = op2;
        pb->pos = (pb->pos + 8) & (pb->size - 1);
}
/*
 * Pop a number of two word slots from the push buffer
 * Caller must ensure push buffer is not empty
 */
static void host1x_pushbuffer_pop(struct push_buffer *pb, unsigned int slots)
{
        /* Advance the next write position */
        pb->fence = (pb->fence + slots * 8) & (pb->size - 1);
}
/*
 * Return the number of two word slots free in the push buffer
 */
static u32 host1x_pushbuffer_space(struct push_buffer *pb)
{
        return ((pb->fence - pb->pos) & (pb->size - 1)) / 8;
}
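
/*
 * Example of the wrap-around arithmetic above (assuming the initial state set
 * up in host1x_pushbuffer_init): with size = 4096, pos = 0 and fence = 4088,
 * space is ((4088 - 0) & 4095) / 8 = 511 free slots. After pushing one slot,
 * pos = 8 and space drops to 510; once pos catches up with fence the buffer
 * is full and space is 0.
 */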
/*
 * Sleep (if necessary) until the requested event happens
 *   - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty.
 *     - Returns 1
 *   - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer
 *     - Return the amount of space (> 0)
 * Must be called with the cdma lock held.
 */
unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
                                     enum cdma_event event)
{
        for (;;) {
                struct push_buffer *pb = &cdma->push_buffer;
                unsigned int space;

                switch (event) {
                case CDMA_EVENT_SYNC_QUEUE_EMPTY:
                        space = list_empty(&cdma->sync_queue) ? 1 : 0;
                        break;

                case CDMA_EVENT_PUSH_BUFFER_SPACE:
                        space = host1x_pushbuffer_space(pb);
                        break;

                default:
                        WARN(1, "bad event!");
                        return -EINVAL;
                }

                if (space)
                        return space;

                trace_host1x_wait_cdma(dev_name(cdma_to_channel(cdma)->dev),
                                       event);

                /* If somebody has managed to already start waiting, yield */
                if (cdma->event != CDMA_EVENT_NONE) {
                        mutex_unlock(&cdma->lock);
                        schedule();
                        mutex_lock(&cdma->lock);
                        continue;
                }

                cdma->event = event;

                mutex_unlock(&cdma->lock);
                down(&cdma->sem);
                mutex_lock(&cdma->lock);
        }

        return 0;
}
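
/*
 * The down(&cdma->sem) above is paired with the up(&cdma->sem) performed by
 * update_cdma_locked() once the awaited condition holds (push buffer slots
 * freed or sync queue drained), at which point cdma->event is reset to
 * CDMA_EVENT_NONE.
 */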
/*
 * Start timer that tracks the time spent by the job.
 * Must be called with the cdma lock held.
 */
static void cdma_start_timer_locked(struct host1x_cdma *cdma,
                                    struct host1x_job *job)
{
        struct host1x *host = cdma_to_host1x(cdma);

        if (cdma->timeout.client) {
                /* timer already started */
                return;
        }

        cdma->timeout.client = job->client;
        cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id);
        cdma->timeout.syncpt_val = job->syncpt_end;
        cdma->timeout.start_ktime = ktime_get();

        schedule_delayed_work(&cdma->timeout.wq,
                              msecs_to_jiffies(job->timeout));
}
/*
 * Stop timer when a buffer submission completes.
 * Must be called with the cdma lock held.
 */
static void stop_cdma_timer_locked(struct host1x_cdma *cdma)
{
        cancel_delayed_work(&cdma->timeout.wq);
        cdma->timeout.client = 0;
}
/*
 * For all sync queue entries that have already finished according to the
 * current sync point registers:
 *  - unpin & unref their mems
 *  - pop their push buffer slots
 *  - remove them from the sync queue
 * This is normally called from the host code's worker thread, but can be
 * called manually if necessary.
 * Must be called with the cdma lock held.
 */
static void update_cdma_locked(struct host1x_cdma *cdma)
{
        bool signal = false;
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct host1x_job *job, *n;

        /* If CDMA is stopped, queue is cleared and we can return */
        if (!cdma->running)
                return;

        /*
         * Walk the sync queue, reading the sync point registers as necessary,
         * to consume as many sync queue entries as possible without blocking
         */
        list_for_each_entry_safe(job, n, &cdma->sync_queue, list) {
                struct host1x_syncpt *sp =
                        host1x_syncpt_get(host1x, job->syncpt_id);

                /* Check whether this syncpt has completed, and bail if not */
                if (!host1x_syncpt_is_expired(sp, job->syncpt_end)) {
                        /* Start timer on next pending syncpt */
                        if (job->timeout)
                                cdma_start_timer_locked(cdma, job);

                        break;
                }

                /* Cancel timeout, when a buffer completes */
                if (cdma->timeout.client)
                        stop_cdma_timer_locked(cdma);

                /* Unpin the memory */
                host1x_job_unpin(job);

                /* Pop push buffer slots */
                if (job->num_slots) {
                        struct push_buffer *pb = &cdma->push_buffer;

                        host1x_pushbuffer_pop(pb, job->num_slots);

                        if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
                                signal = true;
                }

                list_del(&job->list);
        }

        if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY &&
            list_empty(&cdma->sync_queue))
                signal = true;

        /* Wake up anyone waiting on the event that was just satisfied */
        if (signal) {
                cdma->event = CDMA_EVENT_NONE;
                up(&cdma->sem);
        }
}
void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
                                   struct device *dev)
{
        struct host1x *host1x = cdma_to_host1x(cdma);
        u32 restart_addr, syncpt_incrs, syncpt_val;
        struct host1x_job *job = NULL;

        syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);

        dev_dbg(dev, "%s: starting cleanup (thresh %d)\n",
                __func__, syncpt_val);

        /*
         * Move the sync_queue read pointer to the first entry that hasn't
         * completed based on the current HW syncpt value. It's likely there
         * won't be any (i.e. we're still at the head), but covers the case
         * where a syncpt incr happens just prior/during the teardown.
         */

        dev_dbg(dev, "%s: skip completed buffers still in sync_queue\n",
                __func__);

        list_for_each_entry(job, &cdma->sync_queue, list) {
                if (syncpt_val < job->syncpt_end)
                        break;

                host1x_job_dump(dev, job);
        }

        /*
         * Walk the sync_queue, first incrementing with the CPU syncpts that
         * are partially executed (the first buffer) or fully skipped while
         * still in the current context (slots are also NOP-ed).
         *
         * At the point contexts are interleaved, syncpt increments must be
         * done inline with the pushbuffer from a GATHER buffer to maintain
         * the order (slots are modified to be a GATHER of syncpt incrs).
         *
         * Note: save in restart_addr the location where the timed out buffer
         * started in the PB, so we can start the refetch from there (with the
         * modified NOP-ed PB slots). This lets things appear to have completed
         * properly for this buffer and resources are freed.
         */

        dev_dbg(dev, "%s: perform CPU incr on pending same ctx buffers\n",
                __func__);

        if (!list_empty(&cdma->sync_queue))
                restart_addr = job->first_get;
        else
                restart_addr = cdma->last_pos;

        /* do CPU increments as long as this context continues */
        list_for_each_entry_from(job, &cdma->sync_queue, list) {
                /* different context, gets us out of this loop */
                if (job->client != cdma->timeout.client)
                        break;

                /* won't need a timeout when replayed */
                job->timeout = 0;

                syncpt_incrs = job->syncpt_end - syncpt_val;
                dev_dbg(dev, "%s: CPU incr (%d)\n", __func__, syncpt_incrs);

                host1x_job_dump(dev, job);

                /* safe to use CPU to incr syncpts */
                host1x_hw_cdma_timeout_cpu_incr(host1x, cdma, job->first_get,
                                                syncpt_incrs, job->syncpt_end,
                                                job->num_slots);

                syncpt_val += syncpt_incrs;
        }

        /*
         * The following submits from the same client may be dependent on the
         * failed submit and therefore they may fail. Force a small timeout
         * to make the queue cleanup faster.
         */

        list_for_each_entry_from(job, &cdma->sync_queue, list)
                if (job->client == cdma->timeout.client)
                        job->timeout = min_t(unsigned int, job->timeout, 500);

        dev_dbg(dev, "%s: finished sync_queue modification\n", __func__);

        /* roll back DMAGET and start up channel again */
        host1x_hw_cdma_resume(host1x, cdma, restart_addr);
}
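
/*
 * In short, the recovery above proceeds in three phases: jobs at the head of
 * sync_queue that already completed are skipped, pending jobs from the
 * timed-out client have their sync points incremented from the CPU (their
 * push buffer slots having been NOP-ed by the hardware-specific helper), and
 * later jobs from the same client get their timeout clamped to 500 ms before
 * DMAGET is rolled back to restart_addr and the channel resumed.
 */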
/*
 * Create a cdma
 */
int host1x_cdma_init(struct host1x_cdma *cdma)
{
        int err;

        mutex_init(&cdma->lock);
        sema_init(&cdma->sem, 0);

        INIT_LIST_HEAD(&cdma->sync_queue);

        cdma->event = CDMA_EVENT_NONE;
        cdma->running = false;
        cdma->torndown = false;

        err = host1x_pushbuffer_init(&cdma->push_buffer);
        if (err)
                return err;

        return 0;
}
/*
 * Destroy a cdma
 */
int host1x_cdma_deinit(struct host1x_cdma *cdma)
{
        struct push_buffer *pb = &cdma->push_buffer;
        struct host1x *host1x = cdma_to_host1x(cdma);

        if (cdma->running) {
                pr_warn("%s: CDMA still running\n", __func__);
                return -EBUSY;
        }

        host1x_pushbuffer_destroy(pb);
        host1x_hw_cdma_timeout_destroy(host1x, cdma);

        return 0;
}
/*
 * Begin a cdma submit
 */
int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
{
        struct host1x *host1x = cdma_to_host1x(cdma);

        mutex_lock(&cdma->lock);

        if (job->timeout) {
                /* init state on first submit with timeout value */
                if (!cdma->timeout.initialized) {
                        int err;

                        err = host1x_hw_cdma_timeout_init(host1x, cdma,
                                                          job->syncpt_id);
                        if (err) {
                                mutex_unlock(&cdma->lock);
                                return err;
                        }
                }
        }

        if (!cdma->running)
                host1x_hw_cdma_start(host1x, cdma);

        cdma->slots_free = 0;
        cdma->slots_used = 0;
        cdma->first_get = cdma->push_buffer.pos;

        trace_host1x_cdma_begin(dev_name(job->channel->dev));
        return 0;
}
/*
 * Push two words into a push buffer slot
 * Blocks as necessary if the push buffer is full.
 */
void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
{
        struct host1x *host1x = cdma_to_host1x(cdma);
        struct push_buffer *pb = &cdma->push_buffer;
        u32 slots_free = cdma->slots_free;

        if (host1x_debug_trace_cmdbuf)
                trace_host1x_cdma_push(dev_name(cdma_to_channel(cdma)->dev),
                                       op1, op2);

        if (slots_free == 0) {
                host1x_hw_cdma_flush(host1x, cdma);
                slots_free = host1x_cdma_wait_locked(cdma,
                                                CDMA_EVENT_PUSH_BUFFER_SPACE);
        }

        cdma->slots_free = slots_free - 1;
        cdma->slots_used++;
        host1x_pushbuffer_push(pb, op1, op2);
}
/*
 * End a cdma submit
 * Kick off DMA, add job to the sync queue, and a number of slots to be freed
 * from the pushbuffer. The handles for a submit must all be pinned at the same
 * time, but they can be unpinned in smaller chunks.
 */
void host1x_cdma_end(struct host1x_cdma *cdma,
                     struct host1x_job *job)
{
        struct host1x *host1x = cdma_to_host1x(cdma);
        bool idle = list_empty(&cdma->sync_queue);

        host1x_hw_cdma_flush(host1x, cdma);

        job->first_get = cdma->first_get;
        job->num_slots = cdma->slots_used;
        list_add_tail(&job->list, &cdma->sync_queue);

        /* start timer on idle -> active transitions */
        if (job->timeout && idle)
                cdma_start_timer_locked(cdma, job);

        trace_host1x_cdma_end(dev_name(job->channel->dev));
        mutex_unlock(&cdma->lock);
}
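
/*
 * Illustrative submit sequence (the channel pointer and opcode values here
 * are hypothetical; only the host1x_cdma_* calls are from this file):
 *
 *      host1x_cdma_begin(&channel->cdma, job);    // takes cdma->lock
 *      host1x_cdma_push(&channel->cdma, op1, op2); // one pair per opcode
 *      ...
 *      host1x_cdma_end(&channel->cdma, job);      // flushes, drops the lock
 *
 * Note that the cdma lock is held from host1x_cdma_begin() until
 * host1x_cdma_end() releases it.
 */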
/*
 * Update cdma state according to current sync point values
 */
void host1x_cdma_update(struct host1x_cdma *cdma)
{
        mutex_lock(&cdma->lock);
        update_cdma_locked(cdma);
        mutex_unlock(&cdma->lock);
}