/* drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c */
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"
/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.
 */
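/*
 * Note on sequence numbers: fence values are 32-bit and wrap.  Ordering
 * is therefore checked with a wrap-safe signed difference, as
 * amdgpu_fence_wait_polling() does below.  A minimal worked example
 * (illustrative only, not part of the driver):
 *
 *	wait_seq = 0x00000002, seq = 0xfffffffe (just before the wrap)
 *	(int32_t)(wait_seq - seq) = (int32_t)0x4 = 4 > 0  => still waiting
 *
 * so a waiter keeps polling until the read value catches up, even
 * across the 32-bit wrap.
 */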
struct amdgpu_fence {
	struct dma_fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring	*ring;
};
static struct kmem_cache *amdgpu_fence_slab;

int amdgpu_fence_slab_init(void)
{
	amdgpu_fence_slab = kmem_cache_create(
		"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
		SLAB_HWCACHE_ALIGN, NULL);
	if (!amdgpu_fence_slab)
		return -ENOMEM;
	return 0;
}

void amdgpu_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(amdgpu_fence_slab);
}
/*
 * Cast helper
 */
static const struct dma_fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct dma_fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
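/*
 * The helper above is the common container_of() downcast: since 'base'
 * is embedded in struct amdgpu_fence, subtracting its offset recovers
 * the enclosing object.  Comparing f->ops against amdgpu_fence_ops
 * guards against being handed a foreign dma_fence.
 */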
/**
 * amdgpu_fence_write - write a fence value
 *
 * @ring: ring the fence is associated with
 * @seq: sequence number to write
 *
 * Writes a fence value to memory (all asics).
 */
static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;

	if (drv->cpu_addr)
		*drv->cpu_addr = cpu_to_le32(seq);
}
/**
 * amdgpu_fence_read - read a fence value
 *
 * @ring: ring the fence is associated with
 *
 * Reads a fence value from memory (all asics).
 * Returns the value of the fence read from memory.
 */
static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	u32 seq = 0;

	if (drv->cpu_addr)
		seq = le32_to_cpu(*drv->cpu_addr);
	else
		seq = atomic_read(&drv->last_seq);

	return seq;
}
/**
 * amdgpu_fence_emit - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @f: resulting fence object
 * @flags: AMDGPU_FENCE_FLAG_* flags for the fence
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
		      unsigned flags)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_fence *fence;
	struct dma_fence *old, **ptr;
	uint32_t seq;

	fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return -ENOMEM;

	seq = ++ring->fence_drv.sync_seq;
	fence->ring = ring;
	dma_fence_init(&fence->base, &amdgpu_fence_ops,
		       &ring->fence_drv.lock,
		       adev->fence_context + ring->idx,
		       seq);
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, flags | AMDGPU_FENCE_FLAG_INT);

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	/* This function can't be called concurrently anyway, otherwise
	 * emitting the fence would mess up the hardware ring buffer.
	 */
	old = rcu_dereference_protected(*ptr, 1);
	if (old && !dma_fence_is_signaled(old)) {
		DRM_INFO("rcu slot is busy\n");
		dma_fence_wait(old, false);
	}

	rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));

	*f = &fence->base;

	return 0;
}
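/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * a caller emits a fence on a ring it owns and blocks until the GPU
 * reaches it.  Error handling is elided.
 *
 *	struct dma_fence *f;
 *	int r;
 *
 *	r = amdgpu_fence_emit(ring, &f, 0);
 *	if (!r) {
 *		r = dma_fence_wait(f, false);
 *		dma_fence_put(f);
 *	}
 *
 * In practice most callers consume these fences through the GPU
 * scheduler and amdgpu_sync rather than waiting directly like this.
 */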
/**
 * amdgpu_fence_emit_polling - emit a fence on the requested ring
 *
 * @ring: ring the fence is associated with
 * @s: resulting sequence number
 *
 * Emits a fence command on the requested ring (all asics).
 * Used for polling on the fence value without a fence object.
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
{
	uint32_t seq;

	if (!s)
		return -EINVAL;

	seq = ++ring->fence_drv.sync_seq;
	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
			       seq, 0);

	*s = seq;

	return 0;
}
/**
 * amdgpu_fence_schedule_fallback - schedule fallback check
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Start a timer as fallback to our interrupts.
 */
static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
{
	mod_timer(&ring->fence_drv.fallback_timer,
		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
}
/**
 * amdgpu_fence_process - check for fence activity
 *
 * @ring: pointer to struct amdgpu_ring
 *
 * Checks the current fence value and calculates the last
 * signalled fence value. Wakes the fence queue if the
 * sequence number has increased.
 *
 * Returns true if fence was processed
 */
bool amdgpu_fence_process(struct amdgpu_ring *ring)
{
	struct amdgpu_fence_driver *drv = &ring->fence_drv;
	uint32_t seq, last_seq;
	int r;

	/* Publish the newest value read from the fence location; retry if
	 * another thread updated last_seq underneath us.
	 */
	do {
		last_seq = atomic_read(&ring->fence_drv.last_seq);
		seq = amdgpu_fence_read(ring);

	} while (atomic_cmpxchg(&drv->last_seq, last_seq, seq) != last_seq);

	if (del_timer(&ring->fence_drv.fallback_timer) &&
	    seq != ring->fence_drv.sync_seq)
		amdgpu_fence_schedule_fallback(ring);

	if (unlikely(seq == last_seq))
		return false;

	last_seq &= drv->num_fences_mask;
	seq &= drv->num_fences_mask;

	/* Signal and release every fence slot between the old and new value. */
	do {
		struct dma_fence *fence, **ptr;

		++last_seq;
		last_seq &= drv->num_fences_mask;
		ptr = &drv->fences[last_seq];

		/* There is always exactly one thread signaling this fence slot */
		fence = rcu_dereference_protected(*ptr, 1);
		RCU_INIT_POINTER(*ptr, NULL);

		if (!fence)
			continue;

		r = dma_fence_signal(fence);
		if (!r)
			DMA_FENCE_TRACE(fence, "signaled from irq context\n");
		else
			BUG();

		dma_fence_put(fence);
	} while (last_seq != seq);

	return true;
}
/**
 * amdgpu_fence_fallback - fallback for hardware interrupts
 *
 * @t: timer context used to obtain the ring
 *
 * Checks for fence activity.
 */
static void amdgpu_fence_fallback(struct timer_list *t)
{
	struct amdgpu_ring *ring = from_timer(ring, t,
					      fence_drv.fallback_timer);

	if (amdgpu_fence_process(ring))
		DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
}
/**
 * amdgpu_fence_wait_empty - wait for all fences to signal
 *
 * @ring: ring the fences are associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 */
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
{
	uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
	struct dma_fence *fence, **ptr;
	int r;

	if (!seq)
		return 0;

	ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
	rcu_read_lock();
	fence = rcu_dereference(*ptr);
	if (!fence || !dma_fence_get_rcu(fence)) {
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	r = dma_fence_wait(fence, false);
	dma_fence_put(fence);
	return r;
}
/**
 * amdgpu_fence_wait_polling - busy wait for a given sequence number
 *
 * @ring: ring the fence is associated with
 * @wait_seq: sequence number to wait for
 * @timeout: the timeout for waiting in usecs
 *
 * Busy-waits until the given sequence number has signalled on the
 * requested ring, or until the timeout expires (all asics).
 * Returns the remaining time if the fence signalled, 0 on timeout.
 */
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
				      uint32_t wait_seq,
				      signed long timeout)
{
	uint32_t seq;

	do {
		seq = amdgpu_fence_read(ring);
		udelay(5);
		timeout -= 5;
	} while ((int32_t)(wait_seq - seq) > 0 && timeout > 0);

	return timeout > 0 ? timeout : 0;
}
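/*
 * Illustrative pairing of the two polling helpers above (a sketch, not
 * part of this file), for contexts where sleeping is not allowed:
 *
 *	uint32_t seq;
 *
 *	if (!amdgpu_fence_emit_polling(ring, &seq))
 *		amdgpu_fence_wait_polling(ring, seq, timeout_usecs);
 *
 * Here 'timeout_usecs' is a hypothetical, caller-chosen busy-wait
 * budget in microseconds.
 */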
/**
 * amdgpu_fence_count_emitted - get the count of emitted fences
 *
 * @ring: ring the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	amdgpu_fence_process(ring);
	emitted = 0x100000000ull;
	emitted -= atomic_read(&ring->fence_drv.last_seq);
	emitted += READ_ONCE(ring->fence_drv.sync_seq);
	return lower_32_bits(emitted);
}
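/*
 * Worked example for the arithmetic above (illustrative only): the bias
 * of 2^32 keeps the subtraction correct across a 32-bit wrap.  With
 * last_seq = 0xfffffffe and sync_seq wrapped around to 0x00000001:
 *
 *	emitted = 0x100000000 - 0xfffffffe + 0x1 = 0x3
 *
 * i.e. three fences are outstanding, which is what lower_32_bits()
 * returns.
 */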
/**
 * amdgpu_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @ring: ring to start the fence driver on
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type)
{
	struct amdgpu_device *adev = ring->adev;
	uint64_t index;

	if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
		ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
		ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
	} else {
		/* put fence directly behind firmware */
		index = ALIGN(adev->uvd.fw->size, 8);
		ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
		ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
	}
	amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
	amdgpu_irq_get(adev, irq_src, irq_type);

	ring->fence_drv.irq_src = irq_src;
	ring->fence_drv.irq_type = irq_type;
	ring->fence_drv.initialized = true;

	DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr "
		      "0x%016llx, cpu addr 0x%p\n", ring->name,
		      ring->fence_drv.gpu_addr, ring->fence_drv.cpu_addr);
	return 0;
}
/**
 * amdgpu_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @ring: ring to init the fence driver on
 * @num_hw_submission: number of entries on the hardware queue
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for amdgpu_fence_driver_init().
 * Returns 0 for success, error for failure.
 */
int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
				  unsigned num_hw_submission)
{
	long timeout;
	int r;

	/* Check that num_hw_submission is a power of two */
	if ((num_hw_submission & (num_hw_submission - 1)) != 0)
		return -EINVAL;

	ring->fence_drv.cpu_addr = NULL;
	ring->fence_drv.gpu_addr = 0;
	ring->fence_drv.sync_seq = 0;
	atomic_set(&ring->fence_drv.last_seq, 0);
	ring->fence_drv.initialized = false;

	timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);

	/* Twice num_hw_submission slots, so the mask stays a power of two. */
	ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
	spin_lock_init(&ring->fence_drv.lock);
	ring->fence_drv.fences = kcalloc(num_hw_submission * 2, sizeof(void *),
					 GFP_KERNEL);
	if (!ring->fence_drv.fences)
		return -ENOMEM;

	/* No need to setup the GPU scheduler for KIQ ring */
	if (ring->funcs->type != AMDGPU_RING_TYPE_KIQ) {
		/* for non-sriov case, no timeout enforce on compute ring */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
		    && !amdgpu_sriov_vf(ring->adev))
			timeout = MAX_SCHEDULE_TIMEOUT;
		else
			timeout = msecs_to_jiffies(amdgpu_lockup_timeout);

		r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
				   num_hw_submission, amdgpu_job_hang_limit,
				   timeout, ring->name);
		if (r) {
			DRM_ERROR("Failed to create scheduler on ring %s.\n",
				  ring->name);
			return r;
		}
	}

	return 0;
}
/**
 * amdgpu_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 * Returns 0 for success.
 */
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
	if (amdgpu_debugfs_fence_init(adev))
		dev_err(adev->dev, "fence debugfs file creation failed\n");

	return 0;
}
/**
 * amdgpu_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->fence_drv.initialized)
			continue;
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			amdgpu_fence_driver_force_completion(ring);
		}
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
		drm_sched_fini(&ring->sched);
		del_timer_sync(&ring->fence_drv.fallback_timer);
		for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
			dma_fence_put(ring->fence_drv.fences[j]);
		kfree(ring->fence_drv.fences);
		ring->fence_drv.fences = NULL;
		ring->fence_drv.initialized = false;
	}
}
/**
 * amdgpu_fence_driver_suspend - suspend the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Suspend the fence driver for all possible rings (all asics).
 */
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* wait for gpu to finish processing current batch */
		r = amdgpu_fence_wait_empty(ring);
		if (r) {
			/* delay GPU reset to resume */
			amdgpu_fence_driver_force_completion(ring);
		}

		/* disable the interrupt */
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_resume - resume the fence driver
 * for all possible rings.
 *
 * @adev: amdgpu device pointer
 *
 * Resume the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * amdgpu_fence_driver_start_ring().
 */
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		/* enable the interrupt */
		amdgpu_irq_get(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);
	}
}
/**
 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
 *
 * @ring: ring whose latest fence should be signalled
 */
void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
{
	amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
	amdgpu_fence_process(ring);
}
/*
 * Common fence implementation
 */

static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
{
	return "amdgpu";
}

static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	return (const char *)fence->ring->name;
}
/**
 * amdgpu_fence_enable_signaling - enable signalling on fence
 * @f: fence
 *
 * This function is called by the dma_fence core with the fence lock held;
 * it arms the fallback timer (if not already pending) so the fence still
 * gets signalled even if the fence interrupt is lost.
 */
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	struct amdgpu_ring *ring = fence->ring;

	if (!timer_pending(&ring->fence_drv.fallback_timer))
		amdgpu_fence_schedule_fallback(ring);

	DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);

	return true;
}
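/*
 * Illustrative only: signaling is enabled lazily by the dma_fence core
 * the first time someone actually waits on or adds a callback to the
 * fence, e.g.:
 *
 *	r = dma_fence_wait_timeout(f, false, timeout);
 *
 * which ends up calling amdgpu_fence_enable_signaling() above and arms
 * the fallback timer in case the fence interrupt is lost.
 */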
/**
 * amdgpu_fence_free - free up the fence memory
 *
 * @rcu: RCU callback head
 *
 * Free up the fence memory after the RCU grace period.
 */
static void amdgpu_fence_free(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct amdgpu_fence *fence = to_amdgpu_fence(f);
	kmem_cache_free(amdgpu_fence_slab, fence);
}
/**
 * amdgpu_fence_release - callback run when the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just RCU-schedules freeing up the fence.
 */
static void amdgpu_fence_release(struct dma_fence *f)
{
	call_rcu(&f->rcu, amdgpu_fence_free);
}
static const struct dma_fence_ops amdgpu_fence_ops = {
	.get_driver_name = amdgpu_fence_get_driver_name,
	.get_timeline_name = amdgpu_fence_get_timeline_name,
	.enable_signaling = amdgpu_fence_enable_signaling,
	.release = amdgpu_fence_release,
};
/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (!ring || !ring->fence_drv.initialized)
			continue;

		amdgpu_fence_process(ring);

		seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
		seq_printf(m, "Last signaled fence 0x%08x\n",
			   atomic_read(&ring->fence_drv.last_seq));
		seq_printf(m, "Last emitted 0x%08x\n",
			   ring->fence_drv.sync_seq);

		if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
			continue;

		/* set in CP_VMID_PREEMPT and preemption occurred */
		seq_printf(m, "Last preempted 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
		/* set in CP_VMID_RESET and reset occurred */
		seq_printf(m, "Last reset 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
		/* Both preemption and reset occurred */
		seq_printf(m, "Last both 0x%08x\n",
			   le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));
	}
	return 0;
}
/*
 * amdgpu_debugfs_gpu_recover - manually trigger a gpu reset & recover
 *
 * Manually trigger a GPU reset & recover.
 */
static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	seq_printf(m, "gpu recover\n");
	amdgpu_device_gpu_recover(adev, NULL);

	return 0;
}
static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
	{"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
};

static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
	{"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
};
#endif

int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	if (amdgpu_sriov_vf(adev))
		return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov, 1);
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list, 2);
#else
	return 0;
#endif
}