/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"

u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
/**
 * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (evergreen-SI).
 */
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_ring *ring = &rdev->ring[fence->ring];
	u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

	/* write the fence */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
	radeon_ring_write(ring, addr & 0xfffffffc);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
	radeon_ring_write(ring, fence->seq);
	/* generate an interrupt */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
	/* flush HDP */
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
	radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
	radeon_ring_write(ring, 1);
}
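
/*
 * Packet layout, for reference (my reading of the writes above): the fence
 * is a 4 DW DMA_PACKET_FENCE (header, low 32 bits of the fence address,
 * high address bits, sequence number), followed by a 1 DW DMA_PACKET_TRAP
 * that raises the interrupt and a 3 DW DMA_PACKET_SRBM_WRITE that writes
 * HDP_MEM_COHERENCY_FLUSH_CNTL to flush the HDP cache, for 8 DW in total.
 */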
/**
 * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
 *
 * @rdev: radeon_device pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (evergreen).
 */
void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
				   struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->ring];

	if (rdev->wb.enabled) {
		u32 next_rptr = ring->wptr + 4;
		while ((next_rptr & 7) != 5)
			next_rptr++;
		next_rptr += 3;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
		radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
		radeon_ring_write(ring, next_rptr);
	}

	/* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
	 * Pad as necessary with NOPs.
	 */
	while ((ring->wptr & 7) != 5)
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
	radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
	radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
}
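
/*
 * A worked reading of the alignment above: the INDIRECT_BUFFER packet is
 * 3 DW (header, IB base address, length/high-address word), so starting it
 * at (wptr & 7) == 5 makes it end exactly on an 8 DW boundary (5 + 3 == 8),
 * which is what the NOP padding loop guarantees.
 */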
/**
 * evergreen_copy_dma - copy pages using the DMA engine
 *
 * @rdev: radeon_device pointer
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @num_gpu_pages: number of GPU pages to xfer
 * @resv: reservation object to sync to
 *
 * Copy GPU paging using the DMA engine (evergreen-cayman).
 * Used by the radeon ttm implementation to move pages if
 * registered as the asic copy callback.
 */
struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
					uint64_t src_offset,
					uint64_t dst_offset,
					unsigned num_gpu_pages,
					struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int ring_index = rdev->asic->copy.dma_ring_index;
	struct radeon_ring *ring = &rdev->ring[ring_index];
	u32 size_in_dw, cur_size_in_dw;
	int i, num_loops;
	int r = 0;

	radeon_sync_create(&sync);

	size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
	num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	for (i = 0; i < num_loops; i++) {
		cur_size_in_dw = size_in_dw;
		if (cur_size_in_dw > 0xFFFFF)
			cur_size_in_dw = 0xFFFFF;
		size_in_dw -= cur_size_in_dw;
		radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
		radeon_ring_write(ring, dst_offset & 0xfffffffc);
		radeon_ring_write(ring, src_offset & 0xfffffffc);
		radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
		radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
		src_offset += cur_size_in_dw * 4;
		dst_offset += cur_size_in_dw * 4;
	}

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}
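
/*
 * Sizing sketch (derived from the code above, assuming the usual 4 KiB GPU
 * page behind RADEON_GPU_PAGE_SHIFT): each DMA_PACKET_COPY moves at most
 * 0xFFFFF dwords (~4 MiB), so a copy of num_gpu_pages pages needs
 * DIV_ROUND_UP(num_gpu_pages * 4096 / 4, 0xfffff) loops of 5 DW each; the
 * extra 11 DW reserved by radeon_ring_lock() leaves headroom for the
 * fence/trap/HDP-flush packets emitted at fence time and any semaphore
 * waits added by radeon_sync_rings().
 */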
/**
 * evergreen_dma_is_lockup - Check if the DMA engine is locked up
 *
 * @rdev: radeon_device pointer
 * @ring: radeon_ring structure holding ring information
 *
 * Check if the async DMA engine is locked up.
 * Returns true if the engine appears to be locked up, false if not.
 */
bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);

	if (!(reset_mask & RADEON_RESET_DMA)) {
		/* DMA engine is not hung; refresh the lockup tracking */
		radeon_ring_lockup_update(rdev, ring);
		return false;
	}
	return radeon_ring_test_lockup(rdev, ring);
}
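
/*
 * How these helpers are consumed (illustrative sketch, not part of this
 * file): radeon_asic.c wires them into the DMA ring's dispatch table,
 * roughly like
 *
 *	static struct radeon_asic_ring evergreen_dma_ring = {
 *		.ib_execute = &evergreen_dma_ring_ib_execute,
 *		.emit_fence = &evergreen_dma_fence_ring_emit,
 *		.is_lockup = &evergreen_dma_is_lockup,
 *		...
 *	};
 *
 * and evergreen_copy_dma is registered as the asic copy callback used by
 * the radeon ttm code. Exact field names follow radeon_asic.h and may
 * differ between kernel versions.
 */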