/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 * Copyright 2009 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "r600d.h"
#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8_8_8_8           0x1a
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a

#define RECT_UNIT_H           32
#define RECT_UNIT_W           (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
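
/*
 * Note (added for illustration): assuming RADEON_GPU_PAGE_SIZE is 4096
 * bytes, one GPU page holds 4096 / 4 = 1024 32-bit texels, so a unit
 * rectangle of RECT_UNIT_W x RECT_UNIT_H = 32 x 32 texels maps exactly
 * onto one page.
 */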

/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cb_color_info;
	int pitch, slice;

	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = CB_FORMAT(format) |
		CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
		CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);

	/* rv6xx needs a surface base update after CB_COLOR0_BASE changes */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(ring, 2 << 0);
	}

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, (pitch << 0) | (slice << 10));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, cb_color_info);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);
}

/* emits 5dw */
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(ring, sync_type);
	radeon_ring_write(ring, cp_coher_size);
	radeon_ring_write(ring, mc_addr >> 8);
	radeon_ring_write(ring, 10); /* poll interval */
}
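
/*
 * Worked example (added for illustration): the coherency size is in
 * 256-byte units, so syncing a 4096-byte surface gives
 * cp_coher_size = (4096 + 255) >> 8 = 16.
 */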

/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, sq_pgm_resources);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, gpu_addr >> 8);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, sq_pgm_resources | (1 << 28));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 2);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, 0);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
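
/*
 * An observation (not from the original comments): the VS and PS images
 * sit at consecutive 256-byte-aligned offsets within the same shader bo
 * (see r600_blit_init), so flushing 512 bytes starting at the VS offset
 * appears to cover both shader programs with one SH_ACTION surface sync.
 */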

/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_vtx_constant_word2;

	sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
		SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
	sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(ring, 0x460);
	radeon_ring_write(ring, gpu_addr & 0xffffffff);
	radeon_ring_write(ring, 48 - 1);
	radeon_ring_write(ring, sq_vtx_constant_word2);
	radeon_ring_write(ring, 1 << 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}

/* emits 9 + 1 sync (5) = 14 */
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr, u32 size)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
		S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
	sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
		S_038000_TEX_WIDTH(w - 1);

	sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
	sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);

	sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
		S_038010_DST_SEL_X(SQ_SEL_X) |
		S_038010_DST_SEL_Y(SQ_SEL_Y) |
		S_038010_DST_SEL_Z(SQ_SEL_Z) |
		S_038010_DST_SEL_W(SQ_SEL_W);

	cp_set_surface_sync(rdev,
			    PACKET3_TC_ACTION_ENA, size, gpu_addr);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, sq_tex_resource_word0);
	radeon_ring_write(ring, sq_tex_resource_word1);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, gpu_addr >> 8);
	radeon_ring_write(ring, sq_tex_resource_word4);
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}

/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, DI_PT_RECTLIST);

	radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 2) |
#endif
			  DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(ring, 1);

	radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(ring, 3);
	radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
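
/*
 * For reference (not part of the original comments): the draw above
 * auto-generates 3 indices for a single RECTLIST primitive, so each blit
 * rectangle costs one 3-vertex draw, with the vertex data fetched from
 * the small vertex buffer filled in by r600_kms_blit_copy().
 */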

static void
set_default_state(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	u64 gpu_addr;
	int dwords;

	/* per-family SQ resource defaults */
	switch (rdev->family) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = VC_ENABLE;

	sq_config |= (DX9_CONSTS |
		      ALU_INST_PREFER_VECTOR |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
				  NUM_VS_GPRS(num_vs_gprs) |
				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
				  NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
				   NUM_VS_THREADS(num_vs_threads) |
				   NUM_GS_THREADS(num_gs_threads) |
				   NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* emit an IB pointing at default state */
	dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (gpu_addr & 0xFFFFFFFC));
	radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(ring, dwords);

	/* SQ config */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
	radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(ring, sq_config);
	radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
	radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
	radeon_ring_write(ring, sq_thread_resource_mgmt);
	radeon_ring_write(ring, sq_stack_resource_mgmt_1);
	radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}

#define I2F_MAX_BITS 15
#define I2F_MAX_INPUT  ((1 << I2F_MAX_BITS) - 1)
#define I2F_SHIFT  (24 - I2F_MAX_BITS)

/*
 * Converts unsigned integer into 32-bit IEEE floating point representation.
 * Conversion is not universal and only works for the range from 0
 * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
 * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
 * I2F_MAX_BITS can be increased, but that will add to the loop iterations
 * and slow us down. Conversion is done by shifting the input and counting
 * down until the first 1 reaches bit position 23. The resulting counter
 * and the shifted input are, respectively, the exponent and the fraction.
 * The sign is always zero.
 */
static uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	WARN_ON_ONCE(input > I2F_MAX_INPUT);

	if ((input & I2F_MAX_INPUT) == 0)
		result = 0;
	else {
		exponent = 126 + I2F_MAX_BITS;
		fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;

		for (i = 0; i < I2F_MAX_BITS; i++) {
			if (fraction & 0x800000)
				break;	/* found the leading 1 */
			fraction = fraction << 1;
			exponent = exponent - 1;
		}
		result = exponent << 23 | (fraction & 0x7fffff);
	}
	return result;
}
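
/*
 * Worked example (added for illustration): i2f(8) loads
 * fraction = 8 << I2F_SHIFT = 0x1000 and exponent = 141; eleven shifts
 * bring the leading 1 to bit 23, leaving exponent = 130 = 0x82 and a
 * zero mantissa, so the result is 0x82 << 23 = 0x41000000, the IEEE-754
 * encoding of 8.0f.
 */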

int r600_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int i, r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.primitives.set_render_target = set_render_target;
	rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
	rdev->r600_blit.primitives.set_shaders = set_shaders;
	rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
	rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
	rdev->r600_blit.primitives.set_scissors = set_scissors;
	rdev->r600_blit.primitives.draw_auto = draw_auto;
	rdev->r600_blit.primitives.set_default_state = set_default_state;

	rdev->r600_blit.ring_size_common = 40; /* shaders + def state */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
	rdev->r600_blit.ring_size_common += 5; /* done copy */
	rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */

	rdev->r600_blit.ring_size_per_loop = 76;
	/* set_render_target emits 2 extra dwords on rv6xx */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		rdev->r600_blit.ring_size_per_loop += 2;

	rdev->r600_blit.max_dim = 8192;

	/* pin copy shader into vram if already initialized */
	if (rdev->r600_blit.shader_obj)
		goto done;

	mutex_init(&rdev->r600_blit.mutex);
	rdev->r600_blit.state_offset = 0;

	if (rdev->family >= CHIP_RV770)
		rdev->r600_blit.state_len = r7xx_default_size;
	else
		rdev->r600_blit.state_len = r6xx_default_size;

	/* pad the default state to a multiple of 16 dwords with type-2 nops */
	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	obj_size += r6xx_vs_size * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	obj_size += r6xx_ps_size * 4;
	obj_size = ALIGN(obj_size, 256);

	r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
			     &rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("r600 failed to allocate shader\n");
		return r;
	}

	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}
	if (rdev->family >= CHIP_RV770)
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r7xx_default_state, rdev->r600_blit.state_len * 4);
	else
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r6xx_default_state, rdev->r600_blit.state_len * 4);
	if (num_packet2s)
		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
			    packet2s, num_packet2s * 4);
	for (i = 0; i < r6xx_vs_size; i++)
		*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
	for (i = 0; i < r6xx_ps_size; i++)
		*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);

done:
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->r600_blit.shader_gpu_addr);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	if (r) {
		dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
		return r;
	}

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
	return 0;
}

void r600_blit_fini(struct radeon_device *rdev)
{
	int r;

	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
	if (rdev->r600_blit.shader_obj == NULL)
		return;
	/* If we can't reserve the bo, unref should be enough to destroy
	 * it when it becomes idle.
	 */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (!r) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
	radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
{
	int r;

	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
			  &rdev->r600_blit.vb_ib, size);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

	rdev->r600_blit.vb_total = size;
	rdev->r600_blit.vb_used = 0;
	return 0;
}

static void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
				      int *width, int *height, int max_dim)
{
	unsigned max_pages;
	unsigned pages = num_gpu_pages;
	int w, h;

	if (num_gpu_pages == 0) {
		/* not supposed to be called with no pages, but just in case */
		h = 0;
		w = 0;
		pages = 0;
		WARN_ON(1);
	} else {
		int rect_order = 2;
		h = RECT_UNIT_H;
		while (num_gpu_pages / rect_order) {
			h *= 2;
			rect_order *= 4;
			if (h >= max_dim) {
				h = max_dim;
				break;
			}
		}
		max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
		if (pages > max_pages)
			pages = max_pages;
		w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
		w = (w / RECT_UNIT_W) * RECT_UNIT_W;
		pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
		BUG_ON(!pages);
	}

	DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);

	/* return width and height only if the caller wants them */
	if (height)
		*height = h;
	if (width)
		*width = w;

	return pages;
}
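
/*
 * Sizing example (added for illustration, assuming 4K GPU pages so that
 * RECT_UNIT_W = RECT_UNIT_H = 32): for num_gpu_pages = 8 the loop grows
 * h to 128, then w = (8 * 32 * 32) / 128 = 64, so all 8 pages are
 * covered by a single 64x128 rectangle.
 */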

int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;
	int ring_size;
	int num_loops = 0;
	int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;

	/* num loops */
	while (num_gpu_pages) {
		num_gpu_pages -=
			r600_blit_create_rect(num_gpu_pages, NULL, NULL,
					      rdev->r600_blit.max_dim);
		num_loops++;
	}

	/* 48 bytes for vertex per loop */
	r = r600_vb_ib_get(rdev, (num_loops*48)+256);
	if (r)
		return r;

	/* calculate number of loops correctly */
	ring_size = num_loops * dwords_per_loop;
	ring_size += rdev->r600_blit.ring_size_common;
	r = radeon_ring_lock(rdev, ring, ring_size);
	if (r)
		return r;

	rdev->r600_blit.primitives.set_default_state(rdev);
	rdev->r600_blit.primitives.set_shaders(rdev);
	return 0;
}

void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			unsigned num_gpu_pages)
{
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n",
		  src_gpu_addr, dst_gpu_addr,
		  num_gpu_pages, rdev->r600_blit.vb_used);
	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);

	while (num_gpu_pages) {
		int w, h;
		unsigned size_in_bytes;
		unsigned pages_per_loop =
			r600_blit_create_rect(num_gpu_pages, &w, &h,
					      rdev->r600_blit.max_dim);

		size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
		DRM_DEBUG("rectangle w=%d h=%d\n", w, h);

		if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
			WARN_ON(1);
		}

		/* three vertices per rect, 4 dwords (x, y, u, v) each */
		vb[0] = 0;
		vb[1] = 0;
		vb[2] = 0;
		vb[3] = 0;

		vb[4] = 0;
		vb[5] = i2f(h);
		vb[6] = 0;
		vb[7] = i2f(h);

		vb[8] = i2f(w);
		vb[9] = i2f(h);
		vb[10] = i2f(w);
		vb[11] = i2f(h);

		rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
							    w, h, w, src_gpu_addr, size_in_bytes);
		rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
							     w, h, dst_gpu_addr);
		rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
		vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
		rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
		rdev->r600_blit.primitives.draw_auto(rdev);
		rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
				    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
				    size_in_bytes, dst_gpu_addr);

		vb += 12;
		rdev->r600_blit.vb_used += 4*12;
		src_gpu_addr += size_in_bytes;
		dst_gpu_addr += size_in_bytes;
		num_gpu_pages -= pages_per_loop;
	}
}