#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

#include "r600d.h"
#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;

	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(rdev, 2 << 0);
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, cb_color_info);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);
}

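/* Dword accounting for the comment above: each SET_CONTEXT_REG burst here is
 * 3 dwords (header, register offset, value), and seven of them make 21;
 * rv6xx parts (R600 < family < RV770) also need the 2-dword
 * SURFACE_BASE_UPDATE, giving 23.
 */
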
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(rdev, sync_type);
	radeon_ring_write(rdev, cp_coher_size);
	radeon_ring_write(rdev, mc_addr >> 8);
	radeon_ring_write(rdev, 10); /* poll interval */
}

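/* CP_COHER_SIZE is expressed in 256-byte units: a 512-byte range becomes
 * (512 + 255) >> 8 = 2, while 0xffffffff passes through unchanged as the
 * special "whole address space" value.
 */
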
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 2);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}

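/* The trailing SH_ACTION_ENA sync covers 512 bytes from the VS base, which
 * spans both small blit shaders (r600_blit_init() places the PS 256 bytes
 * after the VS), so the SQ refetches the freshly written programs.
 */
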
/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	u32 sq_vtx_constant_word2;

	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0x460);
	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
	radeon_ring_write(rdev, 48 - 1);
	radeon_ring_write(rdev, sq_vtx_constant_word2);
	radeon_ring_write(rdev, 1 << 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}

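/* RV610/RV620/RS780/RS880/RV710 have no vertex cache, so vertex fetches go
 * through the texture cache on those parts; that is why the sync above
 * selects TC rather than VC for them. The 48-byte range is exactly one
 * quad: 3 vertices at the 16-byte stride encoded in word2.
 */
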
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}

static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}

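/* Bit 31 in the generic/window scissor TL words is WINDOW_OFFSET_DISABLE,
 * making those scissors absolute rather than window-relative.
 */
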
static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);

	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}

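/* DRAW_INDEX_AUTO with 3 vertices: the VGT generates indices 0..2 itself
 * (DI_SRC_SEL_AUTO_INDEX) and DI_PT_RECTLIST expands the three corners into
 * the full destination rectangle, so one tiny draw covers each blit quad.
 */
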
static void
set_default_state(struct radeon_device *rdev)
{
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	u64 gpu_addr;
	int dwords;

	switch (rdev->family) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = VC_ENABLE;

	sq_config |= (DX9_CONSTS |
		      ALU_INST_PREFER_VECTOR |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
				  NUM_VS_GPRS(num_vs_gprs) |
				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
				  NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
				   NUM_VS_THREADS(num_vs_threads) |
				   NUM_GS_THREADS(num_gs_threads) |
				   NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* emit an IB pointing at default state */
	dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(rdev, dwords);

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_config);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
	radeon_ring_write(rdev, sq_thread_resource_mgmt);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}

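/* Total here: 4-dword INDIRECT_BUFFER + 2-dword EVENT_WRITE + 8-dword
 * SET_CONFIG_REG burst = 14 dwords, the "14" counted against
 * set_default_state() in r600_blit_prepare_copy(). The IB length is rounded
 * up because r600_blit_init() pads the state block to a 16-dword multiple
 * with PACKET2 NOPs.
 */
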
static inline uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127; */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^^15 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;
			else {
				fraction = fraction << 1; /* keep
							     shifting left until top bit = 1 */
				exponent = exponent - 1;
			}
		}
		result = exponent << 23 | (fraction & 0x7fffff); /* mask
								    off top bit; assumed 1 */
	}
	return result;
}

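/* Worked example: i2f(16) loads fraction = 16 << 10 = 0x4000 with
 * exponent = 140; nine shifts bring the top bit to 0x800000 and drop the
 * exponent to 131, so the result is 131 << 23 = 0x41800000, the IEEE-754
 * pattern for 16.0f. Inputs must stay below 2^14 (the 0x3fff mask) for the
 * shortcut to hold.
 */
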
int r600_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family >= CHIP_RV770)
		rdev->r600_blit.state_len = r7xx_default_size;
	else
		rdev->r600_blit.state_len = r6xx_default_size;

	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = PACKET2(0);
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	obj_size += r6xx_vs_size * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	obj_size += r6xx_ps_size * 4;
	obj_size = ALIGN(obj_size, 256);

	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
			     &rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("r600 failed to allocate shader\n");
		return r;
	}

	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}
	if (rdev->family >= CHIP_RV770)
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r7xx_default_state, rdev->r600_blit.state_len * 4);
	else
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r6xx_default_state, rdev->r600_blit.state_len * 4);
	if (num_packet2s)
		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
			    packet2s, num_packet2s * 4);
	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	return 0;
}

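/* Resulting bo layout: [default state padded to 16 dwords][pad to 256]
 * [vertex shader][pad to 256][pixel shader]. vs_offset and ps_offset are
 * therefore 256-byte aligned, as required by the >> 8 addresses that
 * set_shaders() writes into SQ_PGM_START_VS/PS.
 */
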
void r600_blit_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->r600_blit.shader_obj == NULL)
		return;
	/* If we can't reserve the bo, unref should be enough to destroy
	 * it when it becomes idle.
	 */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (!r) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
	radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;

	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}

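/* 64KB of vertex buffer at 48 bytes per blit rectangle allows roughly 1365
 * quads per IB before the overflow WARN_ON in r600_kms_blit_copy() trips.
 */
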
void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
	int r;
	int ring_size, line_size;
	int max_size;
	/* loops of emits 64 + fence emit possible */
	int dwords_per_loop = 76, num_loops;

	r = r600_vb_ib_get(rdev);
	if (r)
		return r;

	/* set_render_target emits 2 extra dwords on rv6xx */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		dwords_per_loop += 2;

	/* 8 bpp vs 32 bpp for xfer unit */
	if (size_bytes & 3)
		line_size = 8192;
	else
		line_size = 8192 * 4;

	max_size = 8192 * line_size;

	/* major loops cover the max size transfer */
	num_loops = ((size_bytes + max_size) / max_size);
	/* minor loops cover the extra non aligned bits */
	num_loops += ((size_bytes % line_size) ? 1 : 0);
	/* calculate number of loops correctly */
	ring_size = num_loops * dwords_per_loop;
	/* set default + shaders */
	ring_size += 40; /* shaders + def state */
	ring_size += 7; /* fence emit for VB IB */
	ring_size += 5; /* done copy */
	ring_size += 7; /* fence emit for done copy */
	r = radeon_ring_lock(rdev, ring_size);
	if (r)
		return r;

	set_default_state(rdev); /* 14 */
	set_shaders(rdev); /* 26 */
	return 0;
}

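/* Worked example: a 1MB copy with 4-byte-aligned addresses uses
 * line_size = 32768, so max_size = 8192 * 32768 covers it in one major loop
 * and no minor loop: ring_size = 76 (78 on rv6xx) + 40 + 7 + 5 + 7 = 135
 * dwords requested from the ring.
 */
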
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}

void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes)
{
	int max_bytes;
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
		  size_bytes, rdev->r600_blit.vb_used);
	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);

#if 0
				r600_vb_ib_put(rdev);

				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
#endif
			}

			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;

			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);

			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

			/* 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
			}
#if 0
			if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!rdev->blit_vb)
					return;

				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
			}
#endif

			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;

			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);

			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);
			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size) / 4, h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			/* 78 ring dwords per loop */
			vb += 12;
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}

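/* Per-loop accounting behind the "78 ring dwords" note: tex 9 + sync 5 +
 * render target 21..23 + scissors 12 + vtx 14 + draw 10 + sync 5 = 76-78
 * dwords, matching dwords_per_loop in r600_blit_prepare_copy(). Each quad
 * consumes 12 vb dwords: three vertices of (x, y, u, v) floats, i.e. the
 * 48 bytes checked against vb_total above.
 */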