#include "radeon_drm.h"
#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
		  int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;

	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	/* rv6xx parts (other than R600 itself) need a SURFACE_BASE_UPDATE
	 * after programming CB_COLOR0_BASE */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
		radeon_ring_write(rdev, 2 << 0);
	}

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (pitch << 0) | (slice << 10));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, cb_color_info);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);
}
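/* Dword accounting for the comment above: seven 3-dword SET_CONTEXT_REG
 * packets (header, register offset, value) make 21 dwords; the rv6xx-only
 * SURFACE_BASE_UPDATE branch adds 2 more, giving 23 on those parts. */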
/* emits 5 */
static void
cp_set_surface_sync(struct radeon_device *rdev,
		    u32 sync_type, u32 size,
		    u64 mc_addr)
{
	u32 cp_coher_size;

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
	radeon_ring_write(rdev, sync_type);
	radeon_ring_write(rdev, cp_coher_size);
	radeon_ring_write(rdev, mc_addr >> 8);
	radeon_ring_write(rdev, 10); /* poll interval */
}
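/* The packet is 5 dwords: header, sync_type, coherence size, base address,
 * poll interval. CP_COHER_SIZE is in 256-byte units, so e.g. a 48-byte
 * surface rounds up to (48 + 255) >> 8 = 1 unit, while 0xffffffff passes
 * straight through as the "whole address space" value. */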
/* emits 21dw + 1 surface sync (5dw) = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
	u64 gpu_addr;
	u32 sq_pgm_resources;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	/* VS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	/* PS */
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, gpu_addr >> 8);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 2);

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, 0);

	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
	cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
}
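/* The trailing SH_ACTION_ENA sync invalidates the shader cache over the
 * shader bo so the SQ fetches the freshly uploaded VS/PS code rather than
 * stale cache contents; 512 bytes from vs_offset covers both of the small
 * blit shaders here. */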
/* emits 9 + 1 sync (5) = 14 */
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
	u32 sq_vtx_constant_word2;

	sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0x460);
	radeon_ring_write(rdev, gpu_addr & 0xffffffff);
	radeon_ring_write(rdev, 48 - 1);
	radeon_ring_write(rdev, sq_vtx_constant_word2);
	radeon_ring_write(rdev, 1 << 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);

	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		cp_set_surface_sync(rdev,
				    PACKET3_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(rdev,
				    PACKET3_VC_ACTION_ENA, 48, gpu_addr);
}
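/* RV610/RV620/RS780/RS880/RV710 have no dedicated vertex cache and fetch
 * vertex data through the texture cache, so they need TC_ACTION_ENA here;
 * the other parts invalidate the vertex cache with VC_ACTION_ENA. */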
/* emits 9 */
static void
set_tex_resource(struct radeon_device *rdev,
		 int format, int w, int h, int pitch,
		 u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	/* destination swizzle: X, Y, Z, W */
	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, sq_tex_resource_word0);
	radeon_ring_write(rdev, sq_tex_resource_word1);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, gpu_addr >> 8);
	radeon_ring_write(rdev, sq_tex_resource_word4);
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
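/* Units worth noting: the pitch field is programmed in 8-texel granularity
 * ((pitch >> 3) - 1), while width and height go in minus one, per the
 * r6xx texture resource word layout. */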
/* emits 12 */
static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
	     int x2, int y2)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));

	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
	radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
}
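/* All three scissor pairs (screen, generic, window) are set to the same
 * rect; bit 31 in the generic/window TL words should be the
 * WINDOW_OFFSET_DISABLE bit, so the rect is taken as absolute coordinates. */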
static void
draw_auto(struct radeon_device *rdev)
{
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, DI_PT_RECTLIST);

	radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
	radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT);

	radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
	radeon_ring_write(rdev, 1);

	radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
	radeon_ring_write(rdev, 3);
	radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
}
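/* DRAW_INDEX_AUTO of 3 vertices with DI_PT_RECTLIST draws one rectangle:
 * a rectlist takes three corners from the vertex buffer and the hardware
 * infers the fourth, which is why each blit loop below writes exactly
 * three vertices (12 dwords) into the vertex buffer. */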
static void
set_default_state(struct radeon_device *rdev)
{
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	u64 gpu_addr;
	int dwords;

	switch (rdev->family) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}

	/* same no-vertex-cache parts as in set_vtx_resource */
	if ((rdev->family == CHIP_RV610) ||
	    (rdev->family == CHIP_RV620) ||
	    (rdev->family == CHIP_RS780) ||
	    (rdev->family == CHIP_RS880) ||
	    (rdev->family == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = VC_ENABLE;

	sq_config |= (DX9_CONSTS |
		      ALU_INST_PREFER_VECTOR |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
				  NUM_VS_GPRS(num_vs_gprs) |
				  NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
				  NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
				   NUM_VS_THREADS(num_vs_threads) |
				   NUM_GS_THREADS(num_gs_threads) |
				   NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	/* emit an IB pointing at default state */
	dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
	gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
	radeon_ring_write(rdev, dwords);

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);

	/* SQ config */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
	radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, sq_config);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
	radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
	radeon_ring_write(rdev, sq_thread_resource_mgmt);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
	radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
}
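/* Ring dword accounting: INDIRECT_BUFFER (4) + EVENT_WRITE (2) + the
 * 6-register SET_CONFIG_REG burst (8) = 14, which is the "14" that
 * r600_blit_prepare_copy budgets for set_default_state(). */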
static inline uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127 */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^14 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;

			fraction = fraction << 1; /* keep
						     shifting left until top bit = 1 */
			exponent = exponent - 1;
		}
		result = (exponent << 23) | (fraction & 0x7fffff); /* mask
								      off top bit; assumed 1 */
	}
	return result;
}
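/* Quick sanity check of the conversion: i2f(1) starts with fraction =
 * 1 << 10 = 0x400 and shifts left 13 times before bit 23 is set, leaving
 * exponent = 140 - 13 = 127, so result = 127 << 23 = 0x3f800000 = 1.0f. */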
int r600_blit_init(struct radeon_device *rdev)
{
	u32 obj_size;
	int r, dwords;
	void *ptr;
	u32 packet2s[16];
	int num_packet2s = 0;

	rdev->r600_blit.state_offset = 0;

	if (rdev->family >= CHIP_RV770)
		rdev->r600_blit.state_len = r7xx_default_size;
	else
		rdev->r600_blit.state_len = r6xx_default_size;

	/* pad the default state out to a multiple of 16 dwords with NOPs */
	dwords = rdev->r600_blit.state_len;
	while (dwords & 0xf) {
		packet2s[num_packet2s++] = PACKET2(0);
		dwords++;
	}

	obj_size = dwords * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.vs_offset = obj_size;
	obj_size += r6xx_vs_size * 4;
	obj_size = ALIGN(obj_size, 256);

	rdev->r600_blit.ps_offset = obj_size;
	obj_size += r6xx_ps_size * 4;
	obj_size = ALIGN(obj_size, 256);

	r = radeon_object_create(rdev, NULL, obj_size,
				 true, RADEON_GEM_DOMAIN_VRAM,
				 false, &rdev->r600_blit.shader_obj);
	if (r) {
		DRM_ERROR("r600 failed to allocate shader\n");
		return r;
	}

	DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
		  obj_size,
		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);

	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r) {
		DRM_ERROR("failed to map blit object %d\n", r);
		return r;
	}

	if (rdev->family >= CHIP_RV770)
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r7xx_default_state, rdev->r600_blit.state_len * 4);
	else
		memcpy_toio(ptr + rdev->r600_blit.state_offset,
			    r6xx_default_state, rdev->r600_blit.state_len * 4);
	memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
		    packet2s, num_packet2s * 4);

	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);

	radeon_object_kunmap(rdev->r600_blit.shader_obj);
	return 0;
}
void r600_blit_fini(struct radeon_device *rdev)
{
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	radeon_object_unref(&rdev->r600_blit.shader_obj);
}
int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;

	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}

	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}
void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs);
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
	int r;
	int ring_size, line_size;
	int max_size;
	/* loops of emits 64 + fence emit possible */
	int dwords_per_loop = 76, num_loops;

	r = r600_vb_ib_get(rdev);
	WARN_ON(r);

	/* set_render_target emits 2 extra dwords on rv6xx */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		dwords_per_loop += 2;

	/* 8 bpp vs 32 bpp for xfer unit */
	if (size_bytes & 3)
		line_size = 8192;
	else
		line_size = 8192 * 4;

	max_size = 8192 * line_size;

	/* major loops cover the max size transfer */
	num_loops = ((size_bytes + max_size) / max_size);
	/* minor loops cover the extra non aligned bits */
	num_loops += ((size_bytes % line_size) ? 1 : 0);
	/* calculate number of loops correctly */
	ring_size = num_loops * dwords_per_loop;
	/* set default + shaders */
	ring_size += 40; /* shaders + def state */
	ring_size += 3; /* fence emit for VB IB */
	ring_size += 5; /* done copy */
	ring_size += 3; /* fence emit for done copy */
	r = radeon_ring_lock(rdev, ring_size);
	WARN_ON(r);

	set_default_state(rdev); /* 14 */
	set_shaders(rdev); /* 26 */
	return 0;
}
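/* set_default_state (14) + set_shaders (26) are the 40 dwords reserved by
 * "ring_size += 40" above. */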
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes)
{
	int max_bytes;
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
		  size_bytes, rdev->r600_blit.vb_used);
	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		/* unaligned transfer: copy as 8 bpp, one byte per texel */
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
#if 0
				r600_vb_ib_put(rdev);

				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
#endif
			}

			vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);

			/* three vertices: position (dst) and texcoord (src) */
			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;

			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);

			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors 12 */
			set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		/* dword-aligned transfer: copy as 32 bpp, four bytes per texel */
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
#if 0
				if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) {
					r600_nomm_put_vb(dev);
					r600_nomm_get_vb(dev);
					if (!rdev->blit_vb)
						return;
					set_shaders(dev);
					vb = r600_nomm_get_vb_ptr(dev);
				}
#endif
			}

			vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);

			/* three vertices; coordinates are in 32-bit texels, hence / 4 */
			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;

			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);

			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src 9 */
			set_tex_resource(rdev, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst 23 */
			set_render_target(rdev, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors 12; note the parentheses: the scissor right edge
			 * is (dst_x + cur_size) / 4, not dst_x + (cur_size / 4) */
			set_scissors(rdev, (dst_x / 4), 0, ((dst_x + cur_size) / 4), h);

			/* Vertex buffer setup 14 */
			vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
			set_vtx_resource(rdev, vb_gpu_addr);

			/* draw 10 */
			draw_auto(rdev);

			/* 5 */
			cp_set_surface_sync(rdev,
					    PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			/* 78 ring dwords per loop */
			rdev->r600_blit.vb_used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}