/*
 * Copyright 2009 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"

#include "r600_blit_shaders.h"

#define DI_PT_RECTLIST        0x11
#define DI_INDEX_SIZE_16_BIT  0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2

#define FMT_8                 0x1
#define FMT_5_6_5             0x8
#define FMT_8_8_8_8           0x1a
#define COLOR_8               0x1
#define COLOR_5_6_5           0x8
#define COLOR_8_8_8_8         0x1a
static void
set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64 gpu_addr)
{
	u32 cb_color_info;
	int pitch, slice;
	RING_LOCALS;
	DRM_DEBUG("\n");

	h = ALIGN(h, 8);
	if (h < 8)
		h = 8;

	cb_color_info = ((format << 2) | (1 << 27));
	pitch = (w / 8) - 1;
	slice = ((w * h) / 64) - 1;

	if (((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_R600) &&
	    ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV770)) {
		BEGIN_RING(21 + 2);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
		OUT_RING(CP_PACKET3(R600_IT_SURFACE_BASE_UPDATE, 0));
		OUT_RING(2 << 0);
	} else {
		BEGIN_RING(21);
		OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
		OUT_RING((R600_CB_COLOR0_BASE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
		OUT_RING(gpu_addr >> 8);
	}

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_SIZE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((pitch << 0) | (slice << 10));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_VIEW - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_INFO - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(cb_color_info);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_TILE - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_FRAG - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_CB_COLOR0_MASK - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	ADVANCE_RING();
}
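/*
 * Emit a SURFACE_SYNC packet to flush/invalidate the caches selected by
 * sync_type for the size-byte range starting at mc_addr.  Size and base
 * address are programmed in units of 256 bytes; a size of 0xffffffff
 * selects the full address range.
 */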
static void
cp_set_surface_sync(drm_radeon_private_t *dev_priv,
		    u32 sync_type, u32 size, u64 mc_addr)
{
	u32 cp_coher_size;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (size == 0xffffffff)
		cp_coher_size = 0xffffffff;
	else
		cp_coher_size = ((size + 255) >> 8);

	BEGIN_RING(5);
	OUT_RING(CP_PACKET3(R600_IT_SURFACE_SYNC, 3));
	OUT_RING(sync_type);
	OUT_RING(cp_coher_size);
	OUT_RING((mc_addr >> 8));
	OUT_RING(10); /* poll interval */
	ADVANCE_RING();
}
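/*
 * Copy the r6xx blit vertex and pixel shaders into the first 512 bytes of
 * the current blit vertex buffer (VS at offset 0, PS at offset 256) and
 * point SQ_PGM_START_VS/PS at them.
 */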
static void
set_shaders(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	u64 gpu_addr;
	int i;
	u32 *vs, *ps;
	uint32_t sq_pgm_resources;
	RING_LOCALS;
	DRM_DEBUG("\n");

	/* load shaders */
	vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset);
	ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);

	for (i = 0; i < r6xx_vs_size; i++)
		vs[i] = r6xx_vs[i];
	for (i = 0; i < r6xx_ps_size; i++)
		ps[i] = r6xx_ps[i];

	dev_priv->blit_vb->used = 512;

	gpu_addr = dev_priv->gart_buffers_offset + dev_priv->blit_vb->offset;

	/* setup shader regs */
	sq_pgm_resources = (1 << 0);

	BEGIN_RING(9 + 12);
	/* VS */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(gpu_addr >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_VS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);

	/* PS */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_START_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((gpu_addr + 256) >> 8);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_RESOURCES_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(sq_pgm_resources | (1 << 28));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_EXPORTS_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(2);

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 1));
	OUT_RING((R600_SQ_PGM_CF_OFFSET_PS - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING(0);
	ADVANCE_RING();

	cp_set_surface_sync(dev_priv,
			    R600_SH_ACTION_ENA, 512, gpu_addr);
}
static void
set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
{
	uint32_t sq_vtx_constant_word2;
	RING_LOCALS;
	DRM_DEBUG("\n");

	sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));

	BEGIN_RING(9);
	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
	OUT_RING(0x460);
	OUT_RING(gpu_addr & 0xffffffff);
	OUT_RING(48 - 1);
	OUT_RING(sq_vtx_constant_word2);
	OUT_RING(1 << 0);
	OUT_RING(0);
	OUT_RING(0);
	OUT_RING(R600_SQ_TEX_VTX_VALID_BUFFER << 30);
	ADVANCE_RING();

	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
		cp_set_surface_sync(dev_priv,
				    R600_TC_ACTION_ENA, 48, gpu_addr);
	else
		cp_set_surface_sync(dev_priv,
				    R600_VC_ACTION_ENA, 48, gpu_addr);
}
static void
set_tex_resource(drm_radeon_private_t *dev_priv,
		 int format, int w, int h, int pitch, u64 gpu_addr)
{
	uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
	RING_LOCALS;
	DRM_DEBUG("\n");

	if (h < 1)
		h = 1;

	sq_tex_resource_word0 = (1 << 0);
	sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
				  ((w - 1) << 19));

	sq_tex_resource_word1 = (format << 26);
	sq_tex_resource_word1 |= ((h - 1) << 0);

	sq_tex_resource_word4 = ((1 << 14) |
				 (0 << 16) |
				 (1 << 19) |
				 (2 << 22) |
				 (3 << 25));

	BEGIN_RING(9);
	OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
	OUT_RING(0);
	OUT_RING(sq_tex_resource_word0);
	OUT_RING(sq_tex_resource_word1);
	OUT_RING(gpu_addr >> 8);
	OUT_RING(gpu_addr >> 8);
	OUT_RING(sq_tex_resource_word4);
	OUT_RING(0);
	OUT_RING(R600_SQ_TEX_VTX_VALID_TEXTURE << 30);
	ADVANCE_RING();
}
static void
set_scissors(drm_radeon_private_t *dev_priv, int x1, int y1, int x2, int y2)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(12);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_SCREEN_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16));
	OUT_RING((x2 << 0) | (y2 << 16));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_GENERIC_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
	OUT_RING((x2 << 0) | (y2 << 16));

	OUT_RING(CP_PACKET3(R600_IT_SET_CONTEXT_REG, 2));
	OUT_RING((R600_PA_SC_WINDOW_SCISSOR_TL - R600_SET_CONTEXT_REG_OFFSET) >> 2);
	OUT_RING((x1 << 0) | (y1 << 16) | (1 << 31));
	OUT_RING((x2 << 0) | (y2 << 16));
	ADVANCE_RING();
}
static void
draw_auto(drm_radeon_private_t *dev_priv)
{
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_VGT_PRIMITIVE_TYPE - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(DI_PT_RECTLIST);

	OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
	OUT_RING(DI_INDEX_SIZE_16_BIT);

	OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
	OUT_RING(1);

	OUT_RING(CP_PACKET3(R600_IT_DRAW_INDEX_AUTO, 1));
	OUT_RING(3);
	OUT_RING(DI_SRC_SEL_AUTO_INDEX);

	ADVANCE_RING();
	COMMIT_RING();
}
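/*
 * Emit the r6xx/r7xx default state block and program SQ_CONFIG and the
 * GPR/thread/stack resource management registers with per-family values.
 */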
static void
set_default_state(drm_radeon_private_t *dev_priv)
{
	int i;
	u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
	u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
	int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
	int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
	int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
	RING_LOCALS;

	switch ((dev_priv->flags & RADEON_FAMILY_MASK)) {
	case CHIP_R600:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 40;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
	default:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV670:
		num_ps_gprs = 144;
		num_vs_gprs = 40;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 136;
		num_vs_threads = 48;
		num_gs_threads = 4;
		num_es_threads = 4;
		num_ps_stack_entries = 40;
		num_vs_stack_entries = 40;
		num_gs_stack_entries = 32;
		num_es_stack_entries = 16;
		break;
	case CHIP_RV770:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 256;
		num_vs_stack_entries = 256;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV730:
	case CHIP_RV740:
		num_ps_gprs = 84;
		num_vs_gprs = 36;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 188;
		num_vs_threads = 60;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	case CHIP_RV710:
		num_ps_gprs = 192;
		num_vs_gprs = 56;
		num_temp_gprs = 4;
		num_gs_gprs = 0;
		num_es_gprs = 0;
		num_ps_threads = 144;
		num_vs_threads = 48;
		num_gs_threads = 0;
		num_es_threads = 0;
		num_ps_stack_entries = 128;
		num_vs_stack_entries = 128;
		num_gs_stack_entries = 0;
		num_es_stack_entries = 0;
		break;
	}
	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV610) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV620) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS780) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS880) ||
	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV710))
		sq_config = 0;
	else
		sq_config = R600_VC_ENABLE;

	sq_config |= (R600_DX9_CONSTS |
		      R600_ALU_INST_PREFER_VECTOR |
		      R600_PS_PRIO(0) |
		      R600_VS_PRIO(1) |
		      R600_GS_PRIO(2) |
		      R600_ES_PRIO(3));

	sq_gpr_resource_mgmt_1 = (R600_NUM_PS_GPRS(num_ps_gprs) |
				  R600_NUM_VS_GPRS(num_vs_gprs) |
				  R600_NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
	sq_gpr_resource_mgmt_2 = (R600_NUM_GS_GPRS(num_gs_gprs) |
				  R600_NUM_ES_GPRS(num_es_gprs));
	sq_thread_resource_mgmt = (R600_NUM_PS_THREADS(num_ps_threads) |
				   R600_NUM_VS_THREADS(num_vs_threads) |
				   R600_NUM_GS_THREADS(num_gs_threads) |
				   R600_NUM_ES_THREADS(num_es_threads));
	sq_stack_resource_mgmt_1 = (R600_NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
				    R600_NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
	sq_stack_resource_mgmt_2 = (R600_NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
				    R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries));

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) {
		BEGIN_RING(r7xx_default_size + 10);
		for (i = 0; i < r7xx_default_size; i++)
			OUT_RING(r7xx_default_state[i]);
	} else {
		BEGIN_RING(r6xx_default_size + 10);
		for (i = 0; i < r6xx_default_size; i++)
			OUT_RING(r6xx_default_state[i]);
	}
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
	/* SQ config */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 6));
	OUT_RING((R600_SQ_CONFIG - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(sq_config);
	OUT_RING(sq_gpr_resource_mgmt_1);
	OUT_RING(sq_gpr_resource_mgmt_2);
	OUT_RING(sq_thread_resource_mgmt);
	OUT_RING(sq_stack_resource_mgmt_1);
	OUT_RING(sq_stack_resource_mgmt_2);
	ADVANCE_RING();
}
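/*
 * Convert a small unsigned integer to its IEEE-754 single precision bit
 * pattern (only the low 14 bits of the input are used).  For example,
 * i2f(3): fraction = 3 << 10 = 0xc00, shifted left 12 times until bit 23
 * is set (0xc00000) while the exponent drops from 140 to 128, giving
 * (128 << 23) | 0x400000 = 0x40400000, i.e. 3.0f.
 */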
static inline uint32_t i2f(uint32_t input)
{
	u32 result, i, exponent, fraction;

	if ((input & 0x3fff) == 0)
		result = 0; /* 0 is a special case */
	else {
		exponent = 140; /* exponent biased by 127; */
		fraction = (input & 0x3fff) << 10; /* cheat and only
						      handle numbers below 2^^15 */
		for (i = 0; i < 14; i++) {
			if (fraction & 0x800000)
				break;
			else {
				fraction = fraction << 1; /* keep
							     shifting left until top bit = 1 */
				exponent = exponent - 1;
			}
		}
		result = exponent << 23 | (fraction & 0x7fffff); /* mask
								    off top bit; assumed 1 */
	}
	return result;
}
static inline int r600_nomm_get_vb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	dev_priv->blit_vb = radeon_freelist_get(dev);
	if (!dev_priv->blit_vb) {
		DRM_ERROR("Unable to allocate vertex buffer for blit\n");
		return -EAGAIN;
	}
	return 0;
}
static inline void r600_nomm_put_vb(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;

	dev_priv->blit_vb->used = 0;
	radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb);
}
static inline void *r600_nomm_get_vb_ptr(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	return (((char *)dev->agp_buffer_map->handle +
		 dev_priv->blit_vb->offset + dev_priv->blit_vb->used));
}
int
r600_prepare_blit_copy(struct drm_device *dev, struct drm_file *file_priv)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int ret;
	DRM_DEBUG("\n");

	ret = r600_nomm_get_vb(dev);
	if (ret)
		return ret;

	dev_priv->blit_vb->file_priv = file_priv;

	set_default_state(dev_priv);
	set_shaders(dev);

	return 0;
}
void
r600_done_blit_copy(struct drm_device *dev)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_RING(5);
	OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0));
	OUT_RING(R600_CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	OUT_RING(CP_PACKET3(R600_IT_SET_CONFIG_REG, 1));
	OUT_RING((R600_WAIT_UNTIL - R600_SET_CONFIG_REG_OFFSET) >> 2);
	OUT_RING(RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN);

	ADVANCE_RING();
	COMMIT_RING();

	r600_nomm_put_vb(dev);
}
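/*
 * Copy size_bytes from src_gpu_addr to dst_gpu_addr by drawing textured
 * rects.  If everything is dword aligned the copy is done 32 bits per
 * pixel (FMT/COLOR_8_8_8_8), otherwise one byte per pixel (FMT/COLOR_8).
 * The copy is chopped into chunks of at most max_bytes per scanline, with
 * src/dst aligned down to 256 bytes and the remainder handled as an x
 * offset into the first row.
 */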
void
r600_blit_copy(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int size_bytes)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int max_bytes;
	u64 vb_addr;
	u32 *vb;

	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
			}
			vb = r600_nomm_get_vb_ptr(dev);

			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;

			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);

			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
			vb[10] = i2f(src_x + cur_size);
			vb[11] = i2f(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8,
					 src_x + cur_size, h, src_x + cur_size,
					 src_gpu_addr);

			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8,
					  dst_x + cur_size, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, dst_x, 0, dst_x + cur_size, h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	} else {
		max_bytes = 8192 * 4;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = (src_gpu_addr & 255);
			int dst_x = (dst_gpu_addr & 255);
			int h = 1;
			src_gpu_addr = src_gpu_addr & ~255;
			dst_gpu_addr = dst_gpu_addr & ~255;

			if (!src_x && !dst_x) {
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
			}
			vb = r600_nomm_get_vb_ptr(dev);

			vb[0] = i2f(dst_x / 4);
			vb[1] = 0;
			vb[2] = i2f(src_x / 4);
			vb[3] = 0;

			vb[4] = i2f(dst_x / 4);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x / 4);
			vb[7] = i2f(h);

			vb[8] = i2f((dst_x + cur_size) / 4);
			vb[9] = i2f(h);
			vb[10] = i2f((src_x + cur_size) / 4);
			vb[11] = i2f(h);

			/* src */
			set_tex_resource(dev_priv, FMT_8_8_8_8,
					 (src_x + cur_size) / 4,
					 h, (src_x + cur_size) / 4,
					 src_gpu_addr);

			cp_set_surface_sync(dev_priv,
					    R600_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);

			/* dst */
			set_render_target(dev_priv, COLOR_8_8_8_8,
					  (dst_x + cur_size) / 4, h,
					  dst_gpu_addr);

			/* scissors */
			set_scissors(dev_priv, (dst_x / 4), 0, (dst_x + cur_size / 4), h);

			/* Vertex buffer setup */
			vb_addr = dev_priv->gart_buffers_offset +
				dev_priv->blit_vb->offset +
				dev_priv->blit_vb->used;
			set_vtx_resource(dev_priv, vb_addr);

			/* draw */
			draw_auto(dev_priv);

			cp_set_surface_sync(dev_priv,
					    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
					    cur_size * h, dst_gpu_addr);

			dev_priv->blit_vb->used += 12 * 4;

			src_gpu_addr += cur_size * h;
			dst_gpu_addr += cur_size * h;
			size_bytes -= cur_size * h;
		}
	}
}
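/*
 * Blit a w x h rectangle from (sx, sy) in the source surface to (dx, dy)
 * in the destination, used for buffer swaps.  The texture and render
 * target formats are chosen from the bytes-per-pixel of the surfaces.
 */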
void
r600_blit_swap(struct drm_device *dev,
	       uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
	       int sx, int sy, int dx, int dy,
	       int w, int h, int src_pitch, int dst_pitch, int cpp)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	int cb_format, tex_format;
	u64 vb_addr;
	u32 *vb;

	if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) {
		r600_nomm_put_vb(dev);
		r600_nomm_get_vb(dev);
		if (!dev_priv->blit_vb)
			return;

		set_shaders(dev);
	}
	vb = r600_nomm_get_vb_ptr(dev);

	if (cpp == 4) {
		cb_format = COLOR_8_8_8_8;
		tex_format = FMT_8_8_8_8;
	} else if (cpp == 2) {
		cb_format = COLOR_5_6_5;
		tex_format = FMT_5_6_5;
	} else {
		cb_format = COLOR_8;
		tex_format = FMT_8;
	}

	vb[0] = i2f(dx);
	vb[1] = i2f(dy);
	vb[2] = i2f(sx);
	vb[3] = i2f(sy);

	vb[4] = i2f(dx);
	vb[5] = i2f(dy + h);
	vb[6] = i2f(sx);
	vb[7] = i2f(sy + h);

	vb[8] = i2f(dx + w);
	vb[9] = i2f(dy + h);
	vb[10] = i2f(sx + w);
	vb[11] = i2f(sy + h);

	/* src */
	set_tex_resource(dev_priv, tex_format,
			 src_pitch / cpp,
			 sy + h, src_pitch / cpp,
			 src_gpu_addr);

	cp_set_surface_sync(dev_priv,
			    R600_TC_ACTION_ENA, (src_pitch * (sy + h)), src_gpu_addr);

	/* dst */
	set_render_target(dev_priv, cb_format,
			  dst_pitch / cpp, dy + h,
			  dst_gpu_addr);

	/* scissors */
	set_scissors(dev_priv, dx, dy, dx + w, dy + h);

	/* Vertex buffer setup */
	vb_addr = dev_priv->gart_buffers_offset +
		dev_priv->blit_vb->offset +
		dev_priv->blit_vb->used;
	set_vtx_resource(dev_priv, vb_addr);

	/* draw */
	draw_auto(dev_priv);

	cp_set_surface_sync(dev_priv,
			    R600_CB_ACTION_ENA | R600_CB0_DEST_BASE_ENA,
			    dst_pitch * (dy + h), dst_gpu_addr);

	dev_priv->blit_vb->used += 12 * 4;
}