/*
 * Copyright 2003 Tungsten Graphics, inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.  IN NO EVENT SHALL
 * TUNGSTEN GRAPHICS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Keith Whitwell <keithw@tungstengraphics.com>
 */
#include "pipe/p_config.h"
#include "pipe/p_compiler.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_format.h"

#include "translate.h"


#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)

#include "rtasm/rtasm_cpu.h"
#include "rtasm/rtasm_x86sse.h"
struct translate_buffer
{
   const void *base_ptr;
   unsigned stride;
   unsigned max_index;
};

struct translate_buffer_variant
{
   unsigned buffer_index;
   unsigned instance_divisor;
   void *ptr;                    /* updated either per vertex or per instance */
};


#define ELEMENT_BUFFER_INSTANCE_ID  1001
enum
{
   CONST_IDENTITY,
   CONST_INV_127,
   CONST_INV_255,
   CONST_INV_32767,
   CONST_INV_65535,
   CONST_INV_2147483647,
   CONST_255,
   NUM_CONSTS
};

#define C(v) {(float)(v), (float)(v), (float)(v), (float)(v)}
static float consts[NUM_CONSTS][4] = {
   {0, 0, 0, 1},
   C(1.0 / 127.0),
   C(1.0 / 255.0),
   C(1.0 / 32767.0),
   C(1.0 / 65535.0),
   C(1.0 / 2147483647.0),
   C(255.0)
};
#undef C
struct translate_sse
{
   struct translate translate;

   struct x86_function linear_func;
   struct x86_function elt_func;
   struct x86_function elt16_func;
   struct x86_function elt8_func;
   struct x86_function *func;

   PIPE_ALIGN_VAR(16) float consts[NUM_CONSTS][4];
   int8_t reg_to_const[16];
   int8_t const_to_reg[NUM_CONSTS];

   struct translate_buffer buffer[PIPE_MAX_ATTRIBS];
   unsigned nr_buffers;

   /* Multiple buffer variants can map to a single buffer. */
   struct translate_buffer_variant buffer_variant[PIPE_MAX_ATTRIBS];
   unsigned nr_buffer_variants;

   /* Multiple elements can map to a single buffer variant. */
   unsigned element_to_buffer_variant[PIPE_MAX_ATTRIBS];

   boolean use_instancing;
   unsigned instance_id;

   /* these are actually known values, but putting them in a struct
    * like this is helpful to keep them in sync across the file.
    */
   struct x86_reg tmp_EAX;
   struct x86_reg tmp2_EDX;
   struct x86_reg src_ECX;
   struct x86_reg idx_ESI;      /* either start+i or &elt[i] */
   struct x86_reg machine_EDI;
   struct x86_reg outbuf_EBX;
   struct x86_reg count_EBP;    /* decrements to zero */
};
static int get_offset( const void *a, const void *b )
{
   return (const char *)b - (const char *)a;
}
static struct x86_reg get_const( struct translate_sse *p, unsigned id )
{
   unsigned i;
   struct x86_reg reg;

   if(p->const_to_reg[id] >= 0)
      return x86_make_reg(file_XMM, p->const_to_reg[id]);

   for(i = 2; i < 8; ++i)
   {
      if(p->reg_to_const[i] < 0)
         break;
   }

   /* TODO: be smarter here */
   if(i == 8)
      --i;

   reg = x86_make_reg(file_XMM, i);

   if(p->reg_to_const[i] >= 0)
      p->const_to_reg[p->reg_to_const[i]] = -1;

   p->reg_to_const[i] = id;
   p->const_to_reg[id] = i;

   /* TODO: this should happen outside the loop, if possible */
   sse_movaps(p->func, reg,
              x86_make_disp(p->machine_EDI,
                            get_offset(p, &p->consts[id][0])));

   return reg;
}
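/* Note on the caching scheme above: XMM0 and XMM1 are reserved as scratch
 * registers by the emit helpers below, so get_const() only hands out
 * XMM2-XMM7 and evicts whichever constant previously lived in the chosen
 * register.  The reg_to_const / const_to_reg tables are reset to -1 (0xff)
 * at the start of every generated function (see build_vertex_emit), so
 * cached constants never leak between the linear and indexed variants.
 */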
/* load the data in a SSE2 register, padding with zeros */
static boolean emit_load_sse2( struct translate_sse *p,
                               struct x86_reg data,
                               struct x86_reg src,
                               unsigned size)
{
   struct x86_reg tmpXMM = x86_make_reg(file_XMM, 1);
   struct x86_reg tmp = p->tmp_EAX;
   switch(size)
   {
   case 1:
      x86_movzx8(p->func, tmp, src);
      sse2_movd(p->func, data, tmp);
      break;
   case 2:
      x86_movzx16(p->func, tmp, src);
      sse2_movd(p->func, data, tmp);
      break;
   case 3:
      x86_movzx8(p->func, tmp, x86_make_disp(src, 2));
      x86_shl_imm(p->func, tmp, 16);
      x86_mov16(p->func, tmp, src);
      sse2_movd(p->func, data, tmp);
      break;
   case 4:
      sse2_movd(p->func, data, src);
      break;
   case 6:
      sse2_movd(p->func, data, src);
      x86_movzx16(p->func, tmp, x86_make_disp(src, 4));
      sse2_movd(p->func, tmpXMM, tmp);
      sse2_punpckldq(p->func, data, tmpXMM);
      break;
   case 8:
      sse2_movq(p->func, data, src);
      break;
   case 12:
      sse2_movq(p->func, data, src);
      sse2_movd(p->func, tmpXMM, x86_make_disp(src, 8));
      sse2_punpcklqdq(p->func, data, tmpXMM);
      break;
   case 16:
      sse2_movdqu(p->func, data, src);
      break;
   default:
      return FALSE;
   }
   return TRUE;
}
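/* The "size" argument above is in bytes, not channels: the odd-looking
 * cases (3, 6 and 12) correspond to 3-channel formats with 8-, 16- and
 * 32-bit components respectively, which cannot be fetched with a single
 * wider load without reading past the end of the vertex buffer.
 */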
/* this value can be passed for the out_chans argument */
#define CHANNELS_0001 5

/* this function will load #chans float values, and will
 * pad the register with zeroes at least up to out_chans.
 *
 * If out_chans is set to CHANNELS_0001, then the fourth
 * value will be padded with 1. Only pass this value if
 * chans < 4 or results are undefined.
 */
static void emit_load_float32( struct translate_sse *p,
                               struct x86_reg data,
                               struct x86_reg arg0,
                               unsigned out_chans,
                               unsigned chans)
{
   switch(chans)
   {
   case 1:
      /* a 0 0 0
       * a 0 0 1
       */
      sse_movss(p->func, data, arg0);
      if(out_chans == CHANNELS_0001)
         sse_orps(p->func, data, get_const(p, CONST_IDENTITY) );
      break;
   case 2:
      /* 0 0 0 1
       * a b 0 1
       */
      if(out_chans == CHANNELS_0001)
         sse_shufps(p->func, data, get_const(p, CONST_IDENTITY), SHUF(X, Y, Z, W) );
      else if(out_chans > 2)
         sse_movlhps(p->func, data, get_const(p, CONST_IDENTITY) );
      sse_movlps(p->func, data, arg0);
      break;
   case 3:
      /* Have to jump through some hoops:
       *
       * c 0 0 0
       * c 0 0 1 if out_chans == CHANNELS_0001
       * 0 0 c 0/1
       * a b c 0/1
       */
      sse_movss(p->func, data, x86_make_disp(arg0, 8));
      if(out_chans == CHANNELS_0001)
         sse_shufps(p->func, data, get_const(p, CONST_IDENTITY), SHUF(X,Y,Z,W) );
      sse_shufps(p->func, data, data, SHUF(Y,Z,X,W) );
      sse_movlps(p->func, data, arg0);
      break;
   case 4:
      sse_movups(p->func, data, arg0);
      break;
   }
}
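/* Example: for a 3-channel float position being expanded to a 4-channel
 * output, translate_attr_convert() calls this with chans == 3 and
 * out_chans == CHANNELS_0001, producing (x, y, z, 1.0f) in the XMM register
 * without ever reading a non-existent fourth source component.
 */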
/* this function behaves like emit_load_float32, but loads
   64-bit floating point numbers, converting them to 32-bit
   ones */
static void emit_load_float64to32( struct translate_sse *p,
                                   struct x86_reg data,
                                   struct x86_reg arg0,
                                   unsigned out_chans,
                                   unsigned chans)
{
   struct x86_reg tmpXMM = x86_make_reg(file_XMM, 1);
   switch(chans)
   {
   case 1:
      sse2_movsd(p->func, data, arg0);
      if(out_chans > 1)
         sse2_cvtpd2ps(p->func, data, data);
      else
         sse2_cvtsd2ss(p->func, data, data);
      if(out_chans == CHANNELS_0001)
         sse_shufps(p->func, data, get_const(p, CONST_IDENTITY), SHUF(X, Y, Z, W) );
      break;
   case 2:
      sse2_movupd(p->func, data, arg0);
      sse2_cvtpd2ps(p->func, data, data);
      if(out_chans == CHANNELS_0001)
         sse_shufps(p->func, data, get_const(p, CONST_IDENTITY), SHUF(X, Y, Z, W) );
      else if(out_chans > 2)
         sse_movlhps(p->func, data, get_const(p, CONST_IDENTITY) );
      break;
   case 3:
      sse2_movupd(p->func, data, arg0);
      sse2_cvtpd2ps(p->func, data, data);
      sse2_movsd(p->func, tmpXMM, x86_make_disp(arg0, 16));
      if(out_chans > 3)
         sse2_cvtpd2ps(p->func, tmpXMM, tmpXMM);
      else
         sse2_cvtsd2ss(p->func, tmpXMM, tmpXMM);
      sse_movlhps(p->func, data, tmpXMM);
      if(out_chans == CHANNELS_0001)
         sse_orps(p->func, data, get_const(p, CONST_IDENTITY) );
      break;
   case 4:
      sse2_movupd(p->func, data, arg0);
      sse2_cvtpd2ps(p->func, data, data);
      sse2_movupd(p->func, tmpXMM, x86_make_disp(arg0, 16));
      sse2_cvtpd2ps(p->func, tmpXMM, tmpXMM);
      sse_movlhps(p->func, data, tmpXMM);
      break;
   }
}
static void emit_mov64(struct translate_sse *p, struct x86_reg dst_gpr, struct x86_reg dst_xmm, struct x86_reg src_gpr, struct x86_reg src_xmm)
{
   if(x86_target(p->func) != X86_32)
      x64_mov64(p->func, dst_gpr, src_gpr);
   else
   {
      /* TODO: when/on which CPUs is SSE2 actually better than SSE? */
      if(x86_target_caps(p->func) & X86_SSE2)
         sse2_movq(p->func, dst_xmm, src_xmm);
      else
         sse_movlps(p->func, dst_xmm, src_xmm);
   }
}
static void emit_load64(struct translate_sse *p, struct x86_reg dst_gpr, struct x86_reg dst_xmm, struct x86_reg src)
{
   emit_mov64(p, dst_gpr, dst_xmm, src, src);
}

static void emit_store64(struct translate_sse *p, struct x86_reg dst, struct x86_reg src_gpr, struct x86_reg src_xmm)
{
   emit_mov64(p, dst, dst, src_gpr, src_xmm);
}
static void emit_mov128(struct translate_sse *p, struct x86_reg dst, struct x86_reg src)
{
   if(x86_target_caps(p->func) & X86_SSE2)
      sse2_movdqu(p->func, dst, src);
   else
      sse_movups(p->func, dst, src);
}
/* TODO: this uses unaligned accesses liberally, which is great on Nehalem,
 * but may or may not be good on older processors
 * TODO: may perhaps want to use non-temporal stores here if possible
 */
static void emit_memcpy(struct translate_sse *p, struct x86_reg dst, struct x86_reg src, unsigned size)
{
   struct x86_reg dataXMM = x86_make_reg(file_XMM, 0);
   struct x86_reg dataXMM2 = x86_make_reg(file_XMM, 1);
   struct x86_reg dataGPR = p->tmp_EAX;
   struct x86_reg dataGPR2 = p->tmp2_EDX;

   if(size < 8)
   {
      switch (size)
      {
      case 1:
         x86_mov8(p->func, dataGPR, src);
         x86_mov8(p->func, dst, dataGPR);
         break;
      case 2:
         x86_mov16(p->func, dataGPR, src);
         x86_mov16(p->func, dst, dataGPR);
         break;
      case 3:
         x86_mov16(p->func, dataGPR, src);
         x86_mov8(p->func, dataGPR2, x86_make_disp(src, 2));
         x86_mov16(p->func, dst, dataGPR);
         x86_mov8(p->func, x86_make_disp(dst, 2), dataGPR2);
         break;
      case 4:
         x86_mov(p->func, dataGPR, src);
         x86_mov(p->func, dst, dataGPR);
         break;
      case 6:
         x86_mov(p->func, dataGPR, src);
         x86_mov16(p->func, dataGPR2, x86_make_disp(src, 4));
         x86_mov(p->func, dst, dataGPR);
         x86_mov16(p->func, x86_make_disp(dst, 4), dataGPR2);
         break;
      }
   }
   else if(!(x86_target_caps(p->func) & X86_SSE))
   {
      unsigned i = 0;
      assert((size & 3) == 0);
      for(i = 0; i < size; i += 4)
      {
         x86_mov(p->func, dataGPR, x86_make_disp(src, i));
         x86_mov(p->func, x86_make_disp(dst, i), dataGPR);
      }
   }
   else
   {
      switch(size)
      {
      case 8:
         emit_load64(p, dataGPR, dataXMM, src);
         emit_store64(p, dst, dataGPR, dataXMM);
         break;
      case 12:
         emit_load64(p, dataGPR2, dataXMM, src);
         x86_mov(p->func, dataGPR, x86_make_disp(src, 8));
         emit_store64(p, dst, dataGPR2, dataXMM);
         x86_mov(p->func, x86_make_disp(dst, 8), dataGPR);
         break;
      case 16:
         emit_mov128(p, dataXMM, src);
         emit_mov128(p, dst, dataXMM);
         break;
      case 24:
         emit_mov128(p, dataXMM, src);
         emit_load64(p, dataGPR, dataXMM2, x86_make_disp(src, 16));
         emit_mov128(p, dst, dataXMM);
         emit_store64(p, x86_make_disp(dst, 16), dataGPR, dataXMM2);
         break;
      case 32:
         emit_mov128(p, dataXMM, src);
         emit_mov128(p, dataXMM2, x86_make_disp(src, 16));
         emit_mov128(p, dst, dataXMM);
         emit_mov128(p, x86_make_disp(dst, 16), dataXMM2);
         break;
      }
   }
}
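/* emit_memcpy() is only used for whole attributes whose input and output
 * formats match (see translate_attr below), so "size" is always a format
 * stride: 1-6 bytes are handled with plain GPR moves, 8-32 bytes with one
 * or two 64/128-bit SSE moves when available.
 */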
static boolean translate_attr_convert( struct translate_sse *p,
                                        const struct translate_element *a,
                                        struct x86_reg src,
                                        struct x86_reg dst)
{
   const struct util_format_description* input_desc = util_format_description(a->input_format);
   const struct util_format_description* output_desc = util_format_description(a->output_format);
   unsigned i;
   boolean id_swizzle = TRUE;
   unsigned swizzle[4] = {UTIL_FORMAT_SWIZZLE_NONE, UTIL_FORMAT_SWIZZLE_NONE, UTIL_FORMAT_SWIZZLE_NONE, UTIL_FORMAT_SWIZZLE_NONE};
   unsigned needed_chans = 0;
   unsigned imms[2] = {0, 0x3f800000};

   if(a->output_format == PIPE_FORMAT_NONE || a->input_format == PIPE_FORMAT_NONE)
      return FALSE;

   if(input_desc->channel[0].size & 7)
      return FALSE;

   if(input_desc->colorspace != output_desc->colorspace)
      return FALSE;

   for(i = 1; i < input_desc->nr_channels; ++i)
   {
      if(memcmp(&input_desc->channel[i], &input_desc->channel[0], sizeof(input_desc->channel[0])))
         return FALSE;
   }

   for(i = 1; i < output_desc->nr_channels; ++i)
   {
      if(memcmp(&output_desc->channel[i], &output_desc->channel[0], sizeof(output_desc->channel[0])))
         return FALSE;
   }

   for(i = 0; i < output_desc->nr_channels; ++i)
   {
      if(output_desc->swizzle[i] < 4)
         swizzle[output_desc->swizzle[i]] = input_desc->swizzle[i];
   }

   if((x86_target_caps(p->func) & X86_SSE) && (0
         || a->output_format == PIPE_FORMAT_R32_FLOAT
         || a->output_format == PIPE_FORMAT_R32G32_FLOAT
         || a->output_format == PIPE_FORMAT_R32G32B32_FLOAT
         || a->output_format == PIPE_FORMAT_R32G32B32A32_FLOAT))
   {
      struct x86_reg dataXMM = x86_make_reg(file_XMM, 0);

      for(i = 0; i < output_desc->nr_channels; ++i)
      {
         if(swizzle[i] == UTIL_FORMAT_SWIZZLE_0 && i >= input_desc->nr_channels)
            swizzle[i] = i;
      }

      for(i = 0; i < output_desc->nr_channels; ++i)
      {
         if(swizzle[i] < 4)
            needed_chans = MAX2(needed_chans, swizzle[i] + 1);
         if(swizzle[i] < UTIL_FORMAT_SWIZZLE_0 && swizzle[i] != i)
            id_swizzle = FALSE;
      }

      if(needed_chans > 0)
      {
         switch(input_desc->channel[0].type)
         {
         case UTIL_FORMAT_TYPE_UNSIGNED:
            if(!(x86_target_caps(p->func) & X86_SSE2))
               return FALSE;
            emit_load_sse2(p, dataXMM, src, input_desc->channel[0].size * input_desc->nr_channels >> 3);

            /* TODO: add support for SSE4.1 pmovzx */
            switch(input_desc->channel[0].size)
            {
            case 8:
               /* TODO: this may be inefficient due to get_identity() being used both as a float and integer register */
               sse2_punpcklbw(p->func, dataXMM, get_const(p, CONST_IDENTITY));
               sse2_punpcklbw(p->func, dataXMM, get_const(p, CONST_IDENTITY));
               break;
            case 16:
               sse2_punpcklwd(p->func, dataXMM, get_const(p, CONST_IDENTITY));
               break;
            case 32: /* we lose precision here */
               sse2_psrld_imm(p->func, dataXMM, 1);
               break;
            default:
               return FALSE;
            }
            sse2_cvtdq2ps(p->func, dataXMM, dataXMM);
            if(input_desc->channel[0].normalized)
            {
               struct x86_reg factor;
               switch(input_desc->channel[0].size)
               {
               case 8:
                  factor = get_const(p, CONST_INV_255);
                  break;
               case 16:
                  factor = get_const(p, CONST_INV_65535);
                  break;
               case 32:
                  factor = get_const(p, CONST_INV_2147483647);
                  break;
               default:
                  assert(0);
                  factor = get_const(p, CONST_IDENTITY);
                  break;
               }
               sse_mulps(p->func, dataXMM, factor);
            }
            else if(input_desc->channel[0].size == 32)
               sse_addps(p->func, dataXMM, dataXMM); /* compensate for the bit we threw away to fit u32 into s32 */
            break;
         case UTIL_FORMAT_TYPE_SIGNED:
            if(!(x86_target_caps(p->func) & X86_SSE2))
               return FALSE;
            emit_load_sse2(p, dataXMM, src, input_desc->channel[0].size * input_desc->nr_channels >> 3);

            /* TODO: add support for SSE4.1 pmovsx */
            switch(input_desc->channel[0].size)
            {
            case 8:
               sse2_punpcklbw(p->func, dataXMM, dataXMM);
               sse2_punpcklbw(p->func, dataXMM, dataXMM);
               sse2_psrad_imm(p->func, dataXMM, 24);
               break;
            case 16:
               sse2_punpcklwd(p->func, dataXMM, dataXMM);
               sse2_psrad_imm(p->func, dataXMM, 16);
               break;
            case 32: /* we lose precision here */
               break;
            default:
               return FALSE;
            }
            sse2_cvtdq2ps(p->func, dataXMM, dataXMM);
            if(input_desc->channel[0].normalized)
            {
               struct x86_reg factor;
               switch(input_desc->channel[0].size)
               {
               case 8:
                  factor = get_const(p, CONST_INV_127);
                  break;
               case 16:
                  factor = get_const(p, CONST_INV_32767);
                  break;
               case 32:
                  factor = get_const(p, CONST_INV_2147483647);
                  break;
               default:
                  assert(0);
                  factor = get_const(p, CONST_IDENTITY);
                  break;
               }
               sse_mulps(p->func, dataXMM, factor);
            }
            break;
         case UTIL_FORMAT_TYPE_FLOAT:
            if(input_desc->channel[0].size != 32 && input_desc->channel[0].size != 64)
               return FALSE;
            if(swizzle[3] == UTIL_FORMAT_SWIZZLE_1 && input_desc->nr_channels <= 3)
            {
               swizzle[3] = UTIL_FORMAT_SWIZZLE_W;
               needed_chans = CHANNELS_0001;
            }
            switch(input_desc->channel[0].size)
            {
            case 32:
               emit_load_float32(p, dataXMM, src, needed_chans, input_desc->nr_channels);
               break;
            case 64: /* we lose precision here */
               if(!(x86_target_caps(p->func) & X86_SSE2))
                  return FALSE;
               emit_load_float64to32(p, dataXMM, src, needed_chans, input_desc->nr_channels);
               break;
            default:
               return FALSE;
            }
            break;
         default:
            return FALSE;
         }

         if(!id_swizzle)
            sse_shufps(p->func, dataXMM, dataXMM, SHUF(swizzle[0], swizzle[1], swizzle[2], swizzle[3]) );
      }

      if(output_desc->nr_channels >= 4
            && swizzle[0] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[1] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[2] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[3] < UTIL_FORMAT_SWIZZLE_0)
         sse_movups(p->func, dst, dataXMM);
      else
      {
         if(output_desc->nr_channels >= 2
               && swizzle[0] < UTIL_FORMAT_SWIZZLE_0
               && swizzle[1] < UTIL_FORMAT_SWIZZLE_0)
            sse_movlps(p->func, dst, dataXMM);
         else
         {
            if(swizzle[0] < UTIL_FORMAT_SWIZZLE_0)
               sse_movss(p->func, dst, dataXMM);
            else
               x86_mov_imm(p->func, dst, imms[swizzle[0] - UTIL_FORMAT_SWIZZLE_0]);

            if(output_desc->nr_channels >= 2)
            {
               if(swizzle[1] < UTIL_FORMAT_SWIZZLE_0)
               {
                  sse_shufps(p->func, dataXMM, dataXMM, SHUF(1, 1, 2, 3));
                  sse_movss(p->func, x86_make_disp(dst, 4), dataXMM);
               }
               else
                  x86_mov_imm(p->func, x86_make_disp(dst, 4), imms[swizzle[1] - UTIL_FORMAT_SWIZZLE_0]);
            }
         }

         if(output_desc->nr_channels >= 3)
         {
            if(output_desc->nr_channels >= 4
                  && swizzle[2] < UTIL_FORMAT_SWIZZLE_0
                  && swizzle[3] < UTIL_FORMAT_SWIZZLE_0)
               sse_movhps(p->func, x86_make_disp(dst, 8), dataXMM);
            else
            {
               if(swizzle[2] < UTIL_FORMAT_SWIZZLE_0)
               {
                  sse_shufps(p->func, dataXMM, dataXMM, SHUF(2, 2, 2, 3));
                  sse_movss(p->func, x86_make_disp(dst, 8), dataXMM);
               }
               else
                  x86_mov_imm(p->func, x86_make_disp(dst, 8), imms[swizzle[2] - UTIL_FORMAT_SWIZZLE_0]);

               if(output_desc->nr_channels >= 4)
               {
                  if(swizzle[3] < UTIL_FORMAT_SWIZZLE_0)
                  {
                     sse_shufps(p->func, dataXMM, dataXMM, SHUF(3, 3, 3, 3));
                     sse_movss(p->func, x86_make_disp(dst, 12), dataXMM);
                  }
                  else
                     x86_mov_imm(p->func, x86_make_disp(dst, 12), imms[swizzle[3] - UTIL_FORMAT_SWIZZLE_0]);
               }
            }
         }
      }
      return TRUE;
   }
   else if((x86_target_caps(p->func) & X86_SSE2) && input_desc->channel[0].size == 8 && output_desc->channel[0].size == 16
         && output_desc->channel[0].normalized == input_desc->channel[0].normalized
         && (0
            || (input_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED && output_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED)
            || (input_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED && output_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
            || (input_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED && output_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)))
   {
      struct x86_reg dataXMM = x86_make_reg(file_XMM, 0);
      struct x86_reg tmpXMM = x86_make_reg(file_XMM, 1);
      struct x86_reg tmp = p->tmp_EAX;
      unsigned imms[2] = {0, 1};

      for(i = 0; i < output_desc->nr_channels; ++i)
      {
         if(swizzle[i] == UTIL_FORMAT_SWIZZLE_0 && i >= input_desc->nr_channels)
            swizzle[i] = i;
      }

      for(i = 0; i < output_desc->nr_channels; ++i)
      {
         if(swizzle[i] < 4)
            needed_chans = MAX2(needed_chans, swizzle[i] + 1);
         if(swizzle[i] < UTIL_FORMAT_SWIZZLE_0 && swizzle[i] != i)
            id_swizzle = FALSE;
      }

      if(needed_chans > 0)
      {
         emit_load_sse2(p, dataXMM, src, input_desc->channel[0].size * input_desc->nr_channels >> 3);

         switch(input_desc->channel[0].type)
         {
         case UTIL_FORMAT_TYPE_UNSIGNED:
            if(input_desc->channel[0].normalized)
            {
               sse2_punpcklbw(p->func, dataXMM, dataXMM);
               if(output_desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
                  sse2_psrlw_imm(p->func, dataXMM, 1);
            }
            else
               sse2_punpcklbw(p->func, dataXMM, get_const(p, CONST_IDENTITY));
            break;
         case UTIL_FORMAT_TYPE_SIGNED:
            if(input_desc->channel[0].normalized)
            {
               sse2_movq(p->func, tmpXMM, get_const(p, CONST_IDENTITY));
               sse2_punpcklbw(p->func, tmpXMM, dataXMM);
               sse2_psllw_imm(p->func, dataXMM, 9);
               sse2_psrlw_imm(p->func, dataXMM, 8);
               sse2_por(p->func, tmpXMM, dataXMM);
               sse2_psrlw_imm(p->func, dataXMM, 7);
               sse2_por(p->func, tmpXMM, dataXMM);
               {
                  struct x86_reg t = dataXMM;
                  dataXMM = tmpXMM;
                  tmpXMM = t;
               }
            }
            else
            {
               sse2_punpcklbw(p->func, dataXMM, dataXMM);
               sse2_psraw_imm(p->func, dataXMM, 8);
            }
            break;
         }

         if(output_desc->channel[0].normalized)
            imms[1] = (output_desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED) ? 0xffff : 0x7fff;

         if(!id_swizzle)
            sse2_pshuflw(p->func, dataXMM, dataXMM, (swizzle[0] & 3) | ((swizzle[1] & 3) << 2) | ((swizzle[2] & 3) << 4) | ((swizzle[3] & 3) << 6));
      }

      if(output_desc->nr_channels >= 4
            && swizzle[0] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[1] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[2] < UTIL_FORMAT_SWIZZLE_0
            && swizzle[3] < UTIL_FORMAT_SWIZZLE_0)
         sse2_movq(p->func, dst, dataXMM);
      else
      {
         if(swizzle[0] < UTIL_FORMAT_SWIZZLE_0)
         {
            if(output_desc->nr_channels >= 2 && swizzle[1] < UTIL_FORMAT_SWIZZLE_0)
               sse2_movd(p->func, dst, dataXMM);
            else
            {
               sse2_movd(p->func, tmp, dataXMM);
               x86_mov16(p->func, dst, tmp);
               if(output_desc->nr_channels >= 2)
                  x86_mov16_imm(p->func, x86_make_disp(dst, 2), imms[swizzle[1] - UTIL_FORMAT_SWIZZLE_0]);
            }
         }
         else
         {
            if(output_desc->nr_channels >= 2 && swizzle[1] >= UTIL_FORMAT_SWIZZLE_0)
               x86_mov_imm(p->func, dst, (imms[swizzle[1] - UTIL_FORMAT_SWIZZLE_0] << 16) | imms[swizzle[0] - UTIL_FORMAT_SWIZZLE_0]);
            else
            {
               x86_mov16_imm(p->func, dst, imms[swizzle[0] - UTIL_FORMAT_SWIZZLE_0]);
               if(output_desc->nr_channels >= 2)
               {
                  sse2_movd(p->func, tmp, dataXMM);
                  x86_shr_imm(p->func, tmp, 16);
                  x86_mov16(p->func, x86_make_disp(dst, 2), tmp);
               }
            }
         }

         if(output_desc->nr_channels >= 3)
         {
            if(swizzle[2] < UTIL_FORMAT_SWIZZLE_0)
            {
               if(output_desc->nr_channels >= 4 && swizzle[3] < UTIL_FORMAT_SWIZZLE_0)
               {
                  sse2_psrlq_imm(p->func, dataXMM, 32);
                  sse2_movd(p->func, x86_make_disp(dst, 4), dataXMM);
               }
               else
               {
                  sse2_psrlq_imm(p->func, dataXMM, 32);
                  sse2_movd(p->func, tmp, dataXMM);
                  x86_mov16(p->func, x86_make_disp(dst, 4), tmp);
                  if(output_desc->nr_channels >= 4)
                     x86_mov16_imm(p->func, x86_make_disp(dst, 6), imms[swizzle[3] - UTIL_FORMAT_SWIZZLE_0]);
               }
            }
            else
            {
               if(output_desc->nr_channels >= 4 && swizzle[3] >= UTIL_FORMAT_SWIZZLE_0)
                  x86_mov_imm(p->func, x86_make_disp(dst, 4), (imms[swizzle[3] - UTIL_FORMAT_SWIZZLE_0] << 16) | imms[swizzle[2] - UTIL_FORMAT_SWIZZLE_0]);
               else
               {
                  x86_mov16_imm(p->func, x86_make_disp(dst, 4), imms[swizzle[2] - UTIL_FORMAT_SWIZZLE_0]);
                  if(output_desc->nr_channels >= 4)
                  {
                     sse2_psrlq_imm(p->func, dataXMM, 48);
                     sse2_movd(p->func, tmp, dataXMM);
                     x86_mov16(p->func, x86_make_disp(dst, 6), tmp);
                  }
               }
            }
         }
      }
      return TRUE;
   }
   else if(!memcmp(&output_desc->channel[0], &input_desc->channel[0], sizeof(output_desc->channel[0])))
   {
      struct x86_reg tmp = p->tmp_EAX;

      if(input_desc->channel[0].size == 8 && input_desc->nr_channels == 4 && output_desc->nr_channels == 4
            && swizzle[0] == UTIL_FORMAT_SWIZZLE_W
            && swizzle[1] == UTIL_FORMAT_SWIZZLE_Z
            && swizzle[2] == UTIL_FORMAT_SWIZZLE_Y
            && swizzle[3] == UTIL_FORMAT_SWIZZLE_X)
      {
         /* TODO: support movbe */
         x86_mov(p->func, tmp, src);
         x86_bswap(p->func, tmp);
         x86_mov(p->func, dst, tmp);
         return TRUE;
      }

      for(i = 0; i < output_desc->nr_channels; ++i)
      {
         switch(output_desc->channel[0].size)
         {
         case 8:
            if(swizzle[i] >= UTIL_FORMAT_SWIZZLE_0)
            {
               unsigned v = 0;
               if(swizzle[i] == UTIL_FORMAT_SWIZZLE_1)
               {
                  switch(output_desc->channel[0].type)
                  {
                  case UTIL_FORMAT_TYPE_UNSIGNED:
                     v = output_desc->channel[0].normalized ? 0xff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_SIGNED:
                     v = output_desc->channel[0].normalized ? 0x7f : 1;
                     break;
                  default:
                     return FALSE;
                  }
               }
               x86_mov8_imm(p->func, x86_make_disp(dst, i * 1), v);
            }
            else
            {
               x86_mov8(p->func, tmp, x86_make_disp(src, swizzle[i] * 1));
               x86_mov8(p->func, x86_make_disp(dst, i * 1), tmp);
            }
            break;
         case 16:
            if(swizzle[i] >= UTIL_FORMAT_SWIZZLE_0)
            {
               unsigned v = 0;
               if(swizzle[i] == UTIL_FORMAT_SWIZZLE_1)
               {
                  switch(output_desc->channel[1].type)
                  {
                  case UTIL_FORMAT_TYPE_UNSIGNED:
                     v = output_desc->channel[1].normalized ? 0xffff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_SIGNED:
                     v = output_desc->channel[1].normalized ? 0x7fff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_FLOAT:
                     return FALSE;
                  default:
                     return FALSE;
                  }
               }
               x86_mov16_imm(p->func, x86_make_disp(dst, i * 2), v);
            }
            else if(swizzle[i] == UTIL_FORMAT_SWIZZLE_0)
               x86_mov16_imm(p->func, x86_make_disp(dst, i * 2), 0);
            else
            {
               x86_mov16(p->func, tmp, x86_make_disp(src, swizzle[i] * 2));
               x86_mov16(p->func, x86_make_disp(dst, i * 2), tmp);
            }
            break;
         case 32:
            if(swizzle[i] >= UTIL_FORMAT_SWIZZLE_0)
            {
               unsigned v = 0;
               if(swizzle[i] == UTIL_FORMAT_SWIZZLE_1)
               {
                  switch(output_desc->channel[1].type)
                  {
                  case UTIL_FORMAT_TYPE_UNSIGNED:
                     v = output_desc->channel[1].normalized ? 0xffffffff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_SIGNED:
                     v = output_desc->channel[1].normalized ? 0x7fffffff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_FLOAT:
                     v = 0x3f800000;
                     break;
                  default:
                     return FALSE;
                  }
               }
               x86_mov_imm(p->func, x86_make_disp(dst, i * 4), v);
            }
            else
            {
               x86_mov(p->func, tmp, x86_make_disp(src, swizzle[i] * 4));
               x86_mov(p->func, x86_make_disp(dst, i * 4), tmp);
            }
            break;
         case 64:
            if(swizzle[i] >= UTIL_FORMAT_SWIZZLE_0)
            {
               unsigned l = 0;
               unsigned h = 0;
               if(swizzle[i] == UTIL_FORMAT_SWIZZLE_1)
               {
                  switch(output_desc->channel[1].type)
                  {
                  case UTIL_FORMAT_TYPE_UNSIGNED:
                     h = output_desc->channel[1].normalized ? 0xffffffff : 0;
                     l = output_desc->channel[1].normalized ? 0xffffffff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_SIGNED:
                     h = output_desc->channel[1].normalized ? 0x7fffffff : 0;
                     l = output_desc->channel[1].normalized ? 0xffffffff : 1;
                     break;
                  case UTIL_FORMAT_TYPE_FLOAT:
                     h = 0x3ff00000;
                     l = 0;
                     break;
                  default:
                     return FALSE;
                  }
               }
               x86_mov_imm(p->func, x86_make_disp(dst, i * 8), l);
               x86_mov_imm(p->func, x86_make_disp(dst, i * 8 + 4), h);
            }
            else
            {
               if(x86_target_caps(p->func) & X86_SSE)
               {
                  struct x86_reg tmpXMM = x86_make_reg(file_XMM, 0);
                  emit_load64(p, tmp, tmpXMM, x86_make_disp(src, swizzle[i] * 8));
                  emit_store64(p, x86_make_disp(dst, i * 8), tmp, tmpXMM);
               }
               else
               {
                  x86_mov(p->func, tmp, x86_make_disp(src, swizzle[i] * 8));
                  x86_mov(p->func, x86_make_disp(dst, i * 8), tmp);
                  x86_mov(p->func, tmp, x86_make_disp(src, swizzle[i] * 8 + 4));
                  x86_mov(p->func, x86_make_disp(dst, i * 8 + 4), tmp);
               }
            }
            break;
         default:
            return FALSE;
         }
      }
      return TRUE;
   }
   /* special case for draw's EMIT_4UB (RGBA) and EMIT_4UB_BGRA */
   else if((x86_target_caps(p->func) & X86_SSE2) &&
         a->input_format == PIPE_FORMAT_R32G32B32A32_FLOAT && (0
            || a->output_format == PIPE_FORMAT_B8G8R8A8_UNORM
            || a->output_format == PIPE_FORMAT_R8G8B8A8_UNORM))
   {
      struct x86_reg dataXMM = x86_make_reg(file_XMM, 0);

      /* load */
      sse_movups(p->func, dataXMM, src);

      if (a->output_format == PIPE_FORMAT_B8G8R8A8_UNORM)
         sse_shufps(p->func, dataXMM, dataXMM, SHUF(2,1,0,3));

      /* scale by 255.0 */
      sse_mulps(p->func, dataXMM, get_const(p, CONST_255));

      /* pack and emit */
      sse2_cvtps2dq(p->func, dataXMM, dataXMM);
      sse2_packssdw(p->func, dataXMM, dataXMM);
      sse2_packuswb(p->func, dataXMM, dataXMM);
      sse2_movd(p->func, dst, dataXMM);

      return TRUE;
   }

   return FALSE;
}
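/* Summary of the conversion paths above, in the order they are tried:
 *  1. any integer/float/double input -> R32(,G32,B32,A32)_FLOAT output, via
 *     integer unpack + cvtdq2ps or float/double loads (needs SSE, and SSE2
 *     for the integer and double cases);
 *  2. 8-bit -> 16-bit integer channels with matching "normalized" flags
 *     (needs SSE2);
 *  3. pure swizzle/copy when input and output channel descriptions are
 *     identical, including a bswap fast path for byte-reversed RGBA;
 *  4. R32G32B32A32_FLOAT -> (B8G8R8A8|R8G8B8A8)_UNORM packing for draw's
 *     EMIT_4UB outputs (needs SSE2).
 * Anything else returns FALSE, which makes code generation fail so that the
 * caller can fall back to the generic (non-SSE) translate implementation.
 */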
static boolean translate_attr( struct translate_sse *p,
                               const struct translate_element *a,
                               struct x86_reg src,
                               struct x86_reg dst)
{
   if(a->input_format == a->output_format)
   {
      emit_memcpy(p, dst, src, util_format_get_stride(a->input_format, 1));
      return TRUE;
   }

   return translate_attr_convert(p, a, src, dst);
}
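/* For example, an element with input_format == output_format ==
 * PIPE_FORMAT_R32G32B32_FLOAT is emitted as a 12-byte emit_memcpy(), while
 * PIPE_FORMAT_R8G8B8A8_UNORM -> PIPE_FORMAT_R32G32B32A32_FLOAT goes through
 * translate_attr_convert()'s unsigned-integer path (unpack, cvtdq2ps,
 * multiply by 1/255).
 */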
static boolean init_inputs( struct translate_sse *p,
                            unsigned index_size )
{
   unsigned i;
   struct x86_reg instance_id = x86_make_disp(p->machine_EDI,
                                              get_offset(p, &p->instance_id));

   for (i = 0; i < p->nr_buffer_variants; i++) {
      struct translate_buffer_variant *variant = &p->buffer_variant[i];
      struct translate_buffer *buffer = &p->buffer[variant->buffer_index];

      if (!index_size || variant->instance_divisor) {
         struct x86_reg buf_max_index = x86_make_disp(p->machine_EDI,
                                                      get_offset(p, &buffer->max_index));
         struct x86_reg buf_stride = x86_make_disp(p->machine_EDI,
                                                   get_offset(p, &buffer->stride));
         struct x86_reg buf_ptr = x86_make_disp(p->machine_EDI,
                                                get_offset(p, &variant->ptr));
         struct x86_reg buf_base_ptr = x86_make_disp(p->machine_EDI,
                                                     get_offset(p, &buffer->base_ptr));
         struct x86_reg elt = p->idx_ESI;
         struct x86_reg tmp_EAX = p->tmp_EAX;

         /* Calculate pointer to first attrib:
          *   base_ptr + stride * index, where index depends on instance divisor
          */
         if (variant->instance_divisor) {
            /* Our index is instance ID divided by instance divisor.
             */
            x86_mov(p->func, tmp_EAX, instance_id);

            if (variant->instance_divisor != 1) {
               struct x86_reg tmp_EDX = p->tmp2_EDX;
               struct x86_reg tmp_ECX = p->src_ECX;

               /* TODO: Add x86_shr() to rtasm and use it whenever
                *       instance divisor is power of two.
                */
               x86_xor(p->func, tmp_EDX, tmp_EDX);
               x86_mov_reg_imm(p->func, tmp_ECX, variant->instance_divisor);
               x86_div(p->func, tmp_ECX);    /* EAX = EDX:EAX / ECX */
            }
         } else {
            /* XXX we need to clamp the index here too, but to a
             * per-array max value, not the draw->pt.max_index value
             * that's being given to us via translate->set_buffer().
             */
            x86_mov(p->func, tmp_EAX, elt);

            /* Clamp to max_index
             */
            x86_cmp(p->func, tmp_EAX, buf_max_index);
            x86_cmovcc(p->func, tmp_EAX, buf_max_index, cc_AE);
         }

         x86_imul(p->func, tmp_EAX, buf_stride);
         x86_add(p->func, tmp_EAX, buf_base_ptr);

         x86_cmp(p->func, p->count_EBP, p->tmp_EAX);

         /* In the linear case, keep the buffer pointer instead of the
          * index number.
          */
         if (!index_size && p->nr_buffer_variants == 1)
            x86_mov(p->func, elt, tmp_EAX);
         else
            x86_mov(p->func, buf_ptr, tmp_EAX);
      }
   }

   return TRUE;
}
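/* Note that the instance-divisor division above uses the unsigned x86 "div"
 * instruction with EDX zeroed and the divisor loaded into ECX, which is why
 * tmp2_EDX and src_ECX must not hold anything live at that point; the
 * quotient lands in EAX and is then multiplied by the buffer stride like a
 * regular vertex index.
 */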
static struct x86_reg get_buffer_ptr( struct translate_sse *p,
                                      unsigned index_size,
                                      unsigned var_idx,
                                      struct x86_reg elt )
{
   if (var_idx == ELEMENT_BUFFER_INSTANCE_ID) {
      return x86_make_disp(p->machine_EDI,
                           get_offset(p, &p->instance_id));
   }
   if (!index_size && p->nr_buffer_variants == 1) {
      return p->idx_ESI;
   }
   else if (!index_size || p->buffer_variant[var_idx].instance_divisor) {
      struct x86_reg ptr = p->src_ECX;
      struct x86_reg buf_ptr =
         x86_make_disp(p->machine_EDI,
                       get_offset(p, &p->buffer_variant[var_idx].ptr));

      x86_mov(p->func, ptr, buf_ptr);
      return ptr;
   }
   else {
      struct x86_reg ptr = p->src_ECX;
      const struct translate_buffer_variant *variant = &p->buffer_variant[var_idx];

      struct x86_reg buf_stride =
         x86_make_disp(p->machine_EDI,
                       get_offset(p, &p->buffer[variant->buffer_index].stride));

      struct x86_reg buf_base_ptr =
         x86_make_disp(p->machine_EDI,
                       get_offset(p, &p->buffer[variant->buffer_index].base_ptr));

      struct x86_reg buf_max_index =
         x86_make_disp(p->machine_EDI,
                       get_offset(p, &p->buffer[variant->buffer_index].max_index));

      /* Calculate pointer to current attrib:
       *   base_ptr + stride * index, where index depends on element size
       */
      switch (index_size) {
      case 1:
         x86_movzx8(p->func, ptr, elt);
         break;
      case 2:
         x86_movzx16(p->func, ptr, elt);
         break;
      case 4:
         x86_mov(p->func, ptr, elt);
         break;
      }

      /* Clamp to max_index
       */
      x86_cmp(p->func, ptr, buf_max_index);
      x86_cmovcc(p->func, ptr, buf_max_index, cc_AE);

      x86_imul(p->func, ptr, buf_stride);
      x86_add(p->func, ptr, buf_base_ptr);
      return ptr;
   }
}
static boolean incr_inputs( struct translate_sse *p,
                            unsigned index_size )
{
   if (!index_size && p->nr_buffer_variants == 1) {
      struct x86_reg stride = x86_make_disp(p->machine_EDI,
                                            get_offset(p, &p->buffer[0].stride));

      if (p->buffer_variant[0].instance_divisor == 0) {
         x86_add(p->func, p->idx_ESI, stride);
         sse_prefetchnta(p->func, x86_make_disp(p->idx_ESI, 192));
      }
   }
   else if (!index_size) {
      unsigned i;

      /* Is this worthwhile??
       */
      for (i = 0; i < p->nr_buffer_variants; i++) {
         struct translate_buffer_variant *variant = &p->buffer_variant[i];
         struct x86_reg buf_ptr = x86_make_disp(p->machine_EDI,
                                                get_offset(p, &variant->ptr));
         struct x86_reg buf_stride = x86_make_disp(p->machine_EDI,
                                                   get_offset(p, &p->buffer[variant->buffer_index].stride));

         if (variant->instance_divisor == 0) {
            x86_mov(p->func, p->tmp_EAX, buf_stride);
            x86_add(p->func, p->tmp_EAX, buf_ptr);
            if (i == 0) sse_prefetchnta(p->func, x86_make_disp(p->tmp_EAX, 192));
            x86_mov(p->func, buf_ptr, p->tmp_EAX);
         }
      }
   }
   else {
      x86_lea(p->func, p->idx_ESI, x86_make_disp(p->idx_ESI, index_size));
   }

   return TRUE;
}
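/* The prefetchnta 192 bytes ahead of the current vertex is a heuristic for
 * the common linear, non-instanced case; for indexed draws the source
 * address is not known until the element is fetched, so no prefetch is
 * issued there.
 */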
/* Build run( struct translate *machine,
 *            unsigned start,
 *            unsigned count,
 *            void *output_buffer )
 * or
 *  run_elts( struct translate *machine,
 *            unsigned *elts,
 *            unsigned count,
 *            void *output_buffer )
 *
 *  Lots of hardcoding
 *
 * EAX -- pointer to current output vertex
 * ECX -- pointer to current attribute
 *
 */
static boolean build_vertex_emit( struct translate_sse *p,
                                  struct x86_function *func,
                                  unsigned index_size )
{
   int fixup, label;
   unsigned j;

   memset(p->reg_to_const, 0xff, sizeof(p->reg_to_const));
   memset(p->const_to_reg, 0xff, sizeof(p->const_to_reg));

   p->tmp_EAX       = x86_make_reg(file_REG32, reg_AX);
   p->idx_ESI       = x86_make_reg(file_REG32, reg_SI);
   p->outbuf_EBX    = x86_make_reg(file_REG32, reg_BX);
   p->machine_EDI   = x86_make_reg(file_REG32, reg_DI);
   p->count_EBP     = x86_make_reg(file_REG32, reg_BP);
   p->tmp2_EDX      = x86_make_reg(file_REG32, reg_DX);
   p->src_ECX       = x86_make_reg(file_REG32, reg_CX);

   p->func = func;

   x86_init_func(p->func);

   if(x86_target(p->func) == X86_64_WIN64_ABI)
   {
      /* the ABI guarantees a 16-byte aligned 32-byte "shadow space" above the return address */
      sse2_movdqa(p->func, x86_make_disp(x86_make_reg(file_REG32, reg_SP), 8), x86_make_reg(file_XMM, 6));
      sse2_movdqa(p->func, x86_make_disp(x86_make_reg(file_REG32, reg_SP), 24), x86_make_reg(file_XMM, 7));
   }

   x86_push(p->func, p->outbuf_EBX);
   x86_push(p->func, p->count_EBP);

   /* on non-Win64 x86-64, these are already in the right registers */
   if(x86_target(p->func) != X86_64_STD_ABI)
   {
      x86_push(p->func, p->machine_EDI);
      x86_push(p->func, p->idx_ESI);

      x86_mov(p->func, p->machine_EDI, x86_fn_arg(p->func, 1));
      x86_mov(p->func, p->idx_ESI, x86_fn_arg(p->func, 2));
   }

   x86_mov(p->func, p->count_EBP, x86_fn_arg(p->func, 3));

   if(x86_target(p->func) != X86_32)
      x64_mov64(p->func, p->outbuf_EBX, x86_fn_arg(p->func, 5));
   else
      x86_mov(p->func, p->outbuf_EBX, x86_fn_arg(p->func, 5));

   /* Load instance ID.
    */
   if (p->use_instancing) {
      x86_mov(p->func,
              p->tmp_EAX,
              x86_fn_arg(p->func, 4));
      x86_mov(p->func,
              x86_make_disp(p->machine_EDI, get_offset(p, &p->instance_id)),
              p->tmp_EAX);
   }

   /* Get vertex count, compare to zero
    */
   x86_xor(p->func, p->tmp_EAX, p->tmp_EAX);
   x86_cmp(p->func, p->count_EBP, p->tmp_EAX);
   fixup = x86_jcc_forward(p->func, cc_E);

   /* always load, needed or not:
    */
   init_inputs(p, index_size);

   /* Note address for loop jump
    */
   label = x86_get_label(p->func);
   {
      struct x86_reg elt = !index_size ? p->idx_ESI : x86_deref(p->idx_ESI);
      int last_variant = -1;
      struct x86_reg vb;

      for (j = 0; j < p->translate.key.nr_elements; j++) {
         const struct translate_element *a = &p->translate.key.element[j];
         unsigned variant = p->element_to_buffer_variant[j];

         /* Figure out source pointer address:
          */
         if (variant != last_variant) {
            last_variant = variant;
            vb = get_buffer_ptr(p, index_size, variant, elt);
         }

         if (!translate_attr( p, a,
                              x86_make_disp(vb, a->input_offset),
                              x86_make_disp(p->outbuf_EBX, a->output_offset)))
            return FALSE;
      }

      /* Next output vertex:
       */
      x86_lea(p->func,
              p->outbuf_EBX,
              x86_make_disp(p->outbuf_EBX,
                            p->translate.key.output_stride));

      /* Incr index
       */
      incr_inputs( p, index_size );
   }

   /* decr count, loop if not zero
    */
   x86_dec(p->func, p->count_EBP);
   x86_jcc(p->func, cc_NZ, label);

   if (p->func->need_emms)
      mmx_emms(p->func);

   /* Land forward jump here:
    */
   x86_fixup_fwd_jump(p->func, fixup);

   /* Pop regs and return
    */
   if(x86_target(p->func) != X86_64_STD_ABI)
   {
      x86_pop(p->func, p->idx_ESI);
      x86_pop(p->func, p->machine_EDI);
   }

   x86_pop(p->func, p->count_EBP);
   x86_pop(p->func, p->outbuf_EBX);

   if(x86_target(p->func) == X86_64_WIN64_ABI)
   {
      sse2_movdqa(p->func, x86_make_reg(file_XMM, 6), x86_make_disp(x86_make_reg(file_REG32, reg_SP), 8));
      sse2_movdqa(p->func, x86_make_reg(file_XMM, 7), x86_make_disp(x86_make_reg(file_REG32, reg_SP), 24));
   }

   x86_ret(p->func);

   return TRUE;
}
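/* The generated code expects the C-callable signature used by
 * p->translate.run / run_elts*: argument 1 is the translate machine itself
 * (so EDI-relative displacements computed with get_offset() address its
 * fields), argument 2 is the start index or element pointer, argument 3 the
 * vertex count, argument 4 the instance id and argument 5 the output
 * buffer.  The exact prototypes are defined in translate.h.
 */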
static void translate_sse_set_buffer( struct translate *translate,
                                      unsigned buf,
                                      const void *ptr,
                                      unsigned stride,
                                      unsigned max_index )
{
   struct translate_sse *p = (struct translate_sse *)translate;

   if (buf < p->nr_buffers) {
      p->buffer[buf].base_ptr = (char *)ptr;
      p->buffer[buf].stride = stride;
      p->buffer[buf].max_index = max_index;
   }

   if (0) debug_printf("%s %d/%d: %p %d\n",
                       __FUNCTION__, buf,
                       p->nr_buffers,
                       ptr, stride);
}


static void translate_sse_release( struct translate *translate )
{
   struct translate_sse *p = (struct translate_sse *)translate;

   x86_release_func( &p->linear_func );
   x86_release_func( &p->elt_func );
   x86_release_func( &p->elt16_func );
   x86_release_func( &p->elt8_func );

   os_free_aligned(p);
}
struct translate *translate_sse2_create( const struct translate_key *key )
{
   struct translate_sse *p = NULL;
   unsigned i;

   /* this is misnamed, it actually refers to whether rtasm is enabled or not */
   if (!rtasm_cpu_has_sse())
      goto fail;

   p = os_malloc_aligned(sizeof(struct translate_sse), 16);
   if (p == NULL)
      goto fail;
   memset(p, 0, sizeof(*p));
   memcpy(p->consts, consts, sizeof(consts));

   p->translate.key = *key;
   p->translate.release = translate_sse_release;
   p->translate.set_buffer = translate_sse_set_buffer;

   for (i = 0; i < key->nr_elements; i++) {
      if (key->element[i].type == TRANSLATE_ELEMENT_NORMAL) {
         unsigned j;

         p->nr_buffers = MAX2(p->nr_buffers, key->element[i].input_buffer + 1);

         if (key->element[i].instance_divisor) {
            p->use_instancing = TRUE;
         }

         /*
          * Map vertex element to vertex buffer variant.
          */
         for (j = 0; j < p->nr_buffer_variants; j++) {
            if (p->buffer_variant[j].buffer_index == key->element[i].input_buffer &&
                p->buffer_variant[j].instance_divisor == key->element[i].instance_divisor) {
               break;
            }
         }
         if (j == p->nr_buffer_variants) {
            p->buffer_variant[j].buffer_index = key->element[i].input_buffer;
            p->buffer_variant[j].instance_divisor = key->element[i].instance_divisor;
            p->nr_buffer_variants++;
         }
         p->element_to_buffer_variant[i] = j;
      } else {
         assert(key->element[i].type == TRANSLATE_ELEMENT_INSTANCE_ID);

         p->element_to_buffer_variant[i] = ELEMENT_BUFFER_INSTANCE_ID;
      }
   }

   if (0) debug_printf("nr_buffers: %d\n", p->nr_buffers);

   if (!build_vertex_emit(p, &p->linear_func, 0))
      goto fail;

   if (!build_vertex_emit(p, &p->elt_func, 4))
      goto fail;

   if (!build_vertex_emit(p, &p->elt16_func, 2))
      goto fail;

   if (!build_vertex_emit(p, &p->elt8_func, 1))
      goto fail;

   p->translate.run = (run_func) x86_get_func(&p->linear_func);
   if (p->translate.run == NULL)
      goto fail;

   p->translate.run_elts = (run_elts_func) x86_get_func(&p->elt_func);
   if (p->translate.run_elts == NULL)
      goto fail;

   p->translate.run_elts16 = (run_elts16_func) x86_get_func(&p->elt16_func);
   if (p->translate.run_elts16 == NULL)
      goto fail;

   p->translate.run_elts8 = (run_elts8_func) x86_get_func(&p->elt8_func);
   if (p->translate.run_elts8 == NULL)
      goto fail;

   return &p->translate;

 fail:
   if (p)
      translate_sse_release( &p->translate );

   return NULL;
}


#else

struct translate *translate_sse2_create( const struct translate_key *key )
{
   return NULL;
}

#endif