/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * Helper functions for type conversions.
 *
 * We want to use the fastest type for a given computation whenever feasible.
 * The other side of this is that we need to be able to convert between
 * several types accurately and efficiently.
 *
 * Conversion between types of different bit width is quite complex, since
 * values must be packed into or unpacked across multiple registers to keep
 * the invariants below.
 *
 * To remember, there are a few invariants in type conversions:
 *
 * - register width must remain constant:
 *
 *     src_type.width * src_type.length == dst_type.width * dst_type.length
 *
 * - total number of elements must remain constant:
 *
 *     src_type.length * num_srcs == dst_type.length * num_dsts
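 *
 *   For instance, converting four registers of 4 x 32-bit values into one
 *   register of 16 x 8-bit values keeps both invariants:
 *   32 * 4 == 8 * 16 and 4 * 4 == 16 * 1.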
 *
 * It is not always possible to do the conversion both accurately and
 * efficiently, usually due to lack of adequate machine instructions. In these
 * cases it is important not to take shortcuts here and sacrifice accuracy, as
 * these functions can be used anywhere. In the future we might have a
 * precision parameter which can gauge the accuracy vs efficiency compromise,
 * but for now, if the data conversion between two stages happens to be the
 * bottleneck, then most likely you should just avoid converting at all and
 * run both stages with the same type.
 *
 * Make sure to run the lp_test_conv unit test after any change to this file.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */

#include "util/u_debug.h"
#include "util/u_math.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
#include "lp_bld_conv.h"

/**
 * Special case for converting clamped IEEE-754 floats to unsigned norms.
 *
 * The mathematical voodoo below may seem excessive but it is actually
 * paramount we do it this way for several reasons. First, there is no
 * single-precision FP to unsigned integer conversion Intel SSE instruction.
 * Second, even if there was, since the FP's mantissa takes only a fraction of
 * register bits the typical scale-and-cast approach would require double
 * precision for accurate results, and therefore half the throughput.
 *
 * Although the result values can be scaled to an arbitrary bit width
 * specified by dst_width, the actual result type will have the same width.
 */
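/*
 * Illustrative example (numbers assumed for illustration): for a 32-bit
 * float source (23-bit mantissa) and dst_width = 8, we get n = 8,
 * ubound = 256, mask = 255, scale = 255/256 and bias = 2^(23 - 8) = 32768.
 * For a clamped value v in [0, 1], v * scale + bias lies in
 * [2^15, 2^15 + 255/256], so once the float is bitcast to an integer the
 * low 8 bits of its mantissa hold v * 255 (rounded to nearest), i.e. the
 * desired unorm8 value.
 */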
LLVMValueRef
lp_build_clamped_float_to_unsigned_norm(LLVMBuilderRef builder,
                                        struct lp_type src_type,
                                        unsigned dst_width,
                                        LLVMValueRef src)
{
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(src_type);
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   assert(src_type.floating);

   mantissa = lp_mantissa(src_type);

   /* We cannot carry more bits than the mantissa */
   n = MIN2(mantissa, dst_width);

   /* These magic coefficients make the desired result appear in the least
    * significant bits of the mantissa.
    */
   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)mask/ubound;
   bias = (double)((unsigned long long)1 << (mantissa - n));

   res = LLVMBuildMul(builder, src, lp_build_const_vec(src_type, scale), "");
   res = LLVMBuildAdd(builder, res, lp_build_const_vec(src_type, bias), "");
   res = LLVMBuildBitCast(builder, res, int_vec_type, "");

   if(dst_width > n) {
      int shift = dst_width - n;
      res = LLVMBuildShl(builder, res, lp_build_const_int_vec(src_type, shift), "");

      /* TODO: Fill in the empty lower bits for additional precision? */
      /* YES: this fixes progs/trivial/tri-z-eq.c.
       * Otherwise vertex Z=1.0 values get converted to something like
       * 0xfffffb00 and the test for equality with 0xffffffff fails.
       */
#if 0
      {
         LLVMValueRef msb;
         msb = LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, dst_width - 1), "");
         msb = LLVMBuildShl(builder, msb, lp_build_const_int_vec(src_type, shift), "");
         msb = LLVMBuildSub(builder, msb, lp_build_const_int_vec(src_type, 1), "");
         res = LLVMBuildOr(builder, res, msb, "");
      }
#elif 0
      while(shift > 0) {
         shift -= n;
         res = LLVMBuildOr(builder, res, LLVMBuildLShr(builder, res, lp_build_const_int_vec(src_type, n), ""), "");
      }
#endif
   }
   else
      res = LLVMBuildAnd(builder, res, lp_build_const_int_vec(src_type, mask), "");

   return res;
}

/**
 * Inverse of lp_build_clamped_float_to_unsigned_norm above.
 */
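/*
 * Sketch of the idea (descriptive only): the unorm bits are OR'ed into the
 * low mantissa bits of the constant "bias" float, so that after the bitcast
 * back to float the value equals bias + unorm_value / 2^n; the subsequent
 * subtract and multiply by scale then recover a float in [0, 1].
 */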
LLVMValueRef
lp_build_unsigned_norm_to_float(LLVMBuilderRef builder,
                                unsigned src_width,
                                struct lp_type dst_type,
                                LLVMValueRef src)
{
   LLVMTypeRef vec_type = lp_build_vec_type(dst_type);
   LLVMTypeRef int_vec_type = lp_build_int_vec_type(dst_type);
   LLVMValueRef bias_;
   LLVMValueRef res;
   unsigned mantissa;
   unsigned n;
   unsigned long long ubound;
   unsigned long long mask;
   double scale;
   double bias;

   mantissa = lp_mantissa(dst_type);

   n = MIN2(mantissa, src_width);

   ubound = ((unsigned long long)1 << n);
   mask = ubound - 1;
   scale = (double)ubound/mask;
   bias = (double)((unsigned long long)1 << (mantissa - n));

   res = src;

   if(src_width > mantissa) {
      int shift = src_width - mantissa;
      res = LLVMBuildLShr(builder, res, lp_build_const_int_vec(dst_type, shift), "");
   }

   bias_ = lp_build_const_vec(dst_type, bias);

   res = LLVMBuildOr(builder,
                     res,
                     LLVMBuildBitCast(builder, bias_, int_vec_type, ""), "");

   res = LLVMBuildBitCast(builder, res, vec_type, "");

   res = LLVMBuildSub(builder, res, bias_, "");
   res = LLVMBuildMul(builder, res, lp_build_const_vec(dst_type, scale), "");

   return res;
}

/**
 * Generic type conversion.
 *
 * TODO: Take a precision argument, or even better, add a new precision member
 * to struct lp_type.
 */
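/*
 * Illustrative example (field values assumed for the sake of the example):
 * converting four registers of 4 x float32 in [0, 1] to one register of
 * 16 x unorm8 would use
 *
 *    src_type: floating = TRUE,  sign = TRUE,  norm = FALSE, width = 32, length = 4
 *    dst_type: floating = FALSE, sign = FALSE, norm = TRUE,  width = 8,  length = 16
 *
 * with num_srcs = 4 and num_dsts = 1, which satisfies both invariants stated
 * at the top of this file.
 */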
void
lp_build_conv(LLVMBuilderRef builder,
              struct lp_type src_type,
              struct lp_type dst_type,
              const LLVMValueRef *src, unsigned num_srcs,
              LLVMValueRef *dst, unsigned num_dsts)
{
   struct lp_type tmp_type;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);

   tmp_type = src_type;
   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];
   num_tmps = num_srcs;
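
   /*
    * Clamp if necessary: when the source range exceeds the destination
    * range, clamp the values first so the scaling below cannot overflow.
    */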
   if(memcmp(&src_type, &dst_type, sizeof src_type) != 0) {
      struct lp_build_context bld;
      double src_min = lp_const_min(src_type);
      double dst_min = lp_const_min(dst_type);
      double src_max = lp_const_max(src_type);
      double dst_max = lp_const_max(dst_type);
      LLVMValueRef thres;

      lp_build_context_init(&bld, builder, tmp_type);

      if(src_min < dst_min) {
         thres = lp_build_const_vec(src_type, dst_min);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_max(&bld, tmp[i], thres);
      }

      if(src_max > dst_max) {
         thres = lp_build_const_vec(src_type, dst_max);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = lp_build_min(&bld, tmp[i], thres);
      }
   }

   /*
    * Scale to the narrowest range
    */

   if(dst_type.floating) {
      /* Nothing to do */
   }
   else if(tmp_type.floating) {
      if(!dst_type.fixed && !dst_type.sign && dst_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_clamped_float_to_unsigned_norm(builder,
                                                             tmp_type,
                                                             dst_type.width,
                                                             tmp[i]);
         }
         tmp_type.floating = FALSE;
      }
      else {
         double dst_scale = lp_const_scale(dst_type);
         LLVMTypeRef tmp_vec_type;

         if (dst_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, dst_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }

         /* Use an equally sized integer for intermediate computations */
         tmp_type.floating = FALSE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(dst_type.sign)
               tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildFPToUI(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildFPToUI */
            tmp[i] = LLVMBuildFPToSI(builder, tmp[i], tmp_vec_type, "");
#endif
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift > dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, src_shift - dst_shift);
         for(i = 0; i < num_tmps; ++i)
            if(dst_type.sign)
               tmp[i] = LLVMBuildAShr(builder, tmp[i], shift, "");
            else
               tmp[i] = LLVMBuildLShr(builder, tmp[i], shift, "");
      }
   }

   /*
    * Truncate or expand bit width
    */

   assert(!tmp_type.floating || tmp_type.width == dst_type.width);

   if(tmp_type.width > dst_type.width) {
      assert(num_dsts == 1);
      tmp[0] = lp_build_pack(builder, tmp_type, dst_type, TRUE, tmp, num_tmps);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = 1;
   }

   if(tmp_type.width < dst_type.width) {
      assert(num_tmps == 1);
      lp_build_unpack(builder, tmp_type, dst_type, tmp[0], tmp, num_dsts);
      tmp_type.width = dst_type.width;
      tmp_type.length = dst_type.length;
      num_tmps = num_dsts;
   }

   assert(tmp_type.width == dst_type.width);
   assert(tmp_type.length == dst_type.length);
   assert(num_tmps == num_dsts);
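
   /*
    * At this point the values already have the destination bit width and
    * vector length; what remains is to convert and rescale them to the
    * destination value range.
    */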

   /*
    * Scale to the widest range
    */

   if(src_type.floating) {
      /* Nothing to do */
   }
   else if(!src_type.floating && dst_type.floating) {
      if(!src_type.fixed && !src_type.sign && src_type.norm) {
         for(i = 0; i < num_tmps; ++i) {
            tmp[i] = lp_build_unsigned_norm_to_float(builder,
                                                     src_type.width,
                                                     dst_type,
                                                     tmp[i]);
         }
         tmp_type.floating = TRUE;
      }
      else {
         double src_scale = lp_const_scale(src_type);
         LLVMTypeRef tmp_vec_type;

         /* Use an equally sized floating point type for intermediate
          * computations */
         tmp_type.floating = TRUE;
         tmp_type.sign = TRUE;
         tmp_vec_type = lp_build_vec_type(tmp_type);
         for(i = 0; i < num_tmps; ++i) {
#if 0
            if(src_type.sign)
               tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
            else
               tmp[i] = LLVMBuildUIToFP(builder, tmp[i], tmp_vec_type, "");
#else
            /* FIXME: there is no SSE counterpart for LLVMBuildUIToFP */
            tmp[i] = LLVMBuildSIToFP(builder, tmp[i], tmp_vec_type, "");
#endif
         }

         if (src_scale != 1.0) {
            LLVMValueRef scale = lp_build_const_vec(tmp_type, 1.0/src_scale);
            for(i = 0; i < num_tmps; ++i)
               tmp[i] = LLVMBuildMul(builder, tmp[i], scale, "");
         }
      }
   }
   else {
      unsigned src_shift = lp_const_shift(src_type);
      unsigned dst_shift = lp_const_shift(dst_type);

      /* FIXME: compensate different offsets too */
      if(src_shift < dst_shift) {
         LLVMValueRef shift = lp_build_const_int_vec(tmp_type, dst_shift - src_shift);
         for(i = 0; i < num_tmps; ++i)
            tmp[i] = LLVMBuildShl(builder, tmp[i], shift, "");
      }
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}

/**
 * Bit mask conversion.
 *
 * This will convert the integer masks that match the given types.
 *
 * The mask values should be 0 or -1, i.e., all bits either set to zero or
 * one. Any other value will likely cause unpredictable results.
 *
 * This is basically a very trimmed down version of lp_build_conv.
 */
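/*
 * Illustrative example (values assumed): a mask of four registers of
 * 4 x int32 lanes (each lane 0 or ~0) can be converted to one register of
 * 16 x int8 lanes, with num_srcs = 4 and num_dsts = 1; only the bit width
 * changes, the 0/-1 pattern is preserved.
 */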
void
lp_build_conv_mask(LLVMBuilderRef builder,
                   struct lp_type src_type,
                   struct lp_type dst_type,
                   const LLVMValueRef *src, unsigned num_srcs,
                   LLVMValueRef *dst, unsigned num_dsts)
{
   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /*
    * We assume all values are 0 or -1
    */

   src_type.floating = FALSE;
   src_type.fixed = FALSE;
   src_type.sign = TRUE;
   src_type.norm = FALSE;

   dst_type.floating = FALSE;
   dst_type.fixed = FALSE;
   dst_type.sign = TRUE;
   dst_type.norm = FALSE;

   /*
    * Truncate or expand bit width
    */

   if(src_type.width > dst_type.width) {
      assert(num_dsts == 1);
      dst[0] = lp_build_pack(builder, src_type, dst_type, TRUE, src, num_srcs);
   }
   else if(src_type.width < dst_type.width) {
      assert(num_srcs == 1);
      lp_build_unpack(builder, src_type, dst_type, src[0], dst, num_dsts);
   }
   else {
      assert(num_srcs == num_dsts);
      memcpy(dst, src, num_dsts * sizeof *dst);
   }
}