/**************************************************************************
 *
 * Copyright 2009 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/**
 * @file
 * Helper functions for packing/unpacking.
 *
 * Pack/unpacking is necessary for conversion between types of different
 * bit width.
 *
 * They are also commonly used when a computation needs higher
 * precision for the intermediate values. For example, if one needs to
 * use more precision for intermediate results than for the inputs and
 * outputs, then one should implement it as:
 *
 *   LLVMValueRef
 *   compute(struct gallivm_state *gallivm, struct lp_type type,
 *           LLVMValueRef a, LLVMValueRef b)
 *   {
 *      struct lp_type wide_type = lp_wider_type(type);
 *      LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *      lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
 *      lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
 *
 *      cl = compute_half(al, bl);
 *      ch = compute_half(ah, bh);
 *
 *      c = lp_build_pack2(gallivm, wide_type, type, cl, ch);
 *
 *      return c;
 *   }
 *
 * where compute_half() would do the computation for half the elements with
 * twice the precision.
 *
 * @author Jose Fonseca <jfonseca@vmware.com>
 */
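/*
 * A concrete instance of the pattern above (illustrative sketch only; the
 * variable names and the choice of addition are assumptions, not part of the
 * original file): adding two vectors of 16 x uint8 with 16-bit intermediates
 * so the sums cannot overflow before being clamped back to 8 bits:
 *
 *    struct lp_type wide_type = lp_wider_type(type);   (8 x uint16)
 *    struct lp_build_context wide_bld;
 *    LLVMValueRef al, ah, bl, bh, cl, ch, c;
 *
 *    lp_build_context_init(&wide_bld, gallivm, wide_type);
 *    lp_build_unpack2(gallivm, type, wide_type, a, &al, &ah);
 *    lp_build_unpack2(gallivm, type, wide_type, b, &bl, &bh);
 *    cl = lp_build_add(&wide_bld, al, bl);
 *    ch = lp_build_add(&wide_bld, ah, bh);
 *    c  = lp_build_packs2(gallivm, wide_type, type, cl, ch);
 */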
#include "util/u_debug.h"
#include "util/u_math.h"
#include "util/u_cpu_detect.h"

#include "lp_bld_type.h"
#include "lp_bld_const.h"
#include "lp_bld_init.h"
#include "lp_bld_intr.h"
#include "lp_bld_arit.h"
#include "lp_bld_pack.h"
/**
 * Build shuffle vectors that match PUNPCKLxx and PUNPCKHxx instructions.
 */
static LLVMValueRef
lp_build_const_unpack_shuffle(struct gallivm_state *gallivm,
                              unsigned n, unsigned lo_hi)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i, j;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   /* TODO: cache results in a static table */

   for(i = 0, j = lo_hi*n/2; i < n; i += 2, ++j) {
      elems[i + 0] = lp_build_const_int32(gallivm, 0 + j);
      elems[i + 1] = lp_build_const_int32(gallivm, n + j);
   }

   return LLVMConstVector(elems, n);
}
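/*
 * Example (illustrative, not from the original source): for n = 8 and
 * lo_hi = 0 the helper above yields the shuffle mask
 * <0, 8, 1, 9, 2, 10, 3, 11>, i.e. the low halves of the two operands
 * interleaved, which is what PUNPCKLWD does on a pair of 8 x 16-bit vectors;
 * lo_hi = 1 yields <4, 12, 5, 13, 6, 14, 7, 15> (PUNPCKHWD).
 */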
/**
 * Build shuffle vectors that match PACKxx instructions.
 */
static LLVMValueRef
lp_build_const_pack_shuffle(struct gallivm_state *gallivm, unsigned n)
{
   LLVMValueRef elems[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   assert(n <= LP_MAX_VECTOR_LENGTH);

   for(i = 0; i < n; ++i)
      elems[i] = lp_build_const_int32(gallivm, 2*i);

   return LLVMConstVector(elems, n);
}
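/*
 * Example (illustrative, not from the original source): for n = 8 the helper
 * above returns <0, 2, 4, 6, 8, 10, 12, 14>, i.e. every other element of the
 * two concatenated operands, which is how the generic shuffle path of
 * lp_build_pack2() below picks the low (truncated) halves of each wide
 * element on little-endian.
 */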
/**
 * Interleave vector elements.
 *
 * Matches the PUNPCKLxx and PUNPCKHxx SSE instructions.
 */
LLVMValueRef
lp_build_interleave2(struct gallivm_state *gallivm,
                     struct lp_type type,
                     LLVMValueRef a,
                     LLVMValueRef b,
                     unsigned lo_hi)
{
   LLVMValueRef shuffle;

   shuffle = lp_build_const_unpack_shuffle(gallivm, type.length, lo_hi);

   return LLVMBuildShuffleVector(gallivm->builder, a, b, shuffle, "");
}
/**
 * Double the bit width.
 *
 * This will only change the number of bits the values are represented with,
 * not the values themselves.
 */
void
lp_build_unpack2(struct gallivm_state *gallivm,
                 struct lp_type src_type,
                 struct lp_type dst_type,
                 LLVMValueRef src,
                 LLVMValueRef *dst_lo,
                 LLVMValueRef *dst_hi)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef msb;
   LLVMTypeRef dst_vec_type;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(dst_type.width == src_type.width * 2);
   assert(dst_type.length * 2 == src_type.length);

   if(dst_type.sign && src_type.sign) {
      /* Replicate the sign bit in the most significant bits */
      msb = LLVMBuildAShr(builder, src,
                          lp_build_const_int_vec(gallivm, src_type, src_type.width - 1), "");
   }
   else
      /* Most significant bits always zero */
      msb = lp_build_zero(gallivm, src_type);

   /* Interleave bits */
#ifdef PIPE_ARCH_LITTLE_ENDIAN
   *dst_lo = lp_build_interleave2(gallivm, src_type, src, msb, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, src, msb, 1);
#else
   *dst_lo = lp_build_interleave2(gallivm, src_type, msb, src, 0);
   *dst_hi = lp_build_interleave2(gallivm, src_type, msb, src, 1);
#endif

   /* Cast the result into the new type (twice as wide) */

   dst_vec_type = lp_build_vec_type(gallivm, dst_type);

   *dst_lo = LLVMBuildBitCast(builder, *dst_lo, dst_vec_type, "");
   *dst_hi = LLVMBuildBitCast(builder, *dst_hi, dst_vec_type, "");
}
/**
 * Expand the bit width.
 *
 * This will only change the number of bits the values are represented with,
 * not the values themselves.
 */
void
lp_build_unpack(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef src,
                LLVMValueRef *dst, unsigned num_dsts)
{
   unsigned num_tmps;
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length == dst_type.length * num_dsts);

   num_tmps = 1;
   dst[0] = src;

   while(src_type.width < dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width *= 2;
      tmp_type.length /= 2;

      for(i = num_tmps; i--; ) {
         lp_build_unpack2(gallivm, src_type, tmp_type, dst[i],
                          &dst[2*i + 0], &dst[2*i + 1]);
      }

      src_type = tmp_type;

      num_tmps *= 2;
   }

   assert(num_tmps == num_dsts);
}
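/*
 * Note (illustrative, not from the original source): lp_build_unpack() just
 * repeats lp_build_unpack2() until the requested width is reached. For
 * example, expanding 16 x uint8 all the way to uint32 takes two doubling
 * steps and yields num_dsts = 4 vectors of 4 x uint32 each.
 */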
/**
 * Non-interleaved pack.
 *
 * This will move values as
 *
 *   lo =  __ l0 __ l1 __ l2 __ .. __ ln
 *   hi =  __ h0 __ h1 __ h2 __ .. __ hn
 *   res = l0 l1 l2 .. ln h0 h1 h2 .. hn
 *
 * This will only change the number of bits the values are represented with,
 * not the values themselves.
 *
 * It is assumed the values are already clamped into the destination type range.
 * Values outside that range will produce undefined results. Use
 * lp_build_packs2 instead.
 */
LLVMValueRef
lp_build_pack2(struct gallivm_state *gallivm,
               struct lp_type src_type,
               struct lp_type dst_type,
               LLVMValueRef lo,
               LLVMValueRef hi)
{
   LLVMBuilderRef builder = gallivm->builder;
#if HAVE_LLVM < 0x0207
   LLVMTypeRef src_vec_type = lp_build_vec_type(gallivm, src_type);
#endif
   LLVMTypeRef dst_vec_type = lp_build_vec_type(gallivm, dst_type);
   LLVMValueRef shuffle;
   LLVMValueRef res = NULL;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   /* Check for special cases first */
   if(util_cpu_caps.has_sse2 && src_type.width * src_type.length == 128) {
      switch(src_type.width) {
      case 32:
         if(dst_type.sign) {
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packssdw.128", src_vec_type, lo, hi);
#endif
         }
         else {
            if (util_cpu_caps.has_sse4_1) {
               return lp_build_intrinsic_binary(builder, "llvm.x86.sse41.packusdw", dst_vec_type, lo, hi);
            }
            else {
               /* use generic shuffle below */
               res = NULL;
            }
         }
         break;

      case 16:
         if(dst_type.sign) {
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packsswb.128", src_vec_type, lo, hi);
#endif
         }
         else {
#if HAVE_LLVM >= 0x0207
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", dst_vec_type, lo, hi);
#else
            res = lp_build_intrinsic_binary(builder, "llvm.x86.sse2.packuswb.128", src_vec_type, lo, hi);
#endif
         }
         break;

      default:
         assert(0);
         return LLVMGetUndef(dst_vec_type);
      }

      if (res) {
         res = LLVMBuildBitCast(builder, res, dst_vec_type, "");
         return res;
      }
   }

   /* generic shuffle */
   lo = LLVMBuildBitCast(builder, lo, dst_vec_type, "");
   hi = LLVMBuildBitCast(builder, hi, dst_vec_type, "");

   shuffle = lp_build_const_pack_shuffle(gallivm, dst_type.length);

   res = LLVMBuildShuffleVector(builder, lo, hi, shuffle, "");

   return res;
}
/**
 * Non-interleaved pack and saturate.
 *
 * Same as lp_build_pack2 but will saturate values so that they fit into the
 * destination type.
 */
LLVMValueRef
lp_build_packs2(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                LLVMValueRef lo,
                LLVMValueRef hi)
{
   boolean clamp;

   assert(!src_type.floating);
   assert(!dst_type.floating);
   assert(src_type.sign == dst_type.sign);
   assert(src_type.width == dst_type.width * 2);
   assert(src_type.length * 2 == dst_type.length);

   clamp = TRUE;

   /* All X86 SSE non-interleaved pack instructions take signed inputs and
    * saturate them, so no need to clamp for those cases. */
   if(util_cpu_caps.has_sse2 &&
      src_type.width * src_type.length == 128 &&
      src_type.sign)
      clamp = FALSE;

   if(clamp) {
      struct lp_build_context bld;
      unsigned dst_bits = dst_type.sign ? dst_type.width - 1 : dst_type.width;
      LLVMValueRef dst_max = lp_build_const_int_vec(gallivm, src_type,
                                ((unsigned long long)1 << dst_bits) - 1);
      lp_build_context_init(&bld, gallivm, src_type);
      lo = lp_build_min(&bld, lo, dst_max);
      hi = lp_build_min(&bld, hi, dst_max);
      /* FIXME: What about lower bound? */
   }

   return lp_build_pack2(gallivm, src_type, dst_type, lo, hi);
}
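/*
 * Note (illustrative, not from the original source): the clamp above only
 * enforces the upper bound of the destination range via lp_build_min();
 * as the FIXME says, values below the destination minimum are not clamped
 * here and will wrap when truncated by the generic shuffle path.
 */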
/**
 * Truncate the bit width.
 *
 * TODO: Handle saturation consistently.
 */
LLVMValueRef
lp_build_pack(struct gallivm_state *gallivm,
              struct lp_type src_type,
              struct lp_type dst_type,
              boolean clamped,
              const LLVMValueRef *src, unsigned num_srcs)
{
   LLVMValueRef (*pack2)(struct gallivm_state *gallivm,
                         struct lp_type src_type,
                         struct lp_type dst_type,
                         LLVMValueRef lo,
                         LLVMValueRef hi);
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /* Register width must remain constant */
   assert(src_type.width * src_type.length == dst_type.width * dst_type.length);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length);

   if(clamped)
      pack2 = &lp_build_pack2;
   else
      pack2 = &lp_build_packs2;

   for(i = 0; i < num_srcs; ++i)
      tmp[i] = src[i];

   while(src_type.width > dst_type.width) {
      struct lp_type tmp_type = src_type;

      tmp_type.width /= 2;
      tmp_type.length *= 2;

      /* Take into consideration the sign changes only in the last step */
      if(tmp_type.width == dst_type.width)
         tmp_type.sign = dst_type.sign;

      num_srcs /= 2;

      for(i = 0; i < num_srcs; ++i)
         tmp[i] = pack2(gallivm, src_type, tmp_type,
                        tmp[2*i + 0], tmp[2*i + 1]);

      src_type = tmp_type;
   }

   assert(num_srcs == 1);

   return tmp[0];
}
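/*
 * Usage sketch (illustrative only; variable names are assumptions): narrowing
 * four vectors of 4 x uint32 down to a single vector of 16 x uint8 in two
 * packing steps; passing clamped = FALSE requests the saturating
 * lp_build_packs2() variant:
 *
 *    LLVMValueRef quad[4];
 *    LLVMValueRef packed;
 *
 *    packed = lp_build_pack(gallivm, u32_type, u8_type, FALSE, quad, 4);
 */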
/**
 * Truncate or expand the bitwidth.
 *
 * NOTE: Getting the right sign flags is crucial here, as we employ some
 * intrinsics that do saturation.
 */
void
lp_build_resize(struct gallivm_state *gallivm,
                struct lp_type src_type,
                struct lp_type dst_type,
                const LLVMValueRef *src, unsigned num_srcs,
                LLVMValueRef *dst, unsigned num_dsts)
{
   LLVMBuilderRef builder = gallivm->builder;
   LLVMValueRef tmp[LP_MAX_VECTOR_LENGTH];
   unsigned i;

   /*
    * We don't support float <-> int conversion here. That must be done
    * before/after calling this function.
    */
   assert(src_type.floating == dst_type.floating);

   /*
    * We don't support double <-> float conversion yet, although it could be
    * added with little effort.
    */
   assert((!src_type.floating && !dst_type.floating) ||
          src_type.width == dst_type.width);

   /* We must not lose or gain channels. Only precision */
   assert(src_type.length * num_srcs == dst_type.length * num_dsts);

   /* We don't support M:N conversion, only 1:N, M:1, or 1:1 */
   assert(num_srcs == 1 || num_dsts == 1);

   assert(src_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(dst_type.length <= LP_MAX_VECTOR_LENGTH);
   assert(num_srcs <= LP_MAX_VECTOR_LENGTH);
   assert(num_dsts <= LP_MAX_VECTOR_LENGTH);

   if (src_type.width > dst_type.width) {
      /*
       * Truncate bit width.
       */

      assert(num_dsts == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector packing intrinsics
          */

         tmp[0] = lp_build_pack(gallivm, src_type, dst_type, TRUE, src, num_srcs);
      }
      else {
         /*
          * Do it element-wise.
          */

         assert(src_type.length == dst_type.length);
         tmp[0] = lp_build_undef(gallivm, dst_type);
         for (i = 0; i < dst_type.length; ++i) {
            LLVMValueRef index = lp_build_const_int32(gallivm, i);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, "");
            val = LLVMBuildTrunc(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, "");
         }
      }
   }
   else if (src_type.width < dst_type.width) {
      /*
       * Expand bit width.
       */

      assert(num_srcs == 1);

      if (src_type.width * src_type.length == dst_type.width * dst_type.length) {
         /*
          * Register width remains constant -- use vector unpack intrinsics
          */
         lp_build_unpack(gallivm, src_type, dst_type, src[0], tmp, num_dsts);
      }
      else {
         /*
          * Do it element-wise.
          */

         assert(src_type.length == dst_type.length);
         tmp[0] = lp_build_undef(gallivm, dst_type);
         for (i = 0; i < dst_type.length; ++i) {
            LLVMValueRef index = lp_build_const_int32(gallivm, i);
            LLVMValueRef val = LLVMBuildExtractElement(builder, src[0], index, "");

            if (src_type.sign && dst_type.sign) {
               val = LLVMBuildSExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            } else {
               val = LLVMBuildZExt(builder, val, lp_build_elem_type(gallivm, dst_type), "");
            }
            tmp[0] = LLVMBuildInsertElement(builder, tmp[0], val, index, "");
         }
      }
   }
   else {
      /*
       * No-op
       */

      assert(num_srcs == 1);
      assert(num_dsts == 1);

      tmp[0] = src[0];
   }

   for(i = 0; i < num_dsts; ++i)
      dst[i] = tmp[i];
}
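/*
 * Usage sketch (illustrative only; variable names are assumptions):
 * lp_build_resize() chooses between the packing/unpacking helpers above and a
 * per-element trunc/ext loop. For example, expanding one 16 x uint8 vector
 * into four 4 x uint32 vectors:
 *
 *    LLVMValueRef dst[4];
 *
 *    lp_build_resize(gallivm, u8_type, u32_type, &src, 1, dst, 4);
 */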