; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=ALL --check-prefix=KNL
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=SKX
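
; Each function loads <8 x i8>, widens it, adds 4 to every lane, and truncates
; the result back to <8 x i8> before storing; only the low byte of each lane is
; live, so the extension of the load can be selected as an any-extend.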
define void @any_extend_load_v8i64(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i64:
; KNL-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; KNL-NEXT:    vpmovzxbq {{.*#+}} ymm1 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; KNL-NEXT:    vpmovzxbq {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,zero,zero,zero,xmm0[2],zero,zero,zero,zero,zero,zero,zero,xmm0[3],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT:    vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; KNL-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; KNL-NEXT:    vpmovqb %zmm0, (%rdi)
; KNL-NEXT:    vzeroupper
; SKX-LABEL: any_extend_load_v8i64:
; SKX-NEXT:    vpmovzxbq {{.*#+}} zmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero,mem[4],zero,zero,zero,zero,zero,zero,zero,mem[5],zero,zero,zero,zero,zero,zero,zero,mem[6],zero,zero,zero,zero,zero,zero,zero,mem[7],zero,zero,zero,zero,zero,zero,zero
; SKX-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; SKX-NEXT:    vpmovqb %zmm0, (%rdi)
; SKX-NEXT:    vzeroupper
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i64>
  %2 = add nuw nsw <8 x i64> %1, <i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4, i64 4>
  %3 = xor <8 x i64> %2, zeroinitializer
  %4 = trunc <8 x i64> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}
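
; The same pattern with a 32-bit intermediate element type.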
define void @any_extend_load_v8i32(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i32:
; KNL-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [4,4,4,4,4,4,4,4]
; KNL-NEXT:    vpaddd %ymm1, %ymm0, %ymm0
; KNL-NEXT:    vpmovdb %zmm0, %xmm0
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; KNL-NEXT:    vzeroupper
; SKX-LABEL: any_extend_load_v8i32:
; SKX-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; SKX-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; SKX-NEXT:    vpmovdb %ymm0, (%rdi)
; SKX-NEXT:    vzeroupper
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i32>
  %2 = add nuw nsw <8 x i32> %1, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %3 = xor <8 x i32> %2, zeroinitializer
  %4 = trunc <8 x i32> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}
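
; The same pattern with a 16-bit intermediate element type.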
define void @any_extend_load_v8i16(<8 x i8> * %ptr) {
; KNL-LABEL: any_extend_load_v8i16:
; KNL-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT:    vmovq %xmm0, (%rdi)
; SKX-LABEL: any_extend_load_v8i16:
; SKX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; SKX-NEXT:    vpaddw {{.*}}(%rip), %xmm0, %xmm0
; SKX-NEXT:    vpmovwb %xmm0, (%rdi)
  %wide.load = load <8 x i8>, <8 x i8>* %ptr, align 1
  %1 = zext <8 x i8> %wide.load to <8 x i16>
  %2 = add nuw nsw <8 x i16> %1, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %3 = xor <8 x i16> %2, zeroinitializer
  %4 = trunc <8 x i16> %3 to <8 x i8>
  store <8 x i8> %4, <8 x i8>* %ptr, align 1
  ret void
}