; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+aes -o - %s | FileCheck %s --check-prefixes=CHECK

; User code intends to execute {pmull, pmull2} instructions on the {lower, higher} halves of the same vector registers directly.
; Test that PMULL2 instructions are generated for the higher-half operands.
; Suboptimal code generation fails to use the higher-half contents in place; instead, it moves the higher-lane contents
; to the lower lane so it can use PMULL everywhere, generating unnecessary moves.
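;
; As a rough illustration, the user code this test models could look like the
; following C-intrinsics sketch (an assumed reconstruction: the function name is
; hypothetical, and the constants are the ones from the IR below):
;
;   #include <arm_neon.h>
;
;   // XOR the carry-less products of the high and low halves of the same
;   // vector register with two 64-bit constants.
;   uint64x2_t fold(uint64x2_t v) {
;     poly64x2_t p = vreinterpretq_p64_u64(v);
;     poly128_t hi = vmull_high_p64(p, vdupq_n_p64((poly64_t)0x9e4addf8)); // wants PMULL2
;     poly128_t lo = vmull_p64((poly64_t)vgetq_lane_u64(v, 0),
;                              (poly64_t)0x740eef02);                      // wants PMULL
;     return veorq_u64(vreinterpretq_u64_p128(lo), vreinterpretq_u64_p128(hi));
;   }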
define void @test1(ptr %0, ptr %1) {
; CHECK-LABEL: test1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #56824 // =0xddf8
; CHECK-NEXT:    mov w9, #61186 // =0xef02
; CHECK-NEXT:    movk w8, #40522, lsl #16
; CHECK-NEXT:    movk w9, #29710, lsl #16
; CHECK-NEXT:    ldp q0, q1, [x1]
; CHECK-NEXT:    dup v2.2d, x8
; CHECK-NEXT:    fmov d3, x9
; CHECK-NEXT:    pmull v4.1q, v0.1d, v3.1d
; CHECK-NEXT:    pmull v3.1q, v1.1d, v3.1d
; CHECK-NEXT:    pmull2 v0.1q, v0.2d, v2.2d
; CHECK-NEXT:    pmull2 v1.1q, v1.2d, v2.2d
; CHECK-NEXT:    eor v0.16b, v4.16b, v0.16b
; CHECK-NEXT:    eor v1.16b, v3.16b, v1.16b
; CHECK-NEXT:    stp q0, q1, [x1]
; CHECK-NEXT:    ret
  %3 = load <2 x i64>, ptr %1
  %4 = getelementptr inbounds <2 x i64>, ptr %1, i64 1
  %5 = load <2 x i64>, ptr %4
  %6 = extractelement <2 x i64> %3, i64 1
  %7 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %6, i64 2655706616)
  %8 = extractelement <2 x i64> %5, i64 1
  %9 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %8, i64 2655706616)
  %10 = load <2 x i64>, ptr %0
  %11 = getelementptr inbounds i8, ptr %0, i64 16
  %12 = load <2 x i64>, ptr %11
  %13 = extractelement <2 x i64> %3, i64 0
  %14 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %13, i64 1947135746)
  %15 = extractelement <2 x i64> %5, i64 0
  %16 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %15, i64 1947135746)
  %17 = xor <16 x i8> %14, %7
  %18 = xor <16 x i8> %16, %9
  store <16 x i8> %17, ptr %1
  store <16 x i8> %18, ptr %4
  ret void
}

; One operand is the higher half of a SIMD register, and the other operand is the lower half of another SIMD register.
; Test that codegen doesn't generate unnecessary moves.
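;
; A minimal C-intrinsics sketch of this pattern (assumed illustration; the
; function name is hypothetical):
;
;   #include <arm_neon.h>
;
;   // High lane of a times low lane of b; good codegen broadcasts b's low
;   // lane (dup) and uses PMULL2 instead of moving a's high lane down.
;   poly128_t mul_hi_lo(poly64x2_t a, poly64x2_t b) {
;     return vmull_p64(vgetq_lane_p64(a, 1), vgetq_lane_p64(b, 0));
;   }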
define void @test2(ptr %0, <2 x i64> %1, <2 x i64> %2) {
; CHECK-LABEL: test2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    dup v1.2d, v1.d[0]
; CHECK-NEXT:    pmull2 v0.1q, v0.2d, v1.2d
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %4 = extractelement <2 x i64> %1, i64 1
  %5 = extractelement <2 x i64> %2, i64 0
  %6 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %4, i64 %5)
  store <16 x i8> %6, ptr %0, align 16
  ret void
}

; Operand %4 is the higher half of a v2i64, and operand %2 is an i64 input parameter.
; Test that %2 is duplicated into the proper lane of a SIMD register directly for optimal codegen.
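;
; A minimal C-intrinsics sketch of this pattern (assumed illustration; the
; function name is hypothetical):
;
;   #include <arm_neon.h>
;
;   // High lane of a vector times a scalar; good codegen dups the scalar
;   // into a SIMD register and uses PMULL2 directly.
;   poly128_t mul_hi_scalar(poly64x2_t a, poly64_t k) {
;     return vmull_p64(vgetq_lane_p64(a, 1), k);
;   }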
define void @test3(ptr %0, <2 x i64> %1, i64 %2) {
; CHECK-LABEL: test3:
; CHECK:       // %bb.0:
; CHECK-NEXT:    dup v1.2d, x1
; CHECK-NEXT:    pmull2 v0.1q, v0.2d, v1.2d
; CHECK-NEXT:    str q0, [x0]
; CHECK-NEXT:    ret
  %4 = extractelement <2 x i64> %1, i64 1
  %5 = tail call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %4, i64 %2)
  store <16 x i8> %5, ptr %0, align 16
  ret void
}

declare <16 x i8> @llvm.aarch64.neon.pmull64(i64, i64)