// RUN: mlir-opt %s -convert-amdgpu-to-rocdl=chipset=gfx940 | FileCheck %s
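// Extending a scalar f8E5M2FNUZ: the byte is inserted into lane 0 of an undef
// vector<4xi8>, bitcast to i32, and converted with rocdl.cvt.f32.bf8 at index 0.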
// CHECK-LABEL: func @ext_scalar
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : f8E5M2FNUZ to i8
// CHECK-DAG: [[UNDEF:%.+]] = llvm.mlir.undef : vector<4xi8>
// CHECK-DAG: [[C0_1:%.+]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[VEC:%.+]] = llvm.insertelement [[V]], [[UNDEF]]{{\[}}[[C0_1]] : i32] : vector<4xi8>
// CHECK: [[CAST:%.+]] = llvm.bitcast [[VEC]] : vector<4xi8> to i32
// CHECK: [[C0_2:%.+]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.bf8 [[CAST]]{{\[}}[[C0_2]]] : f32
// CHECK: return [[EXT]]
func.func @ext_scalar(%v: f8E5M2FNUZ) -> f32 {
  %ret = amdgpu.ext_packed_fp8 %v[0] : f8E5M2FNUZ to f32
  func.return %ret : f32
}

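// Extending lane 1 of a vector<2xf8E4M3FNUZ>: the two bytes are copied into an
// undef vector<4xi8>, bitcast to i32, and converted with rocdl.cvt.f32.fp8 at index 1.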
// CHECK-LABEL: func @ext_short_vec
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : vector<2xf8E4M3FNUZ> to vector<2xi8>
// CHECK-DAG: [[UNDEF:%.+]] = llvm.mlir.undef : vector<4xi8>
// CHECK-DAG: [[C0:%.+]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[ELEM_0:%.+]] = llvm.extractelement [[V]]{{\[}}[[C0]] : i32] : vector<2xi8>
// CHECK: [[VEC_0:%.+]] = llvm.insertelement [[ELEM_0]], [[UNDEF]]{{\[}}[[C0]] : i32] : vector<4xi8>
// CHECK: [[C1_1:%.+]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: [[ELEM_1:%.+]] = llvm.extractelement [[V]]{{\[}}[[C1_1]] : i32] : vector<2xi8>
// CHECK: [[VEC_1:%.+]] = llvm.insertelement [[ELEM_1]], [[VEC_0]]{{\[}}[[C1_1]] : i32] : vector<4xi8>
// CHECK: [[CAST:%.+]] = llvm.bitcast [[VEC_1]] : vector<4xi8> to i32
// CHECK: [[C1_2:%.+]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.fp8 [[CAST]]{{\[}}[[C1_2]]] : f32
// CHECK: return [[EXT]]
func.func @ext_short_vec(%v: vector<2xf8E4M3FNUZ>) -> f32 {
  %ret = amdgpu.ext_packed_fp8 %v[1] : vector<2xf8E4M3FNUZ> to f32
  func.return %ret : f32
}

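// Extending lane 3 of a full vector<4xf8E4M3FNUZ>: no repacking is needed, the
// vector is bitcast straight to i32 and fed to rocdl.cvt.f32.fp8 at index 3.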
// CHECK-LABEL: func @ext_full_vec(
// CHECK: [[V:%.+]] = builtin.unrealized_conversion_cast %{{.+}} : vector<4xf8E4M3FNUZ> to vector<4xi8>
// CHECK: [[CAST:%.+]] = llvm.bitcast [[V]] : vector<4xi8> to i32
// CHECK: [[C3:%.+]] = llvm.mlir.constant(3 : i32) : i32
// CHECK: [[EXT:%.+]] = rocdl.cvt.f32.fp8 [[CAST]]{{\[}}[[C3]]] : f32
// CHECK: return [[EXT]] : f32
func.func @ext_full_vec(%v: vector<4xf8E4M3FNUZ>) -> f32 {
  %ret = amdgpu.ext_packed_fp8 %v[3] : vector<4xf8E4M3FNUZ> to f32
  func.return %ret : f32
}

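// Truncating a single f32 with both the second source and the destination left
// undef: lowers to rocdl.cvt.pk.fp8.f32 with word-select false (word 0).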
// CHECK-LABEL: func @packed_trunc
// CHECK-SAME: ([[V:%.+]]: f32)
// CHECK: [[V2:%.+]] = llvm.mlir.undef : f32
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
// CHECK: [[FALSE:%.+]] = llvm.mlir.constant(false) : i1
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.fp8.f32 [[V]], [[V2]] -> [[EXISTING]]{{\[}}[[FALSE]]] : i32
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FNUZ>
func.func @packed_trunc(%v: f32) -> vector<4xf8E4M3FNUZ> {
  %ret = amdgpu.packed_trunc_2xfp8 %v, undef into undef[word 0] : f32 to vector<4xf8E4M3FNUZ>
  func.return %ret : vector<4xf8E4M3FNUZ>
}

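// Truncating two f32 values into an undef destination: both sources feed
// rocdl.cvt.pk.fp8.f32, still selecting word 0.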
// CHECK-LABEL: func @packed_truncx2
// CHECK-SAME: ([[V:%.+]]: f32, [[W:%.+]]: f32)
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
// CHECK: [[FALSE:%.+]] = llvm.mlir.constant(false) : i1
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.fp8.f32 [[V]], [[W]] -> [[EXISTING]]{{\[}}[[FALSE]]] : i32
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FNUZ>
func.func @packed_truncx2(%v: f32, %w: f32) -> vector<4xf8E4M3FNUZ> {
  %ret = amdgpu.packed_trunc_2xfp8 %v, %w into undef[word 0] : f32 to vector<4xf8E4M3FNUZ>
  func.return %ret : vector<4xf8E4M3FNUZ>
}

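// Truncating into an existing vector<4xf8E5M2FNUZ>: the destination is bitcast
// to i32 and rocdl.cvt.pk.bf8.f32 overwrites word 1 (word-select true).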
// CHECK-LABEL: func @packed_truncx2_into
// CHECK-SAME: ([[V:%.+]]: f32, [[W:%.+]]: f32, [[EXISTING:%.+]]: vector<4xf8E5M2FNUZ>)
// CHECK: [[EXISTING_BYTES:%.+]] = builtin.unrealized_conversion_cast [[EXISTING]] : vector<4xf8E5M2FNUZ> to vector<4xi8>
// CHECK: [[EXISTING_INT:%.+]] = llvm.bitcast [[EXISTING_BYTES]] : vector<4xi8> to i32
// CHECK: [[TRUE:%.+]] = llvm.mlir.constant(true) : i1
// CHECK: [[PACKED:%.+]] = rocdl.cvt.pk.bf8.f32 [[V]], [[W]] -> [[EXISTING_INT]]{{\[}}[[TRUE]]] : i32
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E5M2FNUZ>
func.func @packed_truncx2_into(%v: f32, %w: f32, %existing: vector<4xf8E5M2FNUZ>) -> vector<4xf8E5M2FNUZ> {
  %ret = amdgpu.packed_trunc_2xfp8 %v, %w into %existing[word 1] : f32 to vector<4xf8E5M2FNUZ> into vector<4xf8E5M2FNUZ>
  func.return %ret : vector<4xf8E5M2FNUZ>
}

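// Stochastic rounding of an f32 with an i32 seed into an undef destination:
// lowers to rocdl.cvt.sr.fp8.f32 writing byte 0.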
// CHECK-LABEL: func @packed_stoch_round
// CHECK-SAME: ([[V:%.+]]: f32, [[S:%.+]]: i32)
// CHECK: [[EXISTING:%.+]] = llvm.mlir.undef : i32
// CHECK: [[C0:%.+]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: [[PACKED:%.+]] = rocdl.cvt.sr.fp8.f32 [[V]], [[S]] -> [[EXISTING]]{{\[}}[[C0]]] : i32
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E4M3FNUZ>
func.func @packed_stoch_round(%v: f32, %s: i32) -> vector<4xf8E4M3FNUZ> {
  %ret = amdgpu.packed_stoch_round_fp8 %v + %s into undef[0] : f32 to vector<4xf8E4M3FNUZ>
  func.return %ret : vector<4xf8E4M3FNUZ>
}

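// Stochastic rounding into an existing vector<4xf8E5M2FNUZ>: the destination is
// bitcast to i32 and rocdl.cvt.sr.bf8.f32 overwrites byte 1.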
// CHECK-LABEL: func @packed_stoch_round_into
// CHECK-SAME: ([[V:%.+]]: f32, [[S:%.+]]: i32, [[EXISTING:%.+]]: vector<4xf8E5M2FNUZ>)
// CHECK: [[EXISTING_BYTES:%.+]] = builtin.unrealized_conversion_cast [[EXISTING]] : vector<4xf8E5M2FNUZ> to vector<4xi8>
// CHECK: [[EXISTING_INT:%.+]] = llvm.bitcast [[EXISTING_BYTES]] : vector<4xi8> to i32
// CHECK: [[C1:%.+]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: [[PACKED:%.+]] = rocdl.cvt.sr.bf8.f32 [[V]], [[S]] -> [[EXISTING_INT]]{{\[}}[[C1]]] : i32
// CHECK: [[CAST:%.+]] = llvm.bitcast [[PACKED]] : i32 to vector<4xi8>
// CHECK: builtin.unrealized_conversion_cast [[CAST]] : vector<4xi8> to vector<4xf8E5M2FNUZ>
func.func @packed_stoch_round_into(%v: f32, %s: i32, %existing: vector<4xf8E5M2FNUZ>) -> vector<4xf8E5M2FNUZ> {
  %ret = amdgpu.packed_stoch_round_fp8 %v + %s into %existing[1] : f32 to vector<4xf8E5M2FNUZ> into vector<4xf8E5M2FNUZ>
  func.return %ret : vector<4xf8E5M2FNUZ>
}