clang/test/CodeGen/LoongArch/inline-asm-operand-modifiers.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple loongarch32 -O2 -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -triple loongarch64 -O2 -emit-llvm %s -o - | FileCheck %s

/// Test LoongArch specific operand modifiers (i.e. operand codes).

// CHECK-LABEL: @test_z_zero(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 asm sideeffect "add.w $0, $1, ${2:z}", "=r,r,ri"(i32 [[A:%.*]], i32 0) #[[ATTR1:[0-9]+]], !srcloc !2
// CHECK-NEXT:    ret void
//
void test_z_zero(int a) {
  int tmp;
  asm volatile ("add.w %0, %1, %z2" : "=r" (tmp) : "r" (a), "ri" (0));
}

// CHECK-LABEL: @test_z_nonzero(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = tail call i32 asm sideeffect "add.w $0, $1, ${2:z}", "=r,r,ri"(i32 [[A:%.*]], i32 1) #[[ATTR1]], !srcloc !3
// CHECK-NEXT:    ret void
//
void test_z_nonzero(int a) {
  int tmp;
  asm volatile ("add.w %0, %1, %z2" : "=r" (tmp) : "r" (a), "ri" (1));
}
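
// The standalone sketch below is illustrative only and not part of the
// autogenerated checks above. It assumes, as the test names suggest, that the
// LoongArch `z` operand code prints the zero register when the matched
// operand is the constant 0 and prints the operand unchanged otherwise; the
// function name `sketch_z_modifier` is hypothetical.
int sketch_z_modifier(int a) {
  int out;
  // With the "ri"(0) input, the `%z2` reference is expected to lower to the
  // zero register rather than a literal 0 in the emitted assembly.
  asm volatile ("add.w %0, %1, %z2" : "=r" (out) : "r" (a), "ri" (0));
  return out;
}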