From 8f620bdbb9cc7a55e7c3a980502a6e5a89792f46 Mon Sep 17 00:00:00 2001
From: Chandler Carruth
Date: Tue, 4 Sep 2018 12:38:00 +0000
Subject: [PATCH] [x86/SLH] Add a real Clang flag and LLVM IR attribute for
 Speculative Load Hardening.

Wires up the existing pass to work with a proper IR attribute rather
than just a hidden/internal flag. The internal flag continues to work
for now, but I'll likely remove it soon.

Most of the churn here is adding the IR attribute. I talked about this
with Kristof Beyls and he seemed at least initially OK with this
direction. The idea of using a full attribute here is that we *do*
expect at least some forms of this for other architectures. There isn't
anything *inherently* x86-specific about this technique, just that we
only have an implementation for x86 at the moment.

While we could potentially expose this as a Clang-level attribute as
well, that seems like a good question to defer for the moment as it
isn't 100% clear whether that or some other programmer interface (or
both?) would be best. We'll defer the programmer interface side of this
for now, but at least get to the point where the feature can be enabled
without relying on implementation details.

This also allows us to do something that was really hard before: we can
enable *just* the indirect call retpolines when using SLH. For x86, we
don't have any other way to mitigate indirect calls. Other
architectures may take a different approach of course, and none of this
is surfaced to user-level flags.
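
For illustration, here is a toy sketch of how the attribute is spelled
in textual IR (the functions @load_value and @caller are hypothetical
names invented for this example, not taken from this patch's tests):

    ; Hypothetical example: the attribute goes after the parameter list,
    ; like any other function attribute.
    define i32 @load_value(i32* %ptr) speculative_load_hardening {
    entry:
      %v = load i32, i32* %ptr
      ret i32 %v
    }

    ; Hypothetical caller without the attribute.
    define i32 @caller(i32* %ptr) {
    entry:
      %v = call i32 @load_value(i32* %ptr)
      ret i32 %v
    }

With this patch, `llc -mtriple=x86_64-unknown-linux-gnu` will run the
x86 SLH pass over @load_value without any extra flags, and if the
inliner inlines @load_value into @caller, the setOR merge rule copies
the attribute onto @caller so the inlined body still ends up hardened.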

Differential Revision: https://reviews.llvm.org/D51157

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@341363 91177308-0d34-0410-b5e6-96231b3b80d8
---
 docs/LangRef.rst                               | 22 ++++++++++++++++++
 include/llvm/Bitcode/LLVMBitCodes.h            |  1 +
 include/llvm/IR/Attributes.td                  |  9 ++++++++
 lib/AsmParser/LLLexer.cpp                      |  1 +
 lib/AsmParser/LLParser.cpp                     |  5 ++++
 lib/AsmParser/LLToken.h                        |  1 +
 lib/Bitcode/Reader/BitcodeReader.cpp           |  4 ++++
 lib/Bitcode/Writer/BitcodeWriter.cpp           |  2 ++
 lib/IR/Attributes.cpp                          |  2 ++
 lib/IR/Verifier.cpp                            |  1 +
 lib/Target/X86/X86SpeculativeLoadHardening.cpp | 11 +++++++++
 lib/Target/X86/X86TargetMachine.cpp            |  8 +------
 lib/Transforms/IPO/ForceFunctionAttrs.cpp      |  1 +
 lib/Transforms/Utils/CodeExtractor.cpp         |  1 +
 test/CodeGen/X86/O0-pipeline.ll                |  1 +
 test/CodeGen/X86/O3-pipeline.ll                |  1 +
 .../X86/speculative-load-hardening-gather.ll   |  8 +++----
 test/CodeGen/X86/speculative-load-hardening.ll | 20 ++++++++--------
 test/Transforms/Inline/attributes.ll           | 27 ++++++++++++++++++++++
 19 files changed, 105 insertions(+), 21 deletions(-)

diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index 167a33e7c3d..2aba2077653 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -1636,6 +1636,28 @@ example:
     This attribute indicates that HWAddressSanitizer checks
     (dynamic address safety analysis based on tagged pointers) are enabled for
     this function.
+``speculative_load_hardening``
+    This attribute indicates that
+    `Speculative Load Hardening <https://llvm.org/docs/SpeculativeLoadHardening.html>`_
+    should be enabled for the function body. This is a best-effort attempt to
+    mitigate all known speculative execution information leak vulnerabilities
+    that are based on the fundamental principles of modern processors'
+    speculative execution. These vulnerabilities are typically classified as
+    "Spectre variant #1" vulnerabilities. Notably, this does not attempt to
+    mitigate any vulnerabilities where the speculative execution and/or
+    prediction devices of specific processors can be *completely* undermined
+    (such as "Branch Target Injection", a.k.a. "Spectre variant #2"). Instead,
+    this is a target-independent request to harden against the completely
+    generic risk posed by speculative execution incorrectly loading secret
+    data, making it available to some micro-architectural side-channel for
+    information leak. For a processor without any speculative execution or
+    predictors, this is expected to be a no-op.
+
+    When inlining, the attribute is sticky. Inlining a function that carries
+    this attribute will cause the caller to gain the attribute. This is
+    intended to provide a maximally conservative model where the code in a
+    function annotated with this attribute will always (even after inlining)
+    end up hardened.
 ``speculatable``
     This function attribute indicates that the function does not have any
     effects besides calculating its result and does not have undefined
     behavior.
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 6723cf42dd2..5467ecc2626 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -591,6 +591,7 @@ enum AttributeKindCodes {
   ATTR_KIND_NOCF_CHECK = 56,
   ATTR_KIND_OPT_FOR_FUZZING = 57,
   ATTR_KIND_SHADOWCALLSTACK = 58,
+  ATTR_KIND_SPECULATIVE_LOAD_HARDENING = 59,
 };
 
 enum ComdatSelectionKindCodes {
diff --git a/include/llvm/IR/Attributes.td b/include/llvm/IR/Attributes.td
index 39978c41ac7..e786d85d05a 100644
--- a/include/llvm/IR/Attributes.td
+++ b/include/llvm/IR/Attributes.td
@@ -176,6 +176,14 @@ def SanitizeMemory : EnumAttr<"sanitize_memory">;
 /// HWAddressSanitizer is on.
 def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress">;
 
+/// Speculative Load Hardening is enabled.
+///
+/// Note that this uses the default compatibility (always compatible during
+/// inlining) and a conservative merge strategy where inlining an attributed
+/// body will add the attribute to the caller. This ensures that code carrying
+/// this attribute will always be lowered with hardening enabled.
+def SpeculativeLoadHardening : EnumAttr<"speculative_load_hardening">;
+
 /// Argument is swift error.
 def SwiftError : EnumAttr<"swifterror">;
@@ -232,6 +240,7 @@ def : MergeRule<"setAND<UnsafeFPMathAttr>">;
 def : MergeRule<"setOR<NoImplicitFloatAttr>">;
 def : MergeRule<"setOR<NoJumpTablesAttr>">;
 def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
+def : MergeRule<"setOR<SpeculativeLoadHardeningAttr>">;
 def : MergeRule<"adjustCallerSSPLevel">;
 def : MergeRule<"adjustCallerStackProbes">;
 def : MergeRule<"adjustCallerStackProbeSize">;
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index d868ec286aa..f4e340b4113 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -678,6 +678,7 @@ lltok::Kind LLLexer::LexIdentifier() {
   KEYWORD(sanitize_hwaddress);
   KEYWORD(sanitize_thread);
   KEYWORD(sanitize_memory);
+  KEYWORD(speculative_load_hardening);
   KEYWORD(swifterror);
   KEYWORD(swiftself);
   KEYWORD(uwtable);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index a63228ddc03..e2ee8d6c167 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1276,6 +1276,9 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
       B.addAttribute(Attribute::SanitizeThread); break;
     case lltok::kw_sanitize_memory:
       B.addAttribute(Attribute::SanitizeMemory); break;
+    case lltok::kw_speculative_load_hardening:
+      B.addAttribute(Attribute::SpeculativeLoadHardening);
+      break;
     case lltok::kw_strictfp: B.addAttribute(Attribute::StrictFP); break;
     case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
     case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
@@ -1619,6 +1622,7 @@ bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
     case lltok::kw_sanitize_hwaddress:
     case lltok::kw_sanitize_memory:
     case lltok::kw_sanitize_thread:
+    case lltok::kw_speculative_load_hardening:
     case lltok::kw_ssp:
     case lltok::kw_sspreq:
     case lltok::kw_sspstrong:
@@ -1715,6 +1719,7 @@ bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
     case lltok::kw_sanitize_hwaddress:
     case lltok::kw_sanitize_memory:
     case lltok::kw_sanitize_thread:
+    case lltok::kw_speculative_load_hardening:
     case lltok::kw_ssp:
     case lltok::kw_sspreq:
     case lltok::kw_sspstrong:
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index 2da14da4a43..4a3975955fa 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -219,6 +219,7 @@ enum Kind {
   kw_sret,
   kw_sanitize_thread,
   kw_sanitize_memory,
+  kw_speculative_load_hardening,
   kw_strictfp,
   kw_swifterror,
   kw_swiftself,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 11bc6db8ba1..7ef52acd464 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1165,6 +1165,8 @@ static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
   case Attribute::NoCfCheck:       return 1ULL << 57;
   case Attribute::OptForFuzzing:   return 1ULL << 58;
   case Attribute::ShadowCallStack: return 1ULL << 59;
+  case Attribute::SpeculativeLoadHardening:
+    return 1ULL << 60;
   case Attribute::Dereferenceable:
     llvm_unreachable("dereferenceable attribute not supported in raw format");
     break;
@@ -1389,6 +1391,8 @@ static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
     return Attribute::SanitizeThread;
   case bitc::ATTR_KIND_SANITIZE_MEMORY:
     return Attribute::SanitizeMemory;
+  case bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING:
+    return Attribute::SpeculativeLoadHardening;
   case bitc::ATTR_KIND_SWIFT_ERROR:
     return Attribute::SwiftError;
   case bitc::ATTR_KIND_SWIFT_SELF:
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index c768021a0a4..1262401266e 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -690,6 +690,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
     return bitc::ATTR_KIND_SANITIZE_THREAD;
   case Attribute::SanitizeMemory:
     return bitc::ATTR_KIND_SANITIZE_MEMORY;
+  case Attribute::SpeculativeLoadHardening:
+    return bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING;
   case Attribute::SwiftError:
     return bitc::ATTR_KIND_SWIFT_ERROR;
   case Attribute::SwiftSelf:
diff --git a/lib/IR/Attributes.cpp b/lib/IR/Attributes.cpp
index d87187481be..d1330e93e21 100644
--- a/lib/IR/Attributes.cpp
+++ b/lib/IR/Attributes.cpp
@@ -323,6 +323,8 @@ std::string Attribute::getAsString(bool InAttrGrp) const {
     return "returns_twice";
   if (hasAttribute(Attribute::SExt))
     return "signext";
+  if (hasAttribute(Attribute::SpeculativeLoadHardening))
+    return "speculative_load_hardening";
   if (hasAttribute(Attribute::Speculatable))
     return "speculatable";
   if (hasAttribute(Attribute::StackProtect))
diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp
index 2dcb9c4adf8..fc6bc4ff9d1 100644
--- a/lib/IR/Verifier.cpp
+++ b/lib/IR/Verifier.cpp
@@ -1478,6 +1478,7 @@ static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
   case Attribute::InaccessibleMemOnly:
   case Attribute::InaccessibleMemOrArgMemOnly:
   case Attribute::AllocSize:
+  case Attribute::SpeculativeLoadHardening:
   case Attribute::Speculatable:
   case Attribute::StrictFP:
     return true;
diff --git a/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index e9b4032a199..7979315a2ae 100644
--- a/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -75,6 +75,11 @@ STATISTIC(NumCallsOrJumpsHardened,
 STATISTIC(NumInstsInserted, "Number of instructions inserted");
 STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
 
+static cl::opt<bool> EnableSpeculativeLoadHardening(
+    "x86-speculative-load-hardening",
+    cl::desc("Force enable speculative load hardening"), cl::init(false),
+    cl::Hidden);
+
 static cl::opt<bool> HardenEdgesWithLFENCE(
     PASS_KEY "-lfence",
     cl::desc(
@@ -404,6 +409,12 @@ bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
   LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                     << " **********\n");
 
+  // Only run if this pass is force-enabled or we detect the relevant function
+  // attribute requesting SLH.
+  if (!EnableSpeculativeLoadHardening &&
+      !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
+    return false;
+
   Subtarget = &MF.getSubtarget<X86Subtarget>();
   MRI = &MF.getRegInfo();
   TII = Subtarget->getInstrInfo();
diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp
index 969cd0ab7e7..dd1bbc761b2 100644
--- a/lib/Target/X86/X86TargetMachine.cpp
+++ b/lib/Target/X86/X86TargetMachine.cpp
@@ -54,10 +54,6 @@ static cl::opt<bool>
     EnableMachineCombinerPass("x86-machine-combiner",
                               cl::desc("Enable the machine combiner pass"),
                               cl::init(true), cl::Hidden);
 
-static cl::opt<bool> EnableSpeculativeLoadHardening(
-    "x86-speculative-load-hardening",
-    cl::desc("Enable speculative load hardening"), cl::init(false), cl::Hidden);
-
 namespace llvm {
 
 void initializeWinEHStatePassPass(PassRegistry &);
@@ -475,9 +471,7 @@ void X86PassConfig::addPreRegAlloc() {
     addPass(createX86AvoidStoreForwardingBlocks());
   }
 
-  if (EnableSpeculativeLoadHardening)
-    addPass(createX86SpeculativeLoadHardeningPass());
-
+  addPass(createX86SpeculativeLoadHardeningPass());
   addPass(createX86FlagsCopyLoweringPass());
   addPass(createX86WinAllocaExpander());
 }
diff --git a/lib/Transforms/IPO/ForceFunctionAttrs.cpp b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
index 37273f97541..4dc1529ddbf 100644
--- a/lib/Transforms/IPO/ForceFunctionAttrs.cpp
+++ b/lib/Transforms/IPO/ForceFunctionAttrs.cpp
@@ -58,6 +58,7 @@ static Attribute::AttrKind parseAttrKind(StringRef Kind) {
       .Case("sanitize_hwaddress", Attribute::SanitizeHWAddress)
       .Case("sanitize_memory", Attribute::SanitizeMemory)
       .Case("sanitize_thread", Attribute::SanitizeThread)
+      .Case("speculative_load_hardening", Attribute::SpeculativeLoadHardening)
      .Case("ssp", Attribute::StackProtect)
      .Case("sspreq", Attribute::StackProtectReq)
      .Case("sspstrong", Attribute::StackProtectStrong)
diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp
index d9f39bd1749..7f26c53ecf3 100644
--- a/lib/Transforms/Utils/CodeExtractor.cpp
+++ b/lib/Transforms/Utils/CodeExtractor.cpp
@@ -753,6 +753,7 @@ Function *CodeExtractor::constructFunction(const ValueSet &inputs,
       case Attribute::SanitizeMemory:
       case Attribute::SanitizeThread:
       case Attribute::SanitizeHWAddress:
+      case Attribute::SpeculativeLoadHardening:
       case Attribute::StackProtect:
       case Attribute::StackProtectReq:
       case Attribute::StackProtectStrong:
diff --git a/test/CodeGen/X86/O0-pipeline.ll b/test/CodeGen/X86/O0-pipeline.ll
index 569f6a76234..05c0f358e7b 100644
--- a/test/CodeGen/X86/O0-pipeline.ll
+++ b/test/CodeGen/X86/O0-pipeline.ll
@@ -37,6 +37,7 @@
 ; CHECK-NEXT: X86 PIC Global Base Reg Initialization
 ; CHECK-NEXT: Expand ISel Pseudo-instructions
 ; CHECK-NEXT: Local Stack Slot Allocation
+; CHECK-NEXT: X86 speculative load hardening
 ; CHECK-NEXT: MachineDominator Tree Construction
 ; CHECK-NEXT: X86 EFLAGS copy lowering
 ; CHECK-NEXT: X86 WinAlloca Expander
diff --git a/test/CodeGen/X86/O3-pipeline.ll b/test/CodeGen/X86/O3-pipeline.ll
index cf1e5036bd5..44fda3c9e7a 100644
--- a/test/CodeGen/X86/O3-pipeline.ll
+++ b/test/CodeGen/X86/O3-pipeline.ll
@@ -90,6 +90,7 @@
 ; CHECK-NEXT: X86 LEA Optimize
 ; CHECK-NEXT: X86 Optimize Call Frame
 ; CHECK-NEXT: X86 Avoid Store Forwarding Block
+; CHECK-NEXT: X86 speculative load hardening
 ; CHECK-NEXT: MachineDominator Tree Construction
 ; CHECK-NEXT: X86 EFLAGS copy lowering
 ; CHECK-NEXT: X86 WinAlloca Expander
diff --git a/test/CodeGen/X86/speculative-load-hardening-gather.ll b/test/CodeGen/X86/speculative-load-hardening-gather.ll
index 8f4a47eac11..96cfac3b752 100644
--- a/test/CodeGen/X86/speculative-load-hardening-gather.ll
+++ b/test/CodeGen/X86/speculative-load-hardening-gather.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
 declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8)
 
@@ -950,6 +950,6 @@ entry:
   ret <4 x i64> %v
 }
 
-attributes #0 = { nounwind "target-features"="+avx2" }
-attributes #1 = { nounwind "target-features"="+avx512f" }
-attributes #2 = { nounwind "target-features"="+avx512vl" }
+attributes #0 = { nounwind speculative_load_hardening "target-features"="+avx2" }
+attributes #1 = { nounwind speculative_load_hardening "target-features"="+avx512f" }
+attributes #2 = { nounwind speculative_load_hardening "target-features"="+avx512vl" }
diff --git a/test/CodeGen/X86/speculative-load-hardening.ll b/test/CodeGen/X86/speculative-load-hardening.ll
index 152fc411cff..f5949dbf467 100644
--- a/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -x86-slh-lfence | FileCheck %s --check-prefix=X64-LFENCE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-slh-lfence | FileCheck %s --check-prefix=X64-LFENCE
 ;
 ; FIXME: Add support for 32-bit and other EH ABIs.
@@ -8,7 +8,7 @@ declare void @leak(i32 %v1, i32 %v2)
 
 declare void @sink(i32)
 
-define i32 @test_trivial_entry_load(i32* %ptr) {
+define i32 @test_trivial_entry_load(i32* %ptr) speculative_load_hardening {
 ; X64-LABEL: test_trivial_entry_load:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: movq %rsp, %rcx
@@ -29,7 +29,7 @@ entry:
   ret i32 %v
 }
 
-define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2, i32** %ptr3) {
+define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2, i32** %ptr3) speculative_load_hardening {
 ; X64-LABEL: test_basic_conditions:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %r15
@@ -193,7 +193,7 @@ exit:
   ret void
 }
 
-define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
+define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind speculative_load_hardening {
 ; X64-LABEL: test_basic_loop:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %rbp
@@ -301,7 +301,7 @@ exit:
   ret void
 }
 
-define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2) nounwind {
+define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2) nounwind speculative_load_hardening {
 ; X64-LABEL: test_basic_nested_loop:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %rbp
@@ -497,7 +497,7 @@ declare i8* @__cxa_allocate_exception(i64) local_unnamed_addr
 
 declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
 
-define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) speculative_load_hardening personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
 ; X64-LABEL: test_basic_eh:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %rbp
@@ -668,7 +668,7 @@ declare void @sink_float(float)
 declare void @sink_double(double)
 
 ; Test direct and converting loads of floating point values.
-define void @test_fp_loads(float* %fptr, double* %dptr, i32* %i32ptr, i64* %i64ptr) nounwind {
+define void @test_fp_loads(float* %fptr, double* %dptr, i32* %i32ptr, i64* %i64ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_fp_loads:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %r15
@@ -855,7 +855,7 @@ declare void @sink_v4i32(<4 x i32>)
 declare void @sink_v2i64(<2 x i64>)
 
 ; Test loads of vectors.
-define void @test_vec_loads(<4 x float>* %v4f32ptr, <2 x double>* %v2f64ptr, <16 x i8>* %v16i8ptr, <8 x i16>* %v8i16ptr, <4 x i32>* %v4i32ptr, <2 x i64>* %v2i64ptr) nounwind {
+define void @test_vec_loads(<4 x float>* %v4f32ptr, <2 x double>* %v2f64ptr, <16 x i8>* %v16i8ptr, <8 x i16>* %v8i16ptr, <4 x i32>* %v4i32ptr, <2 x i64>* %v2i64ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_vec_loads:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %rbp
@@ -996,7 +996,7 @@ entry:
   ret void
 }
 
-define void @test_deferred_hardening(i32* %ptr1, i32* %ptr2, i32 %x) nounwind {
+define void @test_deferred_hardening(i32* %ptr1, i32* %ptr2, i32 %x) nounwind speculative_load_hardening {
 ; X64-LABEL: test_deferred_hardening:
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: pushq %r15
diff --git a/test/Transforms/Inline/attributes.ll b/test/Transforms/Inline/attributes.ll
index 66a831bf817..028f3b0f197 100644
--- a/test/Transforms/Inline/attributes.ll
+++ b/test/Transforms/Inline/attributes.ll
@@ -26,6 +26,10 @@ define i32 @safestack_callee(i32 %i) safestack {
   ret i32 %i
 }
 
+define i32 @slh_callee(i32 %i) speculative_load_hardening {
+  ret i32 %i
+}
+
 define i32 @alwaysinline_callee(i32 %i) alwaysinline {
   ret i32 %i
 }
@@ -161,6 +165,28 @@ define i32 @test_safestack(i32 %arg) safestack {
 ; CHECK-NEXT: ret i32
 }
 
+; Can inline a normal function into an SLH'ed function.
+define i32 @test_caller_slh(i32 %i) speculative_load_hardening {
+; CHECK-LABEL: @test_caller_slh(
+; CHECK-SAME: ) [[SLH:.*]] {
+; CHECK-NOT: call
+; CHECK: ret i32
+entry:
+  %callee = call i32 @noattr_callee(i32 %i)
+  ret i32 %callee
+}
+
+; Can inline an SLH'ed function into a normal one, propagating SLH.
+define i32 @test_callee_slh(i32 %i) {
+; CHECK-LABEL: @test_callee_slh(
+; CHECK-SAME: ) [[SLH:.*]] {
+; CHECK-NOT: call
+; CHECK: ret i32
+entry:
+  %callee = call i32 @slh_callee(i32 %i)
+  ret i32 %callee
+}
+
 ; Check that a function doesn't get inlined if target-cpu strings don't match
 ; exactly.
 define i32 @test_target_cpu_callee0(i32 %i) "target-cpu"="corei7" {
@@ -384,6 +410,7 @@ define i32 @test_null-pointer-is-valid2(i32 %i) "null-pointer-is-valid"="true" {
 ; CHECK-NEXT: ret i32
 }
 
+; CHECK: attributes [[SLH]] = { speculative_load_hardening }
 ; CHECK: attributes [[FPMAD_FALSE]] = { "less-precise-fpmad"="false" }
 ; CHECK: attributes [[FPMAD_TRUE]] = { "less-precise-fpmad"="true" }
 ; CHECK: attributes [[NOIMPLICITFLOAT]] = { noimplicitfloat }
-- 
2.11.4.GIT