//===------ BPFPreserveStaticOffset.cpp -----------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// TLDR: replaces llvm.preserve.static.offset + GEP + load / store
//           with llvm.bpf.getelementptr.and.load / store
//
// This file implements the BPFPreserveStaticOffsetPass transformation.
// This transformation addresses two BPF verifier specific issues:
//
// (a) Access to the fields of some structural types is allowed only
//     using load and store instructions with static immediate offsets.
//
//     Examples of such types are `struct __sk_buff` and `struct
//     bpf_sock_ops`. This is so because offsets of the fields of
//     these structures do not match real offsets in the running
//     kernel. During BPF program load LDX and STX instructions
//     referring to the fields of these types are rewritten so that
//     offsets match real offsets. For this rewrite to happen field
//     offsets have to be encoded as immediate operands of the
//     instructions.
//
//     See the kernel/bpf/verifier.c:convert_ctx_access function in the
//     Linux kernel source tree for details.
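//
//     For illustration (a rough sketch, not tied to a specific kernel
//     version): a C access like `skb->mark` on a `struct __sk_buff *skb`
//     parameter has to compile to a single load with an immediate
//     offset, so that the verifier can substitute the offset of the
//     corresponding field of the real in-kernel socket buffer structure.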
//
// (b) Pointers to context parameters of BPF programs must not be
//     modified before access.
//
//     During BPF program verification a tag PTR_TO_CTX is tracked for
//     register values. If a register with such a tag is modified, the
//     BPF program is not allowed to read or write memory using that
//     register. See the kernel/bpf/verifier.c:check_mem_access function
//     in the Linux kernel source tree for details.
//
// The following sequence of the IR instructions:
//
//   %x = getelementptr %ptr, %constant_offset
//   %y = load %x
//
// Is translated as a single machine instruction:
//
//   LDW %ptr, %constant_offset
//
// In order for cases (a) and (b) to work the sequence %x-%y above has
// to be preserved by the IR passes.
//
// However, several optimization passes might sink the `load` instruction
// or hoist the `getelementptr` instruction so that the instructions are
// no longer in sequence. Examples of such passes are:
// SimplifyCFGPass, InstCombinePass, GVNPass.
// After such a modification the verifier would reject the BPF program.
//
// To avoid this issue, patterns like (load/store (getelementptr ...))
// are replaced by calls to BPF specific intrinsic functions:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
//
// These calls are lowered back to (load/store (getelementptr ...))
// by the BPFCheckAndAdjustIR pass right before the translation from IR to
// machine instructions.
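//
// For illustration, a rough sketch of the replacement (argument lists
// abbreviated; the exact intrinsic signatures are defined in
// llvm/include/llvm/IR/IntrinsicsBPF.td):
//
//   %x = getelementptr %ptr, %constant_offset
//   %y = load %x
//     =>
//   %y = call @llvm.bpf.getelementptr.and.load(%ptr, ..., %constant_offset)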
//
// The transformation is split into the following steps:
// - When IR is generated from the AST, calls to the intrinsic function
//   llvm.preserve.static.offset are inserted.
// - BPFPreserveStaticOffsetPass is executed as early as possible
//   with AllowPartial set to true; this handles marked GEP chains
//   with constant offsets.
// - BPFPreserveStaticOffsetPass is executed at ScalarOptimizerLateEPCallback
//   with AllowPartial set to false; this handles marked GEP chains
//   with offsets that became constant after loop unrolling, e.g.
//   to handle the following code:
//
//   struct context { int x[4]; } __attribute__((preserve_static_offset));
//
//   struct context *ctx = ...;
//   #pragma clang loop unroll(full)
//   for (int i = 0; i < 4; ++i)
//     ... ctx->x[i] ...
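//
//   After full unrolling every iteration accesses ctx->x with a constant
//   index, roughly:
//
//     ... ctx->x[0] ... ctx->x[1] ... ctx->x[2] ... ctx->x[3] ...
//
//   so the corresponding GEP chains have constant offsets and can be
//   folded by the late pass run.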
//
// The early BPFPreserveStaticOffsetPass run is necessary to allow
// additional GVN / CSE opportunities after function inlining.
// The relative order of optimizations applied to a function is:
// - early BPFPreserveStaticOffsetPass run (1)
// - function inlining (2)
// - ScalarOptimizerLateEPCallback (3)
//
// When function A is inlined into function B all optimizations for A
// are already done, while some passes remain for B. If
// BPFPreserveStaticOffsetPass is done at (3) but not at (1),
// the code after (2) would contain a mix of
// (load (gep %p)) and (get.and.load %p) usages:
// - the (load (gep %p)) would come from the calling function;
// - the (get.and.load %p) would come from the callee function.
// Such a mix hampers the CSE / GVN passes done after inlining.
//
//===----------------------------------------------------------------------===//

#include "BPF.h"
#include "BPFCORE.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"

#define DEBUG_TYPE "bpf-preserve-static-offset"

using namespace llvm;
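
// Operand layout of the llvm.bpf.getelementptr.and.load / .store calls
// produced below, as assembled by fillCommonArgs() (a store call carries
// the stored value as operand 0, shifting the remaining operands by one):
//   load : ptr, volatile, ordering, syncscope, align-shift, inbounds, idx...
//   store: val, ptr, volatile, ordering, syncscope, align-shift, inbounds, idx...
// Hence the first GEP index is operand 6 for loads and 7 for stores.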
static const unsigned GepAndLoadFirstIdxArg = 6;
static const unsigned GepAndStoreFirstIdxArg = 7;

static bool isIntrinsicCall(Value *I, Intrinsic::ID Id) {
  if (auto *Call = dyn_cast<CallInst>(I))
    if (Function *Func = Call->getCalledFunction())
      return Func->getIntrinsicID() == Id;
  return false;
}

static bool isPreserveStaticOffsetCall(Value *I) {
  return isIntrinsicCall(I, Intrinsic::preserve_static_offset);
}

static CallInst *isGEPAndLoad(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_load))
    return cast<CallInst>(I);
  return nullptr;
}

static CallInst *isGEPAndStore(Value *I) {
  if (isIntrinsicCall(I, Intrinsic::bpf_getelementptr_and_store))
    return cast<CallInst>(I);
  return nullptr;
}

template <class T = Instruction>
static DILocation *mergeDILocations(SmallVector<T *> &Insns) {
  DILocation *Merged = (*Insns.begin())->getDebugLoc();
  for (T *I : Insns)
    Merged = DILocation::getMergedLocation(Merged, I->getDebugLoc());
  return Merged;
}

static CallInst *makeIntrinsicCall(Module *M,
                                   Intrinsic::BPFIntrinsics Intrinsic,
                                   ArrayRef<Type *> Types,
                                   ArrayRef<Value *> Args) {
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, Intrinsic, Types);
  return CallInst::Create(Fn, Args);
}

static void setParamElementType(CallInst *Call, unsigned ArgNo, Type *Type) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ElementType, Type));
}

static void setParamReadNone(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadNone));
}

static void setParamReadOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::ReadOnly));
}

static void setParamWriteOnly(CallInst *Call, unsigned ArgNo) {
  LLVMContext &C = Call->getContext();
  Call->addParamAttr(ArgNo, Attribute::get(C, Attribute::WriteOnly));
}
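
// Accumulated result of folding a chain of GEP instructions: either a
// struct-style access (source element type plus constant field indices)
// or a plain i8 access with a single byte offset, see
// foldGEPChainAsStructAccess() and foldGEPChainAsU8Access() below.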
namespace {
struct GEPChainInfo {
  bool InBounds;
  Type *SourceElementType;
  SmallVector<Value *> Indices;
  SmallVector<GetElementPtrInst *> Members;

  GEPChainInfo() { reset(); }

  void reset() {
    InBounds = true;
    SourceElementType = nullptr;
    Indices.clear();
    Members.clear();
  }
};
} // Anonymous namespace

template <class T = std::disjunction<LoadInst, StoreInst>>
static void fillCommonArgs(LLVMContext &C, SmallVector<Value *> &Args,
                           GEPChainInfo &GEP, T *Insn) {
  Type *Int8Ty = Type::getInt8Ty(C);
  Type *Int1Ty = Type::getInt1Ty(C);
  // Implementation of Align guarantees that ShiftValue < 64
  unsigned AlignShiftValue = Log2_64(Insn->getAlign().value());
  Args.push_back(GEP.Members[0]->getPointerOperand());
  Args.push_back(ConstantInt::get(Int1Ty, Insn->isVolatile()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getOrdering()));
  Args.push_back(ConstantInt::get(Int8Ty, (unsigned)Insn->getSyncScopeID()));
  Args.push_back(ConstantInt::get(Int8Ty, AlignShiftValue));
  Args.push_back(ConstantInt::get(Int1Ty, GEP.InBounds));
  Args.append(GEP.Indices.begin(), GEP.Indices.end());
}

static Instruction *makeGEPAndLoad(Module *M, GEPChainInfo &GEP,
                                   LoadInst *Load) {
  SmallVector<Value *> Args;
  fillCommonArgs(M->getContext(), Args, GEP, Load);
  CallInst *Call = makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_load,
                                     {Load->getType()}, Args);
  setParamElementType(Call, 0, GEP.SourceElementType);
  Call->applyMergedLocation(mergeDILocations(GEP.Members), Load->getDebugLoc());
  Call->setName((*GEP.Members.rbegin())->getName());
  if (Load->isUnordered()) {
    Call->setOnlyReadsMemory();
    Call->setOnlyAccessesArgMemory();
    setParamReadOnly(Call, 0);
  }
  for (unsigned I = GepAndLoadFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Load->getAAMetadata());
  return Call;
}

static Instruction *makeGEPAndStore(Module *M, GEPChainInfo &GEP,
                                    StoreInst *Store) {
  SmallVector<Value *> Args;
  Args.push_back(Store->getValueOperand());
  fillCommonArgs(M->getContext(), Args, GEP, Store);
  CallInst *Call =
      makeIntrinsicCall(M, Intrinsic::bpf_getelementptr_and_store,
                        {Store->getValueOperand()->getType()}, Args);
  setParamElementType(Call, 1, GEP.SourceElementType);
  if (Store->getValueOperand()->getType()->isPointerTy())
    setParamReadNone(Call, 0);
  Call->applyMergedLocation(mergeDILocations(GEP.Members),
                            Store->getDebugLoc());
  if (Store->isUnordered()) {
    Call->setOnlyWritesMemory();
    Call->setOnlyAccessesArgMemory();
    setParamWriteOnly(Call, 1);
  }
  for (unsigned I = GepAndStoreFirstIdxArg; I < Args.size(); ++I)
    Call->addParamAttr(I, Attribute::ImmArg);
  Call->setAAMetadata(Store->getAAMetadata());
  return Call;
}

static unsigned getOperandAsUnsigned(CallInst *Call, unsigned ArgNo) {
  if (auto *Int = dyn_cast<ConstantInt>(Call->getOperand(ArgNo)))
    return Int->getValue().getZExtValue();
  std::string Report;
  raw_string_ostream ReportS(Report);
  ReportS << "Expecting ConstantInt as argument #" << ArgNo << " of " << *Call;
  report_fatal_error(StringRef(Report));
}
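
// Delta is the position of the base pointer among the intrinsic call
// operands: 0 for llvm.bpf.getelementptr.and.load, 1 for
// llvm.bpf.getelementptr.and.store (whose operand 0 is the stored value).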
static GetElementPtrInst *reconstructGEP(CallInst *Call, int Delta) {
  SmallVector<Value *> Indices;
  Indices.append(Call->data_operands_begin() + 6 + Delta,
                 Call->data_operands_end());
  Type *GEPPointeeType = Call->getParamElementType(Delta);
  auto *GEP =
      GetElementPtrInst::Create(GEPPointeeType, Call->getOperand(Delta),
                                ArrayRef<Value *>(Indices), Call->getName());
  GEP->setIsInBounds(getOperandAsUnsigned(Call, 5 + Delta));
  return GEP;
}

template <class T = std::disjunction<LoadInst, StoreInst>>
static void reconstructCommon(CallInst *Call, GetElementPtrInst *GEP, T *Insn,
                              int Delta) {
  Insn->setVolatile(getOperandAsUnsigned(Call, 1 + Delta));
  Insn->setOrdering((AtomicOrdering)getOperandAsUnsigned(Call, 2 + Delta));
  Insn->setSyncScopeID(getOperandAsUnsigned(Call, 3 + Delta));
  unsigned AlignShiftValue = getOperandAsUnsigned(Call, 4 + Delta);
  Insn->setAlignment(Align(1ULL << AlignShiftValue));
  GEP->setDebugLoc(Call->getDebugLoc());
  Insn->setDebugLoc(Call->getDebugLoc());
  Insn->setAAMetadata(Call->getAAMetadata());
}
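
// Reconstructs the (getelementptr, load) pair encoded by a
// llvm.bpf.getelementptr.and.load call. The new instructions are created
// detached from any basic block; callers either insert them (lowering in
// BPFCheckAndAdjustIR) or use them as templates and delete them
// (see rewriteAccessChain below).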
std::pair<GetElementPtrInst *, LoadInst *>
BPFPreserveStaticOffsetPass::reconstructLoad(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 0);
  Type *ReturnType = Call->getFunctionType()->getReturnType();
  auto *Load = new LoadInst(ReturnType, GEP, "",
                            /* These would be set in reconstructCommon */
                            false, Align(1));
  reconstructCommon(Call, GEP, Load, 0);
  return std::pair{GEP, Load};
}

std::pair<GetElementPtrInst *, StoreInst *>
BPFPreserveStaticOffsetPass::reconstructStore(CallInst *Call) {
  GetElementPtrInst *GEP = reconstructGEP(Call, 1);
  auto *Store = new StoreInst(Call->getOperand(0), GEP,
                              /* These would be set in reconstructCommon */
                              false, Align(1));
  reconstructCommon(Call, GEP, Store, 1);
  return std::pair{GEP, Store};
}

static bool isZero(Value *V) {
  auto *CI = dyn_cast<ConstantInt>(V);
  return CI && CI->isZero();
}

// Given a chain of GEP instructions collect information necessary to
// merge this chain as a single GEP instruction of form:
//   getelementptr %<type>, ptr %p, i32 0, <field_idx1>, <field_idx2>, ...
static bool foldGEPChainAsStructAccess(SmallVector<GetElementPtrInst *> &GEPs,
                                       GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  if (!all_of(GEPs, [=](GetElementPtrInst *GEP) {
        return GEP->hasAllConstantIndices();
      }))
    return false;

  GetElementPtrInst *First = GEPs[0];
  Info.InBounds = First->isInBounds();
  Info.SourceElementType = First->getSourceElementType();
  Type *ResultElementType = First->getResultElementType();
  Info.Indices.append(First->idx_begin(), First->idx_end());
  Info.Members.push_back(First);

  for (auto *Iter = GEPs.begin() + 1; Iter != GEPs.end(); ++Iter) {
    GetElementPtrInst *GEP = *Iter;
    if (!isZero(*GEP->idx_begin())) {
      Info.reset();
      return false;
    }
    if (!GEP->getSourceElementType() ||
        GEP->getSourceElementType() != ResultElementType) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Indices.append(GEP->idx_begin() + 1, GEP->idx_end());
    Info.Members.push_back(GEP);
    ResultElementType = GEP->getResultElementType();
  }

  return true;
}

// Given a chain of GEP instructions collect information necessary to
// merge this chain as a single GEP instruction of form:
//   getelementptr i8, ptr %p, i64 %offset
static bool foldGEPChainAsU8Access(SmallVector<GetElementPtrInst *> &GEPs,
                                   GEPChainInfo &Info) {
  if (GEPs.empty())
    return false;

  GetElementPtrInst *First = GEPs[0];
  const DataLayout &DL = First->getDataLayout();
  LLVMContext &C = First->getContext();
  Type *PtrTy = First->getType()->getScalarType();
  APInt Offset(DL.getIndexTypeSizeInBits(PtrTy), 0);
  for (GetElementPtrInst *GEP : GEPs) {
    if (!GEP->accumulateConstantOffset(DL, Offset)) {
      Info.reset();
      return false;
    }
    Info.InBounds &= GEP->isInBounds();
    Info.Members.push_back(GEP);
  }
  Info.SourceElementType = Type::getInt8Ty(C);
  Info.Indices.push_back(ConstantInt::get(C, Offset));

  return true;
}

static void reportNonStaticGEPChain(Instruction *Insn) {
  auto Msg = DiagnosticInfoUnsupported(
      *Insn->getFunction(),
      Twine("Non-constant offset in access to a field of a type marked "
            "with preserve_static_offset might be rejected by BPF verifier")
          .concat(Insn->getDebugLoc()
                      ? ""
                      : " (pass -g option to get exact location)"),
      Insn->getDebugLoc(), DS_Warning);
  Insn->getContext().diagnose(Msg);
}

static bool allZeroIndices(SmallVector<GetElementPtrInst *> &GEPs) {
  return GEPs.empty() || all_of(GEPs, [=](GetElementPtrInst *GEP) {
           return GEP->hasAllZeroIndices();
         });
}
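
// Attempts to fold the collected GEP chain and replace InsnToReplace with
// a single gep.and.load / gep.and.store call built from the (possibly
// detached) LoadOrStoreTemplate instruction. Returns false if the chain
// cannot be folded to a constant offset.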
static bool tryToReplaceWithGEPBuiltin(Instruction *LoadOrStoreTemplate,
                                       SmallVector<GetElementPtrInst *> &GEPs,
                                       Instruction *InsnToReplace) {
  GEPChainInfo GEPChain;
  if (!foldGEPChainAsStructAccess(GEPs, GEPChain) &&
      !foldGEPChainAsU8Access(GEPs, GEPChain)) {
    return false;
  }
  Module *M = InsnToReplace->getModule();
  if (auto *Load = dyn_cast<LoadInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndLoad(M, GEPChain, Load);
    Replacement->insertBefore(InsnToReplace);
    InsnToReplace->replaceAllUsesWith(Replacement);
  }
  if (auto *Store = dyn_cast<StoreInst>(LoadOrStoreTemplate)) {
    Instruction *Replacement = makeGEPAndStore(M, GEPChain, Store);
    Replacement->insertBefore(InsnToReplace);
  }
  return true;
}

// Check if U->getPointerOperand() == I
static bool isPointerOperand(Value *I, User *U) {
  if (auto *L = dyn_cast<LoadInst>(U))
    return L->getPointerOperand() == I;
  if (auto *S = dyn_cast<StoreInst>(U))
    return S->getPointerOperand() == I;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
    return GEP->getPointerOperand() == I;
  if (auto *Call = isGEPAndLoad(U))
    return Call->getArgOperand(0) == I;
  if (auto *Call = isGEPAndStore(U))
    return Call->getArgOperand(1) == I;
  return false;
}

static bool isInlineableCall(User *U) {
  if (auto *Call = dyn_cast<CallInst>(U))
    return Call->hasFnAttr(Attribute::InlineHint);
  return false;
}

static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed);

static void rewriteUses(Instruction *Insn,
                        SmallVector<GetElementPtrInst *> &GEPs,
                        SmallVector<Instruction *> &Visited, bool AllowPartial,
                        bool &StillUsed) {
  for (User *U : Insn->users()) {
    auto *UI = dyn_cast<Instruction>(U);
    if (UI && (isPointerOperand(Insn, UI) || isPreserveStaticOffsetCall(UI) ||
               isInlineableCall(UI)))
      rewriteAccessChain(UI, GEPs, Visited, AllowPartial, StillUsed);
    else
      LLVM_DEBUG({
        llvm::dbgs() << "unsupported usage in BPFPreserveStaticOffsetPass:\n";
        llvm::dbgs() << "  Insn: " << *Insn << "\n";
        llvm::dbgs() << "  User: " << *U << "\n";
      });
  }
}

// A DFS traversal of GEP chain trees starting from Root.
//
// Recursion descends through GEP instructions and
// llvm.preserve.static.offset calls. Recursion stops at any other
// instruction. If a load or store instruction is reached it is replaced
// by a call to the `llvm.bpf.getelementptr.and.load` or
// `llvm.bpf.getelementptr.and.store` intrinsic.
// If `llvm.bpf.getelementptr.and.load/store` is reached the accumulated
// GEPs are merged into the intrinsic call.
// If nested calls to `llvm.preserve.static.offset` are encountered these
// calls are marked for deletion.
//
// Parameters description:
// - Insn - current position in the tree
// - GEPs - GEP instructions for the current branch
// - Visited - a list of visited instructions in DFS order,
//   order is important for unused instruction deletion.
// - AllowPartial - when true, GEP chains that can't be folded are
//   not reported; otherwise a diagnostic message is shown for such chains.
// - StillUsed - set to true if one of the GEP chains could not be
//   folded; meaningful when AllowPartial is true, it means that the root
//   preserve.static.offset call is still in use and should remain
//   until the next run of this pass.
static void rewriteAccessChain(Instruction *Insn,
                               SmallVector<GetElementPtrInst *> &GEPs,
                               SmallVector<Instruction *> &Visited,
                               bool AllowPartial, bool &StillUsed) {
  auto MarkAndTraverseUses = [&]() {
    Visited.push_back(Insn);
    rewriteUses(Insn, GEPs, Visited, AllowPartial, StillUsed);
  };
  auto TryToReplace = [&](Instruction *LoadOrStore) {
    // Do nothing for (preserve.static.offset (load/store ..)) or for
    // GEPs with zero indices. Such constructs lead to zero offset and
    // are simplified by other passes.
    if (allZeroIndices(GEPs))
      return;
    if (tryToReplaceWithGEPBuiltin(LoadOrStore, GEPs, Insn)) {
      Visited.push_back(Insn);
      return;
    }
    if (!AllowPartial)
      reportNonStaticGEPChain(Insn);
    StillUsed = true;
  };
  if (isa<LoadInst>(Insn) || isa<StoreInst>(Insn)) {
    TryToReplace(Insn);
  } else if (isGEPAndLoad(Insn)) {
    auto [GEP, Load] =
        BPFPreserveStaticOffsetPass::reconstructLoad(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Load);
    GEPs.pop_back();
    delete Load;
    delete GEP;
  } else if (isGEPAndStore(Insn)) {
    // This case can't be merged with the above because
    // `delete Load` / `delete Store` wants a concrete type,
    // destructor of Instruction is protected.
    auto [GEP, Store] =
        BPFPreserveStaticOffsetPass::reconstructStore(cast<CallInst>(Insn));
    GEPs.push_back(GEP);
    TryToReplace(Store);
    GEPs.pop_back();
    delete Store;
    delete GEP;
  } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Insn)) {
    GEPs.push_back(GEP);
    MarkAndTraverseUses();
    GEPs.pop_back();
  } else if (isPreserveStaticOffsetCall(Insn)) {
    MarkAndTraverseUses();
  } else if (isInlineableCall(Insn)) {
    // Preserve preserve.static.offset call for parameters of
    // functions that might be inlined. These would be removed on a
    // second pass after inlining.
    // Might happen when a pointer to a preserve_static_offset
    // structure is passed as parameter of a function that would be
    // inlined inside a loop that would be unrolled.
    StillUsed = true;
  } else {
    SmallString<128> Buf;
    raw_svector_ostream BufStream(Buf);
    BufStream << *Insn;
    report_fatal_error(
        Twine("Unexpected rewriteAccessChain Insn = ").concat(Buf));
  }
}

static void removeMarkerCall(Instruction *Marker) {
  Marker->replaceAllUsesWith(Marker->getOperand(0));
  Marker->eraseFromParent();
}

static bool rewriteAccessChain(Instruction *Marker, bool AllowPartial,
                               SmallPtrSetImpl<Instruction *> &RemovedMarkers) {
  SmallVector<GetElementPtrInst *> GEPs;
  SmallVector<Instruction *> Visited;
  bool StillUsed = false;
  rewriteUses(Marker, GEPs, Visited, AllowPartial, StillUsed);
  // Check if Visited instructions could be removed, iterate in
  // reverse to unblock instructions higher in the chain.
  for (auto V = Visited.rbegin(); V != Visited.rend(); ++V) {
    if (isPreserveStaticOffsetCall(*V)) {
      removeMarkerCall(*V);
      RemovedMarkers.insert(*V);
    } else if ((*V)->use_empty()) {
      (*V)->eraseFromParent();
    }
  }
  return StillUsed;
}

static std::vector<Instruction *>
collectPreserveStaticOffsetCalls(Function &F) {
  std::vector<Instruction *> Calls;
  for (Instruction &Insn : instructions(F))
    if (isPreserveStaticOffsetCall(&Insn))
      Calls.push_back(&Insn);
  return Calls;
}

bool isPreserveArrayIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_array_access_index);
}

bool isPreserveStructIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_struct_access_index);
}

bool isPreserveUnionIndex(Value *V) {
  return isIntrinsicCall(V, Intrinsic::preserve_union_access_index);
}

static void removePAICalls(Instruction *Marker) {
  auto IsPointerOperand = [](Value *Op, User *U) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
      return GEP->getPointerOperand() == Op;
    if (isPreserveStaticOffsetCall(U) || isPreserveArrayIndex(U) ||
        isPreserveStructIndex(U) || isPreserveUnionIndex(U))
      return cast<CallInst>(U)->getArgOperand(0) == Op;
    return false;
  };

  SmallVector<Value *, 32> WorkList;
  WorkList.push_back(Marker);
  do {
    Value *V = WorkList.pop_back_val();
    for (User *U : V->users())
      if (IsPointerOperand(V, U))
        WorkList.push_back(U);
    auto *Call = dyn_cast<CallInst>(V);
    if (!Call)
      continue;
    if (isPreserveArrayIndex(V))
      BPFCoreSharedInfo::removeArrayAccessCall(Call);
    else if (isPreserveStructIndex(V))
      BPFCoreSharedInfo::removeStructAccessCall(Call);
    else if (isPreserveUnionIndex(V))
      BPFCoreSharedInfo::removeUnionAccessCall(Call);
  } while (!WorkList.empty());
}

// Look for sequences:
// - llvm.preserve.static.offset -> getelementptr... -> load
// - llvm.preserve.static.offset -> getelementptr... -> store
// And replace those with calls to intrinsics:
// - llvm.bpf.getelementptr.and.load
// - llvm.bpf.getelementptr.and.store
static bool rewriteFunction(Function &F, bool AllowPartial) {
  LLVM_DEBUG(dbgs() << "********** BPFPreserveStaticOffsetPass (AllowPartial="
                    << AllowPartial << ") ************\n");

  auto MarkerCalls = collectPreserveStaticOffsetCalls(F);
  SmallPtrSet<Instruction *, 16> RemovedMarkers;

  LLVM_DEBUG(dbgs() << "There are " << MarkerCalls.size()
                    << " preserve.static.offset calls\n");

  if (MarkerCalls.empty())
    return false;

  for (auto *Call : MarkerCalls)
    removePAICalls(Call);

  for (auto *Call : MarkerCalls) {
    if (RemovedMarkers.contains(Call))
      continue;
    bool StillUsed = rewriteAccessChain(Call, AllowPartial, RemovedMarkers);
    if (!StillUsed || !AllowPartial)
      removeMarkerCall(Call);
  }

  return true;
}

PreservedAnalyses
llvm::BPFPreserveStaticOffsetPass::run(Function &F,
                                       FunctionAnalysisManager &AM) {
  return rewriteFunction(F, AllowPartial) ? PreservedAnalyses::none()
                                          : PreservedAnalyses::all();
}