//===------ BPFAbstractMemberAccess.cpp - Abstracting Member Accesses -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass abstracts struct/union member accesses in order to support
// compile-once run-everywhere (CO-RE). CO-RE compiles a program once so that
// it can run on different kernels. In particular, if a bpf program tries to
// access a particular kernel data structure member, the details of the
// intermediate member accesses are remembered so the bpf loader can make the
// necessary adjustments right before program loading.
//
// For example,
//
//   struct s {
//     int a;
//     int b;
//   };
//   struct t {
//     struct s c;
//     int d;
//   };
//   struct t e;
//
// For the member access e.c.b, the compiler will generate code
//   &e + 4
//
// The compile-once run-everywhere instead generates the following code
//   r = 4
//   &e + r
// The "4" in "r = 4" can be changed based on a particular kernel version.
// For example, on a particular kernel version, if struct s is changed to
//
//   struct s {
//     int new_field;
//     int a;
//     int b;
//   }
//
// By repeating the member access on the host, the bpf loader can
// adjust "r = 4" as "r = 8".
//
// This feature relies on the following three intrinsic calls:
//   addr = preserve_array_access_index(base, dimension, index)
//   addr = preserve_union_access_index(base, di_index)
//          !llvm.preserve.access.index <union_ditype>
//   addr = preserve_struct_access_index(base, gep_index, di_index)
//          !llvm.preserve.access.index <struct_ditype>
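//
// For illustration (a rough sketch; the exact intrinsic name mangling and
// metadata node names below are hypothetical), the e.c.b access above is
// roughly lowered by clang's __builtin_preserve_access_index() to a chain:
//   %c = call ptr @llvm.preserve.struct.access.index.p0.p0(
//            ptr elementtype(%struct.t) %e, i32 0, i32 0)
//          with !llvm.preserve.access.index metadata for struct t
//   %b = call ptr @llvm.preserve.struct.access.index.p0.p0(
//            ptr elementtype(%struct.s) %c, i32 1, i32 1)
//          with !llvm.preserve.access.index metadata for struct s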
//
// Bitfield member access needs special attention. User cannot take the
// address of a bitfield access. To facilitate easy bitfield code
// optimization by the kernel verifier, a new clang intrinsic is introduced:
//   uint32_t __builtin_preserve_field_info(member_access, info_kind)
// In IR, a chain with two (or more) intrinsic calls will be generated:
//   ...
//   addr = preserve_struct_access_index(base, 1, 1) !struct s
//   uint32_t result = bpf_preserve_field_info(addr, info_kind)
//
// Suppose the info_kind is FIELD_SIGNEDNESS.
// The above two IR intrinsics will be replaced with
// a relocatable insn:
//   signedness = /* signedness of member_access */
// and the signedness can be changed by the bpf loader based on the
// types on the host.
//
// User can also test whether a field exists or not with
//   uint32_t result = bpf_preserve_field_info(member_access, FIELD_EXISTENCE)
// The field will always be available (result = 1) during initial
// compilation, but the bpf loader can patch it with the correct value
// on the target host where the member_access may or may not be available.
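//
// For example (a user-level sketch; FIELD_EXISTENCE stands for the info_kind
// constant defined in the kernel/libbpf headers):
//   if (__builtin_preserve_field_info(e.c.b, FIELD_EXISTENCE))
//     /* e.c.b exists on this kernel, safe to access it */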
//
//===----------------------------------------------------------------------===//

#include "BPF.h"
#include "BPFCORE.h"
#include "BPFTargetMachine.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/DebugInfo/BTF/BTF.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <stack>

#define DEBUG_TYPE "bpf-abstract-member-access"

namespace llvm {
constexpr StringRef BPFCoreSharedInfo::AmaAttr;
uint32_t BPFCoreSharedInfo::SeqNum;

Instruction *BPFCoreSharedInfo::insertPassThrough(Module *M, BasicBlock *BB,
                                                  Instruction *Input,
                                                  Instruction *Before) {
  Function *Fn = Intrinsic::getDeclaration(
      M, Intrinsic::bpf_passthrough, {Input->getType(), Input->getType()});
  Constant *SeqNumVal = ConstantInt::get(Type::getInt32Ty(BB->getContext()),
                                         BPFCoreSharedInfo::SeqNum++);

  auto *NewInst = CallInst::Create(Fn, {SeqNumVal, Input});
  NewInst->insertBefore(Before);
  return NewInst;
}
} // namespace llvm

using namespace llvm;

namespace {
class BPFAbstractMemberAccess final {
public:
  BPFAbstractMemberAccess(BPFTargetMachine *TM) : TM(TM) {}

  bool run(Function &F);

  struct CallInfo {
    uint32_t Kind;
    uint32_t AccessIndex;
    MaybeAlign RecordAlignment;
    MDNode *Metadata;
    WeakTrackingVH Base;
  };
  typedef std::stack<std::pair<CallInst *, CallInfo>> CallInfoStack;

private:
  enum : uint32_t {
    BPFPreserveArrayAI = 1,
    BPFPreserveUnionAI = 2,
    BPFPreserveStructAI = 3,
    BPFPreserveFieldInfoAI = 4,
  };

  TargetMachine *TM;
  const DataLayout *DL = nullptr;
  Module *M = nullptr;

  static std::map<std::string, GlobalVariable *> GEPGlobals;
  // A map to link preserve_*_access_index intrinsic calls.
  std::map<CallInst *, std::pair<CallInst *, CallInfo>> AIChain;
  // A map to hold all the base preserve_*_access_index intrinsic calls.
  // The base call is not an input of any other preserve_*
  // intrinsics.
  std::map<CallInst *, CallInfo> BaseAICalls;
  // A map to hold <AnonRecord, TypeDef> relationships
  std::map<DICompositeType *, DIDerivedType *> AnonRecords;

  void CheckAnonRecordType(DIDerivedType *ParentTy, DIType *Ty);
  void CheckCompositeType(DIDerivedType *ParentTy, DICompositeType *CTy);
  void CheckDerivedType(DIDerivedType *ParentTy, DIDerivedType *DTy);
  void ResetMetadata(struct CallInfo &CInfo);

  bool doTransformation(Function &F);

  void traceAICall(CallInst *Call, CallInfo &ParentInfo);
  void traceBitCast(BitCastInst *BitCast, CallInst *Parent,
                    CallInfo &ParentInfo);
  void traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                CallInfo &ParentInfo);
  void collectAICallChains(Function &F);

  bool IsPreserveDIAccessIndexCall(const CallInst *Call, CallInfo &Cinfo);
  bool IsValidAIChain(const MDNode *ParentMeta, uint32_t ParentAI,
                      const MDNode *ChildMeta);
  bool removePreserveAccessIndexIntrinsic(Function &F);
  bool HasPreserveFieldInfoCall(CallInfoStack &CallStack);
  void GetStorageBitRange(DIDerivedType *MemberTy, Align RecordAlignment,
                          uint32_t &StartBitOffset, uint32_t &EndBitOffset);
  uint32_t GetFieldInfo(uint32_t InfoKind, DICompositeType *CTy,
                        uint32_t AccessIndex, uint32_t PatchImm,
                        MaybeAlign RecordAlignment);

  Value *computeBaseAndAccessKey(CallInst *Call, CallInfo &CInfo,
                                 std::string &AccessKey, MDNode *&BaseMeta);
  MDNode *computeAccessKey(CallInst *Call, CallInfo &CInfo,
                           std::string &AccessKey, bool &IsInt32Ret);
  bool transformGEPChain(CallInst *Call, CallInfo &CInfo);
};

std::map<std::string, GlobalVariable *> BPFAbstractMemberAccess::GEPGlobals;
} // End anonymous namespace

bool BPFAbstractMemberAccess::run(Function &F) {
  LLVM_DEBUG(dbgs() << "********** Abstract Member Accesses **********\n");

  M = F.getParent();
  if (!M)
    return false;

  // Bail out if no debug info.
  if (M->debug_compile_units().empty())
    return false;

  // For each argument/return/local_variable type, trace the type
  // pattern like '[derived_type]* [composite_type]' to check
  // and remember (anon record -> typedef) relations where the
  // anon record is defined as
  //   typedef [const/volatile/restrict]* [anon record]
  DISubprogram *SP = F.getSubprogram();
  if (SP && SP->isDefinition()) {
    for (DIType *Ty: SP->getType()->getTypeArray())
      CheckAnonRecordType(nullptr, Ty);
    for (const DINode *DN : SP->getRetainedNodes()) {
      if (const auto *DV = dyn_cast<DILocalVariable>(DN))
        CheckAnonRecordType(nullptr, DV->getType());
    }
  }

  DL = &M->getDataLayout();
  return doTransformation(F);
}

void BPFAbstractMemberAccess::ResetMetadata(struct CallInfo &CInfo) {
  if (auto Ty = dyn_cast<DICompositeType>(CInfo.Metadata)) {
    if (AnonRecords.find(Ty) != AnonRecords.end()) {
      if (AnonRecords[Ty] != nullptr)
        CInfo.Metadata = AnonRecords[Ty];
    }
  }
}

void BPFAbstractMemberAccess::CheckCompositeType(DIDerivedType *ParentTy,
                                                 DICompositeType *CTy) {
  if (!CTy->getName().empty() || !ParentTy ||
      ParentTy->getTag() != dwarf::DW_TAG_typedef)
    return;

  if (AnonRecords.find(CTy) == AnonRecords.end()) {
    AnonRecords[CTy] = ParentTy;
    return;
  }

  // Two or more typedef's may point to the same anon record.
  // If this is the case, set the typedef DIType to be nullptr
  // to indicate the duplication case.
  DIDerivedType *CurrTy = AnonRecords[CTy];
  if (CurrTy == ParentTy)
    return;
  AnonRecords[CTy] = nullptr;
}

void BPFAbstractMemberAccess::CheckDerivedType(DIDerivedType *ParentTy,
                                               DIDerivedType *DTy) {
  DIType *BaseType = DTy->getBaseType();
  if (!BaseType)
    return;

  unsigned Tag = DTy->getTag();
  if (Tag == dwarf::DW_TAG_pointer_type)
    CheckAnonRecordType(nullptr, BaseType);
  else if (Tag == dwarf::DW_TAG_typedef)
    CheckAnonRecordType(DTy, BaseType);
  else
    CheckAnonRecordType(ParentTy, BaseType);
}

void BPFAbstractMemberAccess::CheckAnonRecordType(DIDerivedType *ParentTy,
                                                  DIType *Ty) {
  if (!Ty)
    return;

  if (auto *CTy = dyn_cast<DICompositeType>(Ty))
    return CheckCompositeType(ParentTy, CTy);
  else if (auto *DTy = dyn_cast<DIDerivedType>(Ty))
    return CheckDerivedType(ParentTy, DTy);
}

static bool SkipDIDerivedTag(unsigned Tag, bool skipTypedef) {
  if (Tag != dwarf::DW_TAG_typedef && Tag != dwarf::DW_TAG_const_type &&
      Tag != dwarf::DW_TAG_volatile_type &&
      Tag != dwarf::DW_TAG_restrict_type &&
      Tag != dwarf::DW_TAG_member)
    return false;
  if (Tag == dwarf::DW_TAG_typedef && !skipTypedef)
    return false;
  return true;
}

static DIType *stripQualifiers(DIType *Ty, bool skipTypedef = true) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), skipTypedef))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static const DIType *stripQualifiers(const DIType *Ty) {
  while (auto *DTy = dyn_cast<DIDerivedType>(Ty)) {
    if (!SkipDIDerivedTag(DTy->getTag(), true))
      break;
    Ty = DTy->getBaseType();
  }
  return Ty;
}

static uint32_t calcArraySize(const DICompositeType *CTy, uint32_t StartDim) {
  DINodeArray Elements = CTy->getElements();
  uint32_t DimSize = 1;
  for (uint32_t I = StartDim; I < Elements.size(); ++I) {
    if (auto *Element = dyn_cast_or_null<DINode>(Elements[I]))
      if (Element->getTag() == dwarf::DW_TAG_subrange_type) {
        const DISubrange *SR = cast<DISubrange>(Element);
        auto *CI = SR->getCount().dyn_cast<ConstantInt *>();
        DimSize *= CI->getSExtValue();
      }
  }

  return DimSize;
}

static Type *getBaseElementType(const CallInst *Call) {
  // Element type is stored in an elementtype() attribute on the first param.
  return Call->getParamElementType(0);
}

static uint64_t getConstant(const Value *IndexValue) {
  const ConstantInt *CV = dyn_cast<ConstantInt>(IndexValue);
  assert(CV);
  return CV->getValue().getZExtValue();
}

/// Check whether a call is a preserve_*_access_index intrinsic call or not.
bool BPFAbstractMemberAccess::IsPreserveDIAccessIndexCall(const CallInst *Call,
                                                          CallInfo &CInfo) {
  if (!Call)
    return false;

  const auto *GV = dyn_cast<GlobalValue>(Call->getCalledOperand());
  if (!GV)
    return false;
  if (GV->getName().starts_with("llvm.preserve.array.access.index")) {
    CInfo.Kind = BPFPreserveArrayAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.array.access.index intrinsic");
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment = DL->getABITypeAlign(getBaseElementType(Call));
    return true;
  }
  if (GV->getName().starts_with("llvm.preserve.union.access.index")) {
    CInfo.Kind = BPFPreserveUnionAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.union.access.index intrinsic");
    ResetMetadata(CInfo);
    CInfo.AccessIndex = getConstant(Call->getArgOperand(1));
    CInfo.Base = Call->getArgOperand(0);
    return true;
  }
  if (GV->getName().starts_with("llvm.preserve.struct.access.index")) {
    CInfo.Kind = BPFPreserveStructAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.struct.access.index intrinsic");
    ResetMetadata(CInfo);
    CInfo.AccessIndex = getConstant(Call->getArgOperand(2));
    CInfo.Base = Call->getArgOperand(0);
    CInfo.RecordAlignment = DL->getABITypeAlign(getBaseElementType(Call));
    return true;
  }
  if (GV->getName().starts_with("llvm.bpf.preserve.field.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = nullptr;
    // Check validity of info_kind as clang did not check this.
    uint64_t InfoKind = getConstant(Call->getArgOperand(1));
    if (InfoKind >= BTF::MAX_FIELD_RELOC_KIND)
      report_fatal_error("Incorrect info_kind for llvm.bpf.preserve.field.info intrinsic");
    CInfo.AccessIndex = InfoKind;
    return true;
  }
  if (GV->getName().starts_with("llvm.bpf.preserve.type.info")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.type.info intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(1));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_TYPE_INFO_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.type.info intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_TYPE_INFO_EXISTENCE)
      CInfo.AccessIndex = BTF::TYPE_EXISTENCE;
    else if (Flag == BPFCoreSharedInfo::PRESERVE_TYPE_INFO_MATCH)
      CInfo.AccessIndex = BTF::TYPE_MATCH;
    else
      CInfo.AccessIndex = BTF::TYPE_SIZE;
    return true;
  }
  if (GV->getName().starts_with("llvm.bpf.preserve.enum.value")) {
    CInfo.Kind = BPFPreserveFieldInfoAI;
    CInfo.Metadata = Call->getMetadata(LLVMContext::MD_preserve_access_index);
    if (!CInfo.Metadata)
      report_fatal_error("Missing metadata for llvm.preserve.enum.value intrinsic");
    uint64_t Flag = getConstant(Call->getArgOperand(2));
    if (Flag >= BPFCoreSharedInfo::MAX_PRESERVE_ENUM_VALUE_FLAG)
      report_fatal_error("Incorrect flag for llvm.bpf.preserve.enum.value intrinsic");
    if (Flag == BPFCoreSharedInfo::PRESERVE_ENUM_VALUE_EXISTENCE)
      CInfo.AccessIndex = BTF::ENUM_VALUE_EXISTENCE;
    else
      CInfo.AccessIndex = BTF::ENUM_VALUE;
    return true;
  }

  return false;
}

static void replaceWithGEP(CallInst *Call, uint32_t DimensionIndex,
                           uint32_t GEPIndex) {
  uint32_t Dimension = 1;
  if (DimensionIndex > 0)
    Dimension = getConstant(Call->getArgOperand(DimensionIndex));

  Constant *Zero =
      ConstantInt::get(Type::getInt32Ty(Call->getParent()->getContext()), 0);
  SmallVector<Value *, 4> IdxList;
  for (unsigned I = 0; I < Dimension; ++I)
    IdxList.push_back(Zero);
  IdxList.push_back(Call->getArgOperand(GEPIndex));

  auto *GEP = GetElementPtrInst::CreateInBounds(getBaseElementType(Call),
                                                Call->getArgOperand(0), IdxList,
                                                "", Call->getIterator());
  Call->replaceAllUsesWith(GEP);
  Call->eraseFromParent();
}

void BPFCoreSharedInfo::removeArrayAccessCall(CallInst *Call) {
  replaceWithGEP(Call, 1, 2);
}

void BPFCoreSharedInfo::removeStructAccessCall(CallInst *Call) {
  replaceWithGEP(Call, 0, 1);
}

void BPFCoreSharedInfo::removeUnionAccessCall(CallInst *Call) {
  Call->replaceAllUsesWith(Call->getArgOperand(0));
  Call->eraseFromParent();
}

bool BPFAbstractMemberAccess::removePreserveAccessIndexIntrinsic(Function &F) {
  std::vector<CallInst *> PreserveArrayIndexCalls;
  std::vector<CallInst *> PreserveUnionIndexCalls;
  std::vector<CallInst *> PreserveStructIndexCalls;
  bool Found = false;

  for (auto &BB : F)
    for (auto &I : BB) {
      auto *Call = dyn_cast<CallInst>(&I);
      CallInfo CInfo;
      if (!IsPreserveDIAccessIndexCall(Call, CInfo))
        continue;

      Found = true;
      if (CInfo.Kind == BPFPreserveArrayAI)
        PreserveArrayIndexCalls.push_back(Call);
      else if (CInfo.Kind == BPFPreserveUnionAI)
        PreserveUnionIndexCalls.push_back(Call);
      else
        PreserveStructIndexCalls.push_back(Call);
    }

  // Do the following transformations:
  // . addr = preserve_array_access_index(base, dimension, index)
  //   is transformed to
  //     addr = GEP(base, dimension's zeros, index)
  // . addr = preserve_union_access_index(base, di_index)
  //   is transformed to
  //     addr = base, i.e., all usages of "addr" are replaced by "base".
  // . addr = preserve_struct_access_index(base, gep_index, di_index)
  //   is transformed to
  //     addr = GEP(base, 0, gep_index)
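  //
  // For instance (a hypothetical sketch), a two-dimensional array access
  //   addr = preserve_array_access_index(base, 2, 5)
  // becomes
  //   addr = GEP(base, 0, 0, 5)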
  for (CallInst *Call : PreserveArrayIndexCalls)
    BPFCoreSharedInfo::removeArrayAccessCall(Call);
  for (CallInst *Call : PreserveStructIndexCalls)
    BPFCoreSharedInfo::removeStructAccessCall(Call);
  for (CallInst *Call : PreserveUnionIndexCalls)
    BPFCoreSharedInfo::removeUnionAccessCall(Call);

  return Found;
}

/// Check whether the access index chain is valid. We check
/// here because there may be type casts between two
/// access indexes. We want to ensure the memory access is still valid.
bool BPFAbstractMemberAccess::IsValidAIChain(const MDNode *ParentType,
                                             uint32_t ParentAI,
                                             const MDNode *ChildType) {
  if (!ChildType)
    return true; // preserve_field_info, no type comparison needed.

  const DIType *PType = stripQualifiers(cast<DIType>(ParentType));
  const DIType *CType = stripQualifiers(cast<DIType>(ChildType));

  // Child is a derived/pointer type, which is due to type casting.
  // Pointer type cannot be in the middle of chain.
  if (isa<DIDerivedType>(CType))
    return false;

  // Parent is a pointer type.
  if (const auto *PtrTy = dyn_cast<DIDerivedType>(PType)) {
    if (PtrTy->getTag() != dwarf::DW_TAG_pointer_type)
      return false;
    return stripQualifiers(PtrTy->getBaseType()) == CType;
  }

  // Otherwise, struct/union/array types
  const auto *PTy = dyn_cast<DICompositeType>(PType);
  const auto *CTy = dyn_cast<DICompositeType>(CType);
  assert(PTy && CTy && "ParentType or ChildType is null or not composite");

  uint32_t PTyTag = PTy->getTag();
  assert(PTyTag == dwarf::DW_TAG_array_type ||
         PTyTag == dwarf::DW_TAG_structure_type ||
         PTyTag == dwarf::DW_TAG_union_type);

  uint32_t CTyTag = CTy->getTag();
  assert(CTyTag == dwarf::DW_TAG_array_type ||
         CTyTag == dwarf::DW_TAG_structure_type ||
         CTyTag == dwarf::DW_TAG_union_type);

  // Multi dimensional arrays, base element should be the same
  if (PTyTag == dwarf::DW_TAG_array_type && PTyTag == CTyTag)
    return PTy->getBaseType() == CTy->getBaseType();

  DIType *Ty;
  if (PTyTag == dwarf::DW_TAG_array_type)
    Ty = PTy->getBaseType();
  else
    Ty = dyn_cast<DIType>(PTy->getElements()[ParentAI]);

  return dyn_cast<DICompositeType>(stripQualifiers(Ty)) == CTy;
}

void BPFAbstractMemberAccess::traceAICall(CallInst *Call,
                                          CallInfo &ParentInfo) {
  for (User *U : Call->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Call, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;

      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Call, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Call] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Call, ParentInfo);
      else
        BaseAICalls[Call] = ParentInfo;
    } else {
      BaseAICalls[Call] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceBitCast(BitCastInst *BitCast,
                                           CallInst *Parent,
                                           CallInfo &ParentInfo) {
  for (User *U : BitCast->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::traceGEP(GetElementPtrInst *GEP, CallInst *Parent,
                                       CallInfo &ParentInfo) {
  for (User *U : GEP->users()) {
    Instruction *Inst = dyn_cast<Instruction>(U);
    if (!Inst)
      continue;

    if (auto *BI = dyn_cast<BitCastInst>(Inst)) {
      traceBitCast(BI, Parent, ParentInfo);
    } else if (auto *CI = dyn_cast<CallInst>(Inst)) {
      CallInfo ChildInfo;
      if (IsPreserveDIAccessIndexCall(CI, ChildInfo) &&
          IsValidAIChain(ParentInfo.Metadata, ParentInfo.AccessIndex,
                         ChildInfo.Metadata)) {
        AIChain[CI] = std::make_pair(Parent, ParentInfo);
        traceAICall(CI, ChildInfo);
      } else {
        BaseAICalls[Parent] = ParentInfo;
      }
    } else if (auto *GI = dyn_cast<GetElementPtrInst>(Inst)) {
      if (GI->hasAllZeroIndices())
        traceGEP(GI, Parent, ParentInfo);
      else
        BaseAICalls[Parent] = ParentInfo;
    } else {
      BaseAICalls[Parent] = ParentInfo;
    }
  }
}

void BPFAbstractMemberAccess::collectAICallChains(Function &F) {
  AIChain.clear();
  BaseAICalls.clear();

  for (auto &BB : F)
    for (auto &I : BB) {
      CallInfo CInfo;
      auto *Call = dyn_cast<CallInst>(&I);
      if (!IsPreserveDIAccessIndexCall(Call, CInfo) ||
          AIChain.find(Call) != AIChain.end())
        continue;

      traceAICall(Call, CInfo);
    }
}

/// Get the start and the end of storage offset for \p MemberTy.
void BPFAbstractMemberAccess::GetStorageBitRange(DIDerivedType *MemberTy,
                                                 Align RecordAlignment,
                                                 uint32_t &StartBitOffset,
                                                 uint32_t &EndBitOffset) {
  uint32_t MemberBitSize = MemberTy->getSizeInBits();
  uint32_t MemberBitOffset = MemberTy->getOffsetInBits();

  if (RecordAlignment > 8) {
    // If the bits are within an aligned 8-byte chunk, set the RecordAlignment
    // to 8; otherwise report a fatal error.
    if (MemberBitOffset / 64 != (MemberBitOffset + MemberBitSize) / 64)
      report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                         "requiring too big alignment");
    RecordAlignment = Align(8);
  }

  uint32_t AlignBits = RecordAlignment.value() * 8;
  if (MemberBitSize > AlignBits)
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "bitfield size greater than record alignment");

  StartBitOffset = MemberBitOffset & ~(AlignBits - 1);
  if ((StartBitOffset + AlignBits) < (MemberBitOffset + MemberBitSize))
    report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info, "
                       "cross alignment boundary");
  EndBitOffset = StartBitOffset + AlignBits;
}

uint32_t BPFAbstractMemberAccess::GetFieldInfo(uint32_t InfoKind,
                                               DICompositeType *CTy,
                                               uint32_t AccessIndex,
                                               uint32_t PatchImm,
                                               MaybeAlign RecordAlignment) {
  if (InfoKind == BTF::FIELD_EXISTENCE)
    return 1;

  uint32_t Tag = CTy->getTag();
  if (InfoKind == BTF::FIELD_BYTE_OFFSET) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      PatchImm += AccessIndex * calcArraySize(CTy, 1) *
                  (EltTy->getSizeInBits() >> 3);
    } else if (Tag == dwarf::DW_TAG_structure_type) {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      if (!MemberTy->isBitField()) {
        PatchImm += MemberTy->getOffsetInBits() >> 3;
      } else {
        unsigned SBitOffset, NextSBitOffset;
        GetStorageBitRange(MemberTy, *RecordAlignment, SBitOffset,
                           NextSBitOffset);
        PatchImm += SBitOffset >> 3;
      }
    }
    return PatchImm;
  }

  if (InfoKind == BTF::FIELD_BYTE_SIZE) {
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      return calcArraySize(CTy, 1) * (EltTy->getSizeInBits() >> 3);
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      uint32_t SizeInBits = MemberTy->getSizeInBits();
      if (!MemberTy->isBitField())
        return SizeInBits >> 3;

      unsigned SBitOffset, NextSBitOffset;
      GetStorageBitRange(MemberTy, *RecordAlignment, SBitOffset,
                         NextSBitOffset);
      SizeInBits = NextSBitOffset - SBitOffset;
      if (SizeInBits & (SizeInBits - 1))
        report_fatal_error("Unsupported field expression for llvm.bpf.preserve.field.info");
      return SizeInBits >> 3;
    }
  }

  if (InfoKind == BTF::FIELD_SIGNEDNESS) {
    const DIType *BaseTy;
    if (Tag == dwarf::DW_TAG_array_type) {
      // Signedness only checked when final array elements are accessed.
      if (CTy->getElements().size() != 1)
        report_fatal_error("Invalid array expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CTy->getBaseType());
    } else {
      auto *MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      BaseTy = stripQualifiers(MemberTy->getBaseType());
    }

    // Only basic types and enum types have signedness.
    const auto *BTy = dyn_cast<DIBasicType>(BaseTy);
    while (!BTy) {
      const auto *CompTy = dyn_cast<DICompositeType>(BaseTy);
      // Report an error if the field expression does not have signedness.
      if (!CompTy || CompTy->getTag() != dwarf::DW_TAG_enumeration_type)
        report_fatal_error("Invalid field expression for llvm.bpf.preserve.field.info");
      BaseTy = stripQualifiers(CompTy->getBaseType());
      BTy = dyn_cast<DIBasicType>(BaseTy);
    }
    uint32_t Encoding = BTy->getEncoding();
    return (Encoding == dwarf::DW_ATE_signed || Encoding == dwarf::DW_ATE_signed_char);
  }

  if (InfoKind == BTF::FIELD_LSHIFT_U64) {
    // The value is loaded into a value with FIELD_BYTE_SIZE size,
    // and then zero or sign extended to U64.
    // FIELD_LSHIFT_U64 and FIELD_RSHIFT_U64 are operations
    // to extract the original value.
    const Triple &Triple = TM->getTargetTriple();
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;

    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, *RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    unsigned OffsetInBits = MemberTy->getOffsetInBits();
    if (Triple.getArch() == Triple::bpfel)
      return SBitOffset + 64 - OffsetInBits - SizeInBits;
    else
      return OffsetInBits + 64 - NextSBitOffset;
  }

  if (InfoKind == BTF::FIELD_RSHIFT_U64) {
    DIDerivedType *MemberTy = nullptr;
    bool IsBitField = false;
    uint32_t SizeInBits;
    if (Tag == dwarf::DW_TAG_array_type) {
      auto *EltTy = stripQualifiers(CTy->getBaseType());
      SizeInBits = calcArraySize(CTy, 1) * EltTy->getSizeInBits();
    } else {
      MemberTy = cast<DIDerivedType>(CTy->getElements()[AccessIndex]);
      SizeInBits = MemberTy->getSizeInBits();
      IsBitField = MemberTy->isBitField();
    }

    if (!IsBitField) {
      if (SizeInBits > 64)
        report_fatal_error("too big field size for llvm.bpf.preserve.field.info");
      return 64 - SizeInBits;
    }

    unsigned SBitOffset, NextSBitOffset;
    GetStorageBitRange(MemberTy, *RecordAlignment, SBitOffset, NextSBitOffset);
    if (NextSBitOffset - SBitOffset > 64)
      report_fatal_error("too big field size for llvm.bpf.preserve.field.info");

    return 64 - SizeInBits;
  }

  llvm_unreachable("Unknown llvm.bpf.preserve.field.info info kind");
}

bool BPFAbstractMemberAccess::HasPreserveFieldInfoCall(CallInfoStack &CallStack) {
  // This is called in error return path, no need to maintain CallStack.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    if (StackElem.second.Kind == BPFPreserveFieldInfoAI)
      return true;
    CallStack.pop();
  }
  return false;
}

/// Compute the base of the whole preserve_* intrinsics chains, i.e., the base
/// pointer of the first preserve_*_access_index call, and construct the access
/// string, which will be the name of a global variable.
Value *BPFAbstractMemberAccess::computeBaseAndAccessKey(CallInst *Call,
                                                        CallInfo &CInfo,
                                                        std::string &AccessKey,
                                                        MDNode *&TypeMeta) {
  Value *Base = nullptr;
  std::string TypeName;
  CallInfoStack CallStack;

  // Put the access chain into a stack with the top as the head of the chain.
  while (Call) {
    CallStack.push(std::make_pair(Call, CInfo));
    CInfo = AIChain[Call].second;
    Call = AIChain[Call].first;
  }

  // The access offset from the base of the head of chain is also
  // calculated here as all debuginfo types are available.

  // Get type name and calculate the first index.
  // We only want to get type name from typedef, structure or union.
  // If user wants a relocation like
  //   int *p; ... __builtin_preserve_access_index(&p[4]) ...
  // or
  //   int a[10][20]; ... __builtin_preserve_access_index(&a[2][3]) ...
  // we will skip them.
  uint32_t FirstIndex = 0;
  uint32_t PatchImm = 0; // AccessOffset or the requested field info
  uint32_t InfoKind = BTF::FIELD_BYTE_OFFSET;
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    Call = StackElem.first;
    CInfo = StackElem.second;

    if (!Base)
      Base = CInfo.Base;

    DIType *PossibleTypeDef = stripQualifiers(cast<DIType>(CInfo.Metadata),
                                              false);
    DIType *Ty = stripQualifiers(PossibleTypeDef);
    if (CInfo.Kind == BPFPreserveUnionAI ||
        CInfo.Kind == BPFPreserveStructAI) {
      // struct or union type. If the typedef is in the metadata, always
      // use the typedef.
      TypeName = std::string(PossibleTypeDef->getName());
      TypeMeta = PossibleTypeDef;
      PatchImm += FirstIndex * (Ty->getSizeInBits() >> 3);
      break;
    }

    assert(CInfo.Kind == BPFPreserveArrayAI);

    // Array entries will always be consumed for accumulative initial index.
    CallStack.pop();

    // BPFPreserveArrayAI
    uint64_t AccessIndex = CInfo.AccessIndex;

    DIType *BaseTy = nullptr;
    bool CheckElemType = false;
    if (const auto *CTy = dyn_cast<DICompositeType>(Ty)) {
      // array type
      assert(CTy->getTag() == dwarf::DW_TAG_array_type);

      FirstIndex += AccessIndex * calcArraySize(CTy, 1);
      BaseTy = stripQualifiers(CTy->getBaseType());
      CheckElemType = CTy->getElements().size() == 1;
    } else {
      // pointer type
      auto *DTy = cast<DIDerivedType>(Ty);
      assert(DTy->getTag() == dwarf::DW_TAG_pointer_type);

      BaseTy = stripQualifiers(DTy->getBaseType());
      CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        CheckElemType = true;
      } else if (CTy->getTag() != dwarf::DW_TAG_array_type) {
        FirstIndex += AccessIndex;
        CheckElemType = true;
      } else {
        FirstIndex += AccessIndex * calcArraySize(CTy, 0);
      }
    }

    if (CheckElemType) {
      auto *CTy = dyn_cast<DICompositeType>(BaseTy);
      if (!CTy) {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }

      unsigned CTag = CTy->getTag();
      if (CTag == dwarf::DW_TAG_structure_type || CTag == dwarf::DW_TAG_union_type) {
        TypeName = std::string(CTy->getName());
      } else {
        if (HasPreserveFieldInfoCall(CallStack))
          report_fatal_error("Invalid field access for llvm.preserve.field.info intrinsic");
        return nullptr;
      }
      TypeMeta = CTy;
      PatchImm += FirstIndex * (CTy->getSizeInBits() >> 3);
      break;
    }
  }
  assert(TypeName.size());
  AccessKey += std::to_string(FirstIndex);

  // Traverse the rest of access chain to complete offset calculation
  // and access key construction.
  while (CallStack.size()) {
    auto StackElem = CallStack.top();
    CInfo = StackElem.second;
    CallStack.pop();

    if (CInfo.Kind == BPFPreserveFieldInfoAI) {
      InfoKind = CInfo.AccessIndex;
      if (InfoKind == BTF::FIELD_EXISTENCE)
        PatchImm = 1;
      break;
    }

    // If the next Call (the top of the stack) is a BPFPreserveFieldInfoAI,
    // the action will be extracting field info.
    if (CallStack.size()) {
      auto StackElem2 = CallStack.top();
      CallInfo CInfo2 = StackElem2.second;
      if (CInfo2.Kind == BPFPreserveFieldInfoAI) {
        InfoKind = CInfo2.AccessIndex;
        assert(CallStack.size() == 1);
      }
    }

    // Access Index
    uint64_t AccessIndex = CInfo.AccessIndex;
    AccessKey += ":" + std::to_string(AccessIndex);

    MDNode *MDN = CInfo.Metadata;
    // At this stage, it cannot be pointer type.
    auto *CTy = cast<DICompositeType>(stripQualifiers(cast<DIType>(MDN)));
    PatchImm = GetFieldInfo(InfoKind, CTy, AccessIndex, PatchImm,
                            CInfo.RecordAlignment);
  }

  // Access key is the
  //   "llvm." + type name + ":" + reloc type + ":" + patched imm + "$" +
  //   access string,
  // uniquely identifying one relocation.
  // The prefix "llvm." indicates this is a temporary global, which should
  // not be emitted to ELF file.
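  // For example (a hypothetical sketch), for the e.c.b access from the file
  // header comment, with InfoKind being FIELD_BYTE_OFFSET, the key could look
  // like "llvm.t:<FIELD_BYTE_OFFSET>:4$0:0:1".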
  AccessKey = "llvm." + TypeName + ":" + std::to_string(InfoKind) + ":" +
              std::to_string(PatchImm) + "$" + AccessKey;

  return Base;
}

MDNode *BPFAbstractMemberAccess::computeAccessKey(CallInst *Call,
                                                  CallInfo &CInfo,
                                                  std::string &AccessKey,
                                                  bool &IsInt32Ret) {
  DIType *Ty = stripQualifiers(cast<DIType>(CInfo.Metadata), false);
  assert(!Ty->getName().empty());

  int64_t PatchImm;
  std::string AccessStr("0");
  if (CInfo.AccessIndex == BTF::TYPE_EXISTENCE ||
      CInfo.AccessIndex == BTF::TYPE_MATCH) {
    PatchImm = 1;
  } else if (CInfo.AccessIndex == BTF::TYPE_SIZE) {
    // typedef debuginfo type has size 0, get the eventual base type.
    DIType *BaseTy = stripQualifiers(Ty, true);
    PatchImm = BaseTy->getSizeInBits() / 8;
  } else {
    // ENUM_VALUE_EXISTENCE and ENUM_VALUE
    IsInt32Ret = false;

    // The argument could be a global variable or a getelementptr with base to
    // a global variable depending on whether the clang option
    // `opaque-pointers` is set or not.
    const GlobalVariable *GV =
        cast<GlobalVariable>(Call->getArgOperand(1)->stripPointerCasts());
    assert(GV->hasInitializer());
    const ConstantDataArray *DA = cast<ConstantDataArray>(GV->getInitializer());
    assert(DA->isString());
    StringRef ValueStr = DA->getAsString();

    // ValueStr format: <EnumeratorStr>:<Value>
    size_t Separator = ValueStr.find_first_of(':');
    StringRef EnumeratorStr = ValueStr.substr(0, Separator);

    // Find enumerator index in the debuginfo
    DIType *BaseTy = stripQualifiers(Ty, true);
    const auto *CTy = cast<DICompositeType>(BaseTy);
    assert(CTy->getTag() == dwarf::DW_TAG_enumeration_type);
    int EnumIndex = 0;
    for (const auto Element : CTy->getElements()) {
      const auto *Enum = cast<DIEnumerator>(Element);
      if (Enum->getName() == EnumeratorStr) {
        AccessStr = std::to_string(EnumIndex);
        break;
      }
      EnumIndex++;
    }

    if (CInfo.AccessIndex == BTF::ENUM_VALUE) {
      StringRef EValueStr = ValueStr.substr(Separator + 1);
      PatchImm = std::stoll(std::string(EValueStr));
    } else {
      PatchImm = 1;
    }
  }

  AccessKey = "llvm." + Ty->getName().str() + ":" +
              std::to_string(CInfo.AccessIndex) + std::string(":") +
              std::to_string(PatchImm) + std::string("$") + AccessStr;

  return Ty;
}

/// Call/Kind is the base preserve_*_access_index() call. Attempts to do
/// transformation to a chain of relocatable GEPs.
bool BPFAbstractMemberAccess::transformGEPChain(CallInst *Call,
                                                CallInfo &CInfo) {
  std::string AccessKey;
  MDNode *TypeMeta;
  Value *Base = nullptr;
  bool IsInt32Ret;

  IsInt32Ret = CInfo.Kind == BPFPreserveFieldInfoAI;
  if (CInfo.Kind == BPFPreserveFieldInfoAI && CInfo.Metadata) {
    TypeMeta = computeAccessKey(Call, CInfo, AccessKey, IsInt32Ret);
  } else {
    Base = computeBaseAndAccessKey(Call, CInfo, AccessKey, TypeMeta);
    if (!Base)
      return false;
  }

  BasicBlock *BB = Call->getParent();
  GlobalVariable *GV;

  if (GEPGlobals.find(AccessKey) == GEPGlobals.end()) {
    IntegerType *VarType;
    if (IsInt32Ret)
      VarType = Type::getInt32Ty(BB->getContext()); // 32bit return value
    else
      VarType = Type::getInt64Ty(BB->getContext()); // 64bit ptr or enum value

    GV = new GlobalVariable(*M, VarType, false, GlobalVariable::ExternalLinkage,
                            nullptr, AccessKey);
    GV->addAttribute(BPFCoreSharedInfo::AmaAttr);
    GV->setMetadata(LLVMContext::MD_preserve_access_index, TypeMeta);
    GEPGlobals[AccessKey] = GV;
  } else {
    GV = GEPGlobals[AccessKey];
  }

  if (CInfo.Kind == BPFPreserveFieldInfoAI) {
    // Load the global variable which represents the returned field info.
    LoadInst *LDInst;
    if (IsInt32Ret)
      LDInst = new LoadInst(Type::getInt32Ty(BB->getContext()), GV, "",
                            Call->getIterator());
    else
      LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "",
                            Call->getIterator());

    Instruction *PassThroughInst =
        BPFCoreSharedInfo::insertPassThrough(M, BB, LDInst, Call);
    Call->replaceAllUsesWith(PassThroughInst);
    Call->eraseFromParent();
    return true;
  }

  // For any original GEP Call and Base %2 like
  //   %4 = bitcast %struct.net_device** %dev1 to i64*
  // it is transformed to:
  //   %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //   %7 = bitcast %struct.sk_buff* %2 to i8*
  //   %8 = getelementptr i8, i8* %7, %6
  //   %9 = bitcast i8* %8 to i64*
  //   using %9 instead of %4
  // The original Call inst is removed.

  // Load the global variable.
  auto *LDInst = new LoadInst(Type::getInt64Ty(BB->getContext()), GV, "",
                              Call->getIterator());

  // Generate a BitCast
  auto *BCInst =
      new BitCastInst(Base, PointerType::getUnqual(BB->getContext()));
  BCInst->insertBefore(Call);

  // Generate a GetElementPtr
  auto *GEP = GetElementPtrInst::Create(Type::getInt8Ty(BB->getContext()),
                                        BCInst, LDInst);
  GEP->insertBefore(Call);

  // Generate a BitCast
  auto *BCInst2 = new BitCastInst(GEP, Call->getType());
  BCInst2->insertBefore(Call);

  // For the following code,
  //   Block0:
  //     ...
  //     if (...) goto Block1 else ...
  //   Block1:
  //     %6 = load llvm.sk_buff:0:50$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   Block2:
  //     ...
  //     if (...) goto Block3 else ...
  //   Block3:
  //     %6 = load llvm.bpf_map:0:40$0:0:0:2:0
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  //   CommonExit
  // SimplifyCFG may generate:
  //   Block0:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block2:
  //     ...
  //     if (...) goto Block_Common else ...
  //   Block_Common:
  //     PHI = [llvm.sk_buff:0:50$0:0:0:2:0, llvm.bpf_map:0:40$0:0:0:2:0]
  //     %6 = load PHI
  //     %7 = bitcast %struct.sk_buff* %2 to i8*
  //     %8 = getelementptr i8, i8* %7, %6
  //     ...
  //     goto CommonExit
  // For the above code, we cannot perform proper relocation since
  // "load PHI" has two possible relocations.
  //
  // To prevent the above tail merging, we use __builtin_bpf_passthrough()
  // where one of its parameters is a seq_num. Since two
  // __builtin_bpf_passthrough() funcs will always have different seq_num,
  // tail merging cannot happen. The __builtin_bpf_passthrough() will be
  // removed in the beginning of Target IR passes.
  //
  // This approach is also used in other places when global var
  // representing a relocation is used.
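  //
  // A rough sketch of the inserted passthrough call (the exact overloaded
  // type suffixes in the intrinsic name are hypothetical):
  //   %p = call i8* @llvm.bpf.passthrough.p0i8.p0i8(i32 <seq_num>, i8* %val)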
  Instruction *PassThroughInst =
      BPFCoreSharedInfo::insertPassThrough(M, BB, BCInst2, Call);
  Call->replaceAllUsesWith(PassThroughInst);
  Call->eraseFromParent();

  return true;
}

bool BPFAbstractMemberAccess::doTransformation(Function &F) {
  bool Transformed = false;

  // Collect PreserveDIAccessIndex Intrinsic call chains.
  // The call chains will be used to generate the access
  // patterns similar to GEP.
  collectAICallChains(F);

  for (auto &C : BaseAICalls)
    Transformed = transformGEPChain(C.first, C.second) || Transformed;

  return removePreserveAccessIndexIntrinsic(F) || Transformed;
}

PreservedAnalyses
BPFAbstractMemberAccessPass::run(Function &F, FunctionAnalysisManager &AM) {
  return BPFAbstractMemberAccess(TM).run(F) ? PreservedAnalyses::none()
                                            : PreservedAnalyses::all();
}