//===-- HexagonVectorCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// HexagonVectorCombine is a utility class implementing a variety of functions
// that assist in vector-based optimizations.
//
// AlignVectors: replace unaligned vector loads and stores with aligned ones.
// HvxIdioms: recognize various opportunities to generate HVX intrinsic code.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"

#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"

#include <algorithm>
#include <deque>
#include <map>
#include <optional>
#include <set>
#include <utility>
#include <vector>

#define DEBUG_TYPE "hexagon-vc"

using namespace llvm;

namespace {
cl::opt<bool> DumpModule("hvc-dump-module", cl::Hidden);
cl::opt<bool> VAEnabled("hvc-va", cl::Hidden, cl::init(true)); // Align
cl::opt<bool> VIEnabled("hvc-vi", cl::Hidden, cl::init(true)); // Idioms
cl::opt<bool> VADoFullStores("hvc-va-full-stores", cl::Hidden);

cl::opt<unsigned> VAGroupCountLimit("hvc-va-group-count-limit", cl::Hidden,
                                    cl::init(~0));
cl::opt<unsigned> VAGroupSizeLimit("hvc-va-group-size-limit", cl::Hidden,
                                   cl::init(~0));
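
// All of the above are cl::opt flags, so they can be toggled for experiments,
// e.g. "-mllvm -hvc-va=0" (disable AlignVectors) or
// "-mllvm -hvc-va-group-size-limit=8" when compiling through clang.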

class HexagonVectorCombine {
public:
  HexagonVectorCombine(Function &F_, AliasAnalysis &AA_, AssumptionCache &AC_,
                       DominatorTree &DT_, ScalarEvolution &SE_,
                       TargetLibraryInfo &TLI_, const TargetMachine &TM_)
      : F(F_), DL(F.getDataLayout()), AA(AA_), AC(AC_), DT(DT_), SE(SE_),
        TLI(TLI_),
        HST(static_cast<const HexagonSubtarget &>(*TM_.getSubtargetImpl(F))) {}

  bool run();

  // Common integer type.
  IntegerType *getIntTy(unsigned Width = 32) const;
  // Byte type: either scalar (when ElemCount = 0), or vector with the given
  // element count.
  Type *getByteTy(int ElemCount = 0) const;
  // Boolean type: either scalar (when ElemCount = 0), or vector with the
  // given element count.
  Type *getBoolTy(int ElemCount = 0) const;
  // Create a ConstantInt of type returned by getIntTy with the value Val.
  ConstantInt *getConstInt(int Val, unsigned Width = 32) const;
  // Get the integer value of V, if it exists.
  std::optional<APInt> getIntValue(const Value *Val) const;
  // Is Val a constant 0, or a vector of 0s?
  bool isZero(const Value *Val) const;
  // Is Val an undef value?
  bool isUndef(const Value *Val) const;
  // Is Val a scalar (i1 true) or a vector of (i1 true)?
  bool isTrue(const Value *Val) const;
  // Is Val a scalar (i1 false) or a vector of (i1 false)?
  bool isFalse(const Value *Val) const;

  // Get HVX vector type with the given element type.
  VectorType *getHvxTy(Type *ElemTy, bool Pair = false) const;

  enum SizeKind {
    Store, // Store size
    Alloc, // Alloc size
  };
  int getSizeOf(const Value *Val, SizeKind Kind = Store) const;
  int getSizeOf(const Type *Ty, SizeKind Kind = Store) const;
  int getTypeAlignment(Type *Ty) const;
  size_t length(Value *Val) const;
  size_t length(Type *Ty) const;

  Constant *getNullValue(Type *Ty) const;
  Constant *getFullValue(Type *Ty) const;
  Constant *getConstSplat(Type *Ty, int Val) const;

  Value *simplify(Value *Val) const;

  Value *insertb(IRBuilderBase &Builder, Value *Dest, Value *Src, int Start,
                 int Length, int Where) const;
  Value *vlalignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *vralignb(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                  Value *Amt) const;
  Value *concat(IRBuilderBase &Builder, ArrayRef<Value *> Vecs) const;
  Value *vresize(IRBuilderBase &Builder, Value *Val, int NewSize,
                 Value *Pad) const;
  Value *rescale(IRBuilderBase &Builder, Value *Mask, Type *FromTy,
                 Type *ToTy) const;
  Value *vlsb(IRBuilderBase &Builder, Value *Val) const;
  Value *vbytes(IRBuilderBase &Builder, Value *Val) const;
  Value *subvector(IRBuilderBase &Builder, Value *Val, unsigned Start,
                   unsigned Length) const;
  Value *sublo(IRBuilderBase &Builder, Value *Val) const;
  Value *subhi(IRBuilderBase &Builder, Value *Val) const;
  Value *vdeal(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;
  Value *vshuff(IRBuilderBase &Builder, Value *Val0, Value *Val1) const;

  Value *createHvxIntrinsic(IRBuilderBase &Builder, Intrinsic::ID IntID,
                            Type *RetTy, ArrayRef<Value *> Args,
                            ArrayRef<Type *> ArgTys = {},
                            ArrayRef<Value *> MDSources = {}) const;
  SmallVector<Value *> splitVectorElements(IRBuilderBase &Builder, Value *Vec,
                                           unsigned ToWidth) const;
  Value *joinVectorElements(IRBuilderBase &Builder, ArrayRef<Value *> Values,
                            VectorType *ToType) const;

  std::optional<int> calculatePointerDifference(Value *Ptr0, Value *Ptr1) const;

  unsigned getNumSignificantBits(const Value *V,
                                 const Instruction *CtxI = nullptr) const;
  KnownBits getKnownBits(const Value *V,
                         const Instruction *CtxI = nullptr) const;

  bool isSafeToClone(const Instruction &In) const;

  template <typename T = std::vector<Instruction *>>
  bool isSafeToMoveBeforeInBB(const Instruction &In,
                              BasicBlock::const_iterator To,
                              const T &IgnoreInsts = {}) const;

  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isByteVecTy(Type *Ty) const;

  Function &F;
  const DataLayout &DL;
  AliasAnalysis &AA;
  AssumptionCache &AC;
  DominatorTree &DT;
  ScalarEvolution &SE;
  TargetLibraryInfo &TLI;
  const HexagonSubtarget &HST;

private:
  Value *getElementRange(IRBuilderBase &Builder, Value *Lo, Value *Hi,
                         int Start, int Length) const;
};

class AlignVectors {
  // This code tries to replace unaligned vector loads/stores with aligned
  // ones.
  // Consider unaligned load:
  //   %v = original_load %some_addr, align <bad>
  //   %user = %v
  // It will generate
  //      = load ..., align <good>
  //      = load ..., align <good>
  //      = valign
  //      etc.
  //   %synthesize = combine/shuffle the loaded data so that it looks
  //        exactly like what "original_load" has loaded.
  //   %user = %synthesize
  // Similarly for stores.
public:
  AlignVectors(const HexagonVectorCombine &HVC_) : HVC(HVC_) {}

  bool run();

private:
  using InstList = std::vector<Instruction *>;
  using InstMap = DenseMap<Instruction *, Instruction *>;

  struct AddrInfo {
    AddrInfo(const AddrInfo &) = default;
    AddrInfo(const HexagonVectorCombine &HVC, Instruction *I, Value *A,
             Type *T, Align H)
        : Inst(I), Addr(A), ValTy(T), HaveAlign(H),
          NeedAlign(HVC.getTypeAlignment(ValTy)) {}
    AddrInfo &operator=(const AddrInfo &) = default;

    // XXX: add Size member?
    Instruction *Inst;
    Value *Addr;
    Type *ValTy;
    Align HaveAlign;
    Align NeedAlign;
    int Offset = 0; // Offset (in bytes) from the first member of the
                    // containing AddrList.
  };
  using AddrList = std::vector<AddrInfo>;

  struct InstrLess {
    bool operator()(const Instruction *A, const Instruction *B) const {
      return A->comesBefore(B);
    }
  };
  using DepList = std::set<Instruction *, InstrLess>;

  struct MoveGroup {
    MoveGroup(const AddrInfo &AI, Instruction *B, bool Hvx, bool Load)
        : Base(B), Main{AI.Inst}, Clones{}, IsHvx(Hvx), IsLoad(Load) {}
    MoveGroup() = default;
    Instruction *Base; // Base instruction of the parent address group.
    InstList Main;     // Main group of instructions.
    InstList Deps;     // List of dependencies.
    InstMap Clones;    // Map from original Deps to cloned ones.
    bool IsHvx;        // Is this a group of HVX instructions?
    bool IsLoad;       // Is this a load group?
  };
  using MoveList = std::vector<MoveGroup>;

  struct ByteSpan {
    // A representation of "interesting" bytes within a given span of memory.
    // These bytes are those that are loaded or stored, and they don't have
    // to cover the entire span of memory.
    //
    // The representation works by picking a contiguous sequence of bytes
    // from somewhere within a llvm::Value, and placing it at a given offset
    // within the span.
    //
    // The sequence of bytes from llvm::Value is represented by Segment.
    // Block is a Segment, plus where it goes in the span.
    //
    // An important feature of ByteSpan is being able to make a "section",
    // i.e. creating another ByteSpan corresponding to a range of offsets
    // relative to the source span.
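    //
    // A small invented example: if V0 and V1 are 16-byte values, and the
    // span holds Block{Seg={V0, 0, 16}, Pos=0} and Block{Seg={V1, 8, 8},
    // Pos=16}, then extent() is 24, and section(12, 8) keeps only the
    // overlapping parts: {V0, 12, 4} at Pos=12 and {V1, 8, 4} at Pos=16.
    // Positions stay in the source span's coordinates; shift(-12) would
    // rebase them to 0.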

    struct Segment {
      // Segment of a Value: 'Len' bytes starting at byte 'Begin'.
      Segment(Value *Val, int Begin, int Len)
          : Val(Val), Start(Begin), Size(Len) {}
      Segment(const Segment &Seg) = default;
      Segment &operator=(const Segment &Seg) = default;
      Value *Val; // Value representable as a sequence of bytes.
      int Start;  // First byte of the value that belongs to the segment.
      int Size;   // Number of bytes in the segment.
    };

    struct Block {
      Block(Value *Val, int Len, int Pos) : Seg(Val, 0, Len), Pos(Pos) {}
      Block(Value *Val, int Off, int Len, int Pos)
          : Seg(Val, Off, Len), Pos(Pos) {}
      Block(const Block &Blk) = default;
      Block &operator=(const Block &Blk) = default;
      Segment Seg; // Value segment.
      int Pos;     // Position (offset) of the block in the span.
    };

    int extent() const;
    ByteSpan section(int Start, int Length) const;
    ByteSpan &shift(int Offset);
    SmallVector<Value *, 8> values() const;

    int size() const { return Blocks.size(); }
    Block &operator[](int i) { return Blocks[i]; }
    const Block &operator[](int i) const { return Blocks[i]; }

    std::vector<Block> Blocks;

    using iterator = decltype(Blocks)::iterator;
    iterator begin() { return Blocks.begin(); }
    iterator end() { return Blocks.end(); }
    using const_iterator = decltype(Blocks)::const_iterator;
    const_iterator begin() const { return Blocks.begin(); }
    const_iterator end() const { return Blocks.end(); }
  };

  Align getAlignFromValue(const Value *V) const;
  std::optional<AddrInfo> getAddrInfo(Instruction &In) const;
  bool isHvx(const AddrInfo &AI) const;
  // This function is only used for assertions at the moment.
  [[maybe_unused]] bool isSectorTy(Type *Ty) const;

  Value *getPayload(Value *Val) const;
  Value *getMask(Value *Val) const;
  Value *getPassThrough(Value *Val) const;

  Value *createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                               int Adjust,
                               const InstMap &CloneMap = InstMap()) const;
  Value *createAlignedPointer(IRBuilderBase &Builder, Value *Ptr, Type *ValTy,
                              int Alignment,
                              const InstMap &CloneMap = InstMap()) const;

  Value *createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                    Value *Predicate, int Alignment, Value *Mask,
                    Value *PassThru, ArrayRef<Value *> MDSources = {}) const;
  Value *createSimpleLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                          int Alignment,
                          ArrayRef<Value *> MDSources = {}) const;

  Value *createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                     Value *Predicate, int Alignment, Value *Mask,
                     ArrayRef<Value *> MDSources = {}) const;
  Value *createSimpleStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                           int Alignment,
                           ArrayRef<Value *> MDSources = {}) const;

  Value *createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                              Value *Predicate, int Alignment,
                              ArrayRef<Value *> MDSources = {}) const;
  Value *createPredicatedStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                               Value *Predicate, int Alignment,
                               ArrayRef<Value *> MDSources = {}) const;

  DepList getUpwardDeps(Instruction *In, Instruction *Base) const;
  bool createAddressGroups();
  MoveList createLoadGroups(const AddrList &Group) const;
  MoveList createStoreGroups(const AddrList &Group) const;
  bool moveTogether(MoveGroup &Move) const;
  template <typename T> InstMap cloneBefore(Instruction *To, T &&Insts) const;

  void realignLoadGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int ScLen, Value *AlignVal, Value *AlignAddr) const;
  void realignStoreGroup(IRBuilderBase &Builder, const ByteSpan &VSpan,
                         int ScLen, Value *AlignVal, Value *AlignAddr) const;
  bool realignGroup(const MoveGroup &Move) const;

  Value *makeTestIfUnaligned(IRBuilderBase &Builder, Value *AlignVal,
                             int Alignment) const;

  friend raw_ostream &operator<<(raw_ostream &OS, const AddrInfo &AI);
  friend raw_ostream &operator<<(raw_ostream &OS, const MoveGroup &MG);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan::Block &B);
  friend raw_ostream &operator<<(raw_ostream &OS, const ByteSpan &BS);

  std::map<Instruction *, AddrList> AddrGroups;
  const HexagonVectorCombine &HVC;
};

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::AddrInfo &AI) {
  OS << "Inst: " << AI.Inst << " " << *AI.Inst << '\n';
  OS << "Addr: " << *AI.Addr << '\n';
  OS << "Type: " << *AI.ValTy << '\n';
  OS << "HaveAlign: " << AI.HaveAlign.value() << '\n';
  OS << "NeedAlign: " << AI.NeedAlign.value() << '\n';
  OS << "Offset: " << AI.Offset;
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::MoveGroup &MG) {
  OS << "IsLoad:" << (MG.IsLoad ? "yes" : "no");
  OS << ", IsHvx:" << (MG.IsHvx ? "yes" : "no") << '\n';
  OS << "Main\n";
  for (Instruction *I : MG.Main)
    OS << " " << *I << '\n';
  OS << "Deps\n";
  for (Instruction *I : MG.Deps)
    OS << " " << *I << '\n';
  OS << "Clones\n";
  for (auto [K, V] : MG.Clones) {
    OS << " ";
    K->printAsOperand(OS, false);
    OS << "\t-> " << *V << '\n';
  }
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS,
                        const AlignVectors::ByteSpan::Block &B) {
  OS << " @" << B.Pos << " [" << B.Seg.Start << ',' << B.Seg.Size << "] ";
  if (B.Seg.Val == reinterpret_cast<const Value *>(&B)) {
    OS << "(self:" << B.Seg.Val << ')';
  } else if (B.Seg.Val != nullptr) {
    OS << *B.Seg.Val;
  } else {
    OS << "(null)";
  }
  return OS;
}

LLVM_ATTRIBUTE_UNUSED
raw_ostream &operator<<(raw_ostream &OS, const AlignVectors::ByteSpan &BS) {
  OS << "ByteSpan[size=" << BS.size() << ", extent=" << BS.extent() << '\n';
  for (const AlignVectors::ByteSpan::Block &B : BS)
    OS << B << '\n';
  OS << ']';
  return OS;
}

class HvxIdioms {
public:
  HvxIdioms(const HexagonVectorCombine &HVC_) : HVC(HVC_) {
    auto *Int32Ty = HVC.getIntTy(32);
    HvxI32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/false);
    HvxP32Ty = HVC.getHvxTy(Int32Ty, /*Pair=*/true);
  }

  bool run();

private:
  enum Signedness { Positive, Signed, Unsigned };

  // Value + sign
  // This is to keep track of whether the value should be treated as signed
  // or unsigned, or is known to be positive.
  struct SValue {
    Value *Val;
    Signedness Sgn;
  };

  struct FxpOp {
    unsigned Opcode;
    unsigned Frac; // Number of fraction bits
    SValue X, Y;
    // If present, add 1 << RoundAt before shift:
    std::optional<unsigned> RoundAt;
    VectorType *ResTy;
  };
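
  // For example (illustrative only): a Q15 fixed-point multiply with
  // rounding, i.e. (X * Y + (1 << 14)) >> 15, corresponds to Opcode = Mul,
  // Frac = 15, RoundAt = 14. The case RoundAt == Frac - 1 is the usual
  // "round to nearest" and is printed as ":rnd" by the dumper below.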

  auto getNumSignificantBits(Value *V, Instruction *In) const
      -> std::pair<unsigned, Signedness>;
  auto canonSgn(SValue X, SValue Y) const -> std::pair<SValue, SValue>;

  auto matchFxpMul(Instruction &In) const -> std::optional<FxpOp>;
  auto processFxpMul(Instruction &In, const FxpOp &Op) const -> Value *;

  auto processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
                            const FxpOp &Op) const -> Value *;
  auto createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  auto createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y,
                    bool Rounding) const -> Value *;
  // Return {Result, Carry}, where Carry is a vector predicate.
  auto createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
                      Value *CarryIn = nullptr) const
      -> std::pair<Value *, Value *>;
  auto createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const -> Value *;
  auto createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> Value *;
  auto createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
      -> std::pair<Value *, Value *>;
  auto createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     ArrayRef<Value *> WordY) const -> SmallVector<Value *>;
  auto createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
                     Signedness SgnX, ArrayRef<Value *> WordY,
                     Signedness SgnY) const -> SmallVector<Value *>;

  VectorType *HvxI32Ty;
  VectorType *HvxP32Ty;
  const HexagonVectorCombine &HVC;

  friend raw_ostream &operator<<(raw_ostream &, const FxpOp &);
};

[[maybe_unused]] raw_ostream &operator<<(raw_ostream &OS,
                                         const HvxIdioms::FxpOp &Op) {
  static const char *SgnNames[] = {"Positive", "Signed", "Unsigned"};
  OS << Instruction::getOpcodeName(Op.Opcode) << '.' << Op.Frac;
  if (Op.RoundAt.has_value()) {
    if (Op.Frac != 0 && *Op.RoundAt == Op.Frac - 1) {
      OS << ":rnd";
    } else {
      OS << " + 1<<" << *Op.RoundAt;
    }
  }
  OS << "\n X:(" << SgnNames[Op.X.Sgn] << ") " << *Op.X.Val << "\n"
     << " Y:(" << SgnNames[Op.Y.Sgn] << ") " << *Op.Y.Val;
  return OS;
}

} // namespace

namespace {

template <typename T> T *getIfUnordered(T *MaybeT) {
  return MaybeT && MaybeT->isUnordered() ? MaybeT : nullptr;
}
template <typename T> T *isCandidate(Instruction *In) {
  return dyn_cast<T>(In);
}
template <> LoadInst *isCandidate<LoadInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<LoadInst>(In));
}
template <> StoreInst *isCandidate<StoreInst>(Instruction *In) {
  return getIfUnordered(dyn_cast<StoreInst>(In));
}

#if !defined(_MSC_VER) || _MSC_VER >= 1926
// VS2017 and some versions of VS2019 have trouble compiling this:
//   error C2976: 'std::map': too few template arguments
// VS 2019 16.x is known to work, except for 16.4/16.5 (MSC_VER 1924/1925).
template <typename Pred, typename... Ts>
void erase_if(std::map<Ts...> &map, Pred p)
#else
template <typename Pred, typename T, typename U>
void erase_if(std::map<T, U> &map, Pred p)
#endif
{
  for (auto i = map.begin(), e = map.end(); i != e;) {
    if (p(*i))
      i = map.erase(i);
    else
      i = std::next(i);
  }
}

// Forward other erase_ifs to the LLVM implementations.
template <typename Pred, typename T> void erase_if(T &&container, Pred p) {
  llvm::erase_if(std::forward<T>(container), p);
}

} // namespace

// --- Begin AlignVectors

// For brevity, only consider loads. We identify a group of loads where we
// know the relative differences between their addresses, so we know how they
// are laid out in memory (relative to one another). These loads can overlap,
// and can be shorter or longer than the desired vector length.
// Ultimately we want to generate a sequence of aligned loads that will load
// every byte that the original loads loaded, and have the program use these
// loaded values instead of the original loads.
// We consider the contiguous memory area spanned by all these loads.
//
// Let's say that a single aligned vector load can load 16 bytes at a time.
// If the program wanted to use a byte at offset 13 from the beginning of the
// original span, it will be a byte at offset 13+x in the aligned data for
// some x >= 0. This may happen to be in the first aligned load, or in the
// load following it. Since we generally don't know what that alignment value
// is at compile time, we proactively do valigns on the aligned loads, so
// that the byte that was at offset 13 is still at offset 13 after the
// valigns.
//
// This will be the starting point for making the rest of the program use the
// data loaded by the new loads.
// For each original load, and its users:
//   %v = load ...
//   ... = %v
//   ... = %v
// we create
//   %new_v = extract/combine/shuffle data from loaded/valigned vectors so
//            it contains the same value as %v did before
// then replace all users of %v with %new_v.
//   ... = %new_v
//   ... = %new_v
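//
// As a concrete sketch (IR names invented for illustration), a lone
// unaligned 16-byte load
//   %v = load <16 x i8>, ptr %p, align 1
// becomes, roughly:
//   %ap = %p rounded down to a 16-byte boundary
//   %lo = load <16 x i8>, ptr %ap, align 16
//   %hp = getelementptr i8, ptr %ap, i32 16
//   %hi = load <16 x i8>, ptr %hp, align 16
//   %nv = valign(%lo, %hi, %p)  ; right-align by the misalignment of %p
// and all uses of %v are rewritten to use %nv.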

auto AlignVectors::ByteSpan::extent() const -> int {
  if (size() == 0)
    return 0;
  int Min = Blocks[0].Pos;
  int Max = Blocks[0].Pos + Blocks[0].Seg.Size;
  for (int i = 1, e = size(); i != e; ++i) {
    Min = std::min(Min, Blocks[i].Pos);
    Max = std::max(Max, Blocks[i].Pos + Blocks[i].Seg.Size);
  }
  return Max - Min;
}

auto AlignVectors::ByteSpan::section(int Start, int Length) const -> ByteSpan {
  ByteSpan Section;
  for (const ByteSpan::Block &B : Blocks) {
    int L = std::max(B.Pos, Start);                       // Left end.
    int R = std::min(B.Pos + B.Seg.Size, Start + Length); // Right end+1.
    if (L < R) {
      // How much to chop off the beginning of the segment:
      int Off = L > B.Pos ? L - B.Pos : 0;
      Section.Blocks.emplace_back(B.Seg.Val, B.Seg.Start + Off, R - L, L);
    }
  }
  return Section;
}

auto AlignVectors::ByteSpan::shift(int Offset) -> ByteSpan & {
  for (Block &B : Blocks)
    B.Pos += Offset;
  return *this;
}

auto AlignVectors::ByteSpan::values() const -> SmallVector<Value *, 8> {
  SmallVector<Value *, 8> Values(Blocks.size());
  for (int i = 0, e = Blocks.size(); i != e; ++i)
    Values[i] = Blocks[i].Seg.Val;
  return Values;
}

auto AlignVectors::getAlignFromValue(const Value *V) const -> Align {
  const auto *C = dyn_cast<ConstantInt>(V);
  assert(C && "Alignment must be a compile-time constant integer");
  return C->getAlignValue();
}

auto AlignVectors::getAddrInfo(Instruction &In) const
    -> std::optional<AddrInfo> {
  if (auto *L = isCandidate<LoadInst>(&In))
    return AddrInfo(HVC, L, L->getPointerOperand(), L->getType(),
                    L->getAlign());
  if (auto *S = isCandidate<StoreInst>(&In))
    return AddrInfo(HVC, S, S->getPointerOperand(),
                    S->getValueOperand()->getType(), S->getAlign());
  if (auto *II = isCandidate<IntrinsicInst>(&In)) {
    Intrinsic::ID ID = II->getIntrinsicID();
    switch (ID) {
    case Intrinsic::masked_load:
      return AddrInfo(HVC, II, II->getArgOperand(0), II->getType(),
                      getAlignFromValue(II->getArgOperand(1)));
    case Intrinsic::masked_store:
      return AddrInfo(HVC, II, II->getArgOperand(1),
                      II->getArgOperand(0)->getType(),
                      getAlignFromValue(II->getArgOperand(2)));
    }
  }
  return std::nullopt;
}

auto AlignVectors::isHvx(const AddrInfo &AI) const -> bool {
  return HVC.HST.isTypeForHVX(AI.ValTy);
}

auto AlignVectors::getPayload(Value *Val) const -> Value * {
  if (auto *In = dyn_cast<Instruction>(Val)) {
    Intrinsic::ID ID = 0;
    if (auto *II = dyn_cast<IntrinsicInst>(In))
      ID = II->getIntrinsicID();
    if (isa<StoreInst>(In) || ID == Intrinsic::masked_store)
      return In->getOperand(0);
  }
  return Val;
}

auto AlignVectors::getMask(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::masked_load:
      return II->getArgOperand(2);
    case Intrinsic::masked_store:
      return II->getArgOperand(3);
    }
  }

  Type *ValTy = getPayload(Val)->getType();
  if (auto *VecTy = dyn_cast<VectorType>(ValTy))
    return HVC.getFullValue(HVC.getBoolTy(HVC.length(VecTy)));
  return HVC.getFullValue(HVC.getBoolTy());
}

auto AlignVectors::getPassThrough(Value *Val) const -> Value * {
  if (auto *II = dyn_cast<IntrinsicInst>(Val)) {
    if (II->getIntrinsicID() == Intrinsic::masked_load)
      return II->getArgOperand(3);
  }
  return UndefValue::get(getPayload(Val)->getType());
}

auto AlignVectors::createAdjustedPointer(IRBuilderBase &Builder, Value *Ptr,
                                         Type *ValTy, int Adjust,
                                         const InstMap &CloneMap) const
    -> Value * {
  if (auto *I = dyn_cast<Instruction>(Ptr))
    if (Instruction *New = CloneMap.lookup(I))
      Ptr = New;
  return Builder.CreatePtrAdd(Ptr, HVC.getConstInt(Adjust), "gep");
}

auto AlignVectors::createAlignedPointer(IRBuilderBase &Builder, Value *Ptr,
                                        Type *ValTy, int Alignment,
                                        const InstMap &CloneMap) const
    -> Value * {
  auto remap = [&](Value *V) -> Value * {
    if (auto *I = dyn_cast<Instruction>(V)) {
      for (auto [Old, New] : CloneMap)
        I->replaceUsesOfWith(Old, New);
      return I;
    }
    return V;
  };
  Value *AsInt = Builder.CreatePtrToInt(Ptr, HVC.getIntTy(), "pti");
  Value *Mask = HVC.getConstInt(-Alignment);
  Value *And = Builder.CreateAnd(remap(AsInt), Mask, "and");
  return Builder.CreateIntToPtr(
      And, PointerType::getUnqual(ValTy->getContext()), "itp");
}
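
// For instance, with Alignment = 128, createAlignedPointer computes the
// equivalent of (uintptr_t)Ptr & -128, i.e. Ptr rounded down to the nearest
// 128-byte boundary, via the ptrtoint/and/inttoptr sequence above.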

auto AlignVectors::createLoad(IRBuilderBase &Builder, Type *ValTy, Value *Ptr,
                              Value *Predicate, int Alignment, Value *Mask,
                              Value *PassThru,
                              ArrayRef<Value *> MDSources) const -> Value * {
  bool HvxHasPredLoad = HVC.HST.useHVXV62Ops();
  // Predicate is nullptr if not creating a predicated load.
  if (Predicate) {
    assert(!Predicate->getType()->isVectorTy() &&
           "Expecting scalar predicate");
    if (HVC.isFalse(Predicate))
      return UndefValue::get(ValTy);
    if (!HVC.isTrue(Predicate) && HvxHasPredLoad) {
      Value *Load = createPredicatedLoad(Builder, ValTy, Ptr, Predicate,
                                         Alignment, MDSources);
      return Builder.CreateSelect(Mask, Load, PassThru);
    }
    // Predicate == true here.
  }
  assert(!HVC.isUndef(Mask)); // Should this be allowed?
  if (HVC.isZero(Mask))
    return PassThru;
  if (HVC.isTrue(Mask))
    return createSimpleLoad(Builder, ValTy, Ptr, Alignment, MDSources);

  Instruction *Load = Builder.CreateMaskedLoad(ValTy, Ptr, Align(Alignment),
                                               Mask, PassThru, "mld");
  propagateMetadata(Load, MDSources);
  return Load;
}

auto AlignVectors::createSimpleLoad(IRBuilderBase &Builder, Type *ValTy,
                                    Value *Ptr, int Alignment,
                                    ArrayRef<Value *> MDSources) const
    -> Value * {
  Instruction *Load =
      Builder.CreateAlignedLoad(ValTy, Ptr, Align(Alignment), "ald");
  propagateMetadata(Load, MDSources);
  return Load;
}

auto AlignVectors::createPredicatedLoad(IRBuilderBase &Builder, Type *ValTy,
                                        Value *Ptr, Value *Predicate,
                                        int Alignment,
                                        ArrayRef<Value *> MDSources) const
    -> Value * {
  assert(HVC.HST.isTypeForHVX(ValTy) &&
         "Predicated 'scalar' vector loads not yet supported");
  assert(Predicate);
  assert(!Predicate->getType()->isVectorTy() && "Expecting scalar predicate");
  assert(HVC.getSizeOf(ValTy, HVC.Alloc) % Alignment == 0);
  if (HVC.isFalse(Predicate))
    return UndefValue::get(ValTy);
  if (HVC.isTrue(Predicate))
    return createSimpleLoad(Builder, ValTy, Ptr, Alignment, MDSources);

  auto V6_vL32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vL32b_pred_ai);
  // FIXME: This may not put the offset from Ptr into the vmem offset.
  return HVC.createHvxIntrinsic(Builder, V6_vL32b_pred_ai, ValTy,
                                {Predicate, Ptr, HVC.getConstInt(0)}, {},
                                MDSources);
}

auto AlignVectors::createStore(IRBuilderBase &Builder, Value *Val, Value *Ptr,
                               Value *Predicate, int Alignment, Value *Mask,
                               ArrayRef<Value *> MDSources) const -> Value * {
  if (HVC.isZero(Mask) || HVC.isUndef(Val) || HVC.isUndef(Mask))
    return UndefValue::get(Val->getType());
  assert(!Predicate || (!Predicate->getType()->isVectorTy() &&
                        "Expecting scalar predicate"));
  if (Predicate) {
    if (HVC.isFalse(Predicate))
      return UndefValue::get(Val->getType());
    if (HVC.isTrue(Predicate))
      Predicate = nullptr;
  }
  // Here both Predicate and Mask are true or unknown.

  if (HVC.isTrue(Mask)) {
    if (Predicate) { // Predicate unknown
      return createPredicatedStore(Builder, Val, Ptr, Predicate, Alignment,
                                   MDSources);
    }
    // Predicate is true:
    return createSimpleStore(Builder, Val, Ptr, Alignment, MDSources);
  }

  // Mask is unknown
  if (!Predicate) {
    Instruction *Store =
        Builder.CreateMaskedStore(Val, Ptr, Align(Alignment), Mask);
    propagateMetadata(Store, MDSources);
    return Store;
  }

  // Both Predicate and Mask are unknown.
  // Emulate masked store with predicated-load + mux + predicated-store.
  Value *PredLoad = createPredicatedLoad(Builder, Val->getType(), Ptr,
                                         Predicate, Alignment, MDSources);
  Value *Mux = Builder.CreateSelect(Mask, Val, PredLoad);
  return createPredicatedStore(Builder, Mux, Ptr, Predicate, Alignment,
                               MDSources);
}

auto AlignVectors::createSimpleStore(IRBuilderBase &Builder, Value *Val,
                                     Value *Ptr, int Alignment,
                                     ArrayRef<Value *> MDSources) const
    -> Value * {
  Instruction *Store = Builder.CreateAlignedStore(Val, Ptr, Align(Alignment));
  propagateMetadata(Store, MDSources);
  return Store;
}

auto AlignVectors::createPredicatedStore(IRBuilderBase &Builder, Value *Val,
                                         Value *Ptr, Value *Predicate,
                                         int Alignment,
                                         ArrayRef<Value *> MDSources) const
    -> Value * {
  assert(HVC.HST.isTypeForHVX(Val->getType()) &&
         "Predicated 'scalar' vector stores not yet supported");
  assert(Predicate);
  if (HVC.isFalse(Predicate))
    return UndefValue::get(Val->getType());
  if (HVC.isTrue(Predicate))
    return createSimpleStore(Builder, Val, Ptr, Alignment, MDSources);

  assert(HVC.getSizeOf(Val, HVC.Alloc) % Alignment == 0);
  auto V6_vS32b_pred_ai = HVC.HST.getIntrinsicId(Hexagon::V6_vS32b_pred_ai);
  // FIXME: This may not put the offset from Ptr into the vmem offset.
  return HVC.createHvxIntrinsic(Builder, V6_vS32b_pred_ai, nullptr,
                                {Predicate, Ptr, HVC.getConstInt(0), Val}, {},
                                MDSources);
}

auto AlignVectors::getUpwardDeps(Instruction *In, Instruction *Base) const
    -> DepList {
  BasicBlock *Parent = Base->getParent();
  assert(In->getParent() == Parent &&
         "Base and In should be in the same block");
  assert(Base->comesBefore(In) && "Base should come before In");

  DepList Deps;
  std::deque<Instruction *> WorkQ = {In};
  while (!WorkQ.empty()) {
    Instruction *D = WorkQ.front();
    WorkQ.pop_front();
    if (D != In)
      Deps.insert(D);
    for (Value *Op : D->operands()) {
      if (auto *I = dyn_cast<Instruction>(Op)) {
        if (I->getParent() == Parent && Base->comesBefore(I))
          WorkQ.push_back(I);
      }
    }
  }
  return Deps;
}

auto AlignVectors::createAddressGroups() -> bool {
  // An address group created here may contain instructions spanning
  // multiple basic blocks.
  AddrList WorkStack;

  auto findBaseAndOffset = [&](AddrInfo &AI) -> std::pair<Instruction *, int> {
    for (AddrInfo &W : WorkStack) {
      if (auto D = HVC.calculatePointerDifference(AI.Addr, W.Addr))
        return std::make_pair(W.Inst, *D);
    }
    return std::make_pair(nullptr, 0);
  };

  auto traverseBlock = [&](DomTreeNode *DomN, auto Visit) -> void {
    BasicBlock &Block = *DomN->getBlock();
    for (Instruction &I : Block) {
      auto AI = this->getAddrInfo(I); // Use this-> for gcc6.
      if (!AI)
        continue;
      auto F = findBaseAndOffset(*AI);
      Instruction *GroupInst;
      if (Instruction *BI = F.first) {
        AI->Offset = F.second;
        GroupInst = BI;
      } else {
        WorkStack.push_back(*AI);
        GroupInst = AI->Inst;
      }
      AddrGroups[GroupInst].push_back(*AI);
    }

    for (DomTreeNode *C : DomN->children())
      Visit(C, Visit);

    while (!WorkStack.empty() && WorkStack.back().Inst->getParent() == &Block)
      WorkStack.pop_back();
  };

  traverseBlock(HVC.DT.getRootNode(), traverseBlock);
  assert(WorkStack.empty());

  // AddrGroups are formed.

  // Remove groups of size 1.
  erase_if(AddrGroups, [](auto &G) { return G.second.size() == 1; });
  // Remove groups that don't use HVX types.
  erase_if(AddrGroups, [&](auto &G) {
    return llvm::none_of(
        G.second, [&](auto &I) { return HVC.HST.isTypeForHVX(I.ValTy); });
  });

  return !AddrGroups.empty();
}

auto AlignVectors::createLoadGroups(const AddrList &Group) const -> MoveList {
  // Form load groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.
  unsigned SizeLimit = VAGroupSizeLimit;
  if (SizeLimit == 0)
    return {};

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    if (Move.Main.size() >= SizeLimit)
      return false;
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // Leading instruction in the load group.
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    // Check if it's safe to move the load.
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator()))
      return false;
    // And if it's safe to clone the dependencies.
    auto isSafeToCopyAtBase = [&](const Instruction *I) {
      return HVC.isSafeToMoveBeforeInBB(*I, Base->getIterator()) &&
             HVC.isSafeToClone(*I);
    };
    DepList Deps = getUpwardDeps(Info.Inst, Base);
    if (!llvm::all_of(Deps, isSafeToCopyAtBase))
      return false;

    Move.Main.push_back(Info.Inst);
    llvm::append_range(Move.Deps, Deps);
    return true;
  };

  MoveList LoadGroups;

  for (const AddrInfo &Info : Group) {
    if (!Info.Inst->mayReadFromMemory())
      continue;
    if (LoadGroups.empty() || !tryAddTo(Info, LoadGroups.back()))
      LoadGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), true);
  }

  // Erase singleton groups.
  erase_if(LoadGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });

  // Erase HVX groups on targets < HvxV62 (due to lack of predicated loads).
  if (!HVC.HST.useHVXV62Ops())
    erase_if(LoadGroups, [](const MoveGroup &G) { return G.IsHvx; });

  return LoadGroups;
}

auto AlignVectors::createStoreGroups(const AddrList &Group) const -> MoveList {
  // Form store groups.
  // To avoid complications with moving code across basic blocks, only form
  // groups that are contained within a single basic block.
  unsigned SizeLimit = VAGroupSizeLimit;
  if (SizeLimit == 0)
    return {};

  auto tryAddTo = [&](const AddrInfo &Info, MoveGroup &Move) {
    assert(!Move.Main.empty() && "Move group should have non-empty Main");
    if (Move.Main.size() >= SizeLimit)
      return false;
    // For stores with return values we'd have to collect downward
    // dependencies. There are no such stores that we handle at the moment,
    // so omit that.
    assert(Info.Inst->getType()->isVoidTy() &&
           "Not handling stores with return values");
    // Don't mix HVX and non-HVX instructions.
    if (Move.IsHvx != isHvx(Info))
      return false;
    // For stores we need to be careful whether it's safe to move them.
    // Stores that are otherwise safe to move together may not appear safe
    // to move over one another (i.e. isSafeToMoveBefore may return false).
    Instruction *Base = Move.Main.front();
    if (Base->getParent() != Info.Inst->getParent())
      return false;
    if (!HVC.isSafeToMoveBeforeInBB(*Info.Inst, Base->getIterator(),
                                    Move.Main))
      return false;
    Move.Main.push_back(Info.Inst);
    return true;
  };

  MoveList StoreGroups;

  for (auto I = Group.rbegin(), E = Group.rend(); I != E; ++I) {
    const AddrInfo &Info = *I;
    if (!Info.Inst->mayWriteToMemory())
      continue;
    if (StoreGroups.empty() || !tryAddTo(Info, StoreGroups.back()))
      StoreGroups.emplace_back(Info, Group.front().Inst, isHvx(Info), false);
  }

  // Erase singleton groups.
  erase_if(StoreGroups, [](const MoveGroup &G) { return G.Main.size() <= 1; });

  // Erase HVX groups on targets < HvxV62 (due to lack of predicated loads).
  if (!HVC.HST.useHVXV62Ops())
    erase_if(StoreGroups, [](const MoveGroup &G) { return G.IsHvx; });

  // Erase groups where every store is a full HVX vector. The reason is that
  // aligning predicated stores generates complex code that may be less
  // efficient than a sequence of unaligned vector stores.
  if (!VADoFullStores) {
    erase_if(StoreGroups, [this](const MoveGroup &G) {
      return G.IsHvx && llvm::all_of(G.Main, [this](Instruction *S) {
               auto MaybeInfo = this->getAddrInfo(*S);
               assert(MaybeInfo.has_value());
               return HVC.HST.isHVXVectorType(
                   EVT::getEVT(MaybeInfo->ValTy, false));
             });
    });
  }

  return StoreGroups;
}

auto AlignVectors::moveTogether(MoveGroup &Move) const -> bool {
  // Move all instructions to be adjacent.
  assert(!Move.Main.empty() && "Move group should have non-empty Main");
  Instruction *Where = Move.Main.front();

  if (Move.IsLoad) {
    // Move all the loads (and dependencies) to where the first load is.
    // Clone all deps to before Where, keeping order.
    Move.Clones = cloneBefore(Where, Move.Deps);
    // Move all main instructions to after Where, keeping order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main) {
      if (M != Where)
        M->moveAfter(Where);
      for (auto [Old, New] : Move.Clones)
        M->replaceUsesOfWith(Old, New);
      Where = M;
    }
    // Replace Deps with the clones.
    for (int i = 0, e = Move.Deps.size(); i != e; ++i)
      Move.Deps[i] = Move.Clones[Move.Deps[i]];
  } else {
    // Move all the stores to where the last store is.
    // NOTE: Deps are empty for "store" groups. If they need to be
    // non-empty, decide on the order.
    assert(Move.Deps.empty());
    // Move all main instructions to before Where, inverting order.
    ArrayRef<Instruction *> Main(Move.Main);
    for (Instruction *M : Main.drop_front(1)) {
      M->moveBefore(Where);
      Where = M;
    }
  }

  return Move.Main.size() + Move.Deps.size() > 1;
}

template <typename T>
auto AlignVectors::cloneBefore(Instruction *To, T &&Insts) const -> InstMap {
  InstMap Map;

  for (Instruction *I : Insts) {
    assert(HVC.isSafeToClone(*I));
    Instruction *C = I->clone();
    C->setName(Twine("c.") + I->getName() + ".");
    C->insertBefore(To);

    for (auto [Old, New] : Map)
      C->replaceUsesOfWith(Old, New);
    Map.insert(std::make_pair(I, C));
  }
  return Map;
}

auto AlignVectors::realignLoadGroup(IRBuilderBase &Builder,
                                    const ByteSpan &VSpan, int ScLen,
                                    Value *AlignVal, Value *AlignAddr) const
    -> void {
  LLVM_DEBUG(dbgs() << __func__ << "\n");

  Type *SecTy = HVC.getByteTy(ScLen);
  // Number of ScLen-byte sectors needed to cover the span (rounding up).
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);
  BasicBlock::iterator BasePos = Builder.GetInsertPoint();
  BasicBlock *BaseBlock = Builder.GetInsertBlock();

  ByteSpan ASpan;
  auto *True = HVC.getFullValue(HVC.getBoolTy(ScLen));
  auto *Undef = UndefValue::get(SecTy);

  // Created load does not have to be "Instruction" (e.g. "undef").
  SmallVector<Value *> Loads(NumSectors + DoAlign, nullptr);

  // We could create all of the aligned loads, and generate the valigns
  // at the location of the first load, but for large load groups, this
  // could create highly suboptimal code (there have been groups of 140+
  // loads in real code).
  // Instead, place the loads/valigns as close to the users as possible.
  // In any case we need to have a mapping from the blocks of VSpan (the
  // span covered by the pre-existing loads) to ASpan (the span covered
  // by the aligned loads). There is a small problem, though: ASpan needs
  // to have pointers to the loads/valigns, but we don't have these loads
  // because we don't know where to put them yet. We find out by creating
  // a section of ASpan that corresponds to values (blocks) from VSpan,
  // and checking where the new load should be placed. We need to attach
  // this location information to each block in ASpan somehow, so we put
  // distinct values for Seg.Val in each ASpan.Blocks[i], and use a map
  // to store the location for each Seg.Val.
  // The distinct values happen to be Blocks[i].Seg.Val = &Blocks[i],
  // which helps with printing ByteSpans without crashing when printing
  // Segments with these temporary identifiers in place of Val.

  // Populate the blocks first, to avoid reallocations of the vector
  // interfering with generating the placeholder addresses.
  for (int Index = 0; Index != NumSectors; ++Index)
    ASpan.Blocks.emplace_back(nullptr, ScLen, Index * ScLen);
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan.Blocks[Index].Seg.Val =
        reinterpret_cast<Value *>(&ASpan.Blocks[Index]);
  }

  // Multiple values from VSpan can map to the same value in ASpan. Since we
  // try to create loads lazily, we need to find the earliest use for each
  // value from ASpan.
  DenseMap<void *, Instruction *> EarliestUser;
  auto isEarlier = [](Instruction *A, Instruction *B) {
    if (B == nullptr)
      return true;
    if (A == nullptr)
      return false;
    assert(A->getParent() == B->getParent());
    return A->comesBefore(B);
  };
  auto earliestUser = [&](const auto &Uses) {
    Instruction *User = nullptr;
    for (const Use &U : Uses) {
      auto *I = dyn_cast<Instruction>(U.getUser());
      assert(I != nullptr && "Load used in a non-instruction?");
      // Make sure we only consider users in this block, but we need
      // to remember if there were users outside the block too. This is
      // because if no users are found, aligned loads will not be created.
      if (I->getParent() == BaseBlock) {
        if (!isa<PHINode>(I))
          User = std::min(User, I, isEarlier);
      } else {
        User = std::min(User, BaseBlock->getTerminator(), isEarlier);
      }
    }
    return User;
  };

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size);
    for (const ByteSpan::Block &S : ASection) {
      EarliestUser[S.Seg.Val] = std::min(
          EarliestUser[S.Seg.Val], earliestUser(B.Seg.Val->uses()), isEarlier);
    }
  }

  LLVM_DEBUG({
    dbgs() << "ASpan:\n" << ASpan << '\n';
    dbgs() << "Earliest users of ASpan:\n";
    for (auto &[Val, User] : EarliestUser) {
      dbgs() << Val << "\n ->" << *User << '\n';
    }
  });

  auto createLoad = [&](IRBuilderBase &Builder, const ByteSpan &VSpan,
                        int Index, bool MakePred) {
    Value *Ptr =
        createAdjustedPointer(Builder, AlignAddr, SecTy, Index * ScLen);
    Value *Predicate =
        MakePred ? makeTestIfUnaligned(Builder, AlignVal, ScLen) : nullptr;

    // If vector shifting is potentially needed, accumulate metadata
    // from source sections of twice the load width.
    int Start = (Index - DoAlign) * ScLen;
    int Width = (1 + DoAlign) * ScLen;
    return this->createLoad(Builder, SecTy, Ptr, Predicate, ScLen, True, Undef,
                            VSpan.section(Start, Width).values());
  };

  auto moveBefore = [this](Instruction *In, Instruction *To) {
    // Move In and its upward dependencies to before To.
    assert(In->getParent() == To->getParent());
    DepList Deps = getUpwardDeps(In, To);
    In->moveBefore(To);
    // DepList is sorted with respect to positions in the basic block.
    InstMap Map = cloneBefore(In, Deps);
    for (auto [Old, New] : Map)
      In->replaceUsesOfWith(Old, New);
  };

  // Generate necessary loads at appropriate locations.
  LLVM_DEBUG(dbgs() << "Creating loads for ASpan sectors\n");
  for (int Index = 0; Index != NumSectors + 1; ++Index) {
    // In ASpan, each block will be either a single aligned load, or a
    // valign of a pair of loads. In the latter case, an aligned load j
    // will belong to the current valign, and the one in the previous
    // block (for j > 0).
    // Place the load at a location which will dominate the valign, assuming
    // the valign will be placed right before the earliest user.
    Instruction *PrevAt =
        DoAlign && Index > 0 ? EarliestUser[&ASpan[Index - 1]] : nullptr;
    Instruction *ThisAt =
        Index < NumSectors ? EarliestUser[&ASpan[Index]] : nullptr;
    if (auto *Where = std::min(PrevAt, ThisAt, isEarlier)) {
      Builder.SetInsertPoint(Where);
      Loads[Index] =
          createLoad(Builder, VSpan, Index, DoAlign && Index == NumSectors);
      // We know it's safe to put the load at BasePos, but we'd prefer to put
      // it at "Where". To see if the load is safe to be placed at Where, put
      // it there first and then check if it's safe to move it to BasePos.
      // If not, then the load needs to be placed at BasePos.
      // We can't do this check proactively because we need the load to exist
      // in order to check legality.
      if (auto *Load = dyn_cast<Instruction>(Loads[Index])) {
        if (!HVC.isSafeToMoveBeforeInBB(*Load, BasePos))
          moveBefore(Load, &*BasePos);
      }
      LLVM_DEBUG(dbgs() << "Loads[" << Index << "]:" << *Loads[Index] << '\n');
    }
  }

  // Generate valigns if needed, and fill in proper values in ASpan.
  LLVM_DEBUG(dbgs() << "Creating values for ASpan sectors\n");
  for (int Index = 0; Index != NumSectors; ++Index) {
    ASpan[Index].Seg.Val = nullptr;
    if (auto *Where = EarliestUser[&ASpan[Index]]) {
      Builder.SetInsertPoint(Where);
      Value *Val = Loads[Index];
      assert(Val != nullptr);
      if (DoAlign) {
        Value *NextLoad = Loads[Index + 1];
        assert(NextLoad != nullptr);
        Val = HVC.vralignb(Builder, Val, NextLoad, AlignVal);
      }
      ASpan[Index].Seg.Val = Val;
      LLVM_DEBUG(dbgs() << "ASpan[" << Index << "]:" << *Val << '\n');
    }
  }

  for (const ByteSpan::Block &B : VSpan) {
    ByteSpan ASection = ASpan.section(B.Pos, B.Seg.Size).shift(-B.Pos);
    Value *Accum = UndefValue::get(HVC.getByteTy(B.Seg.Size));
    Builder.SetInsertPoint(cast<Instruction>(B.Seg.Val));

    // We're generating a reduction, where each instruction depends on
    // the previous one, so we need to order them according to the position
    // of their inputs in the code.
    std::vector<ByteSpan::Block *> ABlocks;
    for (ByteSpan::Block &S : ASection) {
      if (S.Seg.Val != nullptr)
        ABlocks.push_back(&S);
    }
    llvm::sort(ABlocks,
               [&](const ByteSpan::Block *A, const ByteSpan::Block *B) {
                 return isEarlier(cast<Instruction>(A->Seg.Val),
                                  cast<Instruction>(B->Seg.Val));
               });
    for (ByteSpan::Block *S : ABlocks) {
      // The processing of the data loaded by the aligned loads
      // needs to be inserted after the data is available.
      Instruction *SegI = cast<Instruction>(S->Seg.Val);
      Builder.SetInsertPoint(&*std::next(SegI->getIterator()));
      Value *Pay = HVC.vbytes(Builder, getPayload(S->Seg.Val));
      Accum =
          HVC.insertb(Builder, Accum, Pay, S->Seg.Start, S->Seg.Size, S->Pos);
    }
    // Instead of casting everything to bytes for the vselect, cast to the
    // original value type. This will avoid complications with casting masks.
    // For example, in cases when the original mask applied to i32, it could
    // be converted to a mask applicable to i8 via pred_typecast intrinsic,
    // but if the mask is not exactly of HVX length, extra handling would be
    // needed to make it work.
    Type *ValTy = getPayload(B.Seg.Val)->getType();
    Value *Cast = Builder.CreateBitCast(Accum, ValTy, "cst");
    Value *Sel = Builder.CreateSelect(getMask(B.Seg.Val), Cast,
                                      getPassThrough(B.Seg.Val), "sel");
    B.Seg.Val->replaceAllUsesWith(Sel);
  }
}

auto AlignVectors::realignStoreGroup(IRBuilderBase &Builder,
                                     const ByteSpan &VSpan, int ScLen,
                                     Value *AlignVal, Value *AlignAddr) const
    -> void {
  LLVM_DEBUG(dbgs() << __func__ << "\n");

  Type *SecTy = HVC.getByteTy(ScLen);
  int NumSectors = (VSpan.extent() + ScLen - 1) / ScLen;
  bool DoAlign = !HVC.isZero(AlignVal);

  // Stores.
  ByteSpan ASpanV, ASpanM;

  // Return a vector value corresponding to the input value Val:
  // either <1 x Val> for scalar Val, or Val itself for vector Val.
  auto MakeVec = [](IRBuilderBase &Builder, Value *Val) -> Value * {
    Type *Ty = Val->getType();
    if (Ty->isVectorTy())
      return Val;
    auto *VecTy = VectorType::get(Ty, 1, /*Scalable=*/false);
    return Builder.CreateBitCast(Val, VecTy, "cst");
  };

  // Create an extra "undef" sector at the beginning and at the end.
  // They will be used as the left/right filler in the vlalign step.
  for (int Index = (DoAlign ? -1 : 0); Index != NumSectors + DoAlign; ++Index) {
    // For stores, the size of each section is an aligned vector length.
    // Adjust the store offsets relative to the section start offset.
    ByteSpan VSection =
        VSpan.section(Index * ScLen, ScLen).shift(-Index * ScLen);
    Value *Undef = UndefValue::get(SecTy);
    Value *Zero = HVC.getNullValue(SecTy);
    Value *AccumV = Undef;
    Value *AccumM = Zero;
    for (ByteSpan::Block &S : VSection) {
      Value *Pay = getPayload(S.Seg.Val);
      Value *Mask = HVC.rescale(Builder, MakeVec(Builder, getMask(S.Seg.Val)),
                                Pay->getType(), HVC.getByteTy());
      Value *PartM = HVC.insertb(Builder, Zero, HVC.vbytes(Builder, Mask),
                                 S.Seg.Start, S.Seg.Size, S.Pos);
      AccumM = Builder.CreateOr(AccumM, PartM);

      Value *PartV = HVC.insertb(Builder, Undef, HVC.vbytes(Builder, Pay),
                                 S.Seg.Start, S.Seg.Size, S.Pos);

      AccumV = Builder.CreateSelect(
          Builder.CreateICmp(CmpInst::ICMP_NE, PartM, Zero), PartV, AccumV);
    }
    ASpanV.Blocks.emplace_back(AccumV, ScLen, Index * ScLen);
    ASpanM.Blocks.emplace_back(AccumM, ScLen, Index * ScLen);
  }

  LLVM_DEBUG({
    dbgs() << "ASpanV before vlalign:\n" << ASpanV << '\n';
    dbgs() << "ASpanM before vlalign:\n" << ASpanM << '\n';
  });

  // vlalign
  if (DoAlign) {
    for (int Index = 1; Index != NumSectors + 2; ++Index) {
      Value *PrevV = ASpanV[Index - 1].Seg.Val, *ThisV = ASpanV[Index].Seg.Val;
      Value *PrevM = ASpanM[Index - 1].Seg.Val, *ThisM = ASpanM[Index].Seg.Val;
      assert(isSectorTy(PrevV->getType()) && isSectorTy(PrevM->getType()));
      ASpanV[Index - 1].Seg.Val = HVC.vlalignb(Builder, PrevV, ThisV, AlignVal);
      ASpanM[Index - 1].Seg.Val = HVC.vlalignb(Builder, PrevM, ThisM, AlignVal);
    }
  }

  LLVM_DEBUG({
    dbgs() << "ASpanV after vlalign:\n" << ASpanV << '\n';
    dbgs() << "ASpanM after vlalign:\n" << ASpanM << '\n';
  });

  auto createStore = [&](IRBuilderBase &Builder, const ByteSpan &ASpanV,
                         const ByteSpan &ASpanM, int Index, bool MakePred) {
    Value *Val = ASpanV[Index].Seg.Val;
    Value *Mask = ASpanM[Index].Seg.Val; // bytes
    if (HVC.isUndef(Val) || HVC.isZero(Mask))
      return;
    Value *Ptr =
        createAdjustedPointer(Builder, AlignAddr, SecTy, Index * ScLen);
    Value *Predicate =
        MakePred ? makeTestIfUnaligned(Builder, AlignVal, ScLen) : nullptr;

    // If vector shifting is potentially needed, accumulate metadata
    // from source sections of twice the store width.
    int Start = (Index - DoAlign) * ScLen;
    int Width = (1 + DoAlign) * ScLen;
    this->createStore(Builder, Val, Ptr, Predicate, ScLen,
                      HVC.vlsb(Builder, Mask),
                      VSpan.section(Start, Width).values());
  };

  for (int Index = 0; Index != NumSectors + DoAlign; ++Index) {
    createStore(Builder, ASpanV, ASpanM, Index, DoAlign && Index == NumSectors);
  }
}

auto AlignVectors::realignGroup(const MoveGroup &Move) const -> bool {
  LLVM_DEBUG(dbgs() << "Realigning group:\n" << Move << '\n');

  // TODO: Needs support for masked loads/stores of "scalar" vectors.
  if (!Move.IsHvx)
    return false;

  // Return the element with the maximum alignment from Range,
  // where GetValue obtains the value to compare from an element.
  auto getMaxOf = [](auto Range, auto GetValue) {
    return *llvm::max_element(Range, [&GetValue](auto &A, auto &B) {
      return GetValue(A) < GetValue(B);
    });
  };

  const AddrList &BaseInfos = AddrGroups.at(Move.Base);

  // Conceptually, there is a vector of N bytes covering the addresses
  // starting from the minimum offset (i.e. Base.Addr+Start). This vector
  // represents a contiguous memory region that spans all accessed memory
  // locations.
  // The correspondence between loaded or stored values will be expressed
  // in terms of this vector. For example, the 0th element of the vector
  // from the Base address info will start at byte Start from the beginning
  // of this conceptual vector.
  //
  // This vector will be loaded/stored starting at the nearest down-aligned
  // address, and the amount of the down-alignment will be AlignVal:
  //   valign(load_vector(align_down(Base+Start)), AlignVal)

  std::set<Instruction *> TestSet(Move.Main.begin(), Move.Main.end());
  AddrList MoveInfos;
  llvm::copy_if(
      BaseInfos, std::back_inserter(MoveInfos),
      [&TestSet](const AddrInfo &AI) { return TestSet.count(AI.Inst); });

  // Maximum alignment present in the whole address group.
  const AddrInfo &WithMaxAlign =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.HaveAlign; });
  Align MaxGiven = WithMaxAlign.HaveAlign;

  // Minimum offset present in the move address group.
  const AddrInfo &WithMinOffset =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return -AI.Offset; });

  const AddrInfo &WithMaxNeeded =
      getMaxOf(MoveInfos, [](const AddrInfo &AI) { return AI.NeedAlign; });
  Align MinNeeded = WithMaxNeeded.NeedAlign;

  // Set the builder's insertion point right before the load group, or
  // immediately after the store group. (Instructions in a store group are
  // listed in reverse order.)
  Instruction *InsertAt = Move.Main.front();
  if (!Move.IsLoad) {
    // There should be a terminator (which store isn't, but check anyways).
    assert(InsertAt->getIterator() != InsertAt->getParent()->end());
    InsertAt = &*std::next(InsertAt->getIterator());
  }

  IRBuilder Builder(InsertAt->getParent(), InsertAt->getIterator(),
                    InstSimplifyFolder(HVC.DL));
  Value *AlignAddr = nullptr; // Actual aligned address.
  Value *AlignVal = nullptr;  // Right-shift amount (for valign).

  if (MinNeeded <= MaxGiven) {
    int Start = WithMinOffset.Offset;
    int OffAtMax = WithMaxAlign.Offset;
    // Shift the offset of the maximally aligned instruction (OffAtMax)
    // back by just enough multiples of the required alignment to cover the
    // distance from Start to OffAtMax.
    // Calculate the address adjustment amount based on the address with the
    // maximum alignment. This is to allow a simple gep instruction instead
    // of potential bitcasts to i8*.
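    // A worked example (numbers invented): if Start = -40, OffAtMax = 0,
    // and MinNeeded = 64, then Adjust = -alignTo(40, 64) = -64, and
    // Diff = -40 - (0 - 64) = 24 becomes the valign shift amount,
    // satisfying 0 <= Diff < MinNeeded.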
    int Adjust = -alignTo(OffAtMax - Start, MinNeeded.value());
    AlignAddr = createAdjustedPointer(Builder, WithMaxAlign.Addr,
                                      WithMaxAlign.ValTy, Adjust, Move.Clones);
    int Diff = Start - (OffAtMax + Adjust);
    AlignVal = HVC.getConstInt(Diff);
    assert(Diff >= 0);
    assert(static_cast<decltype(MinNeeded.value())>(Diff) < MinNeeded.value());
  } else {
    // WithMinOffset is the lowest address in the group,
    // WithMinOffset.Addr = Base+Start.
    // Align instructions for both HVX (V6_valign) and scalar (S2_valignrb)
    // mask off unnecessary bits, so it's ok to just use the original pointer
    // as the alignment amount.
    // Do an explicit down-alignment of the address to avoid creating an
    // aligned instruction with an address that is not really aligned.
    AlignAddr =
        createAlignedPointer(Builder, WithMinOffset.Addr, WithMinOffset.ValTy,
                             MinNeeded.value(), Move.Clones);
    AlignVal =
        Builder.CreatePtrToInt(WithMinOffset.Addr, HVC.getIntTy(), "pti");
    if (auto *I = dyn_cast<Instruction>(AlignVal)) {
      for (auto [Old, New] : Move.Clones)
        I->replaceUsesOfWith(Old, New);
    }
  }

  ByteSpan VSpan;
  for (const AddrInfo &AI : MoveInfos) {
    VSpan.Blocks.emplace_back(AI.Inst, HVC.getSizeOf(AI.ValTy),
                              AI.Offset - WithMinOffset.Offset);
  }

  // The aligned loads/stores will use blocks that are either scalars,
  // or HVX vectors. Let "sector" be the unified term for such a block.
  // blend(scalar, vector) -> sector...
  int ScLen = Move.IsHvx ? HVC.HST.getVectorLength()
                         : std::max<int>(MinNeeded.value(), 4);
  assert(!Move.IsHvx || ScLen == 64 || ScLen == 128);
  assert(Move.IsHvx || ScLen == 4 || ScLen == 8);

  LLVM_DEBUG({
    dbgs() << "ScLen: " << ScLen << "\n";
    dbgs() << "AlignVal:" << *AlignVal << "\n";
    dbgs() << "AlignAddr:" << *AlignAddr << "\n";
    dbgs() << "VSpan:\n" << VSpan << '\n';
  });

  if (Move.IsLoad)
    realignLoadGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);
  else
    realignStoreGroup(Builder, VSpan, ScLen, AlignVal, AlignAddr);

  for (auto *Inst : Move.Main)
    Inst->eraseFromParent();

  return true;
}
1533 auto AlignVectors::makeTestIfUnaligned(IRBuilderBase &Builder, Value *AlignVal,
1534 int Alignment) const -> Value * {
1535 auto *AlignTy = AlignVal->getType();
1536 Value *And = Builder.CreateAnd(
1537 AlignVal, ConstantInt::get(AlignTy, Alignment - 1), "and");
1538 Value *Zero = ConstantInt::get(AlignTy, 0);
1539 return Builder.CreateICmpNE(And, Zero, "isz");
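// A sketch of the IR this emits (value names as produced above; the
// Alignment == 128 case is chosen for illustration):
//   %and = and i32 %AlignVal, 127
//   %isz = icmp ne i32 %and, 0
// i.e. the test is true when the address is not 128-byte aligned.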
1542 auto AlignVectors::isSectorTy(Type *Ty) const -> bool {
1543 if (!HVC.isByteVecTy(Ty))
1544 return false;
1545 int Size = HVC.getSizeOf(Ty);
1546 if (HVC.HST.isTypeForHVX(Ty))
1547 return Size == static_cast<int>(HVC.HST.getVectorLength());
1548 return Size == 4 || Size == 8;
1551 auto AlignVectors::run() -> bool {
1552 LLVM_DEBUG(dbgs() << "Running HVC::AlignVectors on " << HVC.F.getName()
1553 << '\n');
1554 if (!createAddressGroups())
1555 return false;
1557 LLVM_DEBUG({
1558 dbgs() << "Address groups(" << AddrGroups.size() << "):\n";
1559 for (auto &[In, AL] : AddrGroups) {
1560 for (const AddrInfo &AI : AL)
1561 dbgs() << "---\n" << AI << '\n';
1565 bool Changed = false;
1566 MoveList LoadGroups, StoreGroups;
1568 for (auto &G : AddrGroups) {
1569 llvm::append_range(LoadGroups, createLoadGroups(G.second));
1570 llvm::append_range(StoreGroups, createStoreGroups(G.second));
1573 LLVM_DEBUG({
1574 dbgs() << "\nLoad groups(" << LoadGroups.size() << "):\n";
1575 for (const MoveGroup &G : LoadGroups)
1576 dbgs() << G << "\n";
1577 dbgs() << "Store groups(" << StoreGroups.size() << "):\n";
1578 for (const MoveGroup &G : StoreGroups)
1579 dbgs() << G << "\n";
1582 // Cumulative limit on the number of groups.
1583 unsigned CountLimit = VAGroupCountLimit;
1584 if (CountLimit == 0)
1585 return false;
1587 if (LoadGroups.size() > CountLimit) {
1588 LoadGroups.resize(CountLimit);
1589 StoreGroups.clear();
1590 } else {
1591 unsigned StoreLimit = CountLimit - LoadGroups.size();
1592 if (StoreGroups.size() > StoreLimit)
1593 StoreGroups.resize(StoreLimit);
1596 for (auto &M : LoadGroups)
1597 Changed |= moveTogether(M);
1598 for (auto &M : StoreGroups)
1599 Changed |= moveTogether(M);
1601 LLVM_DEBUG(dbgs() << "After moveTogether:\n" << HVC.F);
1603 for (auto &M : LoadGroups)
1604 Changed |= realignGroup(M);
1605 for (auto &M : StoreGroups)
1606 Changed |= realignGroup(M);
1608 return Changed;
1611 // --- End AlignVectors
1613 // --- Begin HvxIdioms
1615 auto HvxIdioms::getNumSignificantBits(Value *V, Instruction *In) const
1616 -> std::pair<unsigned, Signedness> {
1617 unsigned Bits = HVC.getNumSignificantBits(V, In);
1618 // The significant bits are calculated including the sign bit. This may
1619 // add an extra bit for zero-extended values, e.g. (zext i32 to i64) may
1620 // result in 33 significant bits. To avoid extra words, skip the extra
1621 // sign bit, but keep information that the value is to be treated as
1622 // unsigned.
1623 KnownBits Known = HVC.getKnownBits(V, In);
1624 Signedness Sign = Signed;
1625 unsigned NumToTest = 0; // Number of bits used in test for unsignedness.
1626 if (isPowerOf2_32(Bits))
1627 NumToTest = Bits;
1628 else if (Bits > 1 && isPowerOf2_32(Bits - 1))
1629 NumToTest = Bits - 1;
1631 if (NumToTest != 0 && Known.Zero.ashr(NumToTest).isAllOnes()) {
1632 Sign = Unsigned;
1633 Bits = NumToTest;
1636 // If the top bit within the nearest power-of-2 width is known zero, this
1637 // value is positive. It could be treated as either signed or unsigned.
1638 if (unsigned Pow2 = PowerOf2Ceil(Bits); Pow2 != Bits) {
1639 if (Known.Zero.ashr(Pow2 - 1).isAllOnes())
1640 Sign = Positive;
1642 return {Bits, Sign};
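// Worked example (assuming the known bits of a zext i16 -> i32 value):
// Bits == 17 including the sign bit, so NumToTest == 16; Known.Zero covers
// bits 16..31, so the result is {16, Unsigned} rather than paying for an
// extra word just to carry the redundant sign bit.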
1645 auto HvxIdioms::canonSgn(SValue X, SValue Y) const
1646 -> std::pair<SValue, SValue> {
1647 // Canonicalize the signedness of X and Y, so that the result is one of:
1648 // S, S
1649 // U/P, S
1650 // U/P, U/P
1651 if (X.Sgn == Signed && Y.Sgn != Signed)
1652 std::swap(X, Y);
1653 return {X, Y};
1656 // Match
1657 // (X * Y) [>> N], or
1658 // ((X * Y) + (1 << M)) >> N
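// Schematically (illustrative operand names), a rounding multiply with
// Frac == 15 and RoundAt == 14 is matched from IR of the form:
//   %m = mul %x, %y
//   %a = add %m, splat(1 << 14)
//   %s = ashr %a, splat(15)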
1659 auto HvxIdioms::matchFxpMul(Instruction &In) const -> std::optional<FxpOp> {
1660 using namespace PatternMatch;
1661 auto *Ty = In.getType();
1663 if (!Ty->isVectorTy() || !Ty->getScalarType()->isIntegerTy())
1664 return std::nullopt;
1666 unsigned Width = cast<IntegerType>(Ty->getScalarType())->getBitWidth();
1668 FxpOp Op;
1669 Value *Exp = &In;
1671 // Fixed-point multiplication is always shifted right (except when the
1672 // fraction is 0 bits).
1673 auto m_Shr = [](auto &&V, auto &&S) {
1674 return m_CombineOr(m_LShr(V, S), m_AShr(V, S));
1677 const APInt *Qn = nullptr;
1678 if (Value * T; match(Exp, m_Shr(m_Value(T), m_APInt(Qn)))) {
1679 Op.Frac = Qn->getZExtValue();
1680 Exp = T;
1681 } else {
1682 Op.Frac = 0;
1685 if (Op.Frac > Width)
1686 return std::nullopt;
1688 // Check if there is rounding added.
1689 const APInt *C = nullptr;
1690 if (Value * T; Op.Frac > 0 && match(Exp, m_Add(m_Value(T), m_APInt(C)))) {
1691 uint64_t CV = C->getZExtValue();
1692 if (CV != 0 && !isPowerOf2_64(CV))
1693 return std::nullopt;
1694 if (CV != 0)
1695 Op.RoundAt = Log2_64(CV);
1696 Exp = T;
1699 // Check if the rest is a multiplication.
1700 if (match(Exp, m_Mul(m_Value(Op.X.Val), m_Value(Op.Y.Val)))) {
1701 Op.Opcode = Instruction::Mul;
1702 // FIXME: The information below is recomputed.
1703 Op.X.Sgn = getNumSignificantBits(Op.X.Val, &In).second;
1704 Op.Y.Sgn = getNumSignificantBits(Op.Y.Val, &In).second;
1705 Op.ResTy = cast<VectorType>(Ty);
1706 return Op;
1709 return std::nullopt;
1712 auto HvxIdioms::processFxpMul(Instruction &In, const FxpOp &Op) const
1713 -> Value * {
1714 assert(Op.X.Val->getType() == Op.Y.Val->getType());
1716 auto *VecTy = dyn_cast<VectorType>(Op.X.Val->getType());
1717 if (VecTy == nullptr)
1718 return nullptr;
1719 auto *ElemTy = cast<IntegerType>(VecTy->getElementType());
1720 unsigned ElemWidth = ElemTy->getBitWidth();
1722 // TODO: This can be relaxed after legalization is done pre-isel.
1723 if ((HVC.length(VecTy) * ElemWidth) % (8 * HVC.HST.getVectorLength()) != 0)
1724 return nullptr;
1726 // There are no special intrinsics that should be used for multiplying
1727 // signed 8-bit values, so just skip them. Normal codegen should handle
1728 // this just fine.
1729 if (ElemWidth <= 8)
1730 return nullptr;
1731 // Similarly, if this is just a multiplication that can be handled without
1732 // intervention, then leave it alone.
1733 if (ElemWidth <= 32 && Op.Frac == 0)
1734 return nullptr;
1736 auto [BitsX, SignX] = getNumSignificantBits(Op.X.Val, &In);
1737 auto [BitsY, SignY] = getNumSignificantBits(Op.Y.Val, &In);
1739 // TODO: Add multiplication of vectors by scalar registers (up to 4 bytes).
1741 Value *X = Op.X.Val, *Y = Op.Y.Val;
1742 IRBuilder Builder(In.getParent(), In.getIterator(),
1743 InstSimplifyFolder(HVC.DL));
1745 auto roundUpWidth = [](unsigned Width) -> unsigned {
1746 if (Width <= 32 && !isPowerOf2_32(Width)) {
1747 // If the element width is not a power of 2, round it up
1748 // to the next one. Do this for widths not exceeding 32.
1749 return PowerOf2Ceil(Width);
1751 if (Width > 32 && Width % 32 != 0) {
1752 // For wider elements, round it up to the multiple of 32.
1753 return alignTo(Width, 32u);
1755 return Width;
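// E.g. roundUpWidth(17) == 32 and roundUpWidth(24) == 32 (next power of 2),
// while roundUpWidth(40) == 64 (next multiple of 32).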
1758 BitsX = roundUpWidth(BitsX);
1759 BitsY = roundUpWidth(BitsY);
1761 // For elementwise multiplication the operands must have the same type, so
1762 // resize the elements of both inputs to the same width: the maximum of the
1763 // calculated significant bits.
1764 unsigned Width = std::max(BitsX, BitsY);
1766 auto *ResizeTy = VectorType::get(HVC.getIntTy(Width), VecTy);
1767 if (Width < ElemWidth) {
1768 X = Builder.CreateTrunc(X, ResizeTy, "trn");
1769 Y = Builder.CreateTrunc(Y, ResizeTy, "trn");
1770 } else if (Width > ElemWidth) {
1771 X = SignX == Signed ? Builder.CreateSExt(X, ResizeTy, "sxt")
1772 : Builder.CreateZExt(X, ResizeTy, "zxt");
1773 Y = SignY == Signed ? Builder.CreateSExt(Y, ResizeTy, "sxt")
1774 : Builder.CreateZExt(Y, ResizeTy, "zxt");
1777 assert(X->getType() == Y->getType() && X->getType() == ResizeTy);
1779 unsigned VecLen = HVC.length(ResizeTy);
1780 unsigned ChopLen = (8 * HVC.HST.getVectorLength()) / std::min(Width, 32u);
1782 SmallVector<Value *> Results;
1783 FxpOp ChopOp = Op;
1784 ChopOp.ResTy = VectorType::get(Op.ResTy->getElementType(), ChopLen, false);
1786 for (unsigned V = 0; V != VecLen / ChopLen; ++V) {
1787 ChopOp.X.Val = HVC.subvector(Builder, X, V * ChopLen, ChopLen);
1788 ChopOp.Y.Val = HVC.subvector(Builder, Y, V * ChopLen, ChopLen);
1789 Results.push_back(processFxpMulChopped(Builder, In, ChopOp));
1790 if (Results.back() == nullptr)
1791 break;
1794 if (Results.empty() || Results.back() == nullptr)
1795 return nullptr;
1797 Value *Cat = HVC.concat(Builder, Results);
1798 Value *Ext = SignX == Signed || SignY == Signed
1799 ? Builder.CreateSExt(Cat, VecTy, "sxt")
1800 : Builder.CreateZExt(Cat, VecTy, "zxt");
1801 return Ext;
1804 auto HvxIdioms::processFxpMulChopped(IRBuilderBase &Builder, Instruction &In,
1805 const FxpOp &Op) const -> Value * {
1806 assert(Op.X.Val->getType() == Op.Y.Val->getType());
1807 auto *InpTy = cast<VectorType>(Op.X.Val->getType());
1808 unsigned Width = InpTy->getScalarSizeInBits();
1809 bool Rounding = Op.RoundAt.has_value();
1811 if (!Op.RoundAt || *Op.RoundAt == Op.Frac - 1) {
1812 // The fixed-point intrinsics do signed multiplication.
1813 if (Width == Op.Frac + 1 && Op.X.Sgn != Unsigned && Op.Y.Sgn != Unsigned) {
1814 Value *QMul = nullptr;
1815 if (Width == 16) {
1816 QMul = createMulQ15(Builder, Op.X, Op.Y, Rounding);
1817 } else if (Width == 32) {
1818 QMul = createMulQ31(Builder, Op.X, Op.Y, Rounding);
1820 if (QMul != nullptr)
1821 return QMul;
1825 assert(Width >= 32 || isPowerOf2_32(Width)); // Width <= 32 => Width is 2^n
1826 assert(Width < 32 || Width % 32 == 0); // Width > 32 => Width is 32*k
1828 // If Width < 32, then it should really be 16.
1829 if (Width < 32) {
1830 if (Width < 16)
1831 return nullptr;
1832 // Getting here with Op.Frac == 0 isn't wrong, but suboptimal: here we
1833 // generate a full-precision product, which is unnecessary if there is
1834 // no shift.
1835 assert(Width == 16);
1836 assert(Op.Frac != 0 && "Unshifted mul should have been skipped");
1837 if (Op.Frac == 16) {
1838 // Multiply high
1839 if (Value *MulH = createMulH16(Builder, Op.X, Op.Y))
1840 return MulH;
1842 // Do full-precision multiply and shift.
1843 Value *Prod32 = createMul16(Builder, Op.X, Op.Y);
1844 if (Rounding) {
1845 Value *RoundVal = HVC.getConstSplat(Prod32->getType(), 1 << *Op.RoundAt);
1846 Prod32 = Builder.CreateAdd(Prod32, RoundVal, "add");
1849 Value *ShiftAmt = HVC.getConstSplat(Prod32->getType(), Op.Frac);
1850 Value *Shifted = Op.X.Sgn == Signed || Op.Y.Sgn == Signed
1851 ? Builder.CreateAShr(Prod32, ShiftAmt, "asr")
1852 : Builder.CreateLShr(Prod32, ShiftAmt, "lsr");
1853 return Builder.CreateTrunc(Shifted, InpTy, "trn");
1856 // Width >= 32
1858 // Break up the arguments Op.X and Op.Y into vectors of smaller widths
1859 // in preparation for doing the multiplication in 32-bit parts.
1860 auto WordX = HVC.splitVectorElements(Builder, Op.X.Val, /*ToWidth=*/32);
1861 auto WordY = HVC.splitVectorElements(Builder, Op.Y.Val, /*ToWidth=*/32);
1862 auto WordP = createMulLong(Builder, WordX, Op.X.Sgn, WordY, Op.Y.Sgn);
1864 auto *HvxWordTy = cast<VectorType>(WordP.front()->getType());
1866 // Add the optional rounding to the proper word.
1867 if (Op.RoundAt.has_value()) {
1868 Value *Zero = HVC.getNullValue(WordX[0]->getType());
1869 SmallVector<Value *> RoundV(WordP.size(), Zero);
1870 RoundV[*Op.RoundAt / 32] =
1871 HVC.getConstSplat(HvxWordTy, 1 << (*Op.RoundAt % 32));
1872 WordP = createAddLong(Builder, WordP, RoundV);
1875 // createRightShiftLong?
1877 // Shift all products right by Op.Frac.
1878 unsigned SkipWords = Op.Frac / 32;
1879 Constant *ShiftAmt = HVC.getConstSplat(HvxWordTy, Op.Frac % 32);
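// Illustration: for Op.Frac == 40, SkipWords == 1 and ShiftAmt == 8, so
// each output word below becomes fshr(Hi, Lo, 8), i.e.
// (Hi << 24) | (Lo >> 8) computed from adjacent 32-bit words.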
1881 for (int Dst = 0, End = WordP.size() - SkipWords; Dst != End; ++Dst) {
1882 int Src = Dst + SkipWords;
1883 Value *Lo = WordP[Src];
1884 if (Src + 1 < End) {
1885 Value *Hi = WordP[Src + 1];
1886 WordP[Dst] = Builder.CreateIntrinsic(HvxWordTy, Intrinsic::fshr,
1887 {Hi, Lo, ShiftAmt},
1888 /*FMFSource*/ nullptr, "int");
1889 } else {
1890 // The shift of the most significant word.
1891 WordP[Dst] = Builder.CreateAShr(Lo, ShiftAmt, "asr");
1894 if (SkipWords != 0)
1895 WordP.resize(WordP.size() - SkipWords);
1897 return HVC.joinVectorElements(Builder, WordP, Op.ResTy);
1900 auto HvxIdioms::createMulQ15(IRBuilderBase &Builder, SValue X, SValue Y,
1901 bool Rounding) const -> Value * {
1902 assert(X.Val->getType() == Y.Val->getType());
1903 assert(X.Val->getType()->getScalarType() == HVC.getIntTy(16));
1904 assert(HVC.HST.isHVXVectorType(EVT::getEVT(X.Val->getType(), false)));
1906 // There is no non-rounding intrinsic for i16.
1907 if (!Rounding || X.Sgn == Unsigned || Y.Sgn == Unsigned)
1908 return nullptr;
1910 auto V6_vmpyhvsrs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhvsrs);
1911 return HVC.createHvxIntrinsic(Builder, V6_vmpyhvsrs, X.Val->getType(),
1912 {X.Val, Y.Val});
1915 auto HvxIdioms::createMulQ31(IRBuilderBase &Builder, SValue X, SValue Y,
1916 bool Rounding) const -> Value * {
1917 Type *InpTy = X.Val->getType();
1918 assert(InpTy == Y.Val->getType());
1919 assert(InpTy->getScalarType() == HVC.getIntTy(32));
1920 assert(HVC.HST.isHVXVectorType(EVT::getEVT(InpTy, false)));
1922 if (X.Sgn == Unsigned || Y.Sgn == Unsigned)
1923 return nullptr;
1925 auto V6_vmpyewuh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyewuh);
1926 auto V6_vmpyo_acc = Rounding
1927 ? HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_rnd_sacc)
1928 : HVC.HST.getIntrinsicId(Hexagon::V6_vmpyowh_sacc);
1929 Value *V1 =
1930 HVC.createHvxIntrinsic(Builder, V6_vmpyewuh, InpTy, {X.Val, Y.Val});
1931 return HVC.createHvxIntrinsic(Builder, V6_vmpyo_acc, InpTy,
1932 {V1, X.Val, Y.Val});
1935 auto HvxIdioms::createAddCarry(IRBuilderBase &Builder, Value *X, Value *Y,
1936 Value *CarryIn) const
1937 -> std::pair<Value *, Value *> {
1938 assert(X->getType() == Y->getType());
1939 auto VecTy = cast<VectorType>(X->getType());
1940 if (VecTy == HvxI32Ty && HVC.HST.useHVXV62Ops()) {
1941 SmallVector<Value *> Args = {X, Y};
1942 Intrinsic::ID AddCarry;
1943 if (CarryIn == nullptr && HVC.HST.useHVXV66Ops()) {
1944 AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarryo);
1945 } else {
1946 AddCarry = HVC.HST.getIntrinsicId(Hexagon::V6_vaddcarry);
1947 if (CarryIn == nullptr)
1948 CarryIn = HVC.getNullValue(HVC.getBoolTy(HVC.length(VecTy)));
1949 Args.push_back(CarryIn);
1951 Value *Ret = HVC.createHvxIntrinsic(Builder, AddCarry,
1952 /*RetTy=*/nullptr, Args);
1953 Value *Result = Builder.CreateExtractValue(Ret, {0}, "ext");
1954 Value *CarryOut = Builder.CreateExtractValue(Ret, {1}, "ext");
1955 return {Result, CarryOut};
1958 // In other cases, do a regular add, and unsigned compare-less-than.
1959 // The carry-out can originate in two places: adding the carry-in or adding
1960 // the two input values.
1961 Value *Result1 = X; // Result1 = X + CarryIn
1962 if (CarryIn != nullptr) {
1963 unsigned Width = VecTy->getScalarSizeInBits();
1964 uint32_t Mask = 1;
1965 if (Width < 32) {
1966 for (unsigned i = 0, e = 32 / Width; i != e; ++i)
1967 Mask = (Mask << Width) | 1;
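// E.g. Width == 16 gives Mask == 0x00010001, i.e. a 1 in the low bit of
// each 16-bit lane once the scalar is replicated across the vector.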
1969 auto V6_vandqrt = HVC.HST.getIntrinsicId(Hexagon::V6_vandqrt);
1970 Value *ValueIn =
1971 HVC.createHvxIntrinsic(Builder, V6_vandqrt, /*RetTy=*/nullptr,
1972 {CarryIn, HVC.getConstInt(Mask)});
1973 Result1 = Builder.CreateAdd(X, ValueIn, "add");
1976 Value *CarryOut1 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result1, X, "cmp");
1977 Value *Result2 = Builder.CreateAdd(Result1, Y, "add");
1978 Value *CarryOut2 = Builder.CreateCmp(CmpInst::ICMP_ULT, Result2, Y, "cmp");
1979 return {Result2, Builder.CreateOr(CarryOut1, CarryOut2, "orb")};
1982 auto HvxIdioms::createMul16(IRBuilderBase &Builder, SValue X, SValue Y) const
1983 -> Value * {
1984 Intrinsic::ID V6_vmpyh = 0;
1985 std::tie(X, Y) = canonSgn(X, Y);
1987 if (X.Sgn == Signed) {
1988 V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhv);
1989 } else if (Y.Sgn == Signed) {
1990 // In vmpyhus the second operand is unsigned
1991 V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyhus);
1992 } else {
1993 V6_vmpyh = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhv);
1996 // i16*i16 -> i32 / interleaved
1997 Value *P =
1998 HVC.createHvxIntrinsic(Builder, V6_vmpyh, HvxP32Ty, {Y.Val, X.Val});
1999 // Deinterleave
2000 return HVC.vshuff(Builder, HVC.sublo(Builder, P), HVC.subhi(Builder, P));
2003 auto HvxIdioms::createMulH16(IRBuilderBase &Builder, SValue X, SValue Y) const
2004 -> Value * {
2005 Type *HvxI16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/false);
2007 if (HVC.HST.useHVXV69Ops()) {
2008 if (X.Sgn != Signed && Y.Sgn != Signed) {
2009 auto V6_vmpyuhvs = HVC.HST.getIntrinsicId(Hexagon::V6_vmpyuhvs);
2010 return HVC.createHvxIntrinsic(Builder, V6_vmpyuhvs, HvxI16Ty,
2011 {X.Val, Y.Val});
2015 Type *HvxP16Ty = HVC.getHvxTy(HVC.getIntTy(16), /*Pair=*/true);
2016 Value *Pair16 =
2017 Builder.CreateBitCast(createMul16(Builder, X, Y), HvxP16Ty, "cst");
2018 unsigned Len = HVC.length(HvxP16Ty) / 2;
2020 SmallVector<int, 128> PickOdd(Len);
2021 for (int i = 0; i != static_cast<int>(Len); ++i)
2022 PickOdd[i] = 2 * i + 1;
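// The odd i16 halves of the deinterleaved 32-bit products are their high
// halves (Hexagon is little-endian), so the shuffle below extracts
// exactly the "multiply high" result.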
2024 return Builder.CreateShuffleVector(
2025 HVC.sublo(Builder, Pair16), HVC.subhi(Builder, Pair16), PickOdd, "shf");
2028 auto HvxIdioms::createMul32(IRBuilderBase &Builder, SValue X, SValue Y) const
2029 -> std::pair<Value *, Value *> {
2030 assert(X.Val->getType() == Y.Val->getType());
2031 assert(X.Val->getType() == HvxI32Ty);
2033 Intrinsic::ID V6_vmpy_parts;
2034 std::tie(X, Y) = canonSgn(X, Y);
2036 if (X.Sgn == Signed) {
2037 V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyss_parts;
2038 } else if (Y.Sgn == Signed) {
2039 V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyus_parts;
2040 } else {
2041 V6_vmpy_parts = Intrinsic::hexagon_V6_vmpyuu_parts;
2044 Value *Parts = HVC.createHvxIntrinsic(Builder, V6_vmpy_parts, nullptr,
2045 {X.Val, Y.Val}, {HvxI32Ty});
2046 Value *Hi = Builder.CreateExtractValue(Parts, {0}, "ext");
2047 Value *Lo = Builder.CreateExtractValue(Parts, {1}, "ext");
2048 return {Lo, Hi};
2051 auto HvxIdioms::createAddLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
2052 ArrayRef<Value *> WordY) const
2053 -> SmallVector<Value *> {
2054 assert(WordX.size() == WordY.size());
2055 unsigned Idx = 0, Length = WordX.size();
2056 SmallVector<Value *> Sum(Length);
2058 while (Idx != Length) {
2059 if (HVC.isZero(WordX[Idx]))
2060 Sum[Idx] = WordY[Idx];
2061 else if (HVC.isZero(WordY[Idx]))
2062 Sum[Idx] = WordX[Idx];
2063 else
2064 break;
2065 ++Idx;
2068 Value *Carry = nullptr;
2069 for (; Idx != Length; ++Idx) {
2070 std::tie(Sum[Idx], Carry) =
2071 createAddCarry(Builder, WordX[Idx], WordY[Idx], Carry);
2074 // This drops the final carry beyond the highest word.
2075 return Sum;
2078 auto HvxIdioms::createMulLong(IRBuilderBase &Builder, ArrayRef<Value *> WordX,
2079 Signedness SgnX, ArrayRef<Value *> WordY,
2080 Signedness SgnY) const -> SmallVector<Value *> {
2081 SmallVector<SmallVector<Value *>> Products(WordX.size() + WordY.size());
2083 // WordX[i] * WordY[j] produces words i+j and i+j+1 of the result,
2084 // that is halves 2(i+j), 2(i+j)+1, 2(i+j)+2, 2(i+j)+3.
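// Illustration for two 64-bit inputs split into 32-bit words X = x1:x0 and
// Y = y1:y0 (schoolbook multiplication):
//   Products[0] = {lo(x0*y0)}
//   Products[1] = {hi(x0*y0), lo(x0*y1), lo(x1*y0)}
//   Products[2] = {hi(x0*y1), hi(x1*y0), lo(x1*y1)}
//   Products[3] = {hi(x1*y1)}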
2085 for (int i = 0, e = WordX.size(); i != e; ++i) {
2086 for (int j = 0, f = WordY.size(); j != f; ++j) {
2087 // Check the 4 halves that this multiplication can generate.
2088 Signedness SX = (i + 1 == e) ? SgnX : Unsigned;
2089 Signedness SY = (j + 1 == f) ? SgnY : Unsigned;
2090 auto [Lo, Hi] = createMul32(Builder, {WordX[i], SX}, {WordY[j], SY});
2091 Products[i + j + 0].push_back(Lo);
2092 Products[i + j + 1].push_back(Hi);
2096 Value *Zero = HVC.getNullValue(WordX[0]->getType());
2098 auto pop_back_or_zero = [Zero](auto &Vector) -> Value * {
2099 if (Vector.empty())
2100 return Zero;
2101 auto Last = Vector.back();
2102 Vector.pop_back();
2103 return Last;
2106 for (int i = 0, e = Products.size(); i != e; ++i) {
2107 while (Products[i].size() > 1) {
2108 Value *Carry = nullptr; // no carry-in
2109 for (int j = i; j != e; ++j) {
2110 auto &ProdJ = Products[j];
2111 auto [Sum, CarryOut] = createAddCarry(Builder, pop_back_or_zero(ProdJ),
2112 pop_back_or_zero(ProdJ), Carry);
2113 ProdJ.insert(ProdJ.begin(), Sum);
2114 Carry = CarryOut;
2119 SmallVector<Value *> WordP;
2120 for (auto &P : Products) {
2121 assert(P.size() == 1 && "Should have been added together");
2122 WordP.push_back(P.front());
2125 return WordP;
2128 auto HvxIdioms::run() -> bool {
2129 bool Changed = false;
2131 for (BasicBlock &B : HVC.F) {
2132 for (auto It = B.rbegin(); It != B.rend(); ++It) {
2133 if (auto Fxm = matchFxpMul(*It)) {
2134 Value *New = processFxpMul(*It, *Fxm);
2135 // Always report "changed" for now.
2136 Changed = true;
2137 if (!New)
2138 continue;
2139 bool StartOver = !isa<Instruction>(New);
2140 It->replaceAllUsesWith(New);
2141 RecursivelyDeleteTriviallyDeadInstructions(&*It, &HVC.TLI);
2142 It = StartOver ? B.rbegin()
2143 : cast<Instruction>(New)->getReverseIterator();
2144 Changed = true;
2149 return Changed;
2152 // --- End HvxIdioms
2154 auto HexagonVectorCombine::run() -> bool {
2155 if (DumpModule)
2156 dbgs() << "Module before HexagonVectorCombine\n" << *F.getParent();
2158 bool Changed = false;
2159 if (HST.useHVXOps()) {
2160 if (VAEnabled)
2161 Changed |= AlignVectors(*this).run();
2162 if (VIEnabled)
2163 Changed |= HvxIdioms(*this).run();
2166 if (DumpModule) {
2167 dbgs() << "Module " << (Changed ? "(modified)" : "(unchanged)")
2168 << " after HexagonVectorCombine\n"
2169 << *F.getParent();
2171 return Changed;
2174 auto HexagonVectorCombine::getIntTy(unsigned Width) const -> IntegerType * {
2175 return IntegerType::get(F.getContext(), Width);
2178 auto HexagonVectorCombine::getByteTy(int ElemCount) const -> Type * {
2179 assert(ElemCount >= 0);
2180 IntegerType *ByteTy = Type::getInt8Ty(F.getContext());
2181 if (ElemCount == 0)
2182 return ByteTy;
2183 return VectorType::get(ByteTy, ElemCount, /*Scalable=*/false);
2186 auto HexagonVectorCombine::getBoolTy(int ElemCount) const -> Type * {
2187 assert(ElemCount >= 0);
2188 IntegerType *BoolTy = Type::getInt1Ty(F.getContext());
2189 if (ElemCount == 0)
2190 return BoolTy;
2191 return VectorType::get(BoolTy, ElemCount, /*Scalable=*/false);
2194 auto HexagonVectorCombine::getConstInt(int Val, unsigned Width) const
2195 -> ConstantInt * {
2196 return ConstantInt::getSigned(getIntTy(Width), Val);
2199 auto HexagonVectorCombine::isZero(const Value *Val) const -> bool {
2200 if (auto *C = dyn_cast<Constant>(Val))
2201 return C->isZeroValue();
2202 return false;
2205 auto HexagonVectorCombine::getIntValue(const Value *Val) const
2206 -> std::optional<APInt> {
2207 if (auto *CI = dyn_cast<ConstantInt>(Val))
2208 return CI->getValue();
2209 return std::nullopt;
2212 auto HexagonVectorCombine::isUndef(const Value *Val) const -> bool {
2213 return isa<UndefValue>(Val);
2216 auto HexagonVectorCombine::isTrue(const Value *Val) const -> bool {
2217 return Val == ConstantInt::getTrue(Val->getType());
2220 auto HexagonVectorCombine::isFalse(const Value *Val) const -> bool {
2221 return isZero(Val);
2224 auto HexagonVectorCombine::getHvxTy(Type *ElemTy, bool Pair) const
2225 -> VectorType * {
2226 EVT ETy = EVT::getEVT(ElemTy, false);
2227 assert(ETy.isSimple() && "Invalid HVX element type");
2228 // Do not allow boolean types here: they don't have a fixed length.
2229 assert(HST.isHVXElementType(ETy.getSimpleVT(), /*IncludeBool=*/false) &&
2230 "Invalid HVX element type");
2231 unsigned HwLen = HST.getVectorLength();
2232 unsigned NumElems = (8 * HwLen) / ETy.getSizeInBits();
2233 return VectorType::get(ElemTy, Pair ? 2 * NumElems : NumElems,
2234 /*Scalable=*/false);
2237 auto HexagonVectorCombine::getSizeOf(const Value *Val, SizeKind Kind) const
2238 -> int {
2239 return getSizeOf(Val->getType(), Kind);
2242 auto HexagonVectorCombine::getSizeOf(const Type *Ty, SizeKind Kind) const
2243 -> int {
2244 auto *NcTy = const_cast<Type *>(Ty);
2245 switch (Kind) {
2246 case Store:
2247 return DL.getTypeStoreSize(NcTy).getFixedValue();
2248 case Alloc:
2249 return DL.getTypeAllocSize(NcTy).getFixedValue();
2251 llvm_unreachable("Unhandled SizeKind enum");
2254 auto HexagonVectorCombine::getTypeAlignment(Type *Ty) const -> int {
2255 // The actual type may be shorter than the HVX vector, so determine
2256 // the alignment based on subtarget info.
2257 if (HST.isTypeForHVX(Ty))
2258 return HST.getVectorLength();
2259 return DL.getABITypeAlign(Ty).value();
2262 auto HexagonVectorCombine::length(Value *Val) const -> size_t {
2263 return length(Val->getType());
2266 auto HexagonVectorCombine::length(Type *Ty) const -> size_t {
2267 auto *VecTy = dyn_cast<VectorType>(Ty);
2268 assert(VecTy && "Must be a vector type");
2269 return VecTy->getElementCount().getFixedValue();
2272 auto HexagonVectorCombine::getNullValue(Type *Ty) const -> Constant * {
2273 assert(Ty->isIntOrIntVectorTy());
2274 auto Zero = ConstantInt::get(Ty->getScalarType(), 0);
2275 if (auto *VecTy = dyn_cast<VectorType>(Ty))
2276 return ConstantVector::getSplat(VecTy->getElementCount(), Zero);
2277 return Zero;
2280 auto HexagonVectorCombine::getFullValue(Type *Ty) const -> Constant * {
2281 assert(Ty->isIntOrIntVectorTy());
2282 auto Minus1 = ConstantInt::get(Ty->getScalarType(), -1);
2283 if (auto *VecTy = dyn_cast<VectorType>(Ty))
2284 return ConstantVector::getSplat(VecTy->getElementCount(), Minus1);
2285 return Minus1;
2288 auto HexagonVectorCombine::getConstSplat(Type *Ty, int Val) const
2289 -> Constant * {
2290 assert(Ty->isVectorTy());
2291 auto VecTy = cast<VectorType>(Ty);
2292 Type *ElemTy = VecTy->getElementType();
2293 // Add support for floats if needed.
2294 auto *Splat = ConstantVector::getSplat(VecTy->getElementCount(),
2295 ConstantInt::get(ElemTy, Val));
2296 return Splat;
2299 auto HexagonVectorCombine::simplify(Value *V) const -> Value * {
2300 if (auto *In = dyn_cast<Instruction>(V)) {
2301 SimplifyQuery Q(DL, &TLI, &DT, &AC, In);
2302 return simplifyInstruction(In, Q);
2304 return nullptr;
2307 // Insert bytes [Start..Start+Length) of Src into Dst at byte Where.
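// E.g. (hypothetical 8-byte vectors) insertb(Dst, Src, /*Start=*/1,
// /*Length=*/2, /*Where=*/5) yields {Dst[0..4], Src[1], Src[2], Dst[7]}.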
2308 auto HexagonVectorCombine::insertb(IRBuilderBase &Builder, Value *Dst,
2309 Value *Src, int Start, int Length,
2310 int Where) const -> Value * {
2311 assert(isByteVecTy(Dst->getType()) && isByteVecTy(Src->getType()));
2312 int SrcLen = getSizeOf(Src);
2313 int DstLen = getSizeOf(Dst);
2314 assert(0 <= Start && Start + Length <= SrcLen);
2315 assert(0 <= Where && Where + Length <= DstLen);
2317 int P2Len = PowerOf2Ceil(SrcLen | DstLen);
2318 auto *Undef = UndefValue::get(getByteTy());
2319 Value *P2Src = vresize(Builder, Src, P2Len, Undef);
2320 Value *P2Dst = vresize(Builder, Dst, P2Len, Undef);
2322 SmallVector<int, 256> SMask(P2Len);
2323 for (int i = 0; i != P2Len; ++i) {
2324 // If i is in [Where, Where+Length), pick Src[Start+(i-Where)].
2325 // Otherwise, pick Dst[i].
2326 SMask[i] =
2327 (Where <= i && i < Where + Length) ? P2Len + Start + (i - Where) : i;
2330 Value *P2Insert = Builder.CreateShuffleVector(P2Dst, P2Src, SMask, "shf");
2331 return vresize(Builder, P2Insert, DstLen, Undef);
2334 auto HexagonVectorCombine::vlalignb(IRBuilderBase &Builder, Value *Lo,
2335 Value *Hi, Value *Amt) const -> Value * {
2336 assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
2337 if (isZero(Amt))
2338 return Hi;
2339 int VecLen = getSizeOf(Hi);
2340 if (auto IntAmt = getIntValue(Amt))
2341 return getElementRange(Builder, Lo, Hi, VecLen - IntAmt->getSExtValue(),
2342 VecLen);
2344 if (HST.isTypeForHVX(Hi->getType())) {
2345 assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
2346 "Expecting an exact HVX type");
2347 return createHvxIntrinsic(Builder, HST.getIntrinsicId(Hexagon::V6_vlalignb),
2348 Hi->getType(), {Hi, Lo, Amt});
2351 if (VecLen == 4) {
2352 Value *Pair = concat(Builder, {Lo, Hi});
2353 Value *Shift =
2354 Builder.CreateLShr(Builder.CreateShl(Pair, Amt, "shl"), 32, "lsr");
2355 Value *Trunc =
2356 Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()), "trn");
2357 return Builder.CreateBitCast(Trunc, Hi->getType(), "cst");
2359 if (VecLen == 8) {
2360 Value *Sub = Builder.CreateSub(getConstInt(VecLen), Amt, "sub");
2361 return vralignb(Builder, Lo, Hi, Sub);
2363 llvm_unreachable("Unexpected vector length");
2366 auto HexagonVectorCombine::vralignb(IRBuilderBase &Builder, Value *Lo,
2367 Value *Hi, Value *Amt) const -> Value * {
2368 assert(Lo->getType() == Hi->getType() && "Argument type mismatch");
2369 if (isZero(Amt))
2370 return Lo;
2371 int VecLen = getSizeOf(Lo);
2372 if (auto IntAmt = getIntValue(Amt))
2373 return getElementRange(Builder, Lo, Hi, IntAmt->getSExtValue(), VecLen);
2375 if (HST.isTypeForHVX(Lo->getType())) {
2376 assert(static_cast<unsigned>(VecLen) == HST.getVectorLength() &&
2377 "Expecting an exact HVX type");
2378 return createHvxIntrinsic(Builder, HST.getIntrinsicId(Hexagon::V6_valignb),
2379 Lo->getType(), {Hi, Lo, Amt});
2382 if (VecLen == 4) {
2383 Value *Pair = concat(Builder, {Lo, Hi});
2384 Value *Shift = Builder.CreateLShr(Pair, Amt, "lsr");
2385 Value *Trunc =
2386 Builder.CreateTrunc(Shift, Type::getInt32Ty(F.getContext()), "trn");
2387 return Builder.CreateBitCast(Trunc, Lo->getType(), "cst");
2389 if (VecLen == 8) {
2390 Type *Int64Ty = Type::getInt64Ty(F.getContext());
2391 Value *Lo64 = Builder.CreateBitCast(Lo, Int64Ty, "cst");
2392 Value *Hi64 = Builder.CreateBitCast(Hi, Int64Ty, "cst");
2393 Value *Call = Builder.CreateIntrinsic(Intrinsic::hexagon_S2_valignrb, {},
2394 {Hi64, Lo64, Amt},
2395 /*FMFSource=*/nullptr, "cup");
2396 return Builder.CreateBitCast(Call, Lo->getType(), "cst");
2398 llvm_unreachable("Unexpected vector length");
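// Illustration with byte vectors Lo = {l0..l7}, Hi = {h0..h7}:
//   vralignb(Lo, Hi, 3) == {l3, l4, l5, l6, l7, h0, h1, h2}
//   vlalignb(Lo, Hi, 3) == {l5, l6, l7, h0, h1, h2, h3, h4}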
2401 // Concatenates a sequence of vectors of the same type.
2402 auto HexagonVectorCombine::concat(IRBuilderBase &Builder,
2403 ArrayRef<Value *> Vecs) const -> Value * {
2404 assert(!Vecs.empty());
2405 SmallVector<int, 256> SMask;
2406 std::vector<Value *> Work[2];
2407 int ThisW = 0, OtherW = 1;
2409 Work[ThisW].assign(Vecs.begin(), Vecs.end());
2410 while (Work[ThisW].size() > 1) {
2411 auto *Ty = cast<VectorType>(Work[ThisW].front()->getType());
2412 SMask.resize(length(Ty) * 2);
2413 std::iota(SMask.begin(), SMask.end(), 0);
2415 Work[OtherW].clear();
2416 if (Work[ThisW].size() % 2 != 0)
2417 Work[ThisW].push_back(UndefValue::get(Ty));
2418 for (int i = 0, e = Work[ThisW].size(); i < e; i += 2) {
2419 Value *Joined = Builder.CreateShuffleVector(
2420 Work[ThisW][i], Work[ThisW][i + 1], SMask, "shf");
2421 Work[OtherW].push_back(Joined);
2423 std::swap(ThisW, OtherW);
2426 // Since there may have been some undefs appended to make shuffle operands
2427 // have the same type, perform the last shuffle to only pick the original
2428 // elements.
2429 SMask.resize(Vecs.size() * length(Vecs.front()->getType()));
2430 std::iota(SMask.begin(), SMask.end(), 0);
2431 Value *Total = Work[ThisW].front();
2432 return Builder.CreateShuffleVector(Total, SMask, "shf");
2435 auto HexagonVectorCombine::vresize(IRBuilderBase &Builder, Value *Val,
2436 int NewSize, Value *Pad) const -> Value * {
2437 assert(isa<VectorType>(Val->getType()));
2438 auto *ValTy = cast<VectorType>(Val->getType());
2439 assert(ValTy->getElementType() == Pad->getType());
2441 int CurSize = length(ValTy);
2442 if (CurSize == NewSize)
2443 return Val;
2444 // Truncate?
2445 if (CurSize > NewSize)
2446 return getElementRange(Builder, Val, /*Ignored*/ Val, 0, NewSize);
2447 // Extend.
2448 SmallVector<int, 128> SMask(NewSize);
2449 std::iota(SMask.begin(), SMask.begin() + CurSize, 0);
2450 std::fill(SMask.begin() + CurSize, SMask.end(), CurSize);
2451 Value *PadVec = Builder.CreateVectorSplat(CurSize, Pad, "spt");
2452 return Builder.CreateShuffleVector(Val, PadVec, SMask, "shf");
2455 auto HexagonVectorCombine::rescale(IRBuilderBase &Builder, Value *Mask,
2456 Type *FromTy, Type *ToTy) const -> Value * {
2457 // Mask is a vector <N x i1>, where each element corresponds to an
2458 // element of FromTy. Remap it so that each element will correspond
2459 // to an element of ToTy.
2460 assert(isa<VectorType>(Mask->getType()));
2462 Type *FromSTy = FromTy->getScalarType();
2463 Type *ToSTy = ToTy->getScalarType();
2464 if (FromSTy == ToSTy)
2465 return Mask;
2467 int FromSize = getSizeOf(FromSTy);
2468 int ToSize = getSizeOf(ToSTy);
2469 assert(FromSize % ToSize == 0 || ToSize % FromSize == 0);
2471 auto *MaskTy = cast<VectorType>(Mask->getType());
2472 int FromCount = length(MaskTy);
2473 int ToCount = (FromCount * FromSize) / ToSize;
2474 assert((FromCount * FromSize) % ToSize == 0);
2476 auto *FromITy = getIntTy(FromSize * 8);
2477 auto *ToITy = getIntTy(ToSize * 8);
2479 // Mask <N x i1> -> sext to <N x FromTy> -> bitcast to <M x ToTy> ->
2480 // -> trunc to <M x i1>.
2481 Value *Ext = Builder.CreateSExt(
2482 Mask, VectorType::get(FromITy, FromCount, /*Scalable=*/false), "sxt");
2483 Value *Cast = Builder.CreateBitCast(
2484 Ext, VectorType::get(ToITy, ToCount, /*Scalable=*/false), "cst");
2485 return Builder.CreateTrunc(
2486 Cast, VectorType::get(getBoolTy(), ToCount, /*Scalable=*/false), "trn");
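// Illustration: rescaling a mask from i16 elements to i8 elements turns
// <64 x i1> into <128 x i1>: the sext makes each lane 0 or 0xFFFF, the
// bitcast views it as bytes, and the trunc duplicates each source bit
// for the two bytes it covers.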
2489 // Bitcast to bytes, and return least significant bits.
2490 auto HexagonVectorCombine::vlsb(IRBuilderBase &Builder, Value *Val) const
2491 -> Value * {
2492 Type *ScalarTy = Val->getType()->getScalarType();
2493 if (ScalarTy == getBoolTy())
2494 return Val;
2496 Value *Bytes = vbytes(Builder, Val);
2497 if (auto *VecTy = dyn_cast<VectorType>(Bytes->getType()))
2498 return Builder.CreateTrunc(Bytes, getBoolTy(getSizeOf(VecTy)), "trn");
2499 // If Bytes is a scalar (i.e. Val was a scalar byte), return i1, not
2500 // <1 x i1>.
2501 return Builder.CreateTrunc(Bytes, getBoolTy(), "trn");
2504 // Bitcast to bytes for non-bool. For bool, convert i1 -> i8.
2505 auto HexagonVectorCombine::vbytes(IRBuilderBase &Builder, Value *Val) const
2506 -> Value * {
2507 Type *ScalarTy = Val->getType()->getScalarType();
2508 if (ScalarTy == getByteTy())
2509 return Val;
2511 if (ScalarTy != getBoolTy())
2512 return Builder.CreateBitCast(Val, getByteTy(getSizeOf(Val)), "cst");
2513 // For bool, return a sext from i1 to i8.
2514 if (auto *VecTy = dyn_cast<VectorType>(Val->getType()))
2515 return Builder.CreateSExt(Val, VectorType::get(getByteTy(), VecTy), "sxt");
2516 return Builder.CreateSExt(Val, getByteTy(), "sxt");
2519 auto HexagonVectorCombine::subvector(IRBuilderBase &Builder, Value *Val,
2520 unsigned Start, unsigned Length) const
2521 -> Value * {
2522 assert(Start + Length <= length(Val));
2523 return getElementRange(Builder, Val, /*Ignored*/ Val, Start, Length);
2526 auto HexagonVectorCombine::sublo(IRBuilderBase &Builder, Value *Val) const
2527 -> Value * {
2528 size_t Len = length(Val);
2529 assert(Len % 2 == 0 && "Length should be even");
2530 return subvector(Builder, Val, 0, Len / 2);
2533 auto HexagonVectorCombine::subhi(IRBuilderBase &Builder, Value *Val) const
2534 -> Value * {
2535 size_t Len = length(Val);
2536 assert(Len % 2 == 0 && "Length should be even");
2537 return subvector(Builder, Val, Len / 2, Len / 2);
2540 auto HexagonVectorCombine::vdeal(IRBuilderBase &Builder, Value *Val0,
2541 Value *Val1) const -> Value * {
2542 assert(Val0->getType() == Val1->getType());
2543 int Len = length(Val0);
2544 SmallVector<int, 128> Mask(2 * Len);
2546 for (int i = 0; i != Len; ++i) {
2547 Mask[i] = 2 * i; // Even
2548 Mask[i + Len] = 2 * i + 1; // Odd
2550 return Builder.CreateShuffleVector(Val0, Val1, Mask, "shf");
2553 auto HexagonVectorCombine::vshuff(IRBuilderBase &Builder, Value *Val0,
2554 Value *Val1) const -> Value * { //
2555 assert(Val0->getType() == Val1->getType());
2556 int Len = length(Val0);
2557 SmallVector<int, 128> Mask(2 * Len);
2559 for (int i = 0; i != Len; ++i) {
2560 Mask[2 * i + 0] = i; // Val0
2561 Mask[2 * i + 1] = i + Len; // Val1
2563 return Builder.CreateShuffleVector(Val0, Val1, Mask, "shf");
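// Illustration with Len == 4:
//   vdeal ({a0,a1,a2,a3}, {b0,b1,b2,b3}) == {a0,a2,b0,b2, a1,a3,b1,b3}
//   vshuff({a0,a1,a2,a3}, {b0,b1,b2,b3}) == {a0,b0,a1,b1, a2,b2,a3,b3}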
2566 auto HexagonVectorCombine::createHvxIntrinsic(IRBuilderBase &Builder,
2567 Intrinsic::ID IntID, Type *RetTy,
2568 ArrayRef<Value *> Args,
2569 ArrayRef<Type *> ArgTys,
2570 ArrayRef<Value *> MDSources) const
2571 -> Value * {
2572 auto getCast = [&](IRBuilderBase &Builder, Value *Val,
2573 Type *DestTy) -> Value * {
2574 Type *SrcTy = Val->getType();
2575 if (SrcTy == DestTy)
2576 return Val;
2578 // A non-HVX type would have to be a scalar that already has a valid
2579 // (matching) type, so only HVX values ever need a cast here.
2580 assert(HST.isTypeForHVX(SrcTy, /*IncludeBool=*/true));
2582 Type *BoolTy = Type::getInt1Ty(F.getContext());
2583 if (cast<VectorType>(SrcTy)->getElementType() != BoolTy)
2584 return Builder.CreateBitCast(Val, DestTy, "cst");
2586 // Predicate HVX vector.
2587 unsigned HwLen = HST.getVectorLength();
2588 Intrinsic::ID TC = HwLen == 64 ? Intrinsic::hexagon_V6_pred_typecast
2589 : Intrinsic::hexagon_V6_pred_typecast_128B;
2590 return Builder.CreateIntrinsic(TC, {DestTy, Val->getType()}, {Val},
2591 /*FMFSource=*/nullptr, "cup");
2594 Function *IntrFn =
2595 Intrinsic::getOrInsertDeclaration(F.getParent(), IntID, ArgTys);
2596 FunctionType *IntrTy = IntrFn->getFunctionType();
2598 SmallVector<Value *, 4> IntrArgs;
2599 for (int i = 0, e = Args.size(); i != e; ++i) {
2600 Value *A = Args[i];
2601 Type *T = IntrTy->getParamType(i);
2602 if (A->getType() != T) {
2603 IntrArgs.push_back(getCast(Builder, A, T));
2604 } else {
2605 IntrArgs.push_back(A);
2608 StringRef MaybeName = !IntrTy->getReturnType()->isVoidTy() ? "cup" : "";
2609 CallInst *Call = Builder.CreateCall(IntrFn, IntrArgs, MaybeName);
2611 MemoryEffects ME = Call->getAttributes().getMemoryEffects();
2612 if (!ME.doesNotAccessMemory() && !ME.onlyAccessesInaccessibleMem())
2613 propagateMetadata(Call, MDSources);
2615 Type *CallTy = Call->getType();
2616 if (RetTy == nullptr || CallTy == RetTy)
2617 return Call;
2618 // Scalar types should have RetTy matching the call return type.
2619 assert(HST.isTypeForHVX(CallTy, /*IncludeBool=*/true));
2620 return getCast(Builder, Call, RetTy);
2623 auto HexagonVectorCombine::splitVectorElements(IRBuilderBase &Builder,
2624 Value *Vec,
2625 unsigned ToWidth) const
2626 -> SmallVector<Value *> {
2627 // Break a vector of wide elements into a series of vectors with narrow
2628 // elements:
2629 // (...c0:b0:a0, ...c1:b1:a1, ...c2:b2:a2, ...)
2630 // -->
2631 // (a0, a1, a2, ...) // lowest "ToWidth" bits
2632 // (b0, b1, b2, ...) // the next lowest...
2633 // (c0, c1, c2, ...) // ...
2634 // ...
2636 // The number of elements in each resulting vector is the same as
2637 // in the original vector.
2639 auto *VecTy = cast<VectorType>(Vec->getType());
2640 assert(VecTy->getElementType()->isIntegerTy());
2641 unsigned FromWidth = VecTy->getScalarSizeInBits();
2642 assert(isPowerOf2_32(ToWidth) && isPowerOf2_32(FromWidth));
2643 assert(ToWidth <= FromWidth && "Breaking up into wider elements?");
2644 unsigned NumResults = FromWidth / ToWidth;
2646 SmallVector<Value *> Results(NumResults);
2647 Results[0] = Vec;
2648 unsigned Length = length(VecTy);
2650 // Do it by splitting in half, since those operations correspond to deal
2651 // instructions.
2652 auto splitInHalf = [&](unsigned Begin, unsigned End, auto splitFunc) -> void {
2653 // Take V = Results[Begin], split it in L, H.
2654 // Store Results[Begin] = L, Results[(Begin+End)/2] = H
2655 // Call itself recursively: split(Begin, Half), split(Half, End)
2656 if (Begin + 1 == End)
2657 return;
2659 Value *Val = Results[Begin];
2660 unsigned Width = Val->getType()->getScalarSizeInBits();
2662 auto *VTy = VectorType::get(getIntTy(Width / 2), 2 * Length, false);
2663 Value *VVal = Builder.CreateBitCast(Val, VTy, "cst");
2665 Value *Res = vdeal(Builder, sublo(Builder, VVal), subhi(Builder, VVal));
2667 unsigned Half = (Begin + End) / 2;
2668 Results[Begin] = sublo(Builder, Res);
2669 Results[Half] = subhi(Builder, Res);
2671 splitFunc(Begin, Half, splitFunc);
2672 splitFunc(Half, End, splitFunc);
2675 splitInHalf(0, NumResults, splitInHalf);
2676 return Results;
2679 auto HexagonVectorCombine::joinVectorElements(IRBuilderBase &Builder,
2680 ArrayRef<Value *> Values,
2681 VectorType *ToType) const
2682 -> Value * {
2683 assert(ToType->getElementType()->isIntegerTy());
2685 // If the list of values does not have a power-of-2 number of elements,
2686 // append copies of the sign bit to it to make the size 2^n.
2687 // The reason for this is that the values will be joined in pairs, because
2688 // otherwise the shuffles will result in convoluted code. With pairwise
2689 // joins, the shuffles will hopefully be folded into a perfect shuffle.
2690 // The output will need to be sign-extended to a type with element width
2691 // being a power-of-2 anyway.
2692 SmallVector<Value *> Inputs(Values);
2694 unsigned ToWidth = ToType->getScalarSizeInBits();
2695 unsigned Width = Inputs.front()->getType()->getScalarSizeInBits();
2696 assert(Width <= ToWidth);
2697 assert(isPowerOf2_32(Width) && isPowerOf2_32(ToWidth));
2698 unsigned Length = length(Inputs.front()->getType());
2700 unsigned NeedInputs = ToWidth / Width;
2701 if (Inputs.size() != NeedInputs) {
2702 // Having too many inputs is ok: drop the high bits (usual wrap-around).
2703 // If there are too few, fill them with the sign bit.
2704 Value *Last = Inputs.back();
2705 Value *Sign = Builder.CreateAShr(
2706 Last, getConstSplat(Last->getType(), Width - 1), "asr");
2707 Inputs.resize(NeedInputs, Sign);
2710 while (Inputs.size() > 1) {
2711 Width *= 2;
2712 auto *VTy = VectorType::get(getIntTy(Width), Length, false);
2713 for (int i = 0, e = Inputs.size(); i < e; i += 2) {
2714 Value *Res = vshuff(Builder, Inputs[i], Inputs[i + 1]);
2715 Inputs[i / 2] = Builder.CreateBitCast(Res, VTy, "cst");
2717 Inputs.resize(Inputs.size() / 2);
2720 assert(Inputs.front()->getType() == ToType);
2721 return Inputs.front();
2724 auto HexagonVectorCombine::calculatePointerDifference(Value *Ptr0,
2725 Value *Ptr1) const
2726 -> std::optional<int> {
2727 // Try SCEV first.
2728 const SCEV *Scev0 = SE.getSCEV(Ptr0);
2729 const SCEV *Scev1 = SE.getSCEV(Ptr1);
2730 const SCEV *ScevDiff = SE.getMinusSCEV(Scev0, Scev1);
2731 if (auto *Const = dyn_cast<SCEVConstant>(ScevDiff)) {
2732 APInt V = Const->getAPInt();
2733 if (V.isSignedIntN(8 * sizeof(int)))
2734 return static_cast<int>(V.getSExtValue());
2737 struct Builder : IRBuilder<> {
2738 Builder(BasicBlock *B) : IRBuilder<>(B->getTerminator()) {}
2739 ~Builder() {
2740 for (Instruction *I : llvm::reverse(ToErase))
2741 I->eraseFromParent();
2743 SmallVector<Instruction *, 8> ToErase;
2746 #define CallBuilder(B, F) \
2747 [&](auto &B_) { \
2748 Value *V = B_.F; \
2749 if (auto *I = dyn_cast<Instruction>(V)) \
2750 B_.ToErase.push_back(I); \
2751 return V; \
2752 }(B)
2754 auto Simplify = [this](Value *V) {
2755 if (Value *S = simplify(V))
2756 return S;
2757 return V;
2760 auto StripBitCast = [](Value *V) {
2761 while (auto *C = dyn_cast<BitCastInst>(V))
2762 V = C->getOperand(0);
2763 return V;
2766 Ptr0 = StripBitCast(Ptr0);
2767 Ptr1 = StripBitCast(Ptr1);
2768 if (!isa<GetElementPtrInst>(Ptr0) || !isa<GetElementPtrInst>(Ptr1))
2769 return std::nullopt;
2771 auto *Gep0 = cast<GetElementPtrInst>(Ptr0);
2772 auto *Gep1 = cast<GetElementPtrInst>(Ptr1);
2773 if (Gep0->getPointerOperand() != Gep1->getPointerOperand())
2774 return std::nullopt;
2775 if (Gep0->getSourceElementType() != Gep1->getSourceElementType())
2776 return std::nullopt;
2778 Builder B(Gep0->getParent());
2779 int Scale = getSizeOf(Gep0->getSourceElementType(), Alloc);
2781 // FIXME: for now only check GEPs with a single index.
2782 if (Gep0->getNumOperands() != 2 || Gep1->getNumOperands() != 2)
2783 return std::nullopt;
2785 Value *Idx0 = Gep0->getOperand(1);
2786 Value *Idx1 = Gep1->getOperand(1);
2788 // First, try to simplify the subtraction directly.
2789 if (auto *Diff = dyn_cast<ConstantInt>(
2790 Simplify(CallBuilder(B, CreateSub(Idx0, Idx1)))))
2791 return Diff->getSExtValue() * Scale;
2793 KnownBits Known0 = getKnownBits(Idx0, Gep0);
2794 KnownBits Known1 = getKnownBits(Idx1, Gep1);
2795 APInt Unknown = ~(Known0.Zero | Known0.One) | ~(Known1.Zero | Known1.One);
2796 if (Unknown.isAllOnes())
2797 return std::nullopt;
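// Split each index into its unknown-bit and known-bit parts. In two's
// complement arithmetic, x == (x & Unknown) + (x & ~Unknown), so the
// difference Idx0 - Idx1 decomposes (mod 2^N) into the difference of the
// unknown parts plus the difference of the known parts, computed below.
// If both partial differences simplify to constants, the total is known.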
2799 Value *MaskU = ConstantInt::get(Idx0->getType(), Unknown);
2800 Value *AndU0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskU)));
2801 Value *AndU1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskU)));
2802 Value *SubU = Simplify(CallBuilder(B, CreateSub(AndU0, AndU1)));
2803 int Diff0 = 0;
2804 if (auto *C = dyn_cast<ConstantInt>(SubU)) {
2805 Diff0 = C->getSExtValue();
2806 } else {
2807 return std::nullopt;
2810 Value *MaskK = ConstantInt::get(MaskU->getType(), ~Unknown);
2811 Value *AndK0 = Simplify(CallBuilder(B, CreateAnd(Idx0, MaskK)));
2812 Value *AndK1 = Simplify(CallBuilder(B, CreateAnd(Idx1, MaskK)));
2813 Value *SubK = Simplify(CallBuilder(B, CreateSub(AndK0, AndK1)));
2814 int Diff1 = 0;
2815 if (auto *C = dyn_cast<ConstantInt>(SubK)) {
2816 Diff1 = C->getSExtValue();
2817 } else {
2818 return std::nullopt;
2821 return (Diff0 + Diff1) * Scale;
2823 #undef CallBuilder
2826 auto HexagonVectorCombine::getNumSignificantBits(const Value *V,
2827 const Instruction *CtxI) const
2828 -> unsigned {
2829 return ComputeMaxSignificantBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
2832 auto HexagonVectorCombine::getKnownBits(const Value *V,
2833 const Instruction *CtxI) const
2834 -> KnownBits {
2835 return computeKnownBits(V, DL, /*Depth=*/0, &AC, CtxI, &DT);
2838 auto HexagonVectorCombine::isSafeToClone(const Instruction &In) const -> bool {
2839 if (In.mayHaveSideEffects() || In.isAtomic() || In.isVolatile() ||
2840 In.isFenceLike() || In.mayReadOrWriteMemory()) {
2841 return false;
2843 if (isa<CallBase>(In) || isa<AllocaInst>(In))
2844 return false;
2845 return true;
2848 template <typename T>
2849 auto HexagonVectorCombine::isSafeToMoveBeforeInBB(const Instruction &In,
2850 BasicBlock::const_iterator To,
2851 const T &IgnoreInsts) const
2852 -> bool {
2853 auto getLocOrNone =
2854 [this](const Instruction &I) -> std::optional<MemoryLocation> {
2855 if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
2856 switch (II->getIntrinsicID()) {
2857 case Intrinsic::masked_load:
2858 return MemoryLocation::getForArgument(II, 0, TLI);
2859 case Intrinsic::masked_store:
2860 return MemoryLocation::getForArgument(II, 1, TLI);
2863 return MemoryLocation::getOrNone(&I);
2866 // The source and the destination must be in the same basic block.
2867 const BasicBlock &Block = *In.getParent();
2868 assert(Block.begin() == To || Block.end() == To || To->getParent() == &Block);
2869 // No PHIs.
2870 if (isa<PHINode>(In) || (To != Block.end() && isa<PHINode>(*To)))
2871 return false;
2873 if (!mayHaveNonDefUseDependency(In))
2874 return true;
2875 bool MayWrite = In.mayWriteToMemory();
2876 auto MaybeLoc = getLocOrNone(In);
2878 auto From = In.getIterator();
2879 if (From == To)
2880 return true;
2881 bool MoveUp = (To != Block.end() && To->comesBefore(&In));
2882 auto Range =
2883 MoveUp ? std::make_pair(To, From) : std::make_pair(std::next(From), To);
2884 for (auto It = Range.first; It != Range.second; ++It) {
2885 const Instruction &I = *It;
2886 if (llvm::is_contained(IgnoreInsts, &I))
2887 continue;
2888 // The assume intrinsic can be ignored.
2889 if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
2890 if (II->getIntrinsicID() == Intrinsic::assume)
2891 continue;
2893 // Parts based on isSafeToMoveBefore from CodeMoverUtils.cpp.
2894 if (I.mayThrow())
2895 return false;
2896 if (auto *CB = dyn_cast<CallBase>(&I)) {
2897 if (!CB->hasFnAttr(Attribute::WillReturn))
2898 return false;
2899 if (!CB->hasFnAttr(Attribute::NoSync))
2900 return false;
2902 if (I.mayReadOrWriteMemory()) {
2903 auto MaybeLocI = getLocOrNone(I);
2904 if (MayWrite || I.mayWriteToMemory()) {
2905 if (!MaybeLoc || !MaybeLocI)
2906 return false;
2907 if (!AA.isNoAlias(*MaybeLoc, *MaybeLocI))
2908 return false;
2912 return true;
2915 auto HexagonVectorCombine::isByteVecTy(Type *Ty) const -> bool {
2916 if (auto *VecTy = dyn_cast<VectorType>(Ty))
2917 return VecTy->getElementType() == getByteTy();
2918 return false;
2921 auto HexagonVectorCombine::getElementRange(IRBuilderBase &Builder, Value *Lo,
2922 Value *Hi, int Start,
2923 int Length) const -> Value * {
2924 assert(0 <= Start && size_t(Start + Length) < length(Lo) + length(Hi));
2925 SmallVector<int, 128> SMask(Length);
2926 std::iota(SMask.begin(), SMask.end(), Start);
2927 return Builder.CreateShuffleVector(Lo, Hi, SMask, "shf");
2930 // Pass management.
2932 namespace llvm {
2933 void initializeHexagonVectorCombineLegacyPass(PassRegistry &);
2934 FunctionPass *createHexagonVectorCombineLegacyPass();
2935 } // namespace llvm
2937 namespace {
2938 class HexagonVectorCombineLegacy : public FunctionPass {
2939 public:
2940 static char ID;
2942 HexagonVectorCombineLegacy() : FunctionPass(ID) {}
2944 StringRef getPassName() const override { return "Hexagon Vector Combine"; }
2946 void getAnalysisUsage(AnalysisUsage &AU) const override {
2947 AU.setPreservesCFG();
2948 AU.addRequired<AAResultsWrapperPass>();
2949 AU.addRequired<AssumptionCacheTracker>();
2950 AU.addRequired<DominatorTreeWrapperPass>();
2951 AU.addRequired<ScalarEvolutionWrapperPass>();
2952 AU.addRequired<TargetLibraryInfoWrapperPass>();
2953 AU.addRequired<TargetPassConfig>();
2954 FunctionPass::getAnalysisUsage(AU);
2957 bool runOnFunction(Function &F) override {
2958 if (skipFunction(F))
2959 return false;
2960 AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2961 AssumptionCache &AC =
2962 getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2963 DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2964 ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2965 TargetLibraryInfo &TLI =
2966 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2967 auto &TM = getAnalysis<TargetPassConfig>().getTM<HexagonTargetMachine>();
2968 HexagonVectorCombine HVC(F, AA, AC, DT, SE, TLI, TM);
2969 return HVC.run();
2972 } // namespace
2974 char HexagonVectorCombineLegacy::ID = 0;
2976 INITIALIZE_PASS_BEGIN(HexagonVectorCombineLegacy, DEBUG_TYPE,
2977 "Hexagon Vector Combine", false, false)
2978 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2979 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2980 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2981 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
2982 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2983 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
2984 INITIALIZE_PASS_END(HexagonVectorCombineLegacy, DEBUG_TYPE,
2985 "Hexagon Vector Combine", false, false)
2987 FunctionPass *llvm::createHexagonVectorCombineLegacyPass() {
2988 return new HexagonVectorCombineLegacy();
2989 }