Fix think-o: emit all 8 bytes of the EOF marker. Also reflow a line in a
[llvm/stm8.git] / lib / VMCore / AutoUpgrade.cpp
blob4541f381ed4a172b6dfdd40c9954f6a97f09e402
1 //===-- AutoUpgrade.cpp - Implement auto-upgrade helper functions ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the auto-upgrade helper functions
12 //===----------------------------------------------------------------------===//
14 #include "llvm/AutoUpgrade.h"
15 #include "llvm/Constants.h"
16 #include "llvm/Function.h"
17 #include "llvm/LLVMContext.h"
18 #include "llvm/Module.h"
19 #include "llvm/IntrinsicInst.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/Support/CallSite.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/IRBuilder.h"
24 #include <cstring>
25 using namespace llvm;
28 static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
29 assert(F && "Illegal to upgrade a non-existent Function.");
31 // Get the Function's name.
32 const std::string& Name = F->getName();
34 // Convenience
35 const FunctionType *FTy = F->getFunctionType();
37 // Quickly eliminate it, if it's not a candidate.
38 if (Name.length() <= 8 || Name[0] != 'l' || Name[1] != 'l' ||
39 Name[2] != 'v' || Name[3] != 'm' || Name[4] != '.')
40 return false;
42 Module *M = F->getParent();
43 switch (Name[5]) {
44 default: break;
45 case 'a':
46 // This upgrades the llvm.atomic.lcs, llvm.atomic.las, llvm.atomic.lss,
47 // and atomics with default address spaces to their new names to their new
48 // function name (e.g. llvm.atomic.add.i32 => llvm.atomic.add.i32.p0i32)
49 if (Name.compare(5,7,"atomic.",7) == 0) {
50 if (Name.compare(12,3,"lcs",3) == 0) {
51 std::string::size_type delim = Name.find('.',12);
52 F->setName("llvm.atomic.cmp.swap" + Name.substr(delim) +
53 ".p0" + Name.substr(delim+1));
54 NewFn = F;
55 return true;
57 else if (Name.compare(12,3,"las",3) == 0) {
58 std::string::size_type delim = Name.find('.',12);
59 F->setName("llvm.atomic.load.add"+Name.substr(delim)
60 + ".p0" + Name.substr(delim+1));
61 NewFn = F;
62 return true;
64 else if (Name.compare(12,3,"lss",3) == 0) {
65 std::string::size_type delim = Name.find('.',12);
66 F->setName("llvm.atomic.load.sub"+Name.substr(delim)
67 + ".p0" + Name.substr(delim+1));
68 NewFn = F;
69 return true;
71 else if (Name.rfind(".p") == std::string::npos) {
72 // We don't have an address space qualifier so this has be upgraded
73 // to the new name. Copy the type name at the end of the intrinsic
74 // and add to it
75 std::string::size_type delim = Name.find_last_of('.');
76 assert(delim != std::string::npos && "can not find type");
77 F->setName(Name + ".p0" + Name.substr(delim+1));
78 NewFn = F;
79 return true;
81 } else if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
82 if (((Name.compare(14, 5, "vmovl", 5) == 0 ||
83 Name.compare(14, 5, "vaddl", 5) == 0 ||
84 Name.compare(14, 5, "vsubl", 5) == 0 ||
85 Name.compare(14, 5, "vaddw", 5) == 0 ||
86 Name.compare(14, 5, "vsubw", 5) == 0 ||
87 Name.compare(14, 5, "vmlal", 5) == 0 ||
88 Name.compare(14, 5, "vmlsl", 5) == 0 ||
89 Name.compare(14, 5, "vabdl", 5) == 0 ||
90 Name.compare(14, 5, "vabal", 5) == 0) &&
91 (Name.compare(19, 2, "s.", 2) == 0 ||
92 Name.compare(19, 2, "u.", 2) == 0)) ||
94 (Name.compare(14, 4, "vaba", 4) == 0 &&
95 (Name.compare(18, 2, "s.", 2) == 0 ||
96 Name.compare(18, 2, "u.", 2) == 0)) ||
98 (Name.compare(14, 6, "vmovn.", 6) == 0)) {
100 // Calls to these are transformed into IR without intrinsics.
101 NewFn = 0;
102 return true;
104 // Old versions of NEON ld/st intrinsics are missing alignment arguments.
105 bool isVLd = (Name.compare(14, 3, "vld", 3) == 0);
106 bool isVSt = (Name.compare(14, 3, "vst", 3) == 0);
107 if (isVLd || isVSt) {
108 unsigned NumVecs = Name.at(17) - '0';
109 if (NumVecs == 0 || NumVecs > 4)
110 return false;
111 bool isLaneOp = (Name.compare(18, 5, "lane.", 5) == 0);
112 if (!isLaneOp && Name.at(18) != '.')
113 return false;
114 unsigned ExpectedArgs = 2; // for the address and alignment
115 if (isVSt || isLaneOp)
116 ExpectedArgs += NumVecs;
117 if (isLaneOp)
118 ExpectedArgs += 1; // for the lane number
119 unsigned NumP = FTy->getNumParams();
120 if (NumP != ExpectedArgs - 1)
121 return false;
123 // Change the name of the old (bad) intrinsic, because
124 // its type is incorrect, but we cannot overload that name.
125 F->setName("");
127 // One argument is missing: add the alignment argument.
128 std::vector<const Type*> NewParams;
129 for (unsigned p = 0; p < NumP; ++p)
130 NewParams.push_back(FTy->getParamType(p));
131 NewParams.push_back(Type::getInt32Ty(F->getContext()));
132 FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(),
133 NewParams, false);
134 NewFn = cast<Function>(M->getOrInsertFunction(Name, NewFTy));
135 return true;
138 break;
139 case 'b':
140 // This upgrades the name of the llvm.bswap intrinsic function to only use
141 // a single type name for overloading. We only care about the old format
142 // 'llvm.bswap.i*.i*', so check for 'bswap.' and then for there being
143 // a '.' after 'bswap.'
144 if (Name.compare(5,6,"bswap.",6) == 0) {
145 std::string::size_type delim = Name.find('.',11);
147 if (delim != std::string::npos) {
148 // Construct the new name as 'llvm.bswap' + '.i*'
149 F->setName(Name.substr(0,10)+Name.substr(delim));
150 NewFn = F;
151 return true;
154 break;
156 case 'c':
157 // We only want to fix the 'llvm.ct*' intrinsics which do not have the
158 // correct return type, so we check for the name, and then check if the
159 // return type does not match the parameter type.
160 if ( (Name.compare(5,5,"ctpop",5) == 0 ||
161 Name.compare(5,4,"ctlz",4) == 0 ||
162 Name.compare(5,4,"cttz",4) == 0) &&
163 FTy->getReturnType() != FTy->getParamType(0)) {
164 // We first need to change the name of the old (bad) intrinsic, because
165 // its type is incorrect, but we cannot overload that name. We
166 // arbitrarily unique it here allowing us to construct a correctly named
167 // and typed function below.
168 F->setName("");
170 // Now construct the new intrinsic with the correct name and type. We
171 // leave the old function around in order to query its type, whatever it
172 // may be, and correctly convert up to the new type.
173 NewFn = cast<Function>(M->getOrInsertFunction(Name,
174 FTy->getParamType(0),
175 FTy->getParamType(0),
176 (Type *)0));
177 return true;
179 break;
181 case 'e':
182 // The old llvm.eh.selector.i32 is equivalent to the new llvm.eh.selector.
183 if (Name.compare("llvm.eh.selector.i32") == 0) {
184 F->setName("llvm.eh.selector");
185 NewFn = F;
186 return true;
188 // The old llvm.eh.typeid.for.i32 is equivalent to llvm.eh.typeid.for.
189 if (Name.compare("llvm.eh.typeid.for.i32") == 0) {
190 F->setName("llvm.eh.typeid.for");
191 NewFn = F;
192 return true;
194 // Convert the old llvm.eh.selector.i64 to a call to llvm.eh.selector.
195 if (Name.compare("llvm.eh.selector.i64") == 0) {
196 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_selector);
197 return true;
199 // Convert the old llvm.eh.typeid.for.i64 to a call to llvm.eh.typeid.for.
200 if (Name.compare("llvm.eh.typeid.for.i64") == 0) {
201 NewFn = Intrinsic::getDeclaration(M, Intrinsic::eh_typeid_for);
202 return true;
204 break;
206 case 'm': {
207 // This upgrades the llvm.memcpy, llvm.memmove, and llvm.memset to the
208 // new format that allows overloading the pointer for different address
209 // space (e.g., llvm.memcpy.i16 => llvm.memcpy.p0i8.p0i8.i16)
210 const char* NewFnName = NULL;
211 if (Name.compare(5,8,"memcpy.i",8) == 0) {
212 if (Name[13] == '8')
213 NewFnName = "llvm.memcpy.p0i8.p0i8.i8";
214 else if (Name.compare(13,2,"16") == 0)
215 NewFnName = "llvm.memcpy.p0i8.p0i8.i16";
216 else if (Name.compare(13,2,"32") == 0)
217 NewFnName = "llvm.memcpy.p0i8.p0i8.i32";
218 else if (Name.compare(13,2,"64") == 0)
219 NewFnName = "llvm.memcpy.p0i8.p0i8.i64";
220 } else if (Name.compare(5,9,"memmove.i",9) == 0) {
221 if (Name[14] == '8')
222 NewFnName = "llvm.memmove.p0i8.p0i8.i8";
223 else if (Name.compare(14,2,"16") == 0)
224 NewFnName = "llvm.memmove.p0i8.p0i8.i16";
225 else if (Name.compare(14,2,"32") == 0)
226 NewFnName = "llvm.memmove.p0i8.p0i8.i32";
227 else if (Name.compare(14,2,"64") == 0)
228 NewFnName = "llvm.memmove.p0i8.p0i8.i64";
230 else if (Name.compare(5,8,"memset.i",8) == 0) {
231 if (Name[13] == '8')
232 NewFnName = "llvm.memset.p0i8.i8";
233 else if (Name.compare(13,2,"16") == 0)
234 NewFnName = "llvm.memset.p0i8.i16";
235 else if (Name.compare(13,2,"32") == 0)
236 NewFnName = "llvm.memset.p0i8.i32";
237 else if (Name.compare(13,2,"64") == 0)
238 NewFnName = "llvm.memset.p0i8.i64";
240 if (NewFnName) {
241 NewFn = cast<Function>(M->getOrInsertFunction(NewFnName,
242 FTy->getReturnType(),
243 FTy->getParamType(0),
244 FTy->getParamType(1),
245 FTy->getParamType(2),
246 FTy->getParamType(3),
247 Type::getInt1Ty(F->getContext()),
248 (Type *)0));
249 return true;
251 break;
253 case 'p':
254 // This upgrades the llvm.part.select overloaded intrinsic names to only
255 // use one type specifier in the name. We only care about the old format
256 // 'llvm.part.select.i*.i*', and solve as above with bswap.
257 if (Name.compare(5,12,"part.select.",12) == 0) {
258 std::string::size_type delim = Name.find('.',17);
260 if (delim != std::string::npos) {
261 // Construct a new name as 'llvm.part.select' + '.i*'
262 F->setName(Name.substr(0,16)+Name.substr(delim));
263 NewFn = F;
264 return true;
266 break;
269 // This upgrades the llvm.part.set intrinsics similarly as above, however
270 // we care about 'llvm.part.set.i*.i*.i*', but only the first two types
271 // must match. There is an additional type specifier after these two
272 // matching types that we must retain when upgrading. Thus, we require
273 // finding 2 periods, not just one, after the intrinsic name.
274 if (Name.compare(5,9,"part.set.",9) == 0) {
275 std::string::size_type delim = Name.find('.',14);
277 if (delim != std::string::npos &&
278 Name.find('.',delim+1) != std::string::npos) {
279 // Construct a new name as 'llvm.part.select' + '.i*.i*'
280 F->setName(Name.substr(0,13)+Name.substr(delim));
281 NewFn = F;
282 return true;
284 break;
287 break;
288 case 'x':
289 // This fixes all MMX shift intrinsic instructions to take a
290 // x86_mmx instead of a v1i64, v2i32, v4i16, or v8i8.
291 if (Name.compare(5, 8, "x86.mmx.", 8) == 0) {
292 const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
294 if (Name.compare(13, 4, "padd", 4) == 0 ||
295 Name.compare(13, 4, "psub", 4) == 0 ||
296 Name.compare(13, 4, "pmul", 4) == 0 ||
297 Name.compare(13, 5, "pmadd", 5) == 0 ||
298 Name.compare(13, 4, "pand", 4) == 0 ||
299 Name.compare(13, 3, "por", 3) == 0 ||
300 Name.compare(13, 4, "pxor", 4) == 0 ||
301 Name.compare(13, 4, "pavg", 4) == 0 ||
302 Name.compare(13, 4, "pmax", 4) == 0 ||
303 Name.compare(13, 4, "pmin", 4) == 0 ||
304 Name.compare(13, 4, "psad", 4) == 0 ||
305 Name.compare(13, 4, "psll", 4) == 0 ||
306 Name.compare(13, 4, "psrl", 4) == 0 ||
307 Name.compare(13, 4, "psra", 4) == 0 ||
308 Name.compare(13, 4, "pack", 4) == 0 ||
309 Name.compare(13, 6, "punpck", 6) == 0 ||
310 Name.compare(13, 4, "pcmp", 4) == 0) {
311 assert(FTy->getNumParams() == 2 && "MMX intrinsic takes 2 args!");
312 const Type *SecondParamTy = X86_MMXTy;
314 if (Name.compare(13, 5, "pslli", 5) == 0 ||
315 Name.compare(13, 5, "psrli", 5) == 0 ||
316 Name.compare(13, 5, "psrai", 5) == 0)
317 SecondParamTy = FTy->getParamType(1);
319 // Don't do anything if it has the correct types.
320 if (FTy->getReturnType() == X86_MMXTy &&
321 FTy->getParamType(0) == X86_MMXTy &&
322 FTy->getParamType(1) == SecondParamTy)
323 break;
325 // We first need to change the name of the old (bad) intrinsic, because
326 // its type is incorrect, but we cannot overload that name. We
327 // arbitrarily unique it here allowing us to construct a correctly named
328 // and typed function below.
329 F->setName("");
331 // Now construct the new intrinsic with the correct name and type. We
332 // leave the old function around in order to query its type, whatever it
333 // may be, and correctly convert up to the new type.
334 NewFn = cast<Function>(M->getOrInsertFunction(Name,
335 X86_MMXTy, X86_MMXTy,
336 SecondParamTy, (Type*)0));
337 return true;
340 if (Name.compare(13, 8, "maskmovq", 8) == 0) {
341 // Don't do anything if it has the correct types.
342 if (FTy->getParamType(0) == X86_MMXTy &&
343 FTy->getParamType(1) == X86_MMXTy)
344 break;
346 F->setName("");
347 NewFn = cast<Function>(M->getOrInsertFunction(Name,
348 FTy->getReturnType(),
349 X86_MMXTy,
350 X86_MMXTy,
351 FTy->getParamType(2),
352 (Type*)0));
353 return true;
356 if (Name.compare(13, 8, "pmovmskb", 8) == 0) {
357 if (FTy->getParamType(0) == X86_MMXTy)
358 break;
360 F->setName("");
361 NewFn = cast<Function>(M->getOrInsertFunction(Name,
362 FTy->getReturnType(),
363 X86_MMXTy,
364 (Type*)0));
365 return true;
368 if (Name.compare(13, 5, "movnt", 5) == 0) {
369 if (FTy->getParamType(1) == X86_MMXTy)
370 break;
372 F->setName("");
373 NewFn = cast<Function>(M->getOrInsertFunction(Name,
374 FTy->getReturnType(),
375 FTy->getParamType(0),
376 X86_MMXTy,
377 (Type*)0));
378 return true;
381 if (Name.compare(13, 7, "palignr", 7) == 0) {
382 if (FTy->getReturnType() == X86_MMXTy &&
383 FTy->getParamType(0) == X86_MMXTy &&
384 FTy->getParamType(1) == X86_MMXTy)
385 break;
387 F->setName("");
388 NewFn = cast<Function>(M->getOrInsertFunction(Name,
389 X86_MMXTy,
390 X86_MMXTy,
391 X86_MMXTy,
392 FTy->getParamType(2),
393 (Type*)0));
394 return true;
397 if (Name.compare(13, 5, "pextr", 5) == 0) {
398 if (FTy->getParamType(0) == X86_MMXTy)
399 break;
401 F->setName("");
402 NewFn = cast<Function>(M->getOrInsertFunction(Name,
403 FTy->getReturnType(),
404 X86_MMXTy,
405 FTy->getParamType(1),
406 (Type*)0));
407 return true;
410 if (Name.compare(13, 5, "pinsr", 5) == 0) {
411 if (FTy->getReturnType() == X86_MMXTy &&
412 FTy->getParamType(0) == X86_MMXTy)
413 break;
415 F->setName("");
416 NewFn = cast<Function>(M->getOrInsertFunction(Name,
417 X86_MMXTy,
418 X86_MMXTy,
419 FTy->getParamType(1),
420 FTy->getParamType(2),
421 (Type*)0));
422 return true;
425 if (Name.compare(13, 12, "cvtsi32.si64", 12) == 0) {
426 if (FTy->getReturnType() == X86_MMXTy)
427 break;
429 F->setName("");
430 NewFn = cast<Function>(M->getOrInsertFunction(Name,
431 X86_MMXTy,
432 FTy->getParamType(0),
433 (Type*)0));
434 return true;
437 if (Name.compare(13, 12, "cvtsi64.si32", 12) == 0) {
438 if (FTy->getParamType(0) == X86_MMXTy)
439 break;
441 F->setName("");
442 NewFn = cast<Function>(M->getOrInsertFunction(Name,
443 FTy->getReturnType(),
444 X86_MMXTy,
445 (Type*)0));
446 return true;
449 if (Name.compare(13, 8, "vec.init", 8) == 0) {
450 if (FTy->getReturnType() == X86_MMXTy)
451 break;
453 F->setName("");
455 if (Name.compare(21, 2, ".b", 2) == 0)
456 NewFn = cast<Function>(M->getOrInsertFunction(Name,
457 X86_MMXTy,
458 FTy->getParamType(0),
459 FTy->getParamType(1),
460 FTy->getParamType(2),
461 FTy->getParamType(3),
462 FTy->getParamType(4),
463 FTy->getParamType(5),
464 FTy->getParamType(6),
465 FTy->getParamType(7),
466 (Type*)0));
467 else if (Name.compare(21, 2, ".w", 2) == 0)
468 NewFn = cast<Function>(M->getOrInsertFunction(Name,
469 X86_MMXTy,
470 FTy->getParamType(0),
471 FTy->getParamType(1),
472 FTy->getParamType(2),
473 FTy->getParamType(3),
474 (Type*)0));
475 else if (Name.compare(21, 2, ".d", 2) == 0)
476 NewFn = cast<Function>(M->getOrInsertFunction(Name,
477 X86_MMXTy,
478 FTy->getParamType(0),
479 FTy->getParamType(1),
480 (Type*)0));
481 return true;
485 if (Name.compare(13, 9, "vec.ext.d", 9) == 0) {
486 if (FTy->getReturnType() == X86_MMXTy &&
487 FTy->getParamType(0) == X86_MMXTy)
488 break;
490 F->setName("");
491 NewFn = cast<Function>(M->getOrInsertFunction(Name,
492 X86_MMXTy,
493 X86_MMXTy,
494 FTy->getParamType(1),
495 (Type*)0));
496 return true;
499 if (Name.compare(13, 9, "emms", 4) == 0 ||
500 Name.compare(13, 9, "femms", 5) == 0) {
501 NewFn = 0;
502 break;
505 // We really shouldn't get here ever.
506 assert(0 && "Invalid MMX intrinsic!");
507 break;
508 } else if (Name.compare(5,17,"x86.sse2.loadh.pd",17) == 0 ||
509 Name.compare(5,17,"x86.sse2.loadl.pd",17) == 0 ||
510 Name.compare(5,16,"x86.sse2.movl.dq",16) == 0 ||
511 Name.compare(5,15,"x86.sse2.movs.d",15) == 0 ||
512 Name.compare(5,16,"x86.sse2.shuf.pd",16) == 0 ||
513 Name.compare(5,18,"x86.sse2.unpckh.pd",18) == 0 ||
514 Name.compare(5,18,"x86.sse2.unpckl.pd",18) == 0 ||
515 Name.compare(5,20,"x86.sse2.punpckh.qdq",20) == 0 ||
516 Name.compare(5,20,"x86.sse2.punpckl.qdq",20) == 0) {
517 // Calls to these intrinsics are transformed into ShuffleVector's.
518 NewFn = 0;
519 return true;
520 } else if (Name.compare(5, 16, "x86.sse41.pmulld", 16) == 0) {
521 // Calls to these intrinsics are transformed into vector multiplies.
522 NewFn = 0;
523 return true;
524 } else if (Name.compare(5, 18, "x86.ssse3.palign.r", 18) == 0 ||
525 Name.compare(5, 22, "x86.ssse3.palign.r.128", 22) == 0) {
526 // Calls to these intrinsics are transformed into vector shuffles, shifts,
527 // or 0.
528 NewFn = 0;
529 return true;
530 } else if (Name.compare(5, 16, "x86.sse.loadu.ps", 16) == 0 ||
531 Name.compare(5, 17, "x86.sse2.loadu.dq", 17) == 0 ||
532 Name.compare(5, 17, "x86.sse2.loadu.pd", 17) == 0) {
533 // Calls to these instructions are transformed into unaligned loads.
534 NewFn = 0;
535 return true;
536 } else if (Name.compare(5, 17, "x86.ssse3.pshuf.w", 17) == 0) {
537 // This is an SSE/MMX instruction.
538 const Type *X86_MMXTy = VectorType::getX86_MMXTy(FTy->getContext());
539 NewFn =
540 cast<Function>(M->getOrInsertFunction("llvm.x86.sse.pshuf.w",
541 X86_MMXTy,
542 X86_MMXTy,
543 Type::getInt8Ty(F->getContext()),
544 (Type*)0));
545 return true;
548 break;
551 // This may not belong here. This function is effectively being overloaded
552 // to both detect an intrinsic which needs upgrading, and to provide the
553 // upgraded form of the intrinsic. We should perhaps have two separate
554 // functions for this.
555 return false;
558 bool llvm::UpgradeIntrinsicFunction(Function *F, Function *&NewFn) {
559 NewFn = 0;
560 bool Upgraded = UpgradeIntrinsicFunction1(F, NewFn);
562 // Upgrade intrinsic attributes. This does not change the function.
563 if (NewFn)
564 F = NewFn;
565 if (unsigned id = F->getIntrinsicID())
566 F->setAttributes(Intrinsic::getAttributes((Intrinsic::ID)id));
567 return Upgraded;
570 bool llvm::UpgradeGlobalVariable(GlobalVariable *GV) {
571 StringRef Name(GV->getName());
573 // We are only upgrading one symbol here.
574 if (Name == ".llvm.eh.catch.all.value") {
575 GV->setName("llvm.eh.catch.all.value");
576 return true;
579 return false;
582 /// ExtendNEONArgs - For NEON "long" and "wide" operations, where the results
583 /// have vector elements twice as big as one or both source operands, do the
584 /// sign- or zero-extension that used to be handled by intrinsics. The
585 /// extended values are returned via V0 and V1.
586 static void ExtendNEONArgs(CallInst *CI, Value *Arg0, Value *Arg1,
587 Value *&V0, Value *&V1) {
588 Function *F = CI->getCalledFunction();
589 const std::string& Name = F->getName();
590 bool isLong = (Name.at(18) == 'l');
591 bool isSigned = (Name.at(19) == 's');
593 if (isSigned) {
594 if (isLong)
595 V0 = new SExtInst(Arg0, CI->getType(), "", CI);
596 else
597 V0 = Arg0;
598 V1 = new SExtInst(Arg1, CI->getType(), "", CI);
599 } else {
600 if (isLong)
601 V0 = new ZExtInst(Arg0, CI->getType(), "", CI);
602 else
603 V0 = Arg0;
604 V1 = new ZExtInst(Arg1, CI->getType(), "", CI);
608 /// CallVABD - As part of expanding a call to one of the old NEON vabdl, vaba,
609 /// or vabal intrinsics, construct a call to a vabd intrinsic. Examine the
610 /// name of the old intrinsic to determine whether to use a signed or unsigned
611 /// vabd intrinsic. Get the type from the old call instruction, adjusted for
612 /// half-size vector elements if the old intrinsic was vabdl or vabal.
613 static Instruction *CallVABD(CallInst *CI, Value *Arg0, Value *Arg1) {
614 Function *F = CI->getCalledFunction();
615 const std::string& Name = F->getName();
616 bool isLong = (Name.at(18) == 'l');
617 bool isSigned = (Name.at(isLong ? 19 : 18) == 's');
619 Intrinsic::ID intID;
620 if (isSigned)
621 intID = Intrinsic::arm_neon_vabds;
622 else
623 intID = Intrinsic::arm_neon_vabdu;
625 const Type *Ty = CI->getType();
626 if (isLong)
627 Ty = VectorType::getTruncatedElementVectorType(cast<const VectorType>(Ty));
629 Function *VABD = Intrinsic::getDeclaration(F->getParent(), intID, &Ty, 1);
630 Value *Operands[2];
631 Operands[0] = Arg0;
632 Operands[1] = Arg1;
633 return CallInst::Create(VABD, Operands, Operands+2,
634 "upgraded."+CI->getName(), CI);
637 /// ConstructNewCallInst - Construct a new CallInst with the signature of NewFn.
638 static void ConstructNewCallInst(Function *NewFn, CallInst *OldCI,
639 Value **Operands, unsigned NumOps,
640 bool AssignName = true) {
641 // Construct a new CallInst.
642 CallInst *NewCI =
643 CallInst::Create(NewFn, Operands, Operands + NumOps,
644 AssignName ? "upgraded." + OldCI->getName() : "", OldCI);
646 NewCI->setTailCall(OldCI->isTailCall());
647 NewCI->setCallingConv(OldCI->getCallingConv());
649 // Handle any uses of the old CallInst. If the type has changed, add a cast.
650 if (!OldCI->use_empty()) {
651 if (OldCI->getType() != NewCI->getType()) {
652 Function *OldFn = OldCI->getCalledFunction();
653 CastInst *RetCast =
654 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
655 OldFn->getReturnType(), true),
656 NewCI, OldFn->getReturnType(), NewCI->getName(),OldCI);
658 // Replace all uses of the old call with the new cast which has the
659 // correct type.
660 OldCI->replaceAllUsesWith(RetCast);
661 } else {
662 OldCI->replaceAllUsesWith(NewCI);
666 // Clean up the old call now that it has been completely upgraded.
667 OldCI->eraseFromParent();
670 // UpgradeIntrinsicCall - Upgrade a call to an old intrinsic to be a call the
671 // upgraded intrinsic. All argument and return casting must be provided in
672 // order to seamlessly integrate with existing context.
673 void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
674 Function *F = CI->getCalledFunction();
675 LLVMContext &C = CI->getContext();
676 ImmutableCallSite CS(CI);
678 assert(F && "CallInst has no function associated with it.");
680 if (!NewFn) {
681 // Get the Function's name.
682 const std::string& Name = F->getName();
684 // Upgrade ARM NEON intrinsics.
685 if (Name.compare(5, 9, "arm.neon.", 9) == 0) {
686 Instruction *NewI;
687 Value *V0, *V1;
688 if (Name.compare(14, 7, "vmovls.", 7) == 0) {
689 NewI = new SExtInst(CI->getArgOperand(0), CI->getType(),
690 "upgraded." + CI->getName(), CI);
691 } else if (Name.compare(14, 7, "vmovlu.", 7) == 0) {
692 NewI = new ZExtInst(CI->getArgOperand(0), CI->getType(),
693 "upgraded." + CI->getName(), CI);
694 } else if (Name.compare(14, 4, "vadd", 4) == 0) {
695 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
696 NewI = BinaryOperator::CreateAdd(V0, V1, "upgraded."+CI->getName(), CI);
697 } else if (Name.compare(14, 4, "vsub", 4) == 0) {
698 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
699 NewI = BinaryOperator::CreateSub(V0, V1,"upgraded."+CI->getName(),CI);
700 } else if (Name.compare(14, 4, "vmul", 4) == 0) {
701 ExtendNEONArgs(CI, CI->getArgOperand(0), CI->getArgOperand(1), V0, V1);
702 NewI = BinaryOperator::CreateMul(V0, V1,"upgraded."+CI->getName(),CI);
703 } else if (Name.compare(14, 4, "vmla", 4) == 0) {
704 ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
705 Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
706 NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), MulI,
707 "upgraded."+CI->getName(), CI);
708 } else if (Name.compare(14, 4, "vmls", 4) == 0) {
709 ExtendNEONArgs(CI, CI->getArgOperand(1), CI->getArgOperand(2), V0, V1);
710 Instruction *MulI = BinaryOperator::CreateMul(V0, V1, "", CI);
711 NewI = BinaryOperator::CreateSub(CI->getArgOperand(0), MulI,
712 "upgraded."+CI->getName(), CI);
713 } else if (Name.compare(14, 4, "vabd", 4) == 0) {
714 NewI = CallVABD(CI, CI->getArgOperand(0), CI->getArgOperand(1));
715 NewI = new ZExtInst(NewI, CI->getType(), "upgraded."+CI->getName(), CI);
716 } else if (Name.compare(14, 4, "vaba", 4) == 0) {
717 NewI = CallVABD(CI, CI->getArgOperand(1), CI->getArgOperand(2));
718 if (Name.at(18) == 'l')
719 NewI = new ZExtInst(NewI, CI->getType(), "", CI);
720 NewI = BinaryOperator::CreateAdd(CI->getArgOperand(0), NewI,
721 "upgraded."+CI->getName(), CI);
722 } else if (Name.compare(14, 6, "vmovn.", 6) == 0) {
723 NewI = new TruncInst(CI->getArgOperand(0), CI->getType(),
724 "upgraded." + CI->getName(), CI);
725 } else {
726 llvm_unreachable("Unknown arm.neon function for CallInst upgrade.");
728 // Replace any uses of the old CallInst.
729 if (!CI->use_empty())
730 CI->replaceAllUsesWith(NewI);
731 CI->eraseFromParent();
732 return;
735 bool isLoadH = false, isLoadL = false, isMovL = false;
736 bool isMovSD = false, isShufPD = false;
737 bool isUnpckhPD = false, isUnpcklPD = false;
738 bool isPunpckhQPD = false, isPunpcklQPD = false;
739 if (F->getName() == "llvm.x86.sse2.loadh.pd")
740 isLoadH = true;
741 else if (F->getName() == "llvm.x86.sse2.loadl.pd")
742 isLoadL = true;
743 else if (F->getName() == "llvm.x86.sse2.movl.dq")
744 isMovL = true;
745 else if (F->getName() == "llvm.x86.sse2.movs.d")
746 isMovSD = true;
747 else if (F->getName() == "llvm.x86.sse2.shuf.pd")
748 isShufPD = true;
749 else if (F->getName() == "llvm.x86.sse2.unpckh.pd")
750 isUnpckhPD = true;
751 else if (F->getName() == "llvm.x86.sse2.unpckl.pd")
752 isUnpcklPD = true;
753 else if (F->getName() == "llvm.x86.sse2.punpckh.qdq")
754 isPunpckhQPD = true;
755 else if (F->getName() == "llvm.x86.sse2.punpckl.qdq")
756 isPunpcklQPD = true;
758 if (isLoadH || isLoadL || isMovL || isMovSD || isShufPD ||
759 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
760 std::vector<Constant*> Idxs;
761 Value *Op0 = CI->getArgOperand(0);
762 ShuffleVectorInst *SI = NULL;
763 if (isLoadH || isLoadL) {
764 Value *Op1 = UndefValue::get(Op0->getType());
765 Value *Addr = new BitCastInst(CI->getArgOperand(1),
766 Type::getDoublePtrTy(C),
767 "upgraded.", CI);
768 Value *Load = new LoadInst(Addr, "upgraded.", false, 8, CI);
769 Value *Idx = ConstantInt::get(Type::getInt32Ty(C), 0);
770 Op1 = InsertElementInst::Create(Op1, Load, Idx, "upgraded.", CI);
772 if (isLoadH) {
773 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
774 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
775 } else {
776 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
777 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
779 Value *Mask = ConstantVector::get(Idxs);
780 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
781 } else if (isMovL) {
782 Constant *Zero = ConstantInt::get(Type::getInt32Ty(C), 0);
783 Idxs.push_back(Zero);
784 Idxs.push_back(Zero);
785 Idxs.push_back(Zero);
786 Idxs.push_back(Zero);
787 Value *ZeroV = ConstantVector::get(Idxs);
789 Idxs.clear();
790 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 4));
791 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 5));
792 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
793 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
794 Value *Mask = ConstantVector::get(Idxs);
795 SI = new ShuffleVectorInst(ZeroV, Op0, Mask, "upgraded.", CI);
796 } else if (isMovSD ||
797 isUnpckhPD || isUnpcklPD || isPunpckhQPD || isPunpcklQPD) {
798 Value *Op1 = CI->getArgOperand(1);
799 if (isMovSD) {
800 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
801 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
802 } else if (isUnpckhPD || isPunpckhQPD) {
803 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
804 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 3));
805 } else {
806 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
807 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), 2));
809 Value *Mask = ConstantVector::get(Idxs);
810 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
811 } else if (isShufPD) {
812 Value *Op1 = CI->getArgOperand(1);
813 unsigned MaskVal =
814 cast<ConstantInt>(CI->getArgOperand(2))->getZExtValue();
815 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C), MaskVal & 1));
816 Idxs.push_back(ConstantInt::get(Type::getInt32Ty(C),
817 ((MaskVal >> 1) & 1)+2));
818 Value *Mask = ConstantVector::get(Idxs);
819 SI = new ShuffleVectorInst(Op0, Op1, Mask, "upgraded.", CI);
822 assert(SI && "Unexpected!");
824 // Handle any uses of the old CallInst.
825 if (!CI->use_empty())
826 // Replace all uses of the old call with the new cast which has the
827 // correct type.
828 CI->replaceAllUsesWith(SI);
830 // Clean up the old call now that it has been completely upgraded.
831 CI->eraseFromParent();
832 } else if (F->getName() == "llvm.x86.sse41.pmulld") {
833 // Upgrade this set of intrinsics into vector multiplies.
834 Instruction *Mul = BinaryOperator::CreateMul(CI->getArgOperand(0),
835 CI->getArgOperand(1),
836 CI->getName(),
837 CI);
838 // Fix up all the uses with our new multiply.
839 if (!CI->use_empty())
840 CI->replaceAllUsesWith(Mul);
842 // Remove upgraded multiply.
843 CI->eraseFromParent();
844 } else if (F->getName() == "llvm.x86.ssse3.palign.r") {
845 Value *Op1 = CI->getArgOperand(0);
846 Value *Op2 = CI->getArgOperand(1);
847 Value *Op3 = CI->getArgOperand(2);
848 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
849 Value *Rep;
850 IRBuilder<> Builder(C);
851 Builder.SetInsertPoint(CI->getParent(), CI);
853 // If palignr is shifting the pair of input vectors less than 9 bytes,
854 // emit a shuffle instruction.
855 if (shiftVal <= 8) {
856 const Type *IntTy = Type::getInt32Ty(C);
857 const Type *EltTy = Type::getInt8Ty(C);
858 const Type *VecTy = VectorType::get(EltTy, 8);
860 Op2 = Builder.CreateBitCast(Op2, VecTy);
861 Op1 = Builder.CreateBitCast(Op1, VecTy);
863 llvm::SmallVector<llvm::Constant*, 8> Indices;
864 for (unsigned i = 0; i != 8; ++i)
865 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
867 Value *SV = ConstantVector::get(Indices);
868 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
869 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
872 // If palignr is shifting the pair of input vectors more than 8 but less
873 // than 16 bytes, emit a logical right shift of the destination.
874 else if (shiftVal < 16) {
875 // MMX has these as 1 x i64 vectors for some odd optimization reasons.
876 const Type *EltTy = Type::getInt64Ty(C);
877 const Type *VecTy = VectorType::get(EltTy, 1);
879 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
880 Op2 = ConstantInt::get(VecTy, (shiftVal-8) * 8);
882 // create i32 constant
883 Function *I =
884 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_mmx_psrl_q);
885 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
888 // If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
889 else {
890 Rep = Constant::getNullValue(F->getReturnType());
893 // Replace any uses with our new instruction.
894 if (!CI->use_empty())
895 CI->replaceAllUsesWith(Rep);
897 // Remove upgraded instruction.
898 CI->eraseFromParent();
900 } else if (F->getName() == "llvm.x86.ssse3.palign.r.128") {
901 Value *Op1 = CI->getArgOperand(0);
902 Value *Op2 = CI->getArgOperand(1);
903 Value *Op3 = CI->getArgOperand(2);
904 unsigned shiftVal = cast<ConstantInt>(Op3)->getZExtValue();
905 Value *Rep;
906 IRBuilder<> Builder(C);
907 Builder.SetInsertPoint(CI->getParent(), CI);
909 // If palignr is shifting the pair of input vectors less than 17 bytes,
910 // emit a shuffle instruction.
911 if (shiftVal <= 16) {
912 const Type *IntTy = Type::getInt32Ty(C);
913 const Type *EltTy = Type::getInt8Ty(C);
914 const Type *VecTy = VectorType::get(EltTy, 16);
916 Op2 = Builder.CreateBitCast(Op2, VecTy);
917 Op1 = Builder.CreateBitCast(Op1, VecTy);
919 llvm::SmallVector<llvm::Constant*, 16> Indices;
920 for (unsigned i = 0; i != 16; ++i)
921 Indices.push_back(ConstantInt::get(IntTy, shiftVal + i));
923 Value *SV = ConstantVector::get(Indices);
924 Rep = Builder.CreateShuffleVector(Op2, Op1, SV, "palignr");
925 Rep = Builder.CreateBitCast(Rep, F->getReturnType());
928 // If palignr is shifting the pair of input vectors more than 16 but less
929 // than 32 bytes, emit a logical right shift of the destination.
930 else if (shiftVal < 32) {
931 const Type *EltTy = Type::getInt64Ty(C);
932 const Type *VecTy = VectorType::get(EltTy, 2);
933 const Type *IntTy = Type::getInt32Ty(C);
935 Op1 = Builder.CreateBitCast(Op1, VecTy, "cast");
936 Op2 = ConstantInt::get(IntTy, (shiftVal-16) * 8);
938 // Emit the shift as a call to the SSE2 psrl.dq intrinsic.
939 Function *I =
940 Intrinsic::getDeclaration(F->getParent(), Intrinsic::x86_sse2_psrl_dq);
941 Rep = Builder.CreateCall2(I, Op1, Op2, "palignr");
944 // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
945 else {
946 Rep = Constant::getNullValue(F->getReturnType());
949 // Replace any uses with our new instruction.
950 if (!CI->use_empty())
951 CI->replaceAllUsesWith(Rep);
953 // Remove upgraded instruction.
954 CI->eraseFromParent();
956 } else if (F->getName() == "llvm.x86.sse.loadu.ps" ||
957 F->getName() == "llvm.x86.sse2.loadu.dq" ||
958 F->getName() == "llvm.x86.sse2.loadu.pd") {
959 // Convert to a native, unaligned load.
960 const Type *VecTy = CI->getType();
961 const Type *IntTy = IntegerType::get(C, 128);
962 IRBuilder<> Builder(C);
963 Builder.SetInsertPoint(CI->getParent(), CI);
965 Value *BC = Builder.CreateBitCast(CI->getArgOperand(0),
966 PointerType::getUnqual(IntTy),
967 "cast");
968 LoadInst *LI = Builder.CreateLoad(BC, CI->getName());
969 LI->setAlignment(1); // Unaligned load.
970 BC = Builder.CreateBitCast(LI, VecTy, "new.cast");
972 // Fix up all the uses with our new load.
973 if (!CI->use_empty())
974 CI->replaceAllUsesWith(BC);
976 // Remove intrinsic.
977 CI->eraseFromParent();
978 } else {
979 llvm_unreachable("Unknown function for CallInst upgrade.");
981 return;
984 switch (NewFn->getIntrinsicID()) {
985 default: llvm_unreachable("Unknown function for CallInst upgrade.");
986 case Intrinsic::arm_neon_vld1:
987 case Intrinsic::arm_neon_vld2:
988 case Intrinsic::arm_neon_vld3:
989 case Intrinsic::arm_neon_vld4:
990 case Intrinsic::arm_neon_vst1:
991 case Intrinsic::arm_neon_vst2:
992 case Intrinsic::arm_neon_vst3:
993 case Intrinsic::arm_neon_vst4:
994 case Intrinsic::arm_neon_vld2lane:
995 case Intrinsic::arm_neon_vld3lane:
996 case Intrinsic::arm_neon_vld4lane:
997 case Intrinsic::arm_neon_vst2lane:
998 case Intrinsic::arm_neon_vst3lane:
999 case Intrinsic::arm_neon_vst4lane: {
1000 // Add a default alignment argument of 1.
1001 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1002 Operands.push_back(ConstantInt::get(Type::getInt32Ty(C), 1));
1003 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1004 CI->getName(), CI);
1005 NewCI->setTailCall(CI->isTailCall());
1006 NewCI->setCallingConv(CI->getCallingConv());
1008 // Handle any uses of the old CallInst.
1009 if (!CI->use_empty())
1010 // Replace all uses of the old call with the new cast which has the
1011 // correct type.
1012 CI->replaceAllUsesWith(NewCI);
1014 // Clean up the old call now that it has been completely upgraded.
1015 CI->eraseFromParent();
1016 break;
1019 case Intrinsic::x86_mmx_padd_b:
1020 case Intrinsic::x86_mmx_padd_w:
1021 case Intrinsic::x86_mmx_padd_d:
1022 case Intrinsic::x86_mmx_padd_q:
1023 case Intrinsic::x86_mmx_padds_b:
1024 case Intrinsic::x86_mmx_padds_w:
1025 case Intrinsic::x86_mmx_paddus_b:
1026 case Intrinsic::x86_mmx_paddus_w:
1027 case Intrinsic::x86_mmx_psub_b:
1028 case Intrinsic::x86_mmx_psub_w:
1029 case Intrinsic::x86_mmx_psub_d:
1030 case Intrinsic::x86_mmx_psub_q:
1031 case Intrinsic::x86_mmx_psubs_b:
1032 case Intrinsic::x86_mmx_psubs_w:
1033 case Intrinsic::x86_mmx_psubus_b:
1034 case Intrinsic::x86_mmx_psubus_w:
1035 case Intrinsic::x86_mmx_pmulh_w:
1036 case Intrinsic::x86_mmx_pmull_w:
1037 case Intrinsic::x86_mmx_pmulhu_w:
1038 case Intrinsic::x86_mmx_pmulu_dq:
1039 case Intrinsic::x86_mmx_pmadd_wd:
1040 case Intrinsic::x86_mmx_pand:
1041 case Intrinsic::x86_mmx_pandn:
1042 case Intrinsic::x86_mmx_por:
1043 case Intrinsic::x86_mmx_pxor:
1044 case Intrinsic::x86_mmx_pavg_b:
1045 case Intrinsic::x86_mmx_pavg_w:
1046 case Intrinsic::x86_mmx_pmaxu_b:
1047 case Intrinsic::x86_mmx_pmaxs_w:
1048 case Intrinsic::x86_mmx_pminu_b:
1049 case Intrinsic::x86_mmx_pmins_w:
1050 case Intrinsic::x86_mmx_psad_bw:
1051 case Intrinsic::x86_mmx_psll_w:
1052 case Intrinsic::x86_mmx_psll_d:
1053 case Intrinsic::x86_mmx_psll_q:
1054 case Intrinsic::x86_mmx_pslli_w:
1055 case Intrinsic::x86_mmx_pslli_d:
1056 case Intrinsic::x86_mmx_pslli_q:
1057 case Intrinsic::x86_mmx_psrl_w:
1058 case Intrinsic::x86_mmx_psrl_d:
1059 case Intrinsic::x86_mmx_psrl_q:
1060 case Intrinsic::x86_mmx_psrli_w:
1061 case Intrinsic::x86_mmx_psrli_d:
1062 case Intrinsic::x86_mmx_psrli_q:
1063 case Intrinsic::x86_mmx_psra_w:
1064 case Intrinsic::x86_mmx_psra_d:
1065 case Intrinsic::x86_mmx_psrai_w:
1066 case Intrinsic::x86_mmx_psrai_d:
1067 case Intrinsic::x86_mmx_packsswb:
1068 case Intrinsic::x86_mmx_packssdw:
1069 case Intrinsic::x86_mmx_packuswb:
1070 case Intrinsic::x86_mmx_punpckhbw:
1071 case Intrinsic::x86_mmx_punpckhwd:
1072 case Intrinsic::x86_mmx_punpckhdq:
1073 case Intrinsic::x86_mmx_punpcklbw:
1074 case Intrinsic::x86_mmx_punpcklwd:
1075 case Intrinsic::x86_mmx_punpckldq:
1076 case Intrinsic::x86_mmx_pcmpeq_b:
1077 case Intrinsic::x86_mmx_pcmpeq_w:
1078 case Intrinsic::x86_mmx_pcmpeq_d:
1079 case Intrinsic::x86_mmx_pcmpgt_b:
1080 case Intrinsic::x86_mmx_pcmpgt_w:
1081 case Intrinsic::x86_mmx_pcmpgt_d: {
1082 Value *Operands[2];
1084 // Cast the operand to the X86 MMX type.
1085 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1086 NewFn->getFunctionType()->getParamType(0),
1087 "upgraded.", CI);
1089 switch (NewFn->getIntrinsicID()) {
1090 default:
1091 // Cast to the X86 MMX type.
1092 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1093 NewFn->getFunctionType()->getParamType(1),
1094 "upgraded.", CI);
1095 break;
1096 case Intrinsic::x86_mmx_pslli_w:
1097 case Intrinsic::x86_mmx_pslli_d:
1098 case Intrinsic::x86_mmx_pslli_q:
1099 case Intrinsic::x86_mmx_psrli_w:
1100 case Intrinsic::x86_mmx_psrli_d:
1101 case Intrinsic::x86_mmx_psrli_q:
1102 case Intrinsic::x86_mmx_psrai_w:
1103 case Intrinsic::x86_mmx_psrai_d:
1104 // These take an i32 as their second parameter.
1105 Operands[1] = CI->getArgOperand(1);
1106 break;
1109 ConstructNewCallInst(NewFn, CI, Operands, 2);
1110 break;
1112 case Intrinsic::x86_mmx_maskmovq: {
1113 Value *Operands[3];
1115 // Cast the operands to the X86 MMX type.
1116 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1117 NewFn->getFunctionType()->getParamType(0),
1118 "upgraded.", CI);
1119 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1120 NewFn->getFunctionType()->getParamType(1),
1121 "upgraded.", CI);
1122 Operands[2] = CI->getArgOperand(2);
1124 ConstructNewCallInst(NewFn, CI, Operands, 3, false);
1125 break;
1127 case Intrinsic::x86_mmx_pmovmskb: {
1128 Value *Operands[1];
1130 // Cast the operand to the X86 MMX type.
1131 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1132 NewFn->getFunctionType()->getParamType(0),
1133 "upgraded.", CI);
1135 ConstructNewCallInst(NewFn, CI, Operands, 1);
1136 break;
1138 case Intrinsic::x86_mmx_movnt_dq: {
1139 Value *Operands[2];
1141 Operands[0] = CI->getArgOperand(0);
1143 // Cast the operand to the X86 MMX type.
1144 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1145 NewFn->getFunctionType()->getParamType(1),
1146 "upgraded.", CI);
1148 ConstructNewCallInst(NewFn, CI, Operands, 2, false);
1149 break;
1151 case Intrinsic::x86_mmx_palignr_b: {
1152 Value *Operands[3];
1154 // Cast the operands to the X86 MMX type.
1155 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1156 NewFn->getFunctionType()->getParamType(0),
1157 "upgraded.", CI);
1158 Operands[1] = new BitCastInst(CI->getArgOperand(1),
1159 NewFn->getFunctionType()->getParamType(1),
1160 "upgraded.", CI);
1161 Operands[2] = CI->getArgOperand(2);
1163 ConstructNewCallInst(NewFn, CI, Operands, 3);
1164 break;
1166 case Intrinsic::x86_mmx_pextr_w: {
1167 Value *Operands[2];
1169 // Cast the operands to the X86 MMX type.
1170 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1171 NewFn->getFunctionType()->getParamType(0),
1172 "upgraded.", CI);
1173 Operands[1] = CI->getArgOperand(1);
1175 ConstructNewCallInst(NewFn, CI, Operands, 2);
1176 break;
1178 case Intrinsic::x86_mmx_pinsr_w: {
1179 Value *Operands[3];
1181 // Cast the operands to the X86 MMX type.
1182 Operands[0] = new BitCastInst(CI->getArgOperand(0),
1183 NewFn->getFunctionType()->getParamType(0),
1184 "upgraded.", CI);
1185 Operands[1] = CI->getArgOperand(1);
1186 Operands[2] = CI->getArgOperand(2);
1188 ConstructNewCallInst(NewFn, CI, Operands, 3);
1189 break;
1191 case Intrinsic::x86_sse_pshuf_w: {
1192 IRBuilder<> Builder(C);
1193 Builder.SetInsertPoint(CI->getParent(), CI);
1195 // Cast the operand to the X86 MMX type.
1196 Value *Operands[2];
1197 Operands[0] =
1198 Builder.CreateBitCast(CI->getArgOperand(0),
1199 NewFn->getFunctionType()->getParamType(0),
1200 "upgraded.");
1201 Operands[1] =
1202 Builder.CreateTrunc(CI->getArgOperand(1),
1203 Type::getInt8Ty(C),
1204 "upgraded.");
1206 ConstructNewCallInst(NewFn, CI, Operands, 2);
1207 break;
1210 case Intrinsic::ctlz:
1211 case Intrinsic::ctpop:
1212 case Intrinsic::cttz: {
1213 // Build a small vector of the original arguments.
1214 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1216 // Construct a new CallInst
1217 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1218 "upgraded."+CI->getName(), CI);
1219 NewCI->setTailCall(CI->isTailCall());
1220 NewCI->setCallingConv(CI->getCallingConv());
1222 // Handle any uses of the old CallInst.
1223 if (!CI->use_empty()) {
1224 // Check for sign extend parameter attributes on the return values.
1225 bool SrcSExt = NewFn->getAttributes().paramHasAttr(0, Attribute::SExt);
1226 bool DestSExt = F->getAttributes().paramHasAttr(0, Attribute::SExt);
1228 // Construct an appropriate cast from the new return type to the old.
1229 CastInst *RetCast = CastInst::Create(
1230 CastInst::getCastOpcode(NewCI, SrcSExt,
1231 F->getReturnType(),
1232 DestSExt),
1233 NewCI, F->getReturnType(),
1234 NewCI->getName(), CI);
1235 NewCI->moveBefore(RetCast);
1237 // Replace all uses of the old call with the new cast which has the
1238 // correct type.
1239 CI->replaceAllUsesWith(RetCast);
1242 // Clean up the old call now that it has been completely upgraded.
1243 CI->eraseFromParent();
1245 break;
1246 case Intrinsic::eh_selector:
1247 case Intrinsic::eh_typeid_for: {
1248 // Only the return type changed.
1249 SmallVector<Value*, 8> Operands(CS.arg_begin(), CS.arg_end());
1250 CallInst *NewCI = CallInst::Create(NewFn, Operands.begin(), Operands.end(),
1251 "upgraded." + CI->getName(), CI);
1252 NewCI->setTailCall(CI->isTailCall());
1253 NewCI->setCallingConv(CI->getCallingConv());
1255 // Handle any uses of the old CallInst.
1256 if (!CI->use_empty()) {
1257 // Construct an appropriate cast from the new return type to the old.
1258 CastInst *RetCast =
1259 CastInst::Create(CastInst::getCastOpcode(NewCI, true,
1260 F->getReturnType(), true),
1261 NewCI, F->getReturnType(), NewCI->getName(), CI);
1262 CI->replaceAllUsesWith(RetCast);
1264 CI->eraseFromParent();
1266 break;
1267 case Intrinsic::memcpy:
1268 case Intrinsic::memmove:
1269 case Intrinsic::memset: {
1270 // Add isVolatile
1271 const llvm::Type *I1Ty = llvm::Type::getInt1Ty(CI->getContext());
1272 Value *Operands[5] = { CI->getArgOperand(0), CI->getArgOperand(1),
1273 CI->getArgOperand(2), CI->getArgOperand(3),
1274 llvm::ConstantInt::get(I1Ty, 0) };
1275 CallInst *NewCI = CallInst::Create(NewFn, Operands, Operands+5,
1276 CI->getName(), CI);
1277 NewCI->setTailCall(CI->isTailCall());
1278 NewCI->setCallingConv(CI->getCallingConv());
1279 // Handle any uses of the old CallInst.
1280 if (!CI->use_empty())
1281 // Replace all uses of the old call with the new cast which has the
1282 // correct type.
1283 CI->replaceAllUsesWith(NewCI);
1285 // Clean up the old call now that it has been completely upgraded.
1286 CI->eraseFromParent();
1287 break;
1292 // This tests each Function to determine if it needs upgrading. When we find
1293 // one we are interested in, we then upgrade all calls to reflect the new
1294 // function.
1295 void llvm::UpgradeCallsToIntrinsic(Function* F) {
1296 assert(F && "Illegal attempt to upgrade a non-existent intrinsic.");
1298 // Upgrade the function and check if it is a totally new function.
1299 Function* NewFn;
1300 if (UpgradeIntrinsicFunction(F, NewFn)) {
1301 if (NewFn != F) {
1302 // Replace all uses of the old function with the new one if necessary.
// NOTE: UI is advanced (*UI++) before UpgradeIntrinsicCall can erase the
// call instruction, so the use-iterator is never invalidated mid-loop.
1303 for (Value::use_iterator UI = F->use_begin(), UE = F->use_end();
1304 UI != UE; ) {
1305 if (CallInst* CI = dyn_cast<CallInst>(*UI++))
1306 UpgradeIntrinsicCall(CI, NewFn);
1308 // Remove old function, no longer used, from the module.
1309 F->eraseFromParent();
1314 /// This function strips all debug info intrinsics, except for llvm.dbg.declare.
1315 /// If an llvm.dbg.declare intrinsic is invalid, then this function simply
1316 /// strips that use.
1317 void llvm::CheckDebugInfoIntrinsics(Module *M) {
// For each of the long-dead debug intrinsics (func.start, stoppoint,
// region.start, region.end): erase every call to it, then erase the
// now-unused declaration itself.
1320 if (Function *FuncStart = M->getFunction("llvm.dbg.func.start")) {
1321 while (!FuncStart->use_empty()) {
1322 CallInst *CI = cast<CallInst>(FuncStart->use_back());
1323 CI->eraseFromParent();
1325 FuncStart->eraseFromParent();
1328 if (Function *StopPoint = M->getFunction("llvm.dbg.stoppoint")) {
1329 while (!StopPoint->use_empty()) {
1330 CallInst *CI = cast<CallInst>(StopPoint->use_back());
1331 CI->eraseFromParent();
1333 StopPoint->eraseFromParent();
1336 if (Function *RegionStart = M->getFunction("llvm.dbg.region.start")) {
1337 while (!RegionStart->use_empty()) {
1338 CallInst *CI = cast<CallInst>(RegionStart->use_back());
1339 CI->eraseFromParent();
1341 RegionStart->eraseFromParent();
1344 if (Function *RegionEnd = M->getFunction("llvm.dbg.region.end")) {
1345 while (!RegionEnd->use_empty()) {
1346 CallInst *CI = cast<CallInst>(RegionEnd->use_back());
1347 CI->eraseFromParent();
1349 RegionEnd->eraseFromParent();
// llvm.dbg.declare is kept, but only when well-formed: both argument
// operands must be MDNodes. Only the most recently added use is inspected;
// if that one is malformed, EVERY llvm.dbg.declare call is erased along
// with the declaration.
1352 if (Function *Declare = M->getFunction("llvm.dbg.declare")) {
1353 if (!Declare->use_empty()) {
1354 DbgDeclareInst *DDI = cast<DbgDeclareInst>(Declare->use_back());
1355 if (!isa<MDNode>(DDI->getArgOperand(0)) ||
1356 !isa<MDNode>(DDI->getArgOperand(1))) {
1357 while (!Declare->use_empty()) {
1358 CallInst *CI = cast<CallInst>(Declare->use_back());
1359 CI->eraseFromParent();
1361 Declare->eraseFromParent();