llvm.org GIT mirror llvm / 04fe990
Improve BasicAA CS-CS queries BasicAA contains knowledge of certain intrinsics, such as memcpy and memset, and uses that information to form more-accurate answers to CallSite vs. Loc ModRef queries. Unfortunately, it did not use this information when answering CallSite vs. CallSite queries. Generically, when an intrinsic takes one or more pointers and the intrinsic is marked only to read/write from its arguments, the offset/size is unknown. As a result, the generic code that answers CallSite vs. CallSite (and CallSite vs. Loc) queries in AA uses UnknownSize when forming Locs from an intrinsic's arguments. While BasicAA's CallSite vs. Loc override could use more-accurate size information for some intrinsics, it did not do the same for CallSite vs. CallSite queries. This change refactors the intrinsic-specific logic in BasicAA into a generic AA query function: getArgLocation, which is overridden by BasicAA to supply the intrinsic-specific knowledge, and used by AA's generic implementation. This allows the intrinsic-specific knowledge to be used by both CallSite vs. Loc and CallSite vs. CallSite queries, and simplifies the BasicAA implementation. Currently, only one function, Mac's memset_pattern16, is handled by BasicAA (all the rest are intrinsics). As a side-effect of this refactoring, BasicAA's getModRefBehavior override now also returns OnlyAccessesArgumentPointees for this function (which is an improvement). git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@212572 91177308-0d34-0410-b5e6-96231b3b80d8 Hal Finkel 5 years ago
5 changed file(s) with 371 addition(s) and 126 deletion(s). Raw diff Collapse all Expand all
272272 /// classified into one of the behaviors above.
273273 UnknownModRefBehavior = Anywhere | ModRef
274274 };
275
276 /// Get the location associated with a pointer argument of a callsite.
277 /// The mask bits are set to indicate the allowed aliasing ModRef kinds.
278 /// Note that these mask bits do not necessarily account for the overall
279 /// behavior of the function, but rather only provide additional
280 /// per-argument information.
281 virtual Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
282 ModRefResult &Mask);
275283
276284 /// getModRefBehavior - Return the behavior when calling the given call site.
277285 virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
5959 return AA->pointsToConstantMemory(Loc, OrLocal);
6060 }
6161
62 AliasAnalysis::Location
63 AliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
64 AliasAnalysis::ModRefResult &Mask) {
65 assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
66 return AA->getArgLocation(CS, ArgIdx, Mask);
67 }
68
6269 void AliasAnalysis::deleteValue(Value *V) {
6370 assert(AA && "AA didn't call InitializeAliasAnalysis in its run method!");
6471 AA->deleteValue(V);
9097
9198 if (onlyAccessesArgPointees(MRB)) {
9299 bool doesAlias = false;
100 ModRefResult AllArgsMask = NoModRef;
93101 if (doesAccessArgPointees(MRB)) {
94 MDNode *CSTag = CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
95102 for (ImmutableCallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
96103 AI != AE; ++AI) {
97104 const Value *Arg = *AI;
98105 if (!Arg->getType()->isPointerTy())
99106 continue;
100 Location CSLoc(Arg, UnknownSize, CSTag);
107 ModRefResult ArgMask;
108 Location CSLoc =
109 getArgLocation(CS, (unsigned) std::distance(CS.arg_begin(), AI),
110 ArgMask);
101111 if (!isNoAlias(CSLoc, Loc)) {
102112 doesAlias = true;
103 break;
113 AllArgsMask = ModRefResult(AllArgsMask | ArgMask);
104114 }
105115 }
106116 }
107117 if (!doesAlias)
108118 return NoModRef;
119 Mask = ModRefResult(Mask & AllArgsMask);
109120 }
110121
111122 // If Loc is a constant memory location, the call definitely could not
149160 if (onlyAccessesArgPointees(CS2B)) {
150161 AliasAnalysis::ModRefResult R = NoModRef;
151162 if (doesAccessArgPointees(CS2B)) {
152 MDNode *CS2Tag = CS2.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
153163 for (ImmutableCallSite::arg_iterator
154164 I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
155165 const Value *Arg = *I;
156166 if (!Arg->getType()->isPointerTy())
157167 continue;
158 Location CS2Loc(Arg, UnknownSize, CS2Tag);
159 R = ModRefResult((R | getModRefInfo(CS1, CS2Loc)) & Mask);
168 ModRefResult ArgMask;
169 Location CS2Loc =
170 getArgLocation(CS2, (unsigned) std::distance(CS2.arg_begin(), I),
171 ArgMask);
172 // ArgMask indicates what CS2 might do to CS2Loc, and the dependence of
173 // CS1 on that location is the inverse.
174 if (ArgMask == Mod)
175 ArgMask = ModRef;
176 else if (ArgMask == Ref)
177 ArgMask = Mod;
178
179 R = ModRefResult((R | (getModRefInfo(CS1, CS2Loc) & ArgMask)) & Mask);
160180 if (R == Mask)
161181 break;
162182 }
169189 if (onlyAccessesArgPointees(CS1B)) {
170190 AliasAnalysis::ModRefResult R = NoModRef;
171191 if (doesAccessArgPointees(CS1B)) {
172 MDNode *CS1Tag = CS1.getInstruction()->getMetadata(LLVMContext::MD_tbaa);
173192 for (ImmutableCallSite::arg_iterator
174193 I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
175194 const Value *Arg = *I;
176195 if (!Arg->getType()->isPointerTy())
177196 continue;
178 Location CS1Loc(Arg, UnknownSize, CS1Tag);
179 if (getModRefInfo(CS2, CS1Loc) != NoModRef) {
197 ModRefResult ArgMask;
198 Location CS1Loc =
199 getArgLocation(CS1, (unsigned) std::distance(CS1.arg_begin(), I),
200 ArgMask);
201 if ((getModRefInfo(CS2, CS1Loc) & ArgMask) != NoModRef) {
180202 R = Mask;
181203 break;
182204 }
489489 /// global) or not.
490490 bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override;
491491
492 /// Get the location associated with a pointer argument of a callsite.
493 Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
494 ModRefResult &Mask) override;
495
492496 /// getModRefBehavior - Return the behavior when calling the given
493497 /// call site.
494498 ModRefBehavior getModRefBehavior(ImmutableCallSite CS) override;
652656 return Worklist.empty();
653657 }
654658
659 static bool isMemsetPattern16(const Function *MS,
660 const TargetLibraryInfo &TLI) {
661 if (TLI.has(LibFunc::memset_pattern16) &&
662 MS->getName() == "memset_pattern16") {
663 FunctionType *MemsetType = MS->getFunctionType();
664 if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
665 isa(MemsetType->getParamType(0)) &&
666 isa(MemsetType->getParamType(1)) &&
667 isa(MemsetType->getParamType(2)))
668 return true;
669 }
670
671 return false;
672 }
673
655674 /// getModRefBehavior - Return the behavior when calling the given call site.
656675 AliasAnalysis::ModRefBehavior
657676 BasicAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
691710 if (F->onlyReadsMemory())
692711 Min = OnlyReadsMemory;
693712
713 const TargetLibraryInfo &TLI = getAnalysis();
714 if (isMemsetPattern16(F, TLI))
715 Min = OnlyAccessesArgumentPointees;
716
694717 // Otherwise be conservative.
695718 return ModRefBehavior(AliasAnalysis::getModRefBehavior(F) & Min);
719 }
720
721 AliasAnalysis::Location
722 BasicAliasAnalysis::getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
723 ModRefResult &Mask) {
724 Location Loc = AliasAnalysis::getArgLocation(CS, ArgIdx, Mask);
725 const TargetLibraryInfo &TLI = getAnalysis();
726 const IntrinsicInst *II = dyn_cast(CS.getInstruction());
727 if (II != nullptr)
728 switch (II->getIntrinsicID()) {
729 default: break;
730 case Intrinsic::memset:
731 case Intrinsic::memcpy:
732 case Intrinsic::memmove: {
733 assert((ArgIdx == 0 || ArgIdx == 1) &&
734 "Invalid argument index for memory intrinsic");
735 if (ConstantInt *LenCI = dyn_cast(II->getArgOperand(2)))
736 Loc.Size = LenCI->getZExtValue();
737 assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
738 "Memory intrinsic location pointer not argument?");
739 Mask = ArgIdx ? Ref : Mod;
740 break;
741 }
742 case Intrinsic::lifetime_start:
743 case Intrinsic::lifetime_end:
744 case Intrinsic::invariant_start: {
745 assert(ArgIdx == 1 && "Invalid argument index");
746 assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
747 "Intrinsic location pointer not argument?");
748 Loc.Size = cast(II->getArgOperand(0))->getZExtValue();
749 break;
750 }
751 case Intrinsic::invariant_end: {
752 assert(ArgIdx == 2 && "Invalid argument index");
753 assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
754 "Intrinsic location pointer not argument?");
755 Loc.Size = cast(II->getArgOperand(1))->getZExtValue();
756 break;
757 }
758 case Intrinsic::arm_neon_vld1: {
759 assert(ArgIdx == 0 && "Invalid argument index");
760 assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
761 "Intrinsic location pointer not argument?");
762 // LLVM's vld1 and vst1 intrinsics currently only support a single
763 // vector register.
764 if (DL)
765 Loc.Size = DL->getTypeStoreSize(II->getType());
766 break;
767 }
768 case Intrinsic::arm_neon_vst1: {
769 assert(ArgIdx == 0 && "Invalid argument index");
770 assert(Loc.Ptr == II->getArgOperand(ArgIdx) &&
771 "Intrinsic location pointer not argument?");
772 if (DL)
773 Loc.Size = DL->getTypeStoreSize(II->getArgOperand(1)->getType());
774 break;
775 }
776 }
777
778 // We can bound the aliasing properties of memset_pattern16 just as we can
779 // for memcpy/memset. This is particularly important because the
780 // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
781 // whenever possible.
782 else if (CS.getCalledFunction() &&
783 isMemsetPattern16(CS.getCalledFunction(), TLI)) {
784 assert((ArgIdx == 0 || ArgIdx == 1) &&
785 "Invalid argument index for memset_pattern16");
786 if (ArgIdx == 1)
787 Loc.Size = 16;
788 else if (const ConstantInt *LenCI =
789 dyn_cast(CS.getArgument(2)))
790 Loc.Size = LenCI->getZExtValue();
791 assert(Loc.Ptr == CS.getArgument(ArgIdx) &&
792 "memset_pattern16 location pointer not argument?");
793 Mask = ArgIdx ? Ref : Mod;
794 }
795 // FIXME: Handle memset_pattern4 and memset_pattern8 also.
796
797 return Loc;
696798 }
697799
698800 /// getModRefInfo - Check to see if the specified callsite can clobber the
747849 return NoModRef;
748850 }
749851
750 const TargetLibraryInfo &TLI = getAnalysis();
751 ModRefResult Min = ModRef;
752
753 // Finally, handle specific knowledge of intrinsics.
754 const IntrinsicInst *II = dyn_cast(CS.getInstruction());
755 if (II != nullptr)
756 switch (II->getIntrinsicID()) {
757 default: break;
758 case Intrinsic::memcpy:
759 case Intrinsic::memmove: {
760 uint64_t Len = UnknownSize;
761 if (ConstantInt *LenCI = dyn_cast(II->getArgOperand(2)))
762 Len = LenCI->getZExtValue();
763 Value *Dest = II->getArgOperand(0);
764 Value *Src = II->getArgOperand(1);
765 // If it can't overlap the source dest, then it doesn't modref the loc.
766 if (isNoAlias(Location(Dest, Len), Loc)) {
767 if (isNoAlias(Location(Src, Len), Loc))
768 return NoModRef;
769 // If it can't overlap the dest, then worst case it reads the loc.
770 Min = Ref;
771 } else if (isNoAlias(Location(Src, Len), Loc)) {
772 // If it can't overlap the source, then worst case it mutates the loc.
773 Min = Mod;
774 }
775 break;
776 }
777 case Intrinsic::memset:
778 // Since memset is 'accesses arguments' only, the AliasAnalysis base class
779 // will handle it for the variable length case.
780 if (ConstantInt *LenCI = dyn_cast(II->getArgOperand(2))) {
781 uint64_t Len = LenCI->getZExtValue();
782 Value *Dest = II->getArgOperand(0);
783 if (isNoAlias(Location(Dest, Len), Loc))
784 return NoModRef;
785 }
786 // We know that memset doesn't load anything.
787 Min = Mod;
788 break;
789 case Intrinsic::lifetime_start:
790 case Intrinsic::lifetime_end:
791 case Intrinsic::invariant_start: {
792 uint64_t PtrSize =
793 cast(II->getArgOperand(0))->getZExtValue();
794 if (isNoAlias(Location(II->getArgOperand(1),
795 PtrSize,
796 II->getMetadata(LLVMContext::MD_tbaa)),
797 Loc))
798 return NoModRef;
799 break;
800 }
801 case Intrinsic::invariant_end: {
802 uint64_t PtrSize =
803 cast(II->getArgOperand(1))->getZExtValue();
804 if (isNoAlias(Location(II->getArgOperand(2),
805 PtrSize,
806 II->getMetadata(LLVMContext::MD_tbaa)),
807 Loc))
808 return NoModRef;
809 break;
810 }
811 case Intrinsic::arm_neon_vld1: {
812 // LLVM's vld1 and vst1 intrinsics currently only support a single
813 // vector register.
814 uint64_t Size =
815 DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
816 if (isNoAlias(Location(II->getArgOperand(0), Size,
817 II->getMetadata(LLVMContext::MD_tbaa)),
818 Loc))
819 return NoModRef;
820 break;
821 }
822 case Intrinsic::arm_neon_vst1: {
823 uint64_t Size =
824 DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
825 if (isNoAlias(Location(II->getArgOperand(0), Size,
826 II->getMetadata(LLVMContext::MD_tbaa)),
827 Loc))
828 return NoModRef;
829 break;
830 }
831 }
832
833 // We can bound the aliasing properties of memset_pattern16 just as we can
834 // for memcpy/memset. This is particularly important because the
835 // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
836 // whenever possible.
837 else if (TLI.has(LibFunc::memset_pattern16) &&
838 CS.getCalledFunction() &&
839 CS.getCalledFunction()->getName() == "memset_pattern16") {
840 const Function *MS = CS.getCalledFunction();
841 FunctionType *MemsetType = MS->getFunctionType();
842 if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
843 isa(MemsetType->getParamType(0)) &&
844 isa(MemsetType->getParamType(1)) &&
845 isa(MemsetType->getParamType(2))) {
846 uint64_t Len = UnknownSize;
847 if (const ConstantInt *LenCI = dyn_cast(CS.getArgument(2)))
848 Len = LenCI->getZExtValue();
849 const Value *Dest = CS.getArgument(0);
850 const Value *Src = CS.getArgument(1);
851 // If it can't overlap the source dest, then it doesn't modref the loc.
852 if (isNoAlias(Location(Dest, Len), Loc)) {
853 // Always reads 16 bytes of the source.
854 if (isNoAlias(Location(Src, 16), Loc))
855 return NoModRef;
856 // If it can't overlap the dest, then worst case it reads the loc.
857 Min = Ref;
858 // Always reads 16 bytes of the source.
859 } else if (isNoAlias(Location(Src, 16), Loc)) {
860 // If it can't overlap the source, then worst case it mutates the loc.
861 Min = Mod;
862 }
863 }
864 }
865
866852 // The AliasAnalysis base class has some smarts, lets use them.
867 return ModRefResult(AliasAnalysis::getModRefInfo(CS, Loc) & Min);
853 return AliasAnalysis::getModRefInfo(CS, Loc);
868854 }
869855
870856 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
1414 #include "llvm/Analysis/Passes.h"
1515 #include "llvm/Analysis/AliasAnalysis.h"
1616 #include "llvm/IR/DataLayout.h"
17 #include "llvm/IR/LLVMContext.h"
1718 #include "llvm/Pass.h"
1819 using namespace llvm;
1920
5253 bool pointsToConstantMemory(const Location &Loc, bool OrLocal) override {
5354 return false;
5455 }
56 Location getArgLocation(ImmutableCallSite CS, unsigned ArgIdx,
57 ModRefResult &Mask) override {
58 Mask = ModRef;
59 return Location(CS.getArgument(ArgIdx), UnknownSize,
60 CS.getInstruction()->getMetadata(LLVMContext::MD_tbaa));
61 }
62
5563 ModRefResult getModRefInfo(ImmutableCallSite CS,
5664 const Location &Loc) override {
5765 return ModRef;
0 ; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
1 target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"
2 target triple = "arm-apple-ios"
; NOTE(review): this test checks that BasicAA's per-argument size/ModRef
; knowledge for intrinsics is applied to CallSite-vs-Loc AND
; CallSite-vs-CallSite queries (via getArgLocation).
3
4 declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*, i32) nounwind readonly
5 declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>, i32) nounwind
6
7 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
8 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
9
; vld1/vst1 access exactly one 16-byte vector register, so the accesses at
; %p and at %q (= %p + 16) are disjoint.
10 define <8 x i16> @test1(i8* %p, <8 x i16> %y) {
11 entry:
12 %q = getelementptr i8* %p, i64 16
13 %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
14 call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
15 %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) nounwind
16 %c = add <8 x i16> %a, %b
17 ret <8 x i16> %c
18
19 ; CHECK-LABEL: Function: test1:
20
21 ; CHECK: NoAlias: i8* %p, i8* %q
22 ; CHECK: Just Ref: Ptr: i8* %p <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
23 ; CHECK: NoModRef: Ptr: i8* %q <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
24 ; CHECK: NoModRef: Ptr: i8* %p <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
25 ; CHECK: Both ModRef: Ptr: i8* %q <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
26 ; CHECK: Just Ref: Ptr: i8* %p <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
27 ; CHECK: NoModRef: Ptr: i8* %q <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
28 ; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
29 ; CHECK: NoModRef: %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
30 ; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
31 ; CHECK: NoModRef: call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16) <-> %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
32 ; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> %a = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1
33 ; CHECK: NoModRef: %b = call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %p, i32 16) #1 <-> call void @llvm.arm.neon.vst1.v8i16(i8* %q, <8 x i16> %y, i32 16)
34 }
35
; %P and %Q may alias, so each memcpy may both mod and ref either pointer.
36 define void @test2(i8* %P, i8* %Q) nounwind ssp {
37 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
38 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
39 ret void
40
41 ; CHECK-LABEL: Function: test2:
42
43 ; CHECK: MayAlias: i8* %P, i8* %Q
44 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
45 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
46 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
47 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
48 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
49 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
50 }
51
; With noalias operands each memcpy just mods the destination and just refs
; the source; the 2b-2e variants probe overlap at various GEP offsets.
52 define void @test2a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
53 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
54 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
55 ret void
56
57 ; CHECK-LABEL: Function: test2a:
58
59 ; CHECK: NoAlias: i8* %P, i8* %Q
60 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
61 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
62 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
63 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
64 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
65 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
66 }
67
68 define void @test2b(i8* noalias %P, i8* noalias %Q) nounwind ssp {
69 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
70 %R = getelementptr i8* %P, i64 12
71 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
72 ret void
73
74 ; CHECK-LABEL: Function: test2b:
75
76 ; CHECK: NoAlias: i8* %P, i8* %Q
77 ; CHECK: NoAlias: i8* %P, i8* %R
78 ; CHECK: NoAlias: i8* %Q, i8* %R
79 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
80 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
81 ; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
82 ; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
83 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
84 ; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
85 ; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
86 ; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
87 }
88
89 define void @test2c(i8* noalias %P, i8* noalias %Q) nounwind ssp {
90 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
91 %R = getelementptr i8* %P, i64 11
92 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
93 ret void
94
95 ; CHECK-LABEL: Function: test2c:
96
97 ; CHECK: NoAlias: i8* %P, i8* %Q
98 ; CHECK: NoAlias: i8* %P, i8* %R
99 ; CHECK: NoAlias: i8* %Q, i8* %R
100 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
101 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
102 ; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
103 ; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
104 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
105 ; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
106 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
107 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
108 }
109
110 define void @test2d(i8* noalias %P, i8* noalias %Q) nounwind ssp {
111 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
112 %R = getelementptr i8* %P, i64 -12
113 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
114 ret void
115
116 ; CHECK-LABEL: Function: test2d:
117
118 ; CHECK: NoAlias: i8* %P, i8* %Q
119 ; CHECK: NoAlias: i8* %P, i8* %R
120 ; CHECK: NoAlias: i8* %Q, i8* %R
121 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
122 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
123 ; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
124 ; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
125 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
126 ; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
127 ; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
128 ; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
129 }
130
131 define void @test2e(i8* noalias %P, i8* noalias %Q) nounwind ssp {
132 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
133 %R = getelementptr i8* %P, i64 -11
134 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
135 ret void
136
137 ; CHECK-LABEL: Function: test2e:
138
139 ; CHECK: NoAlias: i8* %P, i8* %Q
140 ; CHECK: NoAlias: i8* %P, i8* %R
141 ; CHECK: NoAlias: i8* %Q, i8* %R
142 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
143 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
144 ; CHECK: NoModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
145 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
146 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
147 ; CHECK: Just Mod: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
148 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false)
149 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %R, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
150 }
151
152 define void @test3(i8* %P, i8* %Q) nounwind ssp {
153 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
154 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
155 ret void
156
157 ; CHECK-LABEL: Function: test3:
158
159 ; CHECK: MayAlias: i8* %P, i8* %Q
160 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
161 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
162 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
163 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
164 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
165 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
166 }
167
168 define void @test3a(i8* noalias %P, i8* noalias %Q) nounwind ssp {
169 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
170 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
171 ret void
172
173 ; CHECK-LABEL: Function: test3a:
174
175 ; CHECK: NoAlias: i8* %P, i8* %Q
176 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
177 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
178 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
179 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
180 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
181 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 8, i32 1, i1 false)
182 }
183
; memset only writes; the later memcpy only refs the noalias source %Q.
184 define void @test4(i8* %P, i8* noalias %Q) nounwind ssp {
185 tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
186 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
187 ret void
188
189 ; CHECK-LABEL: Function: test4:
190
191 ; CHECK: NoAlias: i8* %P, i8* %Q
192 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
193 ; CHECK: NoModRef: Ptr: i8* %Q <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
194 ; CHECK: Just Mod: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
195 ; CHECK: Just Ref: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
196 ; CHECK: Just Mod: tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
197 ; CHECK: Just Mod: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memset.p0i8.i64(i8* %P, i8 42, i64 8, i32 1, i1 false)
198 }
199
200 define void @test5(i8* %P, i8* %Q, i8* %R) nounwind ssp {
201 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
202 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
203 ret void
204
205 ; CHECK-LABEL: Function: test5:
206
207 ; CHECK: MayAlias: i8* %P, i8* %Q
208 ; CHECK: MayAlias: i8* %P, i8* %R
209 ; CHECK: MayAlias: i8* %Q, i8* %R
210 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
211 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
212 ; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
213 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
214 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
215 ; CHECK: Both ModRef: Ptr: i8* %R <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
216 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false)
217 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %R, i64 12, i32 1, i1 false) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
218 }
219
220 attributes #0 = { nounwind }