llvm.org GIT mirror llvm / 8ef7b17
Add @llvm.assume, lowering, and some basic properties This is the first commit in a series that add an @llvm.assume intrinsic which can be used to provide the optimizer with a condition it may assume to be true (when the control flow would hit the intrinsic call). Some basic properties are added here: - llvm.assume(true) is dead. - llvm.assume(false) is unreachable (this directly corresponds to the documented behavior of MSVC's __assume(0)), as is llvm.assume(undef). The intrinsic is tagged as writing arbitrarily, in order to maintain control dependencies. BasicAA has been updated, however, to return NoModRef for any particular location-based query so that we don't unnecessarily block code motion. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213973 91177308-0d34-0410-b5e6-96231b3b80d8 Hal Finkel 5 years ago
12 changed file(s) with 174 addition(s) and 7 deletion(s). Raw diff Collapse all Expand all
94269426
94279427 This intrinsic is lowered to the ``val``.
94289428
9429 '``llvm.assume``' Intrinsic
9430 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
9431
9432 Syntax:
9433 """""""
9434
9435 ::
9436
9437 declare void @llvm.assume(i1 %cond)
9438
9439 Overview:
9440 """""""""
9441
9442 The ``llvm.assume`` intrinsic allows the optimizer to assume that the provided
9443 condition is true. This information can then be used in simplifying other parts
9444 of the code.
9445
9446 Arguments:
9447 """"""""""
9448
9449 The condition which the optimizer may assume is always true.
9450
9451 Semantics:
9452 """"""""""
9453
9454 The intrinsic allows the optimizer to assume that the provided condition is
9455 always true whenever the control flow reaches the intrinsic call. No code is
9456 generated for this intrinsic, and instructions that contribute only to the
9457 provided condition are not used for code generation. If the condition is
9458 violated during execution, the behavior is undefined.
9459
9460 Please note that optimizer might limit the transformations performed on values
9461 used by the ``llvm.assume`` intrinsic in order to preserve the instructions
9462 only used to form the intrinsic's input argument. This might prove undesirable
9463 if the extra information provided by the ``llvm.assume`` intrinsic does not cause
9464 sufficient overall improvement in code quality. For this reason,
9465 ``llvm.assume`` should not be used to document basic mathematical invariants
9466 that the optimizer can otherwise deduce or facts that are of little use to the
9467 optimizer.
9468
94299469 '``llvm.donothing``' Intrinsic
94309470 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
94319471
275275 def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>;
276276
277277 def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
278
279 // The assume intrinsic is marked as arbitrarily writing so that proper
280 // control dependencies will be maintained.
281 def int_assume : Intrinsic<[], [llvm_i1_ty], []>;
278282
279283 // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
280284 // guard to the correct place on the stack frame.
470470 const Location &Loc) override;
471471
472472 ModRefResult getModRefInfo(ImmutableCallSite CS1,
473 ImmutableCallSite CS2) override {
474 // The AliasAnalysis base class has some smarts, lets use them.
475 return AliasAnalysis::getModRefInfo(CS1, CS2);
476 }
473 ImmutableCallSite CS2) override;
477474
478475 /// pointsToConstantMemory - Chase pointers until we find a (constant
479476 /// global) or not.
787784 return Loc;
788785 }
789786
787 static bool isAssumeIntrinsic(ImmutableCallSite CS) {
788 const IntrinsicInst *II = dyn_cast(CS.getInstruction());
789 if (II && II->getIntrinsicID() == Intrinsic::assume)
790 return true;
791
792 return false;
793 }
794
790795 /// getModRefInfo - Check to see if the specified callsite can clobber the
791796 /// specified memory object. Since we only look at local properties of this
792797 /// function, we really can't say much about this query. We do, however, use
839844 return NoModRef;
840845 }
841846
847 // While the assume intrinsic is marked as arbitrarily writing so that
848 // proper control dependencies will be maintained, it never aliases any
849 // particular memory location.
850 if (isAssumeIntrinsic(CS))
851 return NoModRef;
852
842853 // The AliasAnalysis base class has some smarts, lets use them.
843854 return AliasAnalysis::getModRefInfo(CS, Loc);
855 }
856
857 AliasAnalysis::ModRefResult
858 BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
859 ImmutableCallSite CS2) {
860 // While the assume intrinsic is marked as arbitrarily writing so that
861 // proper control dependencies will be maintained, it never aliases any
862 // particular memory location.
863 if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
864 return NoModRef;
865
866 // The AliasAnalysis base class has some smarts, lets use them.
867 return AliasAnalysis::getModRefInfo(CS1, CS2);
844868 }
845869
846870 /// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
384384 // FIXME: This is wrong for libc intrinsics.
385385 return TCC_Basic;
386386
387 case Intrinsic::assume:
387388 case Intrinsic::dbg_declare:
388389 case Intrinsic::dbg_value:
389390 case Intrinsic::invariant_start:
571571 case Intrinsic::pow: ISD = ISD::FPOW; break;
572572 case Intrinsic::fma: ISD = ISD::FMA; break;
573573 case Intrinsic::fmuladd: ISD = ISD::FMA; break;
574 // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
574575 case Intrinsic::lifetime_start:
575576 case Intrinsic::lifetime_end:
576577 return 0;
458458 CI->replaceAllUsesWith(CI->getOperand(0));
459459 break;
460460
461 case Intrinsic::assume:
461462 case Intrinsic::var_annotation:
462 break; // Strip out annotate intrinsic
463
463 break; // Strip out these intrinsics
464
464465 case Intrinsic::memcpy: {
465466 Type *IntPtr = DL.getIntPtrType(Context);
466467 Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
53035303 // Drop the intrinsic, but forward the value
53045304 setValue(&I, getValue(I.getOperand(0)));
53055305 return nullptr;
5306 case Intrinsic::assume:
53065307 case Intrinsic::var_annotation:
5307 // Discard annotate attributes
5308 // Discard annotate attributes and assumptions
53085309 return nullptr;
53095310
53105311 case Intrinsic::init_trampoline: {
300300 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
301301 II->getIntrinsicID() == Intrinsic::lifetime_end)
302302 return isa(II->getArgOperand(1));
303
304 // Assumptions are dead if their condition is trivially true.
305 if (II->getIntrinsicID() == Intrinsic::assume) {
306 if (ConstantInt *Cond = dyn_cast(II->getArgOperand(0)))
307 return !Cond->isZero();
308
309 return false;
310 }
303311 }
304312
305313 if (isAllocLikeFn(I, TLI)) return true;
11821190 // instructions into LLVM unreachable insts. The instruction combining pass
11831191 // canonicalizes unreachable insts into stores to null or undef.
11841192 for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E;++BBI){
1193 // Assumptions that are known to be false are equivalent to unreachable.
1194 // Also, if the condition is undefined, then we make the choice most
1195 // beneficial to the optimizer, and choose that to also be unreachable.
1196 if (IntrinsicInst *II = dyn_cast(BBI))
1197 if (II->getIntrinsicID() == Intrinsic::assume) {
1198 bool MakeUnreachable = false;
1199 if (isa(II->getArgOperand(0)))
1200 MakeUnreachable = true;
1201 else if (ConstantInt *Cond =
1202 dyn_cast(II->getArgOperand(0)))
1203 MakeUnreachable = Cond->isZero();
1204
1205 if (MakeUnreachable) {
1206 // Don't insert a call to llvm.trap right before the unreachable.
1207 changeToUnreachable(BBI, false);
1208 Changed = true;
1209 break;
1210 }
1211 }
1212
11851213 if (CallInst *CI = dyn_cast(BBI)) {
11861214 if (CI->doesNotReturn()) {
11871215 // If we found a call to a no-return function, insert an unreachable
; Check that BasicAA returns NoModRef for llvm.assume against any memory
; location or call site, even though the intrinsic is modeled as writing
; arbitrarily to preserve control dependencies.
; RUN: opt < %s -basicaa -aa-eval -print-all-alias-modref-info -disable-output 2>&1 | FileCheck %s
target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-v64:32:64-v128:32:128-a0:0:32-n32"

declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) #0
declare void @llvm.assume(i1) #0

define void @test1(i8* %P, i8* %Q) nounwind ssp {
  tail call void @llvm.assume(i1 true)
  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
  ret void

; CHECK-LABEL: Function: test1:

; CHECK: MayAlias: i8* %P, i8* %Q
; CHECK: NoModRef: Ptr: i8* %P <-> tail call void @llvm.assume(i1 true)
; CHECK: NoModRef: Ptr: i8* %Q <-> tail call void @llvm.assume(i1 true)
; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
; CHECK: NoModRef: tail call void @llvm.assume(i1 true) <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
; CHECK: NoModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false) <-> tail call void @llvm.assume(i1 true)
}

attributes #0 = { nounwind }
; Check that llvm.assume survives codegen without crashing: no code should
; be generated for the intrinsic itself.
; RUN: llc < %s

define void @main() {
  call void @llvm.assume(i1 1)
  ret void
}

declare void @llvm.assume(i1) nounwind
; An assume of a trivially-true condition carries no information and is
; dead; instsimplify should delete it.
; RUN: opt -instsimplify -S < %s | FileCheck %s

define void @test1() {
  call void @llvm.assume(i1 1)
  ret void

; CHECK-LABEL: @test1
; CHECK-NOT: llvm.assume
; CHECK: ret void
}

declare void @llvm.assume(i1) nounwind
; An assume of false (matching MSVC's __assume(0)) or of undef marks the
; point unreachable; simplifycfg should convert both to unreachable.
; RUN: opt -simplifycfg -S < %s | FileCheck %s

define void @test1() {
  call void @llvm.assume(i1 0)
  ret void

; CHECK-LABEL: @test1
; CHECK-NOT: llvm.assume
; CHECK: unreachable
}

define void @test2() {
  call void @llvm.assume(i1 undef)
  ret void

; CHECK-LABEL: @test2
; CHECK-NOT: llvm.assume
; CHECK: unreachable
}

declare void @llvm.assume(i1) nounwind