Add a transform pass to make the executable semantics of poison explicit in the IR

Implements a transform pass which instruments IR so that poison semantics are made explicit. That is, it provides a (possibly partial) executable semantics for every instruction w.r.t. poison as specified in the LLVM LangRef. There are obvious parallels to the sanitizer tools, but this pass is focused purely on the semantics of LLVM IR, not any particular source language.

The target audience for this tool is developers working on LLVM or targeting it from a frontend. The idea is to be able to take arbitrary IR (with the assumption of known inputs), evaluate it concretely after having made poison semantics explicit, and detect cases where either a) the original code executes UB, or b) a transform pass introduces UB which didn't exist in the original program.

At the moment, this is mostly the framework and still needs to be fleshed out. By reusing existing code we have decent coverage, but there are a lot of cases not yet handled. What's here is good enough to handle interesting cases though; for instance, the UB triggered through integer induction variables with nsw/nuw flags in one of the recent LFTR bugs would be reported by the current code. (See the comment in PoisonChecking.cpp for the full explanation and context.)

Differential Revision: https://reviews.llvm.org/D64215

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365536 91177308-0d34-0410-b5e6-96231b3b80d8

Philip Reames
8 changed files with 609 additions and 1 deletion.
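As a rough sketch of the workflow described above (a hypothetical input; the exact instrumented output and the reporting function @__poison_checker_assert appear in the tests added below), the pass is run through the new pass manager and turns poison-producing flags into explicit dynamic checks:

; example.ll - instrument with:  opt -passes=poison-checking -S example.ll
define void @example(i8* %base, i32 %a) {
  ; 'add nsw' yields poison on signed overflow, and storing through an address
  ; derived from it is UB. The pass computes the overflow bit with
  ; @llvm.sadd.with.overflow.i32 and calls @__poison_checker_assert on its
  ; negation immediately before the store.
  %add = add nsw i32 %a, 1
  %p = getelementptr i8, i8* %base, i32 %add
  store i8 0, i8* %p
  ret void
}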
0 //===- PoisonChecking.h - ---------------------------------------*- C++ -*-===//
1 //
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7
8
9 #ifndef LLVM_TRANSFORMS_INSTRUMENTATION_POISON_CHECKING_H
10 #define LLVM_TRANSFORMS_INSTRUMENTATION_POISON_CHECKING_H
11
12 #include "llvm/IR/PassManager.h"
13
14 namespace llvm {
15
16 struct PoisonCheckingPass : public PassInfoMixin<PoisonCheckingPass> {
17 PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
18 PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
19 };
20
21 }
22
23
24 #endif // LLVM_TRANSFORMS_INSTRUMENTATION_POISON_CHECKING_H
137137 cast<GetElementPtrInst>(this)->setIsInBounds(false);
138138 break;
139139 }
140 }
140 // TODO: FastMathFlags!
141 }
142
141143
142144 bool Instruction::isExact() const {
143145 return cast<PossiblyExactOperator>(this)->isExact();
9999 #include "llvm/Transforms/Instrumentation/InstrProfiling.h"
100100 #include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
101101 #include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
102 #include "llvm/Transforms/Instrumentation/PoisonChecking.h"
102103 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
103104 #include "llvm/Transforms/Scalar/ADCE.h"
104105 #include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
8585 MODULE_PASS("verify", VerifierPass())
8686 MODULE_PASS("asan-module", ModuleAddressSanitizerPass(/*CompileKernel=*/false, false, true, false))
8787 MODULE_PASS("kasan-module", ModuleAddressSanitizerPass(/*CompileKernel=*/true, false, true, false))
88 MODULE_PASS("poison-checking", PoisonCheckingPass())
8889 #undef MODULE_PASS
8990
9091 #ifndef CGSCC_ANALYSIS
1111 InstrProfiling.cpp
1212 PGOInstrumentation.cpp
1313 PGOMemOPSizeOpt.cpp
14 PoisonChecking.cpp
1415 SanitizerCoverage.cpp
1516 ThreadSanitizer.cpp
1617 HWAddressSanitizer.cpp
0 //===- PoisonChecking.cpp - -----------------------------------------------===//
1 //
2 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
3 // See https://llvm.org/LICENSE.txt for license information.
4 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // Implements a transform pass which instruments IR such that poison semantics
9 // are made explicit. That is, it provides a (possibly partial) executable
10 // semantics for every instruction w.r.t. poison as specified in the LLVM
11 // LangRef. There are obvious parallels to the sanitizer tools, but this pass
12 // is focused purely on the semantics of LLVM IR, not any particular source
13 // language. If you're looking for something to see if your C/C++ contains
14 // UB, this is not it.
15 //
16 // The rewritten semantics of each instruction will include the following
17 // components (illustrated with a short example after the list):
18 //
19 // 1) The original instruction, unmodified.
20 // 2) A propagation rule which translates dynamic information about the poison
21 // state of each input to whether the dynamic output of the instruction
22 // produces poison.
23 // 3) A flag validation rule which validates any poison producing flags on the
24 // instruction itself (e.g. checks for overflow on nsw).
25 // 4) A check rule which traps (to a handler function) if this instruction must
26 // execute undefined behavior given the poison state of its inputs.
27 //
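// For example, when an "add nsw" feeds the address of a store (see the tests
// added with this patch for the exact output), the instrumented IR looks
// roughly like:
//
//   %t   = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 1)
//   %ov  = extractvalue { i32, i1 } %t, 1           ; (3) flag validation
//   %add = add nsw i32 %a, 1                        ; (1) original instruction
//   %p   = getelementptr i8, i8* %base, i32 %add    ; (2) %p is poison iff %add is
//   %ok  = xor i1 %ov, true
//   call void @__poison_checker_assert(i1 %ok)      ; (4) check before the UB-
//   store i8 0, i8* %p                              ;     triggering store
//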
28 // At the moment, the UB detection is done in a best effort manner; that is,
29 // the resulting code may produce a false negative result (not report UB when
30 // it actually exists according to the LangRef spec), but should never produce
31 // a false positive (report UB where it doesn't exist). The intention is to
32 // eventually support a "strict" mode which never dynamically reports a false
33 // negative at the cost of rejecting some valid inputs to translation.
34 //
35 // Use cases for this pass include:
36 // - Understanding (and testing!) the implications of the definition of poison
37 // from the LangRef.
38 // - Validating the output of an IR fuzzer to ensure that all programs produced
39 // are well defined on the specific input used.
40 // - Finding/confirming poison-specific miscompiles by checking that the poison
41 // status of an input/IR pair is the same before and after an optimization
42 // transform.
43 // - Checking that a bugpoint reduction does not introduce UB which didn't
44 // exist in the original program being reduced.
45 //
46 // The major sources of inaccuracy are currently:
47 // - Most validation rules are not yet implemented for instructions with
48 // poison-relevant flags. At the moment, only nsw/nuw on add/sub/mul are supported.
49 // - UB which is control dependent on a branch on poison is not yet
50 // reported. Currently, only data flow dependence is modeled.
51 // - Poison which is propagated through memory is not modeled. As such,
52 // storing poison to memory and then reloading it will cause a false negative
53 // as we consider the reloaded value to not be poisoned.
54 // - Poison propagation across function boundaries is not modeled. At the
55 // moment, all arguments and return values are assumed not to be poison.
56 // - Undef is not modeled. In particular, the optimizer's freedom to pick
57 // concrete values for undef bits so as to maximize potential for producing
58 // poison is not modeled.
59 //
60 //===----------------------------------------------------------------------===//
61
62 #include "llvm/Transforms/Instrumentation/PoisonChecking.h"
63 #include "llvm/ADT/DenseMap.h"
64 #include "llvm/ADT/Statistic.h"
65 #include "llvm/Analysis/MemoryBuiltins.h"
66 #include "llvm/Analysis/ValueTracking.h"
67 #include "llvm/IR/InstVisitor.h"
68 #include "llvm/IR/IntrinsicInst.h"
69 #include "llvm/IR/IRBuilder.h"
70 #include "llvm/IR/PatternMatch.h"
71 #include "llvm/Support/Debug.h"
72
73 using namespace llvm;
74
75 #define DEBUG_TYPE "poison-checking"
76
77 static cl::opt<bool>
78 LocalCheck("poison-checking-function-local",
79 cl::init(false),
80 cl::desc("Check that returns are non-poison (for testing)"));
81
82
83 static bool isConstantFalse(Value* V) {
84 assert(V->getType()->isIntegerTy(1));
85 if (auto *CI = dyn_cast<ConstantInt>(V))
86 return CI->isZero();
87 return false;
88 }
89
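// Build a chained 'or' of Ops, skipping operands known to be constant false.
// Returns an i1 false constant if none of the operands can be poison.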
90 static Value *buildOrChain(IRBuilder<> &B, ArrayRef<Value *> Ops) {
91 if (Ops.size() == 0)
92 return B.getFalse();
93 unsigned i = 0;
94 for (; i < Ops.size() && isConstantFalse(Ops[i]); i++) {}
95 if (i == Ops.size())
96 return B.getFalse();
97 Value *Accum = Ops[i++];
98 for (; i < Ops.size(); i++)
99 if (!isConstantFalse(Ops[i]))
100 Accum = B.CreateOr(Accum, Ops[i]);
101 return Accum;
102 }
103
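// Emit i1 values which are true exactly when a poison-generating flag on the
// binary operator is violated dynamically (e.g. nsw plus signed overflow),
// appending them to Checks. Only nsw/nuw on add/sub/mul are handled so far.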
104 static void generatePoisonChecksForBinOp(Instruction &I,
105 SmallVector<Value *, 2> &Checks) {
106 assert(isa<BinaryOperator>(I));
107
108 IRBuilder<> B(&I);
109 Value *LHS = I.getOperand(0);
110 Value *RHS = I.getOperand(1);
111 switch (I.getOpcode()) {
112 default:
113 return;
114 case Instruction::Add: {
115 if (I.hasNoSignedWrap()) {
116 auto *OverflowOp =
117 B.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow, LHS, RHS);
118 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
119 }
120 if (I.hasNoUnsignedWrap()) {
121 auto *OverflowOp =
122 B.CreateBinaryIntrinsic(Intrinsic::uadd_with_overflow, LHS, RHS);
123 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
124 }
125 break;
126 }
127 case Instruction::Sub: {
128 if (I.hasNoSignedWrap()) {
129 auto *OverflowOp =
130 B.CreateBinaryIntrinsic(Intrinsic::ssub_with_overflow, LHS, RHS);
131 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
132 }
133 if (I.hasNoUnsignedWrap()) {
134 auto *OverflowOp =
135 B.CreateBinaryIntrinsic(Intrinsic::usub_with_overflow, LHS, RHS);
136 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
137 }
138 break;
139 }
140 case Instruction::Mul: {
141 if (I.hasNoSignedWrap()) {
142 auto *OverflowOp =
143 B.CreateBinaryIntrinsic(Intrinsic::smul_with_overflow, LHS, RHS);
144 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
145 }
146 if (I.hasNoUnsignedWrap()) {
147 auto *OverflowOp =
148 B.CreateBinaryIntrinsic(Intrinsic::umul_with_overflow, LHS, RHS);
149 Checks.push_back(B.CreateExtractValue(OverflowOp, 1));
150 }
151 break;
152 }
153 };
154 }
155
156 static Value* generatePoisonChecks(Instruction &I) {
157 IRBuilder<> B(&I);
158 SmallVector<Value *, 2> Checks;
159 if (isa<BinaryOperator>(I))
160 generatePoisonChecksForBinOp(I, Checks);
161 return buildOrChain(B, Checks);
162 }
163
164 static Value *getPoisonFor(DenseMap<Value *, Value *> &ValToPoison, Value *V) {
165 auto Itr = ValToPoison.find(V);
166 if (Itr != ValToPoison.end())
167 return Itr->second;
168 if (isa<Constant>(V)) {
169 return ConstantInt::getFalse(V->getContext());
170 }
171 // Return false for unknown values - this implements a non-strict mode where
172 // unhandled IR constructs are simply considered to never produce poison. At
173 // some point in the future, we probably want a "strict mode" for testing if
174 // nothing else.
175 return ConstantInt::getFalse(V->getContext());
176 }
177
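// Emit a call to the runtime handler __poison_checker_assert (declared here,
// assumed to be provided externally), which is presumably expected to report a
// failure when Cond is dynamically false. Calls on constant-true conditions
// are elided.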
178 static void CreateAssert(IRBuilder<> &B, Value *Cond) {
179 assert(Cond->getType()->isIntegerTy(1));
180 if (auto *CI = dyn_cast<ConstantInt>(Cond))
181 if (CI->isAllOnesValue())
182 return;
183
184 Module *M = B.GetInsertBlock()->getModule();
185 M->getOrInsertFunction("__poison_checker_assert",
186 Type::getVoidTy(M->getContext()),
187 Type::getInt1Ty(M->getContext()));
188 Function *TrapFunc = M->getFunction("__poison_checker_assert");
189 B.CreateCall(TrapFunc, Cond);
190 }
191
192 static void CreateAssertNot(IRBuilder<> &B, Value *Cond) {
193 assert(Cond->getType()->isIntegerTy(1));
194 CreateAssert(B, B.CreateNot(Cond));
195 }
196
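// Instrument F in three steps: (1) create a placeholder i1 phi alongside every
// existing phi to carry its poison state, (2) walk every instruction, asserting
// that operands which must not be poison aren't, and computing an i1 which
// tracks whether the result is poison, and (3) fill in the incoming values of
// the placeholder phis from the now-computed poison values.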
197 static bool rewrite(Function &F) {
198 auto * const Int1Ty = Type::getInt1Ty(F.getContext());
199
200 DenseMap<Value *, Value *> ValToPoison;
201
202 for (BasicBlock &BB : F)
203 for (auto I = BB.begin(); isa<PHINode>(&*I); I++) {
204 auto *OldPHI = cast<PHINode>(&*I);
205 auto *NewPHI = PHINode::Create(Int1Ty,
206 OldPHI->getNumIncomingValues());
207 for (unsigned i = 0; i < OldPHI->getNumIncomingValues(); i++)
208 NewPHI->addIncoming(UndefValue::get(Int1Ty),
209 OldPHI->getIncomingBlock(i));
210 NewPHI->insertBefore(OldPHI);
211 ValToPoison[OldPHI] = NewPHI;
212 }
213
214 for (BasicBlock &BB : F)
215 for (Instruction &I : BB) {
216 if (isa<PHINode>(I)) continue;
217
218 IRBuilder<> B(cast<Instruction>(&I));
219 if (Value *Op = const_cast<Value *>(getGuaranteedNonFullPoisonOp(&I)))
220 CreateAssertNot(B, getPoisonFor(ValToPoison, Op));
221
222 if (LocalCheck)
223 if (auto *RI = dyn_cast<ReturnInst>(&I))
224 if (RI->getNumOperands() != 0) {
225 Value *Op = RI->getOperand(0);
226 CreateAssertNot(B, getPoisonFor(ValToPoison, Op));
227 }
228
229 SmallVector<Value *, 4> Checks;
230 if (propagatesFullPoison(&I))
231 for (Value *V : I.operands())
232 Checks.push_back(getPoisonFor(ValToPoison, V));
233
234 if (auto *Check = generatePoisonChecks(I))
235 Checks.push_back(Check);
236 ValToPoison[&I] = buildOrChain(B, Checks);
237 }
238
239 for (BasicBlock &BB : F)
240 for (auto I = BB.begin(); isa<PHINode>(&*I); I++) {
241 auto *OldPHI = cast<PHINode>(&*I);
242 if (!ValToPoison.count(OldPHI))
243 continue; // skip the newly inserted phis
244 auto *NewPHI = cast<PHINode>(ValToPoison[OldPHI]);
245 for (unsigned i = 0; i < OldPHI->getNumIncomingValues(); i++) {
246 auto *OldVal = OldPHI->getIncomingValue(i);
247 NewPHI->setIncomingValue(i, getPoisonFor(ValToPoison, OldVal));
248 }
249 }
250 return true;
251 }
252
253
254 PreservedAnalyses PoisonCheckingPass::run(Module &M,
255 ModuleAnalysisManager &AM) {
256 bool Changed = false;
257 for (auto &F : M)
258 Changed |= rewrite(F);
259
260 return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
261 }
262
263 PreservedAnalyses PoisonCheckingPass::run(Function &F,
264 FunctionAnalysisManager &AM) {
265 return rewrite(F) ? PreservedAnalyses::none() : PreservedAnalyses::all();
266 }
267
268
269 /* Major TODO Items:
270 - Control dependent poison UB
271 - Strict mode - (i.e. must analyze every operand)
272 - Poison through memory
273 - Function ABIs
274
275 Minor TODO items:
276 - Add propagation rules for and/or instructions
277 - Add hasPoisonFlags predicate to ValueTracking
278 - Add poison check rules for:
279 - exact flags, out of bounds operands
280 - inbounds (can't be strict due to unknown allocation sizes)
281 - fmf and fp casts
282 */
0 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
1 ; RUN: opt -passes=poison-checking -S -poison-checking-function-local < %s | FileCheck %s
2
3 ; This file contains tests to exercise the custom flag validation rules
4
5 define i32 @add_noflags(i32 %a, i32 %b) {
6 ; CHECK-LABEL: @add_noflags(
7 ; CHECK-NEXT: [[RES:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
8 ; CHECK-NEXT: ret i32 [[RES]]
9 ;
10 %res = add i32 %a, %b
11 ret i32 %res
12 }
13
14 define i32 @add_nsw(i32 %a, i32 %b) {
15 ; CHECK-LABEL: @add_nsw(
16 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
17 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
18 ; CHECK-NEXT: [[RES:%.*]] = add nsw i32 [[A]], [[B]]
19 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
20 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
21 ; CHECK-NEXT: ret i32 [[RES]]
22 ;
23 %res = add nsw i32 %a, %b
24 ret i32 %res
25 }
26
27 define i32 @add_nuw(i32 %a, i32 %b) {
28 ; CHECK-LABEL: @add_nuw(
29 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
30 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
31 ; CHECK-NEXT: [[RES:%.*]] = add nuw i32 [[A]], [[B]]
32 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
33 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
34 ; CHECK-NEXT: ret i32 [[RES]]
35 ;
36 %res = add nuw i32 %a, %b
37 ret i32 %res
38 }
39
40 define i32 @add_nsw_nuw(i32 %a, i32 %b) {
41 ; CHECK-LABEL: @add_nsw_nuw(
42 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
43 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
44 ; CHECK-NEXT: [[TMP3:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A]], i32 [[B]])
45 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
46 ; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP2]], [[TMP4]]
47 ; CHECK-NEXT: [[RES:%.*]] = add nuw nsw i32 [[A]], [[B]]
48 ; CHECK-NEXT: [[TMP6:%.*]] = xor i1 [[TMP5]], true
49 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP6]])
50 ; CHECK-NEXT: ret i32 [[RES]]
51 ;
52 %res = add nsw nuw i32 %a, %b
53 ret i32 %res
54 }
55
56 define i32 @sub_noflags(i32 %a, i32 %b) {
57 ; CHECK-LABEL: @sub_noflags(
58 ; CHECK-NEXT: [[RES:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
59 ; CHECK-NEXT: ret i32 [[RES]]
60 ;
61 %res = sub i32 %a, %b
62 ret i32 %res
63 }
64
65 define i32 @sub_nsw(i32 %a, i32 %b) {
66 ; CHECK-LABEL: @sub_nsw(
67 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
68 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
69 ; CHECK-NEXT: [[RES:%.*]] = sub nsw i32 [[A]], [[B]]
70 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
71 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
72 ; CHECK-NEXT: ret i32 [[RES]]
73 ;
74 %res = sub nsw i32 %a, %b
75 ret i32 %res
76 }
77
78 define i32 @sub_nuw(i32 %a, i32 %b) {
79 ; CHECK-LABEL: @sub_nuw(
80 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
81 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
82 ; CHECK-NEXT: [[RES:%.*]] = sub nuw i32 [[A]], [[B]]
83 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
84 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
85 ; CHECK-NEXT: ret i32 [[RES]]
86 ;
87 %res = sub nuw i32 %a, %b
88 ret i32 %res
89 }
90
91 define i32 @sub_nsw_nuw(i32 %a, i32 %b) {
92 ; CHECK-LABEL: @sub_nsw_nuw(
93 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
94 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
95 ; CHECK-NEXT: [[TMP3:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[A]], i32 [[B]])
96 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
97 ; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP2]], [[TMP4]]
98 ; CHECK-NEXT: [[RES:%.*]] = sub nuw nsw i32 [[A]], [[B]]
99 ; CHECK-NEXT: [[TMP6:%.*]] = xor i1 [[TMP5]], true
100 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP6]])
101 ; CHECK-NEXT: ret i32 [[RES]]
102 ;
103 %res = sub nsw nuw i32 %a, %b
104 ret i32 %res
105 }
106
107 define i32 @mul_noflags(i32 %a, i32 %b) {
108 ; CHECK-LABEL: @mul_noflags(
109 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[A:%.*]], [[B:%.*]]
110 ; CHECK-NEXT: ret i32 [[RES]]
111 ;
112 %res = mul i32 %a, %b
113 ret i32 %res
114 }
115
116 define i32 @mul_nsw(i32 %a, i32 %b) {
117 ; CHECK-LABEL: @mul_nsw(
118 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
119 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
120 ; CHECK-NEXT: [[RES:%.*]] = mul nsw i32 [[A]], [[B]]
121 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
122 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
123 ; CHECK-NEXT: ret i32 [[RES]]
124 ;
125 %res = mul nsw i32 %a, %b
126 ret i32 %res
127 }
128
129 define i32 @mul_nuw(i32 %a, i32 %b) {
130 ; CHECK-LABEL: @mul_nuw(
131 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
132 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
133 ; CHECK-NEXT: [[RES:%.*]] = mul nuw i32 [[A]], [[B]]
134 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
135 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
136 ; CHECK-NEXT: ret i32 [[RES]]
137 ;
138 %res = mul nuw i32 %a, %b
139 ret i32 %res
140 }
141
142 define i32 @mul_nsw_nuw(i32 %a, i32 %b) {
143 ; CHECK-LABEL: @mul_nsw_nuw(
144 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 [[A:%.*]], i32 [[B:%.*]])
145 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
146 ; CHECK-NEXT: [[TMP3:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[A]], i32 [[B]])
147 ; CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP3]], 1
148 ; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP2]], [[TMP4]]
149 ; CHECK-NEXT: [[RES:%.*]] = mul nuw nsw i32 [[A]], [[B]]
150 ; CHECK-NEXT: [[TMP6:%.*]] = xor i1 [[TMP5]], true
151 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP6]])
152 ; CHECK-NEXT: ret i32 [[RES]]
153 ;
154 %res = mul nsw nuw i32 %a, %b
155 ret i32 %res
156 }
157
0 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
1 ; RUN: opt -passes=poison-checking -S < %s | FileCheck %s
2
3 ; This file contains tests to exercise the UB triggering instructions with
4 ; a potential source of UB. The UB source is kept simple; we focus on the
5 ; UB triggering instructions here.
6
7 define void @store(i8* %base, i32 %a) {
8 ; CHECK-LABEL: @store(
9 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
10 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
11 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 1
12 ; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i32 [[ADD]]
13 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
14 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
15 ; CHECK-NEXT: store i8 0, i8* [[P]]
16 ; CHECK-NEXT: ret void
17 ;
18 %add = add nsw i32 %a, 1
19 %p = getelementptr i8, i8* %base, i32 %add
20 store i8 0, i8* %p
21 ret void
22 }
23
24 define void @load(i8* %base, i32 %a) {
25 ; CHECK-LABEL: @load(
26 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
27 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
28 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 1
29 ; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i32 [[ADD]]
30 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
31 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
32 ; CHECK-NEXT: [[TMP4:%.*]] = load volatile i8, i8* [[P]]
33 ; CHECK-NEXT: ret void
34 ;
35 %add = add nsw i32 %a, 1
36 %p = getelementptr i8, i8* %base, i32 %add
37 load volatile i8, i8* %p
38 ret void
39 }
40
41 define void @atomicrmw(i8* %base, i32 %a) {
42 ; CHECK-LABEL: @atomicrmw(
43 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
44 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
45 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 1
46 ; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i32 [[ADD]]
47 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
48 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
49 ; CHECK-NEXT: [[TMP4:%.*]] = atomicrmw add i8* [[P]], i8 1 seq_cst
50 ; CHECK-NEXT: ret void
51 ;
52 %add = add nsw i32 %a, 1
53 %p = getelementptr i8, i8* %base, i32 %add
54 atomicrmw add i8* %p, i8 1 seq_cst
55 ret void
56 }
57
58 define void @cmpxchg(i8* %base, i32 %a) {
59 ; CHECK-LABEL: @cmpxchg(
60 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
61 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
62 ; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[A]], 1
63 ; CHECK-NEXT: [[P:%.*]] = getelementptr i8, i8* [[BASE:%.*]], i32 [[ADD]]
64 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
65 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
66 ; CHECK-NEXT: [[TMP4:%.*]] = cmpxchg i8* [[P]], i8 1, i8 0 seq_cst seq_cst
67 ; CHECK-NEXT: ret void
68 ;
69 %add = add nsw i32 %a, 1
70 %p = getelementptr i8, i8* %base, i32 %add
71 cmpxchg i8* %p, i8 1, i8 0 seq_cst seq_cst
72 ret void
73 }
74
75 define i32 @udiv(i8* %base, i32 %a) {
76 ; CHECK-LABEL: @udiv(
77 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
78 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
79 ; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[A]], 1
80 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
81 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
82 ; CHECK-NEXT: [[RES:%.*]] = udiv i32 2048, [[ADD]]
83 ; CHECK-NEXT: ret i32 [[RES]]
84 ;
85 %add = add nuw i32 %a, 1
86 %res = udiv i32 2048, %add
87 ret i32 %res
88 }
89
90 define i32 @sdiv(i8* %base, i32 %a) {
91 ; CHECK-LABEL: @sdiv(
92 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
93 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
94 ; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[A]], 1
95 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
96 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
97 ; CHECK-NEXT: [[RES:%.*]] = sdiv i32 2048, [[ADD]]
98 ; CHECK-NEXT: ret i32 [[RES]]
99 ;
100 %add = add nuw i32 %a, 1
101 %res = sdiv i32 2048, %add
102 ret i32 %res
103 }
104
105 define i32 @urem(i8* %base, i32 %a) {
106 ; CHECK-LABEL: @urem(
107 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
108 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
109 ; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[A]], 1
110 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
111 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
112 ; CHECK-NEXT: [[RES:%.*]] = urem i32 2048, [[ADD]]
113 ; CHECK-NEXT: ret i32 [[RES]]
114 ;
115 %add = add nuw i32 %a, 1
116 %res = urem i32 2048, %add
117 ret i32 %res
118 }
119
120 define i32 @srem(i8* %base, i32 %a) {
121 ; CHECK-LABEL: @srem(
122 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 1)
123 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
124 ; CHECK-NEXT: [[ADD:%.*]] = add nuw i32 [[A]], 1
125 ; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
126 ; CHECK-NEXT: call void @__poison_checker_assert(i1 [[TMP3]])
127 ; CHECK-NEXT: [[RES:%.*]] = srem i32 2048, [[ADD]]
128 ; CHECK-NEXT: ret i32 [[RES]]
129 ;
130 %add = add nuw i32 %a, 1
131 %res = srem i32 2048, %add
132 ret i32 %res
133 }
134
135
136