llvm.org GIT mirror llvm / f464481
[tsan] make sure memset/memcpy/memmove are not inlined in tsan mode. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@178230 91177308-0d34-0410-b5e6-96231b3b80d8 — Kostya Serebryany, 6 years ago
2 changed files with 85 additions and 0 deletions.
2929 #include "llvm/IR/DataLayout.h"
3030 #include "llvm/IR/Function.h"
3131 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/IntrinsicInst.h"
3233 #include "llvm/IR/Intrinsics.h"
3334 #include "llvm/IR/LLVMContext.h"
3435 #include "llvm/IR/Metadata.h"
5556 static cl::opt ClInstrumentAtomics(
5657 "tsan-instrument-atomics", cl::init(true),
5758 cl::desc("Instrument atomics"), cl::Hidden);
59 static cl::opt ClInstrumentMemIntrinsics(
60 "tsan-instrument-memintrinsics", cl::init(true),
61 cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
5862
5963 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
6064 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
8589 void initializeCallbacks(Module &M);
8690 bool instrumentLoadOrStore(Instruction *I);
8791 bool instrumentAtomic(Instruction *I);
92 bool instrumentMemIntrinsic(Instruction *I);
8893 void chooseInstructionsToInstrument(SmallVectorImpl &Local,
8994 SmallVectorImpl &All);
9095 bool addrPointsToConstantData(Value *Addr);
9196 int getMemoryAccessFuncIndex(Value *Addr);
9297
9398 DataLayout *TD;
99 Type *IntptrTy;
94100 SmallString<64> BlacklistFile;
95101 OwningPtr BL;
96102 IntegerType *OrdTy;
109115 Function *TsanAtomicSignalFence;
110116 Function *TsanVptrUpdate;
111117 Function *TsanVptrLoad;
118 Function *MemmoveFn, *MemcpyFn, *MemsetFn;
112119 };
113120 } // namespace
114121
203210 "__tsan_atomic_thread_fence", IRB.getVoidTy(), OrdTy, NULL));
204211 TsanAtomicSignalFence = checkInterfaceFunction(M.getOrInsertFunction(
205212 "__tsan_atomic_signal_fence", IRB.getVoidTy(), OrdTy, NULL));
213
214 MemmoveFn = checkInterfaceFunction(M.getOrInsertFunction(
215 "memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
216 IRB.getInt8PtrTy(), IntptrTy, NULL));
217 MemcpyFn = checkInterfaceFunction(M.getOrInsertFunction(
218 "memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
219 IntptrTy, NULL));
220 MemsetFn = checkInterfaceFunction(M.getOrInsertFunction(
221 "memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
222 IntptrTy, NULL));
206223 }
207224
208225 bool ThreadSanitizer::doInitialization(Module &M) {
213230
214231 // Always insert a call to __tsan_init into the module's CTORs.
215232 IRBuilder<> IRB(M.getContext());
233 IntptrTy = IRB.getIntPtrTy(TD);
216234 Value *TsanInit = M.getOrInsertFunction("__tsan_init",
217235 IRB.getVoidTy(), NULL);
218236 appendToGlobalCtors(M, cast(TsanInit), 0);
312330 SmallVector AllLoadsAndStores;
313331 SmallVector LocalLoadsAndStores;
314332 SmallVector AtomicAccesses;
333 SmallVector MemIntrinCalls;
315334 bool Res = false;
316335 bool HasCalls = false;
317336
328347 else if (isa(BI))
329348 RetVec.push_back(BI);
330349 else if (isa(BI) || isa(BI)) {
350 if (isa(BI))
351 MemIntrinCalls.push_back(BI);
331352 HasCalls = true;
332353 chooseInstructionsToInstrument(LocalLoadsAndStores, AllLoadsAndStores);
333354 }
349370 if (ClInstrumentAtomics)
350371 for (size_t i = 0, n = AtomicAccesses.size(); i < n; ++i) {
351372 Res |= instrumentAtomic(AtomicAccesses[i]);
373 }
374
375 if (ClInstrumentMemIntrinsics)
376 for (size_t i = 0, n = MemIntrinCalls.size(); i < n; ++i) {
377 Res |= instrumentMemIntrinsic(MemIntrinCalls[i]);
352378 }
353379
354380 // Instrument function entry/exit points if there were instrumented accesses.
430456 case SequentiallyConsistent: v = 5; break;
431457 }
432458 return IRB->getInt32(v);
459 }
460
461 // If a memset intrinsic gets inlined by the code gen, we will miss races on it.
462 // So, we either need to ensure the intrinsic is not inlined, or instrument it.
463 // We do not instrument memset/memmove/memcpy intrinsics (too complicated),
464 // instead we simply replace them with regular function calls, which are then
465 // intercepted by the run-time.
466 // Since tsan is running after everyone else, the calls should not be
467 // replaced back with intrinsics. If that becomes wrong at some point,
468 // we will need to call e.g. __tsan_memset to avoid the intrinsics.
469 bool ThreadSanitizer::instrumentMemIntrinsic(Instruction *I) {
470 IRBuilder<> IRB(I);
471 if (MemSetInst *M = dyn_cast(I)) {
472 IRB.CreateCall3(MemsetFn,
473 IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
474 IRB.CreateIntCast(M->getArgOperand(1), IRB.getInt32Ty(), false),
475 IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
476 I->eraseFromParent();
477 } else if (MemTransferInst *M = dyn_cast(I)) {
478 IRB.CreateCall3(isa(M) ? MemcpyFn : MemmoveFn,
479 IRB.CreatePointerCast(M->getArgOperand(0), IRB.getInt8PtrTy()),
480 IRB.CreatePointerCast(M->getArgOperand(1), IRB.getInt8PtrTy()),
481 IRB.CreateIntCast(M->getArgOperand(2), IntptrTy, false));
482 I->eraseFromParent();
483 }
484 return false;
433485 }
434486
435487 // Both llvm and ThreadSanitizer atomic operations are based on C++11/C1x
1919 ; CHECK: ret i32
2020
2121
declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)


; Check that tsan converts mem intrinsics back to function calls.

; llvm.memcpy must be replaced by a call to plain memcpy so the tsan
; run-time interceptor sees it.
define void @MemCpyTest(i8* nocapture %x, i8* nocapture %y) {
entry:
    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
    ret void
; CHECK: define void @MemCpyTest
; CHECK: call i8* @memcpy
; CHECK: ret void
}
37
; llvm.memmove must be replaced by a call to plain memmove so the tsan
; run-time interceptor sees it.
define void @MemMoveTest(i8* nocapture %x, i8* nocapture %y) {
entry:
    tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
    ret void
; CHECK: define void @MemMoveTest
; CHECK: call i8* @memmove
; CHECK: ret void
}
46
; llvm.memset must be replaced by a call to plain memset so the tsan
; run-time interceptor sees it.
define void @MemSetTest(i8* nocapture %x) {
entry:
    tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
    ret void
; Was "; CHECK define ..." — without the colon FileCheck silently ignores
; the directive, so the check never ran.
; CHECK: define void @MemSetTest
; CHECK: call i8* @memset
; CHECK: ret void
}