llvm.org GIT mirror llvm / 9586430
[msan] Instrument x86.*_cvt* intrinsics. Currently MSan checks that arguments of *cvt* intrinsics are fully initialized. That's too much to ask: some of them only operate on lower half, or even quarter, of the input register. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@192599 91177308-0d34-0410-b5e6-96231b3b80d8 Evgeniy Stepanov 7 years ago
2 changed file(s) with 215 addition(s) and 28 deletion(s). Raw diff Collapse all Expand all
468468 MemorySanitizer &MS;
469469 SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
470470 ValueMap<Value*, Value*> ShadowMap, OriginMap;
471 OwningPtr<VarArgHelper> VAHelper;
472
473 // The following flags disable parts of MSan instrumentation based on
474 // blacklist contents and command-line options.
471475 bool InsertChecks;
472476 bool LoadShadow;
473477 bool PoisonStack;
474478 bool PoisonUndef;
475479 bool CheckReturnValue;
476 OwningPtr<VarArgHelper> VAHelper;
477480
478481 struct ShadowOriginAndInsertPoint {
479 Instruction *Shadow;
480 Instruction *Origin;
482 Value *Shadow;
483 Value *Origin;
481484 Instruction *OrigIns;
482 ShadowOriginAndInsertPoint(Instruction *S, Instruction *O, Instruction *I)
485 ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
483486 : Shadow(S), Origin(O), OrigIns(I) { }
484487 ShadowOriginAndInsertPoint() : Shadow(0), Origin(0), OrigIns(0) { }
485488 };
520523 (void)NewSI;
521524
522525 if (ClCheckAccessAddress)
523 insertCheck(Addr, &I);
526 insertShadowCheck(Addr, &I);
524527
525528 if (I.isAtomic())
526529 I.setOrdering(addReleaseOrdering(I.getOrdering()));
533536 } else {
534537 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
535538
536 Constant *Cst = dyn_cast_or_null<Constant>(ConvertedShadow);
537539 // TODO(eugenis): handle non-zero constant shadow by inserting an
538540 // unconditional check (can not simply fail compilation as this could
539541 // be in the dead code).
540 if (Cst)
542 if (isa<Constant>(ConvertedShadow))
541543 continue;
542544
543545 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
555557
556558 void materializeChecks() {
557559 for (size_t i = 0, n = InstrumentationList.size(); i < n; i++) {
558 Instruction *Shadow = InstrumentationList[i].Shadow;
560 Value *Shadow = InstrumentationList[i].Shadow;
559561 Instruction *OrigIns = InstrumentationList[i].OrigIns;
560562 IRBuilder<> IRB(OrigIns);
561563 DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n");
562564 Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
563565 DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n");
566 // See the comment in materializeStores().
567 if (isa<Constant>(ConvertedShadow))
568 continue;
564569 Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
565570 getCleanShadow(ConvertedShadow), "_mscmp");
566571 Instruction *CheckTerm =
570575
571576 IRB.SetInsertPoint(CheckTerm);
572577 if (MS.TrackOrigins) {
573 Instruction *Origin = InstrumentationList[i].Origin;
578 Value *Origin = InstrumentationList[i].Origin;
574579 IRB.CreateStore(Origin ? (Value*)Origin : (Value*)IRB.getInt32(0),
575580 MS.OriginTLS);
576581 }
887892 /// \brief Remember the place where a shadow check should be inserted.
888893 ///
889894 /// This location will be later instrumented with a check that will print a
890 /// UMR warning in runtime if the value is not fully defined.
891 void insertCheck(Value *Val, Instruction *OrigIns) {
892 assert(Val);
895 /// UMR warning in runtime if the shadow value is not 0.
896 void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
897 assert(Shadow);
893898 if (!InsertChecks) return;
894 Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
895 if (!Shadow) return;
896899 #ifndef NDEBUG
897900 Type *ShadowTy = Shadow->getType();
898901 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
899902 "Can only insert checks for integer and vector shadow types");
900903 #endif
904 InstrumentationList.push_back(
905 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
906 }
907
908 /// \brief Remember the place where a shadow check should be inserted.
909 ///
910 /// This location will be later instrumented with a check that will print a
911 /// UMR warning in runtime if the value is not fully defined.
912 void insertShadowCheck(Value *Val, Instruction *OrigIns) {
913 assert(Val);
914 Instruction *Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
915 if (!Shadow) return;
901916 Instruction *Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
902 InstrumentationList.push_back(
903 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
917 insertShadowCheck(Shadow, Origin, OrigIns);
904918 }
905919
906920 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
957971 }
958972
959973 if (ClCheckAccessAddress)
960 insertCheck(I.getPointerOperand(), &I);
974 insertShadowCheck(I.getPointerOperand(), &I);
961975
962976 if (I.isAtomic())
963977 I.setOrdering(addAcquireOrdering(I.getOrdering()));
9891003 Value *ShadowPtr = getShadowPtr(Addr, I.getType(), IRB);
9901004
9911005 if (ClCheckAccessAddress)
992 insertCheck(Addr, &I);
1006 insertShadowCheck(Addr, &I);
9931007
9941008 // Only test the conditional argument of cmpxchg instruction.
9951009 // The other argument can potentially be uninitialized, but we can not
9961010 // detect this situation reliably without possible false positives.
9971011 if (isa<AtomicCmpXchgInst>(I))
998 insertCheck(I.getOperand(1), &I);
1012 insertShadowCheck(I.getOperand(1), &I);
9991013
10001014 IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
10011015
10141028
10151029 // Vector manipulation.
10161030 void visitExtractElementInst(ExtractElementInst &I) {
1017 insertCheck(I.getOperand(1), &I);
1031 insertShadowCheck(I.getOperand(1), &I);
10181032 IRBuilder<> IRB(&I);
10191033 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
10201034 "_msprop"));
10221036 }
10231037
10241038 void visitInsertElementInst(InsertElementInst &I) {
1025 insertCheck(I.getOperand(2), &I);
1039 insertShadowCheck(I.getOperand(2), &I);
10261040 IRBuilder<> IRB(&I);
10271041 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
10281042 I.getOperand(2), "_msprop"));
10301044 }
10311045
10321046 void visitShuffleVectorInst(ShuffleVectorInst &I) {
1033 insertCheck(I.getOperand(2), &I);
1047 insertShadowCheck(I.getOperand(2), &I);
10341048 IRBuilder<> IRB(&I);
10351049 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
10361050 I.getOperand(2), "_msprop"));
12651279 void handleDiv(Instruction &I) {
12661280 IRBuilder<> IRB(&I);
12671281 // Strict on the second argument.
1268 insertCheck(I.getOperand(1), &I);
1282 insertShadowCheck(I.getOperand(1), &I);
12691283 setShadow(&I, getShadow(&I, 0));
12701284 setOrigin(&I, getOrigin(&I, 0));
12711285 }
15481562 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
15491563
15501564 if (ClCheckAccessAddress)
1551 insertCheck(Addr, &I);
1565 insertShadowCheck(Addr, &I);
15521566
15531567 // FIXME: use ClStoreCleanOrigin
15541568 // FIXME: factor out common code from materializeStores
15751589 setShadow(&I, getCleanShadow(&I));
15761590 }
15771591
1578
15791592 if (ClCheckAccessAddress)
1580 insertCheck(Addr, &I);
1593 insertShadowCheck(Addr, &I);
15811594
15821595 if (MS.TrackOrigins) {
15831596 if (LoadShadow)
16741687 setOrigin(&I, getOrigin(Op));
16751688 }
16761689
1690 // \brief Instrument vector convert intrinsic.
1691 //
1692 // This function instruments intrinsics like cvtsi2ss:
1693 // %Out = int_xxx_cvtyyy(%ConvertOp)
1694 // or
1695 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
1696 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
1697 // number of \p Out elements, and (if it has 2 arguments) copies the rest of the
1698 // elements from \p CopyOp.
1699 // In most cases conversion involves floating-point value which may trigger a
1700 // hardware exception when not fully initialized. For this reason we require
1701 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
1702 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
1703 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
1704 // return a fully initialized value.
1705 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
1706 IRBuilder<> IRB(&I);
1707 Value *CopyOp, *ConvertOp;
1708
1709 switch (I.getNumArgOperands()) {
1710 case 2:
1711 CopyOp = I.getArgOperand(0);
1712 ConvertOp = I.getArgOperand(1);
1713 break;
1714 case 1:
1715 ConvertOp = I.getArgOperand(0);
1716 CopyOp = NULL;
1717 break;
1718 default:
1719 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
1720 }
1721
1722 // The first *NumUsedElements* elements of ConvertOp are converted to the
1723 // same number of output elements. The rest of the output is copied from
1724 // CopyOp, or (if not available) filled with zeroes.
1725 // Combine shadow for elements of ConvertOp that are used in this operation,
1726 // and insert a check.
1727 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
1728 // int->any conversion.
1729 Value *ConvertShadow = getShadow(ConvertOp);
1730 Value *AggShadow = 0;
1731 if (ConvertOp->getType()->isVectorTy()) {
1732 AggShadow = IRB.CreateExtractElement(
1733 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
1734 for (int i = 1; i < NumUsedElements; ++i) {
1735 Value *MoreShadow = IRB.CreateExtractElement(
1736 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
1737 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
1738 }
1739 } else {
1740 AggShadow = ConvertShadow;
1741 }
1742 assert(AggShadow->getType()->isIntegerTy());
1743 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
1744
1745 // Build result shadow by zero-filling parts of CopyOp shadow that come from
1746 // ConvertOp.
1747 if (CopyOp) {
1748 assert(CopyOp->getType() == I.getType());
1749 assert(CopyOp->getType()->isVectorTy());
1750 Value *ResultShadow = getShadow(CopyOp);
1751 Type *EltTy = ResultShadow->getType()->getVectorElementType();
1752 for (int i = 0; i < NumUsedElements; ++i) {
1753 ResultShadow = IRB.CreateInsertElement(
1754 ResultShadow, ConstantInt::getNullValue(EltTy),
1755 ConstantInt::get(IRB.getInt32Ty(), i));
1756 }
1757 setShadow(&I, ResultShadow);
1758 setOrigin(&I, getOrigin(CopyOp));
1759 } else {
1760 setShadow(&I, getCleanShadow(&I));
1761 }
1762 }
1763
16771764 void visitIntrinsicInst(IntrinsicInst &I) {
16781765 switch (I.getIntrinsicID()) {
16791766 case llvm::Intrinsic::bswap:
16801767 handleBswap(I);
1768 break;
1769 case llvm::Intrinsic::x86_avx512_cvtsd2usi64:
1770 case llvm::Intrinsic::x86_avx512_cvtsd2usi:
1771 case llvm::Intrinsic::x86_avx512_cvtss2usi64:
1772 case llvm::Intrinsic::x86_avx512_cvtss2usi:
1773 case llvm::Intrinsic::x86_avx512_cvttss2usi64:
1774 case llvm::Intrinsic::x86_avx512_cvttss2usi:
1775 case llvm::Intrinsic::x86_avx512_cvttsd2usi64:
1776 case llvm::Intrinsic::x86_avx512_cvttsd2usi:
1777 case llvm::Intrinsic::x86_avx512_cvtusi2sd:
1778 case llvm::Intrinsic::x86_avx512_cvtusi2ss:
1779 case llvm::Intrinsic::x86_avx512_cvtusi642sd:
1780 case llvm::Intrinsic::x86_avx512_cvtusi642ss:
1781 case llvm::Intrinsic::x86_sse2_cvtsd2si64:
1782 case llvm::Intrinsic::x86_sse2_cvtsd2si:
1783 case llvm::Intrinsic::x86_sse2_cvtsd2ss:
1784 case llvm::Intrinsic::x86_sse2_cvtsi2sd:
1785 case llvm::Intrinsic::x86_sse2_cvtsi642sd:
1786 case llvm::Intrinsic::x86_sse2_cvtss2sd:
1787 case llvm::Intrinsic::x86_sse2_cvttsd2si64:
1788 case llvm::Intrinsic::x86_sse2_cvttsd2si:
1789 case llvm::Intrinsic::x86_sse_cvtsi2ss:
1790 case llvm::Intrinsic::x86_sse_cvtsi642ss:
1791 case llvm::Intrinsic::x86_sse_cvtss2si64:
1792 case llvm::Intrinsic::x86_sse_cvtss2si:
1793 case llvm::Intrinsic::x86_sse_cvttss2si64:
1794 case llvm::Intrinsic::x86_sse_cvttss2si:
1795 handleVectorConvertIntrinsic(I, 1);
1796 break;
1797 case llvm::Intrinsic::x86_sse2_cvtdq2pd:
1798 case llvm::Intrinsic::x86_sse2_cvtps2pd:
1799 case llvm::Intrinsic::x86_sse_cvtps2pi:
1800 case llvm::Intrinsic::x86_sse_cvttps2pi:
1801 handleVectorConvertIntrinsic(I, 2);
16811802 break;
16821803 default:
16831804 if (!handleUnknownIntrinsic(I))
18251946 if (!RetVal) return;
18261947 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
18271948 if (CheckReturnValue) {
1828 insertCheck(RetVal, &I);
1949 insertShadowCheck(RetVal, &I);
18291950 Value *Shadow = getCleanShadow(RetVal);
18301951 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment);
18311952 } else {
19722093 dumpInst(I);
19732094 DEBUG(dbgs() << "DEFAULT: " << I << "\n");
19742095 for (size_t i = 0, n = I.getNumOperands(); i < n; i++)
1975 insertCheck(I.getOperand(i), &I);
2096 insertShadowCheck(I.getOperand(i), &I);
19762097 setShadow(&I, getCleanShadow(&I));
19772098 setOrigin(&I, getCleanOrigin());
19782099 }
0 ; RUN: opt < %s -msan -msan-check-access-address=0 -S | FileCheck %s
1
2 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
3 target triple = "x86_64-unknown-linux-gnu"
4
5 declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>) nounwind readnone
6 declare <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double>, i32) nounwind readnone
7 declare x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float>) nounwind readnone
8
9 ; Single argument vector conversion.
10
11 define i32 @test_cvtsd2si(<2 x double> %value) sanitize_memory {
12 entry:
13 %0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %value)
14 ret i32 %0
15 }
16
17 ; CHECK: @test_cvtsd2si
18 ; CHECK: [[S:%[_01-9a-z]+]] = extractelement <2 x i64> {{.*}}, i32 0
19 ; CHECK: icmp ne {{.*}}[[S]], 0
20 ; CHECK: br
21 ; CHECK: call void @__msan_warning_noreturn
22 ; CHECK: call i32 @llvm.x86.sse2.cvtsd2si
23 ; CHECK: store i32 0, {{.*}} @__msan_retval_tls
24 ; CHECK: ret i32
25
26 ; Two-argument vector conversion.
27
28 define <2 x double> @test_cvtsi2sd(i32 %a, double %b) sanitize_memory {
29 entry:
30 %vec = insertelement <2 x double> undef, double %b, i32 1
31 %0 = tail call <2 x double> @llvm.x86.sse2.cvtsi2sd(<2 x double> %vec, i32 %a)
32 ret <2 x double> %0
33 }
34
35 ; CHECK: @test_cvtsi2sd
36 ; CHECK: [[Sa:%[_01-9a-z]+]] = load i32* {{.*}} @__msan_param_tls
37 ; CHECK: [[Sout0:%[_01-9a-z]+]] = insertelement <2 x i64> <i64 -1, i64 -1>, i64 {{.*}}, i32 1
38 ; Clear low half of result shadow
39 ; CHECK: [[Sout:%[_01-9a-z]+]] = insertelement <2 x i64> {{.*}}[[Sout0]], i64 0, i32 0
40 ; Trap on %a shadow.
41 ; CHECK: icmp ne {{.*}}[[Sa]], 0
42 ; CHECK: br
43 ; CHECK: call void @__msan_warning_noreturn
44 ; CHECK: call <2 x double> @llvm.x86.sse2.cvtsi2sd
45 ; CHECK: store <2 x i64> {{.*}}[[Sout]], {{.*}} @__msan_retval_tls
46 ; CHECK: ret <2 x double>
47
48 ; x86_mmx packed vector conversion.
49
50 define x86_mmx @test_cvtps2pi(<4 x float> %value) sanitize_memory {
51 entry:
52 %0 = tail call x86_mmx @llvm.x86.sse.cvtps2pi(<4 x float> %value)
53 ret x86_mmx %0
54 }
55
56 ; CHECK: @test_cvtps2pi
57 ; CHECK: extractelement <4 x i32> {{.*}}, i32 0
58 ; CHECK: extractelement <4 x i32> {{.*}}, i32 1
59 ; CHECK: [[S:%[_01-9a-z]+]] = or i32
60 ; CHECK: icmp ne {{.*}}[[S]], 0
61 ; CHECK: br
62 ; CHECK: call void @__msan_warning_noreturn
63 ; CHECK: call x86_mmx @llvm.x86.sse.cvtps2pi
64 ; CHECK: store i64 0, {{.*}} @__msan_retval_tls
65 ; CHECK: ret x86_mmx