llvm.org GIT mirror llvm / 132fdde
hwasan: Improve precision of checks using short granule tags. A short granule is a granule of size between 1 and `TG-1` bytes. The size of a short granule is stored at the location in shadow memory where the granule's tag is normally stored, while the granule's actual tag is stored in the last byte of the granule. This means that in order to verify that a pointer tag matches a memory tag, HWASAN must check for two possibilities: * the pointer tag is equal to the memory tag in shadow memory, or * the shadow memory tag is actually a short granule size, the value being loaded is in bounds of the granule and the pointer tag is equal to the last byte of the granule. Pointer tags between 1 to `TG-1` are possible and are as likely as any other tag. This means that these tags in memory have two interpretations: the full tag interpretation (where the pointer tag is between 1 and `TG-1` and the last byte of the granule is ordinary data) and the short tag interpretation (where the pointer tag is stored in the granule). When HWASAN detects an error near a memory tag between 1 and `TG-1`, it will show both the memory tag and the last byte of the granule. Currently, it is up to the user to disambiguate the two possibilities. Because this functionality obsoletes the right aligned heap feature of the HWASAN memory allocator (and because we can no longer easily test it), the feature is removed. Also update the documentation to cover both short granule tags and outlined checks. Differential Revision: https://reviews.llvm.org/D63908 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365551 91177308-0d34-0410-b5e6-96231b3b80d8 Peter Collingbourne 3 months ago
7 changed file(s) with 245 addition(s) and 31 deletion(s). Raw diff Collapse all Expand all
303303 .addReg(Reg)
304304 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
305305 *STI);
306 MCSymbol *HandlePartialSym = OutContext.createTempSymbol();
307 OutStreamer->EmitInstruction(
308 MCInstBuilder(AArch64::Bcc)
309 .addImm(AArch64CC::NE)
310 .addExpr(MCSymbolRefExpr::create(HandlePartialSym, OutContext)),
311 *STI);
312 MCSymbol *ReturnSym = OutContext.createTempSymbol();
313 OutStreamer->EmitLabel(ReturnSym);
314 OutStreamer->EmitInstruction(
315 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
316
317 OutStreamer->EmitLabel(HandlePartialSym);
318 OutStreamer->EmitInstruction(MCInstBuilder(AArch64::SUBSWri)
319 .addReg(AArch64::WZR)
320 .addReg(AArch64::W16)
321 .addImm(15)
322 .addImm(0),
323 *STI);
306324 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
307325 OutStreamer->EmitInstruction(
308326 MCInstBuilder(AArch64::Bcc)
309 .addImm(AArch64CC::NE)
327 .addImm(AArch64CC::HI)
310328 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
311329 *STI);
330
312331 OutStreamer->EmitInstruction(
313 MCInstBuilder(AArch64::RET).addReg(AArch64::LR), *STI);
332 MCInstBuilder(AArch64::ANDXri)
333 .addReg(AArch64::X17)
334 .addReg(Reg)
335 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
336 *STI);
337 size_t Size = 1 << (AccessInfo & 0xf);
338 if (Size != 1)
339 OutStreamer->EmitInstruction(MCInstBuilder(AArch64::ADDXri)
340 .addReg(AArch64::X17)
341 .addReg(AArch64::X17)
342 .addImm(Size - 1)
343 .addImm(0),
344 *STI);
345 OutStreamer->EmitInstruction(MCInstBuilder(AArch64::SUBSWrs)
346 .addReg(AArch64::WZR)
347 .addReg(AArch64::W16)
348 .addReg(AArch64::W17)
349 .addImm(0),
350 *STI);
351 OutStreamer->EmitInstruction(
352 MCInstBuilder(AArch64::Bcc)
353 .addImm(AArch64CC::LS)
354 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)),
355 *STI);
356
357 OutStreamer->EmitInstruction(
358 MCInstBuilder(AArch64::ORRXri)
359 .addReg(AArch64::X16)
360 .addReg(Reg)
361 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)),
362 *STI);
363 OutStreamer->EmitInstruction(MCInstBuilder(AArch64::LDRBBui)
364 .addReg(AArch64::W16)
365 .addReg(AArch64::X16)
366 .addImm(0),
367 *STI);
368 OutStreamer->EmitInstruction(
369 MCInstBuilder(AArch64::SUBSXrs)
370 .addReg(AArch64::XZR)
371 .addReg(AArch64::X16)
372 .addReg(Reg)
373 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)),
374 *STI);
375 OutStreamer->EmitInstruction(
376 MCInstBuilder(AArch64::Bcc)
377 .addImm(AArch64CC::EQ)
378 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)),
379 *STI);
314380
315381 OutStreamer->EmitLabel(HandleMismatchSym);
316
317382 OutStreamer->EmitInstruction(MCInstBuilder(AArch64::STPXpre)
318383 .addReg(AArch64::SP)
319384 .addReg(AArch64::X0)
197197 Value **MaybeMask);
198198
199199 bool isInterestingAlloca(const AllocaInst &AI);
200 bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
200 bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
201201 Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
202202 Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
203203 bool instrumentStack(
573573 }
574574
575575 Instruction *CheckTerm =
576 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
576 SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, false,
577577 MDBuilder(*C).createBranchWeights(1, 100000));
578578
579579 IRB.SetInsertPoint(CheckTerm);
580 Value *OutOfShortGranuleTagRange =
581 IRB.CreateICmpUGT(MemTag, ConstantInt::get(Int8Ty, 15));
582 Instruction *CheckFailTerm =
583 SplitBlockAndInsertIfThen(OutOfShortGranuleTagRange, CheckTerm, !Recover,
584 MDBuilder(*C).createBranchWeights(1, 100000));
585
586 IRB.SetInsertPoint(CheckTerm);
587 Value *PtrLowBits = IRB.CreateTrunc(IRB.CreateAnd(PtrLong, 15), Int8Ty);
588 PtrLowBits = IRB.CreateAdd(
589 PtrLowBits, ConstantInt::get(Int8Ty, (1 << AccessSizeIndex) - 1));
590 Value *PtrLowBitsOOB = IRB.CreateICmpUGE(PtrLowBits, MemTag);
591 SplitBlockAndInsertIfThen(PtrLowBitsOOB, CheckTerm, false,
592 MDBuilder(*C).createBranchWeights(1, 100000),
593 nullptr, nullptr, CheckFailTerm->getParent());
594
595 IRB.SetInsertPoint(CheckTerm);
596 Value *InlineTagAddr = IRB.CreateOr(AddrLong, 15);
597 InlineTagAddr = IRB.CreateIntToPtr(InlineTagAddr, Int8PtrTy);
598 Value *InlineTag = IRB.CreateLoad(Int8Ty, InlineTagAddr);
599 Value *InlineTagMismatch = IRB.CreateICmpNE(PtrTag, InlineTag);
600 SplitBlockAndInsertIfThen(InlineTagMismatch, CheckTerm, false,
601 MDBuilder(*C).createBranchWeights(1, 100000),
602 nullptr, nullptr, CheckFailTerm->getParent());
603
604 IRB.SetInsertPoint(CheckFailTerm);
580605 InlineAsm *Asm;
581606 switch (TargetTriple.getArch()) {
582607 case Triple::x86_64:
600625 report_fatal_error("unsupported architecture");
601626 }
602627 IRB.CreateCall(Asm, PtrLong);
628 if (Recover)
629 cast&lt;BranchInst&gt;(CheckFailTerm)->setSuccessor(0, CheckTerm->getParent());
603630 }
604631
605632 void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
676703 }
677704
678705 bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
679 Value *Tag) {
680 size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
681 ~(Mapping.getAllocaAlignment() - 1);
706 Value *Tag, size_t Size) {
707 size_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
682708
683709 Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
684710 if (ClInstrumentWithCalls) {
685711 IRB.CreateCall(HwasanTagMemoryFunc,
686712 {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
687 ConstantInt::get(IntptrTy, Size)});
713 ConstantInt::get(IntptrTy, AlignedSize)});
688714 } else {
689715 size_t ShadowSize = Size >> Mapping.Scale;
690716 Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
694720 // FIXME: the interceptor is not as fast as real memset. Consider lowering
695721 // llvm.memset right here into either a sequence of stores, or a call to
696722 // hwasan_tag_memory.
697 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
723 if (ShadowSize)
724 IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
725 if (Size != AlignedSize) {
726 IRB.CreateStore(
727 ConstantInt::get(Int8Ty, Size % Mapping.getAllocaAlignment()),
728 IRB.CreateConstGEP1_32(Int8Ty, ShadowPtr, ShadowSize));
729 IRB.CreateStore(JustTag, IRB.CreateConstGEP1_32(
730 Int8Ty, IRB.CreateBitCast(AI, Int8PtrTy),
731 AlignedSize - 1));
732 }
698733 }
699734 return true;
700735 }
963998 DDI->setArgOperand(2, MetadataAsValue::get(*C, NewExpr));
964999 }
9651000
966 tagAlloca(IRB, AI, Tag);
1001 size_t Size = getAllocaSizeInBytes(*AI);
1002 tagAlloca(IRB, AI, Tag, Size);
9671003
9681004 for (auto RI : RetVec) {
9691005 IRB.SetInsertPoint(RI);
9701006
9711007 // Re-tag alloca memory with the special UAR tag.
9721008 Value *Tag = getUARTag(IRB, StackTag);
973 tagAlloca(IRB, AI, Tag);
1009 tagAlloca(IRB, AI, Tag, alignTo(Size, Mapping.getAllocaAlignment()));
9741010 }
9751011 }
9761012
10111047 for (auto &Inst : BB) {
10121048 if (ClInstrumentStack)
10131049 if (AllocaInst *AI = dyn_cast&lt;AllocaInst&gt;(&Inst)) {
1014 // Realign all allocas. We don't want small uninteresting allocas to
1015 // hide in instrumented alloca's padding.
1016 if (AI->getAlignment() < Mapping.getAllocaAlignment())
1017 AI->setAlignment(Mapping.getAllocaAlignment());
1018 // Instrument some of them.
10191050 if (isInterestingAlloca(*AI))
10201051 AllocasToInstrument.push_back(AI);
10211052 continue;
10651096 ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
10661097 Changed |= instrumentStack(AllocasToInstrument, AllocaDeclareMap, RetVec,
10671098 StackTag);
1099 }
1100
1101 // Pad and align each of the allocas that we instrumented to stop small
1102 // uninteresting allocas from hiding in instrumented alloca's padding and so
1103 // that we have enough space to store real tags for short granules.
1104 DenseMap&lt;AllocaInst *, AllocaInst *&gt; AllocaToPaddedAllocaMap;
1105 for (AllocaInst *AI : AllocasToInstrument) {
1106 uint64_t Size = getAllocaSizeInBytes(*AI);
1107 uint64_t AlignedSize = alignTo(Size, Mapping.getAllocaAlignment());
1108 AI->setAlignment(std::max(AI->getAlignment(), 16u));
1109 if (Size != AlignedSize) {
1110 Type *TypeWithPadding = StructType::get(
1111 AI->getAllocatedType(), ArrayType::get(Int8Ty, AlignedSize - Size));
1112 auto *NewAI = new AllocaInst(
1113 TypeWithPadding, AI->getType()->getAddressSpace(), nullptr, "", AI);
1114 NewAI->takeName(AI);
1115 NewAI->setAlignment(AI->getAlignment());
1116 NewAI->setUsedWithInAlloca(AI->isUsedWithInAlloca());
1117 NewAI->setSwiftError(AI->isSwiftError());
1118 NewAI->copyMetadata(*AI);
1119 Value *Zero = ConstantInt::get(Int32Ty, 0);
1120 auto *GEP = GetElementPtrInst::Create(TypeWithPadding, NewAI,
1121 {Zero, Zero}, "", AI);
1122 AI->replaceAllUsesWith(GEP);
1123 AllocaToPaddedAllocaMap[AI] = NewAI;
1124 }
1125 }
1126
1127 if (!AllocaToPaddedAllocaMap.empty()) {
1128 for (auto &BB : F)
1129 for (auto &Inst : BB)
1130 if (auto *DVI = dyn_cast&lt;DbgVariableIntrinsic&gt;(&Inst))
1131 if (auto *AI =
1132 dyn_cast_or_null&lt;AllocaInst&gt;(DVI->getVariableLocation()))
1133 if (auto *NewAI = AllocaToPaddedAllocaMap.lookup(AI))
1134 DVI->setArgOperand(
1135 0, MetadataAsValue::get(*C, LocalAsMetadata::get(NewAI)));
1136 for (auto &P : AllocaToPaddedAllocaMap)
1137 P.first->eraseFromParent();
10681138 }
10691139
10701140 // If we split the entry block, move any allocas that were originally in the
3939 ; CHECK-NEXT: ldrb w16, [x9, x16]
4040 ; CHECK-NEXT: cmp x16, x0, lsr #56
4141 ; CHECK-NEXT: b.ne .Ltmp0
42 ; CHECK-NEXT: .Ltmp1:
4243 ; CHECK-NEXT: ret
4344 ; CHECK-NEXT: .Ltmp0:
45 ; CHECK-NEXT: cmp w16, #15
46 ; CHECK-NEXT: b.hi .Ltmp2
47 ; CHECK-NEXT: and x17, x0, #0xf
48 ; CHECK-NEXT: add x17, x17, #255
49 ; CHECK-NEXT: cmp w16, w17
50 ; CHECK-NEXT: b.ls .Ltmp2
51 ; CHECK-NEXT: orr x16, x0, #0xf
52 ; CHECK-NEXT: ldrb w16, [x16]
53 ; CHECK-NEXT: cmp x16, x0, lsr #56
54 ; CHECK-NEXT: b.eq .Ltmp1
55 ; CHECK-NEXT: .Ltmp2:
4456 ; CHECK-NEXT: stp x0, x1, [sp, #-256]!
4557 ; CHECK-NEXT: stp x29, x30, [sp, #232]
4658 ; CHECK-NEXT: mov x1, #456
5769 ; CHECK-NEXT: ubfx x16, x1, #4, #52
5870 ; CHECK-NEXT: ldrb w16, [x9, x16]
5971 ; CHECK-NEXT: cmp x16, x1, lsr #56
60 ; CHECK-NEXT: b.ne .Ltmp1
72 ; CHECK-NEXT: b.ne .Ltmp3
73 ; CHECK-NEXT: .Ltmp4:
6174 ; CHECK-NEXT: ret
62 ; CHECK-NEXT: .Ltmp1:
75 ; CHECK-NEXT: .Ltmp3:
76 ; CHECK-NEXT: cmp w16, #15
77 ; CHECK-NEXT: b.hi .Ltmp5
78 ; CHECK-NEXT: and x17, x1, #0xf
79 ; CHECK-NEXT: add x17, x17, #2047
80 ; CHECK-NEXT: cmp w16, w17
81 ; CHECK-NEXT: b.ls .Ltmp5
82 ; CHECK-NEXT: orr x16, x1, #0xf
83 ; CHECK-NEXT: ldrb w16, [x16]
84 ; CHECK-NEXT: cmp x16, x1, lsr #56
85 ; CHECK-NEXT: b.eq .Ltmp4
86 ; CHECK-NEXT: .Ltmp5:
6387 ; CHECK-NEXT: stp x0, x1, [sp, #-256]!
6488 ; CHECK-NEXT: stp x29, x30, [sp, #232]
6589 ; CHECK-NEXT: mov x0, x1
88
99 define void @test_alloca() sanitize_hwaddress {
1010 ; CHECK-LABEL: @test_alloca(
11 ; CHECK: %[[GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %x, i32 0, i32 0
1112 ; CHECK: %[[T1:[^ ]*]] = call i8 @__hwasan_generate_tag()
1213 ; CHECK: %[[A:[^ ]*]] = zext i8 %[[T1]] to i64
13 ; CHECK: %[[B:[^ ]*]] = ptrtoint i32* %x to i64
14 ; CHECK: %[[B:[^ ]*]] = ptrtoint i32* %[[GEP]] to i64
1415 ; CHECK: %[[C:[^ ]*]] = shl i64 %[[A]], 56
1516 ; CHECK: or i64 %[[B]], %[[C]]
1617
1515 ; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 20
1616 ; CHECK: %[[BASE_TAG:[^ ]*]] = xor i64 %[[A]], %[[B]]
1717
18 ; CHECK: %[[X:[^ ]*]] = alloca i32, align 16
18 ; CHECK: %[[X:[^ ]*]] = alloca { i32, [12 x i8] }, align 16
19 ; CHECK: %[[X_GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %[[X]], i32 0, i32 0
1920 ; CHECK: %[[X_TAG:[^ ]*]] = xor i64 %[[BASE_TAG]], 0
20 ; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X]] to i64
21 ; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
2122 ; CHECK: %[[C:[^ ]*]] = shl i64 %[[X_TAG]], 56
2223 ; CHECK: %[[D:[^ ]*]] = or i64 %[[X1]], %[[C]]
2324 ; CHECK: %[[X_HWASAN:[^ ]*]] = inttoptr i64 %[[D]] to i32*
2425
2526 ; CHECK: %[[X_TAG2:[^ ]*]] = trunc i64 %[[X_TAG]] to i8
26 ; CHECK: %[[E:[^ ]*]] = ptrtoint i32* %[[X]] to i64
27 ; CHECK: %[[E:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
2728 ; CHECK: %[[F:[^ ]*]] = lshr i64 %[[E]], 4
2829 ; DYNAMIC-SHADOW: %[[X_SHADOW:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F]]
2930 ; ZERO-BASED-SHADOW: %[[X_SHADOW:[^ ]*]] = inttoptr i64 %[[F]] to i8*
30 ; CHECK: call void @llvm.memset.p0i8.i64(i8* align 1 %[[X_SHADOW]], i8 %[[X_TAG2]], i64 1, i1 false)
31 ; CHECK: %[[X_SHADOW_GEP:[^ ]*]] = getelementptr i8, i8* %[[X_SHADOW]], i32 0
32 ; CHECK: store i8 4, i8* %[[X_SHADOW_GEP]]
33 ; CHECK: %[[X_I8:[^ ]*]] = bitcast i32* %[[X_GEP]] to i8*
34 ; CHECK: %[[X_I8_GEP:[^ ]*]] = getelementptr i8, i8* %[[X_I8]], i32 15
35 ; CHECK: store i8 %[[X_TAG2]], i8* %[[X_I8_GEP]]
3136 ; CHECK: call void @use32(i32* nonnull %[[X_HWASAN]])
3237
3338 ; UAR-TAGS: %[[BASE_TAG_COMPL:[^ ]*]] = xor i64 %[[BASE_TAG]], 255
3439 ; UAR-TAGS: %[[X_TAG_UAR:[^ ]*]] = trunc i64 %[[BASE_TAG_COMPL]] to i8
35 ; CHECK: %[[E2:[^ ]*]] = ptrtoint i32* %[[X]] to i64
40 ; CHECK: %[[E2:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
3641 ; CHECK: %[[F2:[^ ]*]] = lshr i64 %[[E2]], 4
3742 ; DYNAMIC-SHADOW: %[[X_SHADOW2:[^ ]*]] = getelementptr i8, i8* %.hwasan.shadow, i64 %[[F2]]
3843 ; ZERO-BASED-SHADOW: %[[X_SHADOW2:[^ ]*]] = inttoptr i64 %[[F2]] to i8*
2626 ; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
2727 ; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
2828 ; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
29 ; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
30
29 ; RECOVER: br i1 %[[F]], label %[[MISMATCH:[0-9]*]], label %[[CONT:[0-9]*]], !prof {{.*}}
30
31 ; RECOVER: [[MISMATCH]]:
32 ; RECOVER: %[[NOTSHORT:[^ ]*]] = icmp ugt i8 %[[MEMTAG]], 15
33 ; RECOVER: br i1 %[[NOTSHORT]], label %[[FAIL:[0-9]*]], label %[[SHORT:[0-9]*]], !prof {{.*}}
34
35 ; RECOVER: [[FAIL]]:
3136 ; RECOVER: call void asm sideeffect "brk #2336", "{x0}"(i64 %[[A]])
3237 ; RECOVER: br label
38
39 ; RECOVER: [[SHORT]]:
40 ; RECOVER: %[[LOWBITS:[^ ]*]] = and i64 %[[A]], 15
41 ; RECOVER: %[[LOWBITS_I8:[^ ]*]] = trunc i64 %[[LOWBITS]] to i8
42 ; RECOVER: %[[LAST:[^ ]*]] = add i8 %[[LOWBITS_I8]], 0
43 ; RECOVER: %[[OOB:[^ ]*]] = icmp uge i8 %[[LAST]], %[[MEMTAG]]
44 ; RECOVER: br i1 %[[OOB]], label %[[FAIL]], label %[[INBOUNDS:[0-9]*]], !prof {{.*}}
45
46 ; RECOVER: [[INBOUNDS]]:
47 ; RECOVER: %[[EOG_ADDR:[^ ]*]] = or i64 %[[C]], 15
48 ; RECOVER: %[[EOG_PTR:[^ ]*]] = inttoptr i64 %[[EOG_ADDR]] to i8*
49 ; RECOVER: %[[EOGTAG:[^ ]*]] = load i8, i8* %[[EOG_PTR]]
50 ; RECOVER: %[[EOG_MISMATCH:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[EOGTAG]]
51 ; RECOVER: br i1 %[[EOG_MISMATCH]], label %[[FAIL]], label %[[CONT1:[0-9]*]], !prof {{.*}}
52
53 ; RECOVER: [[CONT1]]:
54 ; RECOVER: br label %[[CONT]]
55
56 ; RECOVER: [[CONT]]:
3357
3458 ; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %a, i32 0)
3559 ; ABORT-ZERO-BASED-SHADOW: call void @llvm.hwasan.check.memaccess(i8* null, i8* %a, i32 0)
5377 ; RECOVER-ZERO-BASED-SHADOW: %[[E:[^ ]*]] = inttoptr i64 %[[D]] to i8*
5478 ; RECOVER: %[[MEMTAG:[^ ]*]] = load i8, i8* %[[E]]
5579 ; RECOVER: %[[F:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[MEMTAG]]
56 ; RECOVER: br i1 %[[F]], label {{.*}}, label {{.*}}, !prof {{.*}}
57
80 ; RECOVER: br i1 %[[F]], label %[[MISMATCH:[0-9]*]], label %[[CONT:[0-9]*]], !prof {{.*}}
81
82 ; RECOVER: [[MISMATCH]]:
83 ; RECOVER: %[[NOTSHORT:[^ ]*]] = icmp ugt i8 %[[MEMTAG]], 15
84 ; RECOVER: br i1 %[[NOTSHORT]], label %[[FAIL:[0-9]*]], label %[[SHORT:[0-9]*]], !prof {{.*}}
85
86 ; RECOVER: [[FAIL]]:
5887 ; RECOVER: call void asm sideeffect "brk #2337", "{x0}"(i64 %[[A]])
5988 ; RECOVER: br label
89
90 ; RECOVER: [[SHORT]]:
91 ; RECOVER: %[[LOWBITS:[^ ]*]] = and i64 %[[A]], 15
92 ; RECOVER: %[[LOWBITS_I8:[^ ]*]] = trunc i64 %[[LOWBITS]] to i8
93 ; RECOVER: %[[LAST:[^ ]*]] = add i8 %[[LOWBITS_I8]], 1
94 ; RECOVER: %[[OOB:[^ ]*]] = icmp uge i8 %[[LAST]], %[[MEMTAG]]
95 ; RECOVER: br i1 %[[OOB]], label %[[FAIL]], label %[[INBOUNDS:[0-9]*]], !prof {{.*}}
96
97 ; RECOVER: [[INBOUNDS]]:
98 ; RECOVER: %[[EOG_ADDR:[^ ]*]] = or i64 %[[C]], 15
99 ; RECOVER: %[[EOG_PTR:[^ ]*]] = inttoptr i64 %[[EOG_ADDR]] to i8*
100 ; RECOVER: %[[EOGTAG:[^ ]*]] = load i8, i8* %[[EOG_PTR]]
101 ; RECOVER: %[[EOG_MISMATCH:[^ ]*]] = icmp ne i8 %[[PTRTAG]], %[[EOGTAG]]
102 ; RECOVER: br i1 %[[EOG_MISMATCH]], label %[[FAIL]], label %[[CONT1:[0-9]*]], !prof {{.*}}
103
104 ; RECOVER: [[CONT1]]:
105 ; RECOVER: br label %[[CONT]]
106
107 ; RECOVER: [[CONT]]:
60108
61109 ; ABORT: %[[A:[^ ]*]] = bitcast i16* %a to i8*
62110 ; ABORT-DYNAMIC-SHADOW: call void @llvm.hwasan.check.memaccess(i8* %.hwasan.shadow, i8* %[[A]], i32 1)
1313 ; CHECK: %[[B:[^ ]*]] = lshr i64 %[[A]], 20
1414 ; CHECK: %[[BASE_TAG:[^ ]*]] = xor i64 %[[A]], %[[B]]
1515
16 ; CHECK: %[[X:[^ ]*]] = alloca i32, align 16
16 ; CHECK: %[[X:[^ ]*]] = alloca { i32, [12 x i8] }, align 16
17 ; CHECK: %[[X_GEP:[^ ]*]] = getelementptr { i32, [12 x i8] }, { i32, [12 x i8] }* %[[X]], i32 0, i32 0
1718 ; CHECK: %[[X_TAG:[^ ]*]] = xor i64 %[[BASE_TAG]], 0
18 ; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X]] to i64
19 ; CHECK: %[[X1:[^ ]*]] = ptrtoint i32* %[[X_GEP]] to i64
1920 ; CHECK: %[[C:[^ ]*]] = shl i64 %[[X_TAG]], 56
2021 ; CHECK: %[[D:[^ ]*]] = or i64 %[[C]], 72057594037927935
2122 ; CHECK: %[[E:[^ ]*]] = and i64 %[[X1]], %[[D]]