llvm.org GIT mirror llvm / 4653f51
Merging r351370: ------------------------------------------------------------------------ r351370 | mgrang | 2019-01-16 20:52:59 +0100 (Wed, 16 Jan 2019) | 14 lines [COFF, ARM64] Implement support for SEH extensions __try/__except/__finally Summary: This patch supports MS SEH extensions __try/__except/__finally. The intrinsics localescape and localrecover are responsible for communicating escaped static allocas from the try block to the handler. We need to preserve frame pointers for SEH. So we create a new function/property HasLocalEscape. Reviewers: rnk, compnerd, mstorsjo, TomTan, efriedma, ssijaric Reviewed By: rnk, efriedma Subscribers: smeenai, jrmuizel, alex, majnemer, ssijaric, ehsan, dmajor, kristina, javed.absar, kristof.beyls, chrib, llvm-commits Differential Revision: https://reviews.llvm.org/D53540 ------------------------------------------------------------------------ git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_80@351451 91177308-0d34-0410-b5e6-96231b3b80d8 Hans Wennborg 1 year, 10 months ago
10 changed file(s) with 191 addition(s) and 9 deletion(s). Raw diff Collapse all Expand all
328328 bool CallsUnwindInit = false;
329329 bool HasEHScopes = false;
330330 bool HasEHFunclets = false;
331 bool HasLocalEscape = false;
331332
332333 /// List of C++ TypeInfo used.
333334 std::vector<const GlobalValue *> TypeInfos;
810811 bool hasEHFunclets() const { return HasEHFunclets; }
811812 void setHasEHFunclets(bool V) { HasEHFunclets = V; }
812813
814 bool hasLocalEscape() const { return HasLocalEscape; }
815 void setHasLocalEscape(bool V) { HasLocalEscape = V; }
816
813817 /// Find or create an LandingPadInfo for the specified MachineBasicBlock.
814818 LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
815819
544544 OS.AddComment(Comment);
545545 };
546546
547 // Emit a label assignment with the SEH frame offset so we can use it for
548 // llvm.eh.recoverfp.
549 StringRef FLinkageName =
550 GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
551 MCSymbol *ParentFrameOffset =
552 Ctx.getOrCreateParentFrameOffsetSymbol(FLinkageName);
553 const MCExpr *MCOffset =
554 MCConstantExpr::create(FuncInfo.SEHSetFrameOffset, Ctx);
555 Asm->OutStreamer->EmitAssignment(ParentFrameOffset, MCOffset);
547 if (!isAArch64) {
548 // Emit a label assignment with the SEH frame offset so we can use it for
549 // llvm.eh.recoverfp.
550 StringRef FLinkageName =
551 GlobalValue::dropLLVMManglingEscape(MF->getFunction().getName());
552 MCSymbol *ParentFrameOffset =
553 Ctx.getOrCreateParentFrameOffsetSymbol(FLinkageName);
554 const MCExpr *MCOffset =
555 MCConstantExpr::create(FuncInfo.SEHSetFrameOffset, Ctx);
556 Asm->OutStreamer->EmitAssignment(ParentFrameOffset, MCOffset);
557 }
556558
557559 // Use the assembler to compute the number of table entries through label
558560 // difference and division.
936938 if (FI != INT_MAX) {
937939 const TargetFrameLowering *TFI = Asm->MF->getSubtarget().getFrameLowering();
938940 unsigned UnusedReg;
941 // FIXME: getFrameIndexReference needs to match the behavior of
942 // AArch64RegisterInfo::hasBasePointer in which one of the scenarios where
943 // SP is used is if frame size >= 256.
939944 Offset = TFI->getFrameIndexReference(*Asm->MF, FI, UnusedReg);
940945 }
941946
61806180 .addSym(FrameAllocSym)
61816181 .addFrameIndex(FI);
61826182 }
6183
6184 MF.setHasLocalEscape(true);
61836185
61846186 return nullptr;
61856187 }
693693 switch (MI->getOpcode()) {
694694 default:
695695 break;
696 case AArch64::MOVMCSym: {
697 unsigned DestReg = MI->getOperand(0).getReg();
698 const MachineOperand &MO_Sym = MI->getOperand(1);
699 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
700 MCOperand Hi_MCSym, Lo_MCSym;
701
702 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
703 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
704
705 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
706 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
707
708 MCInst MovZ;
709 MovZ.setOpcode(AArch64::MOVZXi);
710 MovZ.addOperand(MCOperand::createReg(DestReg));
711 MovZ.addOperand(Hi_MCSym);
712 MovZ.addOperand(MCOperand::createImm(16));
713 EmitToStreamer(*OutStreamer, MovZ);
714
715 MCInst MovK;
716 MovK.setOpcode(AArch64::MOVKXi);
717 MovK.addOperand(MCOperand::createReg(DestReg));
718 MovK.addOperand(MCOperand::createReg(DestReg));
719 MovK.addOperand(Lo_MCSym);
720 MovK.addOperand(MCOperand::createImm(0));
721 EmitToStreamer(*OutStreamer, MovK);
722 return;
723 }
696724 case AArch64::MOVIv2d_ns:
697725 // If the target has <rdar://problem/16473581>, lower this
698726 // instruction to movi.16b instead.
227227 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
228228 return true;
229229
230 // Win64 SEH requires frame pointer if funclets are present.
231 if (MF.hasLocalEscape())
232 return true;
233
230234 return false;
231235 }
232236
27422742 case Intrinsic::aarch64_neon_umin:
27432743 return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
27442744 Op.getOperand(1), Op.getOperand(2));
2745
2746 case Intrinsic::localaddress: {
2747 // Returns one of the stack, base, or frame pointer registers, depending on
2748 // which is used to reference local variables.
2749 MachineFunction &MF = DAG.getMachineFunction();
2750 const AArch64RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2751 unsigned Reg;
2752 if (RegInfo->hasBasePointer(MF))
2753 Reg = RegInfo->getBaseRegister();
2754 else // This function handles the SP or FP case.
2755 Reg = RegInfo->getFrameRegister(MF);
2756 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg,
2757 Op.getSimpleValueType());
2758 }
2759
2760 case Intrinsic::eh_recoverfp: {
2761 // FIXME: This needs to be implemented to correctly handle highly aligned
2762 // stack objects. For now we simply return the incoming FP. Refer D53541
2763 // for more details.
2764 SDValue FnOp = Op.getOperand(1);
2765 SDValue IncomingFPOp = Op.getOperand(2);
2766 GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
2767 auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
2768 if (!Fn)
2769 report_fatal_error(
2770 "llvm.eh.recoverfp must take a function as the first argument");
2771 return IncomingFPOp;
2772 }
27452773 }
27462774 }
27472775
132132 : Predicate<"false">, AssemblerPredicate<"!FeatureNoNegativeImmediates",
133133 "NegativeImmediates">;
134134
135 def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
136 SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
137 SDTCisInt<1>]>>;
138
135139
136140 //===----------------------------------------------------------------------===//
137141 // AArch64-specific DAG Nodes.
68006804 def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
68016805 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
68026806
6807 def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
6808 def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
6809
68036810 include "AArch64InstrAtomics.td"
68046811 include "AArch64SVEInstrInfo.td"
465465
466466 // Modify MI as necessary to handle as much of 'Offset' as possible
467467 Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
468
469 if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE) {
470 MachineOperand &FI = MI.getOperand(FIOperandNum);
471 FI.ChangeToImmediate(Offset);
472 return;
473 }
474
468475 if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
469476 return;
470477
; Test: SEH __finally lowering for arm64-windows (COFF).
; @foo escapes %count via llvm.localescape; the CHECK lines verify that the
; compiler emits the ".set .Lfoo$frame_escape_0, -4" label assignment and that
; the finally funclet "?fin$0@0@foo@@" recovers the escaped slot by
; materializing that label with movz/movk (:abs_g1_s:/:abs_g0_nc:) and adding
; it to the incoming frame pointer in x1.
0 ; RUN: llc -mtriple arm64-windows -o - %s | FileCheck %s
1
2 ; Function Attrs: noinline optnone uwtable
3 define dso_local i32 @foo() {
4 entry:
5 ; CHECK-LABEL: foo
6 ; CHECK: orr w8, wzr, #0x1
7 ; CHECK: mov w0, wzr
8 ; CHECK: mov x1, x29
9 ; CHECK: .set .Lfoo$frame_escape_0, -4
10 ; CHECK: stur w8, [x29, #-4]
11 ; CHECK: bl "?fin$0@0@foo@@"
12 ; CHECK: ldur w0, [x29, #-4]
13
14 %count = alloca i32, align 4
15 call void (...) @llvm.localescape(i32* %count)
16 store i32 0, i32* %count, align 4
17 %0 = load i32, i32* %count, align 4
18 %add = add nsw i32 %0, 1
19 store i32 %add, i32* %count, align 4
20 %1 = call i8* @llvm.localaddress()
21 call void @"?fin$0@0@foo@@"(i8 0, i8* %1)
22 %2 = load i32, i32* %count, align 4
23 ret i32 %2
24 }
25
; Outlined __finally handler: first argument says whether termination was
; abnormal, second is the parent frame pointer used by llvm.localrecover.
26 define internal void @"?fin$0@0@foo@@"(i8 %abnormal_termination, i8* %frame_pointer) {
27 entry:
28 ; CHECK-LABEL: @"?fin$0@0@foo@@"
29 ; CHECK: sub sp, sp, #16
30 ; CHECK: str x1, [sp, #8]
31 ; CHECK: strb w0, [sp, #7]
32 ; CHECK: movz x8, #:abs_g1_s:.Lfoo$frame_escape_0
33 ; CHECK: movk x8, #:abs_g0_nc:.Lfoo$frame_escape_0
34 ; CHECK: add x8, x1, x8
35 ; CHECK: ldr w9, [x8]
36 ; CHECK: add w9, w9, #1
37 ; CHECK: str w9, [x8]
38
39 %frame_pointer.addr = alloca i8*, align 8
40 %abnormal_termination.addr = alloca i8, align 1
41 %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @foo to i8*), i8* %frame_pointer, i32 0)
42 %count = bitcast i8* %0 to i32*
43 store i8* %frame_pointer, i8** %frame_pointer.addr, align 8
44 store i8 %abnormal_termination, i8* %abnormal_termination.addr, align 1
45 %1 = zext i8 %abnormal_termination to i32
46 %cmp = icmp eq i32 %1, 0
47 br i1 %cmp, label %if.then, label %if.end
48
49 if.then: ; preds = %entry
50 %2 = load i32, i32* %count, align 4
51 %add = add nsw i32 %2, 1
52 store i32 %add, i32* %count, align 4
53 br label %if.end
54
55 if.end: ; preds = %if.then, %entry
56 ret void
57 }
58
59 ; Function Attrs: nounwind readnone
60 declare i8* @llvm.localrecover(i8*, i8*, i32)
61
62 ; Function Attrs: nounwind readnone
63 declare i8* @llvm.localaddress()
64
65 ; Function Attrs: nounwind
66 declare void @llvm.localescape(...)
; Test: SEH __except filter lowering for arm64-windows (COFF).
; @foo escapes %count via llvm.localescape; the CHECK lines verify the
; ".set .Lfoo$frame_escape_0, -4" label assignment in the parent and the
; movz/movk (:abs_g1_s:/:abs_g0_nc:) rematerialization of that label inside
; the filter function "?filt$0@0@foo@@" which recovers the slot via
; llvm.localrecover.
0 ; RUN: llc -mtriple arm64-windows %s -o - | FileCheck %s
1
2 ; Function Attrs: noinline nounwind optnone uwtable
3 define dso_local i32 @foo() {
4 entry:
5 ; CHECK-LABEL: foo
6 ; CHECK: .set .Lfoo$frame_escape_0, -4
7
8 %count = alloca i32, align 4
9 call void (...) @llvm.localescape(i32* %count)
10 ret i32 0
11 }
12
; Outlined exception filter: receives the exception pointers and the parent
; frame pointer, and reads the escaped %count through llvm.localrecover.
13 define internal i32 @"?filt$0@0@foo@@"(i8* %exception_pointers, i8* %frame_pointer) {
14 entry:
15 ; CHECK-LABEL: @"?filt$0@0@foo@@"
16 ; CHECK: movz x8, #:abs_g1_s:.Lfoo$frame_escape_0
17 ; CHECK: movk x8, #:abs_g0_nc:.Lfoo$frame_escape_0
18
19 %0 = call i8* @llvm.localrecover(i8* bitcast (i32 ()* @foo to i8*), i8* %frame_pointer, i32 0)
20 %count = bitcast i8* %0 to i32*
21 %1 = load i32, i32* %count, align 4
22 ret i32 %1
23 }
24
25 ; Function Attrs: nounwind readnone
26 declare i8* @llvm.localrecover(i8*, i8*, i32) #2
27
28 ; Function Attrs: nounwind
29 declare void @llvm.localescape(...) #3