llvm.org GIT mirror llvm / 0651a55
[stack protector] Fix a potential security bug in the stack protector, where the address of the stack guard was being spilled to the stack.

Previously, the address of the stack guard would get spilled to the stack if it was impossible to keep it in a register. This patch introduces a new target-independent node and pseudo instruction, which gets expanded post-RA into a sequence of instructions that load the stack guard value. The register allocator can now simply rematerialize the value when it cannot keep it in a register.

<rdar://problem/12475629>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@213967 91177308-0d34-0410-b5e6-96231b3b80d8

Akira Hatanaka, 6 years ago
26 changed files with 378 additions and 10 deletions.
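The change spans two hooks that a backend opts into. A minimal sketch of the contract, using the hypothetical names MyTargetLowering and MyTargetInstrInfo (illustrative only; the real overrides for AArch64, ARM, and X86 are in the hunks below):

// Sketch: a target adopting the LOAD_STACK_GUARD path.
// 1) Ask SelectionDAGBuilder to emit the pseudo instead of a volatile load.
bool MyTargetLowering::useLoadStackGuardNode() const {
  return true;
}

// 2) Expand the pseudo after register allocation into the instructions that
//    load the guard value; because the pseudo is rematerializable, the
//    register allocator re-emits it rather than spilling the guard address.
bool MyTargetInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
    return false;
  // ... build the target's guard-load sequence before MI, then drop MI.
  MI->getParent()->erase(MI);
  return true;
}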
@@ -840,6 +840,14 @@
   let mayLoad = 1;
   let usesCustomInserter = 1;
 }
+def LOAD_STACK_GUARD : Instruction {
+  let OutOperandList = (outs ptr_rc:$dst);
+  let InOperandList = (ins);
+  let mayLoad = 1;
+  bit isReMaterializable = 1;
+  let hasSideEffects = 0;
+  bit isPseudo = 1;
+}
 }
 
 //===----------------------------------------------------------------------===//
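This flag combination is what licenses rematerialization: the pseudo is a load with no unmodeled side effects that TableGen marks rematerializable. A hedged sketch of how those bits surface at the MI level (an illustrative query, not code from this patch):

// Illustrative: the TableGen bits above become MCInstrDesc flags that the
// register allocator consults before re-executing the load.
const MCInstrDesc &Desc = TII->get(TargetOpcode::LOAD_STACK_GUARD);
assert(Desc.isPseudo() && Desc.mayLoad());
assert(Desc.isRematerializable());        // from 'bit isReMaterializable = 1'
assert(!Desc.hasUnmodeledSideEffects());  // from 'let hasSideEffects = 0'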
@@ -2597,6 +2597,12 @@
   /// ARM 's' setting instructions.
   virtual void
   AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
+
+  /// If this function returns true, SelectionDAGBuilder emits a
+  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
+  virtual bool useLoadStackGuardNode() const {
+    return false;
+  }
 };
 
 /// Given an LLVM IR type and return type attributes, compute the return value
@@ -103,7 +103,13 @@
   /// support optimizations for dynamic languages (such as javascript) that
   /// rewrite calls to runtimes with more efficient code sequences.
   /// This also implies a stack map.
-  PATCHPOINT = 18
+  PATCHPOINT = 18,
+
+  /// This pseudo-instruction loads the stack guard value. Targets which need
+  /// to prevent the stack guard value or address from being spilled to the
+  /// stack should override TargetLowering::useLoadStackGuardNode and
+  /// additionally expand this pseudo after register allocation.
+  LOAD_STACK_GUARD = 19
 };
 } // end namespace TargetOpcode
 } // end namespace llvm
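As the comment says, the pseudo survives until after register allocation. A simplified sketch of the post-RA expansion driver that ends up invoking the target hook (paraphrasing lib/CodeGen/ExpandPostRAPseudos.cpp from memory; not part of this diff):

// Simplified: every pseudo left after regalloc gets one shot at expansion.
for (MachineBasicBlock &MBB : MF) {
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
    MachineBasicBlock::iterator MI = I++;  // the hook may erase MI
    if (MI->getDesc().isPseudo())
      TII->expandPostRAPseudo(MI);         // handles LOAD_STACK_GUARD
  }
}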
@@ -1810,9 +1810,19 @@
 
   unsigned Align =
       TLI->getDataLayout()->getPrefTypeAlignment(IRGuard->getType());
-  SDValue Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
-                              GuardPtr, MachinePointerInfo(IRGuard, 0),
-                              true, false, false, Align);
+
+  SDValue Guard;
+
+  // If useLoadStackGuardNode returns true, retrieve the guard value from
+  // the virtual register holding the value. Otherwise, emit a volatile load
+  // to retrieve the stack guard value.
+  if (TLI->useLoadStackGuardNode())
+    Guard = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
+                               SPD.getGuardReg(), PtrTy);
+  else
+    Guard = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
+                        GuardPtr, MachinePointerInfo(IRGuard, 0),
+                        true, false, false, Align);
 
   SDValue StackSlot = DAG.getLoad(PtrTy, getCurSDLoc(), DAG.getEntryNode(),
                                   StackSlotPtr,
@@ -5227,8 +5237,35 @@
   MachineFunction &MF = DAG.getMachineFunction();
   MachineFrameInfo *MFI = MF.getFrameInfo();
   EVT PtrTy = TLI->getPointerTy();
-
-  SDValue Src = getValue(I.getArgOperand(0)); // The guard's value.
+  SDValue Src, Chain = getRoot();
+
+  if (TLI->useLoadStackGuardNode()) {
+    // Emit a LOAD_STACK_GUARD node.
+    MachineSDNode *Node = DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD,
+                                             sdl, PtrTy, Chain);
+    LoadInst *LI = cast<LoadInst>(I.getArgOperand(0));
+    MachinePointerInfo MPInfo(LI->getPointerOperand());
+    MachineInstr::mmo_iterator MemRefs = MF.allocateMemRefsArray(1);
+    unsigned Flags = MachineMemOperand::MOLoad |
+                     MachineMemOperand::MOInvariant;
+    *MemRefs = MF.getMachineMemOperand(MPInfo, Flags,
+                                       PtrTy.getSizeInBits() / 8,
+                                       DAG.getEVTAlignment(PtrTy));
+    Node->setMemRefs(MemRefs, MemRefs + 1);
+
+    // Copy the guard value to a virtual register so that it can be
+    // retrieved in the epilogue.
+    Src = SDValue(Node, 0);
+    const TargetRegisterClass *RC =
+        TLI->getRegClassFor(Src.getSimpleValueType());
+    unsigned Reg = MF.getRegInfo().createVirtualRegister(RC);
+
+    SPDescriptor.setGuardReg(Reg);
+    Chain = DAG.getCopyToReg(Chain, sdl, Reg, Src);
+  } else {
+    Src = getValue(I.getArgOperand(0)); // The guard's value.
+  }
+
   AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
 
   int FI = FuncInfo.StaticAllocaMap[Slot];
@@ -5237,7 +5274,7 @@
   SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
 
   // Store the stack protector onto the stack.
-  Res = DAG.getStore(getRoot(), sdl, Src, FIN,
+  Res = DAG.getStore(Chain, sdl, Src, FIN,
                      MachinePointerInfo::getFixedStack(FI),
                      true, false, 0);
   setValue(&I, Res);
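Condensed, the dataflow this hunk builds: the pseudo's result feeds both the protector-slot store and a CopyToReg, and the store is chained after the copy so neither is dropped or reordered. A sketch using the names from the code above:

// Sketch of the resulting wiring (names as in the hunk above).
SDValue Guard = SDValue(Node, 0);                  // LOAD_STACK_GUARD result
Chain = DAG.getCopyToReg(Chain, sdl, Reg, Guard);  // read back at the check
SDValue Store =
    DAG.getStore(Chain, sdl, Guard, FIN,           // write the protector slot
                 MachinePointerInfo::getFixedStack(FI),
                 true /*isVolatile*/, false /*isNonTemporal*/, 0);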
@@ -396,7 +396,8 @@
 class StackProtectorDescriptor {
 public:
   StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
-                               FailureMBB(nullptr), Guard(nullptr) { }
+                               FailureMBB(nullptr), Guard(nullptr),
+                               GuardReg(0) { }
   ~StackProtectorDescriptor() { }
 
   /// Returns true if all fields of the stack protector descriptor are
@@ -454,6 +455,9 @@
   MachineBasicBlock *getFailureMBB() { return FailureMBB; }
   const Value *getGuard() { return Guard; }
 
+  unsigned getGuardReg() const { return GuardReg; }
+  void setGuardReg(unsigned R) { GuardReg = R; }
+
 private:
   /// The basic block for which we are generating the stack protector.
   ///
@@ -475,6 +479,9 @@
   /// The guard variable which we will compare against the stored value in the
   /// stack protector stack slot.
   const Value *Guard;
+
+  /// The virtual register holding the stack guard value.
+  unsigned GuardReg;
 
   /// Add a successor machine basic block to ParentMBB. If the successor mbb
   /// has not been created yet (i.e. if SuccMBB = 0), then the machine basic
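The new field is written once when the intrinsic is lowered and read once when the guard check is emitted; the handshake between the two SelectionDAGBuilder hunks above, in brief:

// In visitStackProtector: record where the guard value lives.
SPDescriptor.setGuardReg(Reg);
// In the parent-block check: read it back instead of reloading the guard.
SDValue Guard = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                   SPD.getGuardReg(), PtrTy);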
@@ -8094,6 +8094,10 @@
   return Inst->getType()->getPrimitiveSizeInBits() <= 128;
 }
 
+bool AArch64TargetLowering::useLoadStackGuardNode() const {
+  return true;
+}
+
 TargetLoweringBase::LegalizeTypeAction
 AArch64TargetLowering::getPreferredVectorAction(EVT VT) const {
   MVT SVT = VT.getSimpleVT();
@@ -323,6 +323,7 @@
 
   bool shouldExpandAtomicInIR(Instruction *Inst) const override;
 
+  bool useLoadStackGuardNode() const override;
   TargetLoweringBase::LegalizeTypeAction
   getPreferredVectorAction(EVT VT) const override;
 
@@ -844,6 +844,56 @@
   (void)succeeded;
   assert(succeeded && "Some operands reg class are incompatible!");
   MI->addRegisterDefined(AArch64::NZCV, TRI);
+  return true;
+}
+
+bool
+AArch64InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+  if (MI->getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
+    return false;
+
+  MachineBasicBlock &MBB = *MI->getParent();
+  DebugLoc DL = MI->getDebugLoc();
+  unsigned Reg = MI->getOperand(0).getReg();
+  const GlobalValue *GV =
+      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
+  const TargetMachine &TM = MBB.getParent()->getTarget();
+  unsigned char OpFlags = Subtarget.ClassifyGlobalReference(GV, TM);
+  const unsigned char MO_NC = AArch64II::MO_NC;
+
+  if ((OpFlags & AArch64II::MO_GOT) != 0) {
+    BuildMI(MBB, MI, DL, get(AArch64::LOADgot), Reg)
+        .addGlobalAddress(GV, 0, AArch64II::MO_GOT);
+    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
+        .addReg(Reg, RegState::Kill).addImm(0)
+        .addMemOperand(*MI->memoperands_begin());
+  } else if (TM.getCodeModel() == CodeModel::Large) {
+    BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
+        .addGlobalAddress(GV, 0, AArch64II::MO_G3).addImm(48);
+    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
+        .addReg(Reg, RegState::Kill)
+        .addGlobalAddress(GV, 0, AArch64II::MO_G2 | MO_NC).addImm(32);
+    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
+        .addReg(Reg, RegState::Kill)
+        .addGlobalAddress(GV, 0, AArch64II::MO_G1 | MO_NC).addImm(16);
+    BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
+        .addReg(Reg, RegState::Kill)
+        .addGlobalAddress(GV, 0, AArch64II::MO_G0 | MO_NC).addImm(0);
+    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
+        .addReg(Reg, RegState::Kill).addImm(0)
+        .addMemOperand(*MI->memoperands_begin());
+  } else {
+    BuildMI(MBB, MI, DL, get(AArch64::ADRP), Reg)
+        .addGlobalAddress(GV, 0, OpFlags | AArch64II::MO_PAGE);
+    unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | MO_NC;
+    BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
+        .addReg(Reg, RegState::Kill)
+        .addGlobalAddress(GV, 0, LoFlags)
+        .addMemOperand(*MI->memoperands_begin());
+  }
+
+  MBB.erase(MI);
+
   return true;
 }
 
@@ -153,6 +153,8 @@
                             unsigned SrcReg2, int CmpMask, int CmpValue,
                             const MachineRegisterInfo *MRI) const override;
 
+  bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const override;
+
 private:
   void instantiateCondBranch(MachineBasicBlock &MBB, DebugLoc DL,
                              MachineBasicBlock *TBB,
@@ -1173,7 +1173,20 @@
   return MI->mayLoad() && hasLoadFromStackSlot(MI, Dummy, FrameIndex);
 }
 
-bool ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const{
+bool
+ARMBaseInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
+  MachineFunction &MF = *MI->getParent()->getParent();
+  Reloc::Model RM = MF.getTarget().getRelocationModel();
+
+  if (MI->getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
+    assert(getSubtarget().getTargetTriple().getObjectFormat() ==
+           Triple::MachO &&
+           "LOAD_STACK_GUARD currently supported only for MachO.");
+    expandLoadStackGuard(MI, RM);
+    MI->getParent()->erase(MI);
+    return true;
+  }
+
   // This hook gets to expand COPY instructions before they become
   // copyPhysReg() calls. Look for VMOVS instructions that can legally be
   // widened to VMOVD. We prefer the VMOVD when possible because it may be
@@ -3932,6 +3945,38 @@
   return true;
 }
 
+// LoadStackGuard has so far only been implemented for MachO. A different code
+// sequence is needed for other targets.
+void ARMBaseInstrInfo::expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
+                                                unsigned LoadImmOpc,
+                                                unsigned LoadOpc,
+                                                Reloc::Model RM) const {
+  MachineBasicBlock &MBB = *MI->getParent();
+  DebugLoc DL = MI->getDebugLoc();
+  unsigned Reg = MI->getOperand(0).getReg();
+  const GlobalValue *GV =
+      cast<GlobalValue>((*MI->memoperands_begin())->getValue());
+  MachineInstrBuilder MIB;
+
+  BuildMI(MBB, MI, DL, get(LoadImmOpc), Reg)
+      .addGlobalAddress(GV, 0, ARMII::MO_NONLAZY);
+
+  if (Subtarget.GVIsIndirectSymbol(GV, RM)) {
+    MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
+    MIB.addReg(Reg, RegState::Kill).addImm(0);
+    unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+    MachineMemOperand *MMO = MBB.getParent()->
+        getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 4, 4);
+    MIB.addMemOperand(MMO);
+    AddDefaultPred(MIB);
+  }
+
+  MIB = BuildMI(MBB, MI, DL, get(LoadOpc), Reg);
+  MIB.addReg(Reg, RegState::Kill).addImm(0);
+  MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
+  AddDefaultPred(MIB);
+}
+
 bool
 ARMBaseInstrInfo::isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                                      unsigned &AddSubOpc,
@@ -17,6 +17,7 @@
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/Support/CodeGen.h"
 #include "llvm/Target/TargetInstrInfo.h"
 
 #define GET_INSTRINFO_HEADER
@@ -32,6 +33,10 @@
 protected:
   // Can be only subclassed.
   explicit ARMBaseInstrInfo(const ARMSubtarget &STI);
+
+  void expandLoadStackGuardBase(MachineBasicBlock::iterator MI,
+                                unsigned LoadImmOpc, unsigned LoadOpc,
+                                Reloc::Model RM) const;
 
 public:
   // Return whether the target has an explicit NOP encoding.
@@ -285,6 +290,9 @@
   bool verifyInstruction(const MachineInstr *MI,
                          StringRef &ErrInfo) const override;
 
+  virtual void expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                                    Reloc::Model RM) const = 0;
+
 private:
   /// Modeling special VFP / NEON fp MLA / MLS hazards.
 
@@ -10803,6 +10803,11 @@
   return Inst->getType()->getPrimitiveSizeInBits() <= AtomicLimit;
 }
 
+// This has so far only been implemented for MachO.
+bool ARMTargetLowering::useLoadStackGuardNode() const {
+  return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO;
+}
+
 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                          AtomicOrdering Ord) const {
   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
@@ -396,6 +396,8 @@
                                 Value *Addr, AtomicOrdering Ord) const override;
 
     bool shouldExpandAtomicInIR(Instruction *Inst) const override;
+
+    bool useLoadStackGuardNode() const override;
 
   protected:
     std::pair<const TargetRegisterClass*, uint8_t>
@@ -89,6 +89,14 @@
   return 0;
 }
 
+void ARMInstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                                        Reloc::Model RM) const {
+  if (RM == Reloc::Static)
+    expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_abs, ARM::LDRi12, RM);
+  else
+    expandLoadStackGuardBase(MI, ARM::LDRLIT_ga_pcrel, ARM::LDRi12, RM);
+}
+
 namespace {
   /// ARMCGBR - Create Global Base Reg pass. This initializes the PIC
   /// global base register for ARM ELF.
@@ -36,6 +36,10 @@
   /// always be able to get register info as well (through this method).
   ///
   const ARMRegisterInfo &getRegisterInfo() const override { return RI; }
+
+private:
+  void expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                            Reloc::Model RM) const override;
 };
 
 }
@@ -100,3 +100,12 @@
                    .addFrameIndex(FI).addImm(0).addMemOperand(MMO));
   }
 }
+
+void
+Thumb1InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                                      Reloc::Model RM) const {
+  if (RM == Reloc::Static)
+    expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_abs, ARM::tLDRi, RM);
+  else
+    expandLoadStackGuardBase(MI, ARM::tLDRLIT_ga_pcrel, ARM::tLDRi, RM);
+}
@@ -53,6 +53,9 @@
                             const TargetRegisterClass *RC,
                             const TargetRegisterInfo *TRI) const override;
 
+private:
+  void expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                            Reloc::Model RM) const override;
 };
 }
 
@@ -206,6 +206,15 @@
   }
 
   ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
+}
+
+void
+Thumb2InstrInfo::expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                                      Reloc::Model RM) const {
+  if (RM == Reloc::Static)
+    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12, RM);
+  else
+    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12, RM);
 }
 
 void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
@@ -60,6 +60,10 @@
   /// always be able to get register info as well (through this method).
   ///
   const Thumb2RegisterInfo &getRegisterInfo() const override { return RI; }
+
+private:
+  void expandLoadStackGuard(MachineBasicBlock::iterator MI,
+                            Reloc::Model RM) const override;
 };
 
 /// getITInstrPredicate - Valid only in Thumb2 mode. This function is identical
@@ -1642,6 +1642,12 @@
   PredictableSelectIsExpensive = !Subtarget->isAtom();
 
   setPrefFunctionAlignment(4); // 2^4 bytes.
+}
+
+// This has so far only been implemented for 64-bit MachO.
+bool X86TargetLowering::useLoadStackGuardNode() const {
+  return Subtarget->getTargetTriple().getObjectFormat() == Triple::MachO &&
+         Subtarget->is64Bit();
 }
 
 TargetLoweringBase::LegalizeTypeAction
@@ -795,6 +795,7 @@
     /// \brief Reset the operation actions based on target options.
     void resetOperationActions() override;
 
+    bool useLoadStackGuardNode() const override;
     /// \brief Customize the preferred legalization strategy for certain types.
     LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
 
@@ -3962,6 +3962,28 @@
   return true;
 }
 
+// LoadStackGuard has so far only been implemented for 64-bit MachO. A
+// different code sequence is needed for other targets.
+static void expandLoadStackGuard(MachineInstrBuilder &MIB,
+                                 const TargetInstrInfo &TII) {
+  MachineBasicBlock &MBB = *MIB->getParent();
+  DebugLoc DL = MIB->getDebugLoc();
+  unsigned Reg = MIB->getOperand(0).getReg();
+  const GlobalValue *GV =
+      cast<GlobalValue>((*MIB->memoperands_begin())->getValue());
+  unsigned Flag = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant;
+  MachineMemOperand *MMO = MBB.getParent()->
+      getMachineMemOperand(MachinePointerInfo::getGOT(), Flag, 8, 8);
+  MachineBasicBlock::iterator I = MIB;
+
+  BuildMI(MBB, I, DL, TII.get(X86::MOV64rm), Reg).addReg(X86::RIP).addImm(1)
+      .addReg(0).addGlobalAddress(GV, 0, X86II::MO_GOTPCREL).addReg(0)
+      .addMemOperand(MMO);
+  MIB->setDebugLoc(DL);
+  MIB->setDesc(TII.get(X86::MOV64rm));
+  MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
+}
+
 bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
   bool HasAVX = Subtarget.hasAVX();
   MachineInstrBuilder MIB(*MI->getParent()->getParent(), MI);
@@ -3996,6 +4018,9 @@
   case X86::KSET0W: return Expand2AddrUndef(MIB, get(X86::KXORWrr));
   case X86::KSET1B:
   case X86::KSET1W: return Expand2AddrUndef(MIB, get(X86::KXNORWrr));
+  case TargetOpcode::LOAD_STACK_GUARD:
+    expandLoadStackGuard(MIB, *this);
+    return true;
   }
   return false;
 }
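The x86 expansion recycles the pseudo in place: the first BuildMI emits the GOT load, then setDesc turns the pseudo itself into the second MOV64rm, and the five appended operands spell out its memory reference. An annotated restatement of that operand convention (descriptive comments only; the code matches the hunk above):

// x86 memory operands come in five parts: base, scale, index, disp, segment.
MIB->setDesc(TII.get(X86::MOV64rm));  // the pseudo now *is* the second load
MIB.addReg(Reg, RegState::Kill)       // base: register holding the GOT entry
   .addImm(1)                         // scale = 1
   .addReg(0)                         // no index register
   .addImm(0)                         // displacement 0
   .addReg(0);                        // no segment override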
@@ -0,0 +1,48 @@
+; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -mtriple=arm64-apple-ios -relocation-model=static -no-integrated-as | FileCheck %s -check-prefix=DARWIN
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=PIC-LINUX
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=static -code-model=large -no-integrated-as | FileCheck %s -check-prefix=STATIC-LARGE
+; RUN: llc < %s -mtriple=aarch64-linux-gnu -relocation-model=static -code-model=small -no-integrated-as | FileCheck %s -check-prefix=STATIC-SMALL
+
+; DARWIN: foo2
+; DARWIN: adrp [[R0:x[0-9]+]], ___stack_chk_guard@GOTPAGE
+; DARWIN: ldr [[R1:x[0-9]+]], {{\[}}[[R0]], ___stack_chk_guard@GOTPAGEOFF{{\]}}
+; DARWIN: ldr {{x[0-9]+}}, {{\[}}[[R1]]{{\]}}
+
+; PIC-LINUX: foo2
+; PIC-LINUX: adrp [[R0:x[0-9]+]], :got:__stack_chk_guard
+; PIC-LINUX: ldr [[R1:x[0-9]+]], {{\[}}[[R0]], :got_lo12:__stack_chk_guard{{\]}}
+; PIC-LINUX: ldr {{x[0-9]+}}, {{\[}}[[R1]]{{\]}}
+
+; STATIC-LARGE: foo2
+; STATIC-LARGE: movz [[R0:x[0-9]+]], #:abs_g3:__stack_chk_guard
+; STATIC-LARGE: movk [[R0]], #:abs_g2_nc:__stack_chk_guard
+; STATIC-LARGE: movk [[R0]], #:abs_g1_nc:__stack_chk_guard
+; STATIC-LARGE: movk [[R0]], #:abs_g0_nc:__stack_chk_guard
+; STATIC-LARGE: ldr {{x[0-9]+}}, {{\[}}[[R0]]{{\]}}
+
+; STATIC-SMALL: foo2
+; STATIC-SMALL: adrp [[R0:x[0-9]+]], __stack_chk_guard
+; STATIC-SMALL: ldr {{x[0-9]+}}, {{\[}}[[R0]], :lo12:__stack_chk_guard{{\]}}
+
+define i32 @test_stack_guard_remat() #0 {
+entry:
+  %a1 = alloca [256 x i32], align 4
+  %0 = bitcast [256 x i32]* %a1 to i8*
+  call void @llvm.lifetime.start(i64 1024, i8* %0)
+  %arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
+  call void @foo3(i32* %arraydecay)
+  call void asm sideeffect "foo2", "~{w0},~{w1},~{w2},~{w3},~{w4},~{w5},~{w6},~{w7},~{w8},~{w9},~{w10},~{w11},~{w12},~{w13},~{w14},~{w15},~{w16},~{w17},~{w18},~{w19},~{w20},~{w21},~{w22},~{w23},~{w24},~{w25},~{w26},~{w27},~{w28},~{w29},~{w30}"()
+  call void @llvm.lifetime.end(i64 1024, i8* %0)
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @foo3(i32*)
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+attributes #0 = { nounwind sspstrong "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -0,0 +1,41 @@
+; RUN: llc < %s -mtriple=arm-apple-ios -relocation-model=pic -no-integrated-as | FileCheck %s -check-prefix=PIC
+; RUN: llc < %s -mtriple=arm-apple-ios -relocation-model=static -no-integrated-as | FileCheck %s -check-prefix=STATIC
+
+;PIC: foo2
+;PIC: ldr [[R0:r[0-9]+]], [[LABEL0:LCPI[0-9_]+]]
+;PIC: [[LABEL1:LPC0_1]]:
+;PIC: ldr [[R1:r[0-9]+]], [pc, [[R0]]]
+;PIC: ldr [[R2:r[0-9]+]], {{\[}}[[R1]]{{\]}}
+;PIC: ldr {{r[0-9]+}}, {{\[}}[[R2]]{{\]}}
+
+;PIC: [[LABEL0]]:
+;PIC-NEXT: .long L___stack_chk_guard$non_lazy_ptr-([[LABEL1]]+8)
+
+;STATIC: foo2
+;STATIC: ldr [[R0:r[0-9]+]], [[LABEL0:LCPI[0-9_]+]]
+;STATIC: ldr {{r[0-9]+}}, {{\[}}[[R0]]{{\]}}
+
+;STATIC: [[LABEL0]]:
+;STATIC-NEXT: .long ___stack_chk_guard
+
+; Function Attrs: nounwind ssp
+define i32 @test_stack_guard_remat() #0 {
+  %a1 = alloca [256 x i32], align 4
+  %1 = bitcast [256 x i32]* %a1 to i8*
+  call void @llvm.lifetime.start(i64 1024, i8* %1)
+  %2 = getelementptr inbounds [256 x i32]* %a1, i32 0, i32 0
+  call void @foo3(i32* %2) #3
+  call void asm sideeffect "foo2", "~{r0},~{r1},~{r2},~{r3},~{r4},~{r5},~{r6},~{r7},~{r8},~{r9},~{r10},~{r11},~{r12},~{sp},~{lr}"()
+  call void @llvm.lifetime.end(i64 1024, i8* %1)
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @foo3(i32*)
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+attributes #0 = { nounwind ssp "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -0,0 +1,28 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -no-integrated-as | FileCheck %s -check-prefix=CHECK
+
+;CHECK: foo2
+;CHECK: movq ___stack_chk_guard@GOTPCREL(%rip), [[R0:%[a-z0-9]+]]
+;CHECK: movq ([[R0]]), {{%[a-z0-9]+}}
+
+; Function Attrs: nounwind ssp uwtable
+define i32 @test_stack_guard_remat() #0 {
+entry:
+  %a1 = alloca [256 x i32], align 16
+  %0 = bitcast [256 x i32]* %a1 to i8*
+  call void @llvm.lifetime.start(i64 1024, i8* %0)
+  %arraydecay = getelementptr inbounds [256 x i32]* %a1, i64 0, i64 0
+  call void @foo3(i32* %arraydecay)
+  call void asm sideeffect "foo2", "~{r12},~{r13},~{r14},~{r15},~{ebx},~{esi},~{edi},~{dirflag},~{fpsr},~{flags}"()
+  call void @llvm.lifetime.end(i64 1024, i8* %0)
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.start(i64, i8* nocapture)
+
+declare void @foo3(i32*)
+
+; Function Attrs: nounwind
+declare void @llvm.lifetime.end(i64, i8* nocapture)
+
+attributes #0 = { nounwind ssp uwtable "less-precise-fpmad"="false" "no-frame-pointer-elim"="true" "no-frame-pointer-elim-non-leaf" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
@@ -300,7 +300,8 @@
       "GC_LABEL", "KILL", "EXTRACT_SUBREG", "INSERT_SUBREG",
       "IMPLICIT_DEF", "SUBREG_TO_REG", "COPY_TO_REGCLASS", "DBG_VALUE",
       "REG_SEQUENCE", "COPY", "BUNDLE", "LIFETIME_START",
-      "LIFETIME_END", "STACKMAP", "PATCHPOINT", nullptr};
+      "LIFETIME_END", "STACKMAP", "PATCHPOINT", "LOAD_STACK_GUARD",
+      nullptr};
   const auto &Insts = getInstructions();
   for (const char *const *p = FixedInstrs; *p; ++p) {
     const CodeGenInstruction *Instr = GetInstByName(*p, Insts, Records);
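The FixedInstrs table must stay in lockstep with the TargetOpcode enum; appending "LOAD_STACK_GUARD" here is what binds opcode value 19 to the TableGen definition. An illustrative compile-time guard (not in the patch):

// Illustrative only: the enum value and the fixed-instruction order agree.
static_assert(TargetOpcode::LOAD_STACK_GUARD == 19,
              "TargetOpcode enum out of sync with TableGen FixedInstrs order");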