llvm.org GIT mirror llvm / 7146e90
[WebAssembly] Check in an initial CFG Stackifier pass This pass implements a simple algorithm for conversion from CFG to wasm's structured control flow. It doesn't yet handle multiple-entry loops; that will be added in a future patch. It also adds initial support for switch statements. Differential Revision: http://reviews.llvm.org/D12735 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@247818 91177308-0d34-0410-b5e6-96231b3b80d8 Dan Gohman 5 years ago
15 changed file(s) with 929 addition(s) and 6 deletion(s). Raw diff Collapse all Expand all
1111 add_llvm_target(WebAssemblyCodeGen
1212 Relooper.cpp
1313 WebAssemblyAsmPrinter.cpp
14 WebAssemblyCFGStackify.cpp
1415 WebAssemblyFastISel.cpp
1516 WebAssemblyFrameLowering.cpp
1617 WebAssemblyISelDAGToDAG.cpp
2525 FunctionPass *createWebAssemblyISelDag(WebAssemblyTargetMachine &TM,
2626 CodeGenOpt::Level OptLevel);
2727
28 FunctionPass *createWebAssemblyCFGStackify();
29
2830 } // end namespace llvm
2931
3032 #endif
7272
7373 void EmitGlobalVariable(const GlobalVariable *GV) override;
7474
75 void EmitJumpTableInfo() override;
7576 void EmitConstantPool() override;
7677 void EmitFunctionEntryLabel() override;
7778 void EmitFunctionBodyStart() override;
212213 "WebAssembly disables constant pools");
213214 }
214215
216 void WebAssemblyAsmPrinter::EmitJumpTableInfo() {
217 // Nothing to do; jump tables are incorporated into the instruction stream.
218 }
219
215220 void WebAssemblyAsmPrinter::EmitFunctionEntryLabel() {
216221 SmallString<128> Str;
217222 raw_svector_ostream OS(Str);
292297 case MachineOperand::MO_GlobalAddress: {
293298 OS << ' ' << toSymbol(MO.getGlobal()->getName());
294299 } break;
300 case MachineOperand::MO_MachineBasicBlock: {
301 OS << ' ' << toSymbol(MO.getMBB()->getSymbol()->getName());
302 } break;
295303 }
296304 OS << ')';
297305 }
0 //===-- WebAssemblyCFGStackify.cpp - CFG Stackification -------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// \brief This file implements a CFG stacking pass.
11 ///
12 /// This pass reorders the blocks in a function to put them into a reverse
13 /// post-order [0], with special care to keep the order as similar as possible
14 /// to the original order, and to keep loops contiguous even in the case of
15 /// split backedges.
16 ///
17 /// Then, it inserts BLOCK and LOOP markers to mark the start of scopes, since
18 /// scope boundaries serve as the labels for WebAssembly's control transfers.
19 ///
20 /// This is sufficient to convert arbitrary CFGs into a form that works on
21 /// WebAssembly, provided that all loops are single-entry.
22 ///
23 /// [0] https://en.wikipedia.org/wiki/Depth-first_search#Vertex_orderings
24 ///
25 //===----------------------------------------------------------------------===//
26
27 #include "WebAssembly.h"
28 #include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
29 #include "WebAssemblySubtarget.h"
30 #include "llvm/ADT/SCCIterator.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineLoopInfo.h"
34 #include "llvm/CodeGen/Passes.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 using namespace llvm;
38
39 #define DEBUG_TYPE "wasm-cfg-stackify"
40
41 namespace {
42 class WebAssemblyCFGStackify final : public MachineFunctionPass {
43 const char *getPassName() const override {
44 return "WebAssembly CFG Stackify";
45 }
46
47 void getAnalysisUsage(AnalysisUsage &AU) const override {
48 AU.setPreservesCFG();
49 AU.addRequired();
50 AU.addPreserved();
51 MachineFunctionPass::getAnalysisUsage(AU);
52 }
53
54 bool runOnMachineFunction(MachineFunction &MF) override;
55
56 public:
57 static char ID; // Pass identification, replacement for typeid
58 WebAssemblyCFGStackify() : MachineFunctionPass(ID) {}
59 };
60 } // end anonymous namespace
61
62 char WebAssemblyCFGStackify::ID = 0;
63 FunctionPass *llvm::createWebAssemblyCFGStackify() {
64 return new WebAssemblyCFGStackify();
65 }
66
67 static void EliminateMultipleEntryLoops(MachineFunction &MF,
68 const MachineLoopInfo &MLI) {
69 SmallPtrSet InSet;
70 for (scc_iterator I = scc_begin(&MF), E = scc_end(&MF);
71 I != E; ++I) {
72 const std::vector &CurrentSCC = *I;
73
74 // Skip trivial SCCs.
75 if (CurrentSCC.size() == 1)
76 continue;
77
78 InSet.insert(CurrentSCC.begin(), CurrentSCC.end());
79 MachineBasicBlock *Header = nullptr;
80 for (MachineBasicBlock *MBB : CurrentSCC) {
81 for (MachineBasicBlock *Pred : MBB->predecessors()) {
82 if (InSet.count(Pred))
83 continue;
84 if (!Header) {
85 Header = MBB;
86 break;
87 }
88 // TODO: Implement multiple-entry loops.
89 report_fatal_error("multiple-entry loops are not supported yet");
90 }
91 }
92 assert(MLI.isLoopHeader(Header));
93
94 InSet.clear();
95 }
96 }
97
98 namespace {
99 /// Post-order traversal stack entry.
100 struct POStackEntry {
101 MachineBasicBlock *MBB;
102 SmallVector Succs;
103
104 POStackEntry(MachineBasicBlock *MBB, MachineFunction &MF,
105 const MachineLoopInfo &MLI);
106 };
107 } // end anonymous namespace
108
109 POStackEntry::POStackEntry(MachineBasicBlock *MBB, MachineFunction &MF,
110 const MachineLoopInfo &MLI)
111 : MBB(MBB), Succs(MBB->successors()) {
112 // RPO is not a unique form, since at every basic block with multiple
113 // successors, the DFS has to pick which order to visit the successors in.
114 // Sort them strategically (see below).
115 MachineLoop *Loop = MLI.getLoopFor(MBB);
116 MachineFunction::iterator Next = next(MachineFunction::iterator(MBB));
117 MachineBasicBlock *LayoutSucc = Next == MF.end() ? nullptr : &*Next;
118 std::stable_sort(
119 Succs.begin(), Succs.end(),
120 [=, &MLI](const MachineBasicBlock *A, const MachineBasicBlock *B) {
121 if (A == B)
122 return false;
123
124 // Keep loops contiguous by preferring the block that's in the same
125 // loop.
126 MachineLoop *LoopA = MLI.getLoopFor(A);
127 MachineLoop *LoopB = MLI.getLoopFor(B);
128 if (LoopA == Loop && LoopB != Loop)
129 return true;
130 if (LoopA != Loop && LoopB == Loop)
131 return false;
132
133 // Minimize perturbation by preferring the block which is the immediate
134 // layout successor.
135 if (A == LayoutSucc)
136 return true;
137 if (B == LayoutSucc)
138 return false;
139
140 // TODO: More sophisticated orderings may be profitable here.
141
142 return false;
143 });
144 }
145
146 /// Sort the blocks in RPO, taking special care to make sure that loops are
147 /// contiguous even in the case of split backedges.
148 static void SortBlocks(MachineFunction &MF, const MachineLoopInfo &MLI) {
149 // Note that we do our own RPO rather than using
150 // "llvm/ADT/PostOrderIterator.h" because we want control over the order that
151 // successors are visited in (see above). Also, we can sort the blocks in the
152 // MachineFunction as we go.
153 SmallPtrSet Visited;
154 SmallVector Stack;
155
156 MachineBasicBlock *Entry = MF.begin();
157 Visited.insert(Entry);
158 Stack.push_back(POStackEntry(Entry, MF, MLI));
159
160 for (;;) {
161 POStackEntry &Entry = Stack.back();
162 SmallVectorImpl &Succs = Entry.Succs;
163 if (!Succs.empty()) {
164 MachineBasicBlock *Succ = Succs.pop_back_val();
165 if (Visited.insert(Succ).second)
166 Stack.push_back(POStackEntry(Succ, MF, MLI));
167 continue;
168 }
169
170 // Put the block in its position in the MachineFunction.
171 MachineBasicBlock &MBB = *Entry.MBB;
172 MBB.moveBefore(MF.begin());
173
174 // Branch instructions may utilize a fallthrough, so update them if a
175 // fallthrough has been added or removed.
176 if (!MBB.empty() && MBB.back().isTerminator() && !MBB.back().isBranch() &&
177 !MBB.back().isBarrier())
178 report_fatal_error(
179 "Non-branch terminator with fallthrough cannot yet be rewritten");
180 if (MBB.empty() || !MBB.back().isTerminator() || MBB.back().isBranch())
181 MBB.updateTerminator();
182
183 Stack.pop_back();
184 if (Stack.empty())
185 break;
186 }
187
188 // Now that we've sorted the blocks in RPO, renumber them.
189 MF.RenumberBlocks();
190
191 #ifndef NDEBUG
192 for (auto &MBB : MF)
193 if (MachineLoop *Loop = MLI.getLoopFor(&MBB)) {
194 // Assert that loops are contiguous.
195 assert(Loop->getHeader() == Loop->getTopBlock());
196 assert(Loop->getHeader() == &MBB ||
197 MLI.getLoopFor(prev(MachineFunction::iterator(&MBB))) == Loop);
198 } else {
199 // Assert that non-loops have no backedge predecessors.
200 for (auto Pred : MBB.predecessors())
201 assert(Pred->getNumber() < MBB.getNumber() &&
202 "CFG still has multiple-entry loops");
203 }
204 #endif
205 }
206
207 /// Insert BLOCK markers at appropriate places.
208 static void PlaceBlockMarkers(MachineBasicBlock &MBB, MachineBasicBlock &Succ,
209 MachineFunction &MF, const MachineLoopInfo &MLI,
210 const WebAssemblyInstrInfo &TII) {
211 // Backward branches are loop backedges, and we place the LOOP markers
212 // separately. So only consider forward branches here.
213 if (Succ.getNumber() <= MBB.getNumber())
214 return;
215
216 // Place the BLOCK for a forward branch. For simplicity, we just insert
217 // blocks immediately inside loop boundaries.
218 MachineLoop *Loop = MLI.getLoopFor(&Succ);
219 MachineBasicBlock &Header = *(Loop ? Loop->getHeader() : &MF.front());
220 MachineBasicBlock::iterator InsertPos = Header.begin(), End = Header.end();
221 if (InsertPos != End) {
222 if (InsertPos->getOpcode() == WebAssembly::LOOP)
223 ++InsertPos;
224 int SuccNumber = Succ.getNumber();
225 // Position the BLOCK in nesting order.
226 for (; InsertPos != End && InsertPos->getOpcode() == WebAssembly::BLOCK;
227 ++InsertPos) {
228 int N = InsertPos->getOperand(0).getMBB()->getNumber();
229 if (N < SuccNumber)
230 break;
231 // If there's already a BLOCK for Succ, we don't need another.
232 if (N == SuccNumber)
233 return;
234 }
235 }
236
237 BuildMI(Header, InsertPos, DebugLoc(), TII.get(WebAssembly::BLOCK))
238 .addMBB(&Succ);
239 }
240
241 /// Insert LOOP and BLOCK markers at appropriate places.
242 static void PlaceMarkers(MachineFunction &MF, const MachineLoopInfo &MLI,
243 const WebAssemblyInstrInfo &TII) {
244 for (auto &MBB : MF) {
245 // Place the LOOP for loops.
246 if (MachineLoop *Loop = MLI.getLoopFor(&MBB))
247 if (Loop->getHeader() == &MBB)
248 BuildMI(MBB, MBB.begin(), DebugLoc(), TII.get(WebAssembly::LOOP))
249 .addMBB(Loop->getBottomBlock());
250
251 // Check for forward branches and switches that need BLOCKS placed.
252 for (auto &Term : MBB.terminators())
253 for (auto &MO : Term.operands())
254 if (MO.isMBB())
255 PlaceBlockMarkers(MBB, *MO.getMBB(), MF, MLI, TII);
256 }
257 }
258
259 bool WebAssemblyCFGStackify::runOnMachineFunction(MachineFunction &MF) {
260 DEBUG(dbgs() << "********** CFG Stackifying **********\n"
261 "********** Function: "
262 << MF.getName() << '\n');
263
264 const auto &MLI = getAnalysis();
265 const auto &TII = *MF.getSubtarget().getInstrInfo();
266
267 // RPO sorting needs all loops to be single-entry.
268 EliminateMultipleEntryLoops(MF, MLI);
269
270 // Sort the blocks in RPO, with contiguous loops.
271 SortBlocks(MF, MLI);
272
273 // Place the BLOCK and LOOP markers to indicate the beginnings of scopes.
274 PlaceMarkers(MF, MLI, TII);
275
276 return true;
277 }
1818 HANDLE_NODETYPE(RETURN)
1919 HANDLE_NODETYPE(ARGUMENT)
2020 HANDLE_NODETYPE(Wrapper)
21 HANDLE_NODETYPE(BRIF)
22 HANDLE_NODETYPE(SWITCH)
2123
2224 // add memory opcodes starting at ISD::FIRST_TARGET_MEMORY_OPCODE here...
1919 #include "WebAssemblyTargetObjectFile.h"
2020 #include "llvm/CodeGen/Analysis.h"
2121 #include "llvm/CodeGen/CallingConvLower.h"
22 #include "llvm/CodeGen/MachineJumpTableInfo.h"
2223 #include "llvm/CodeGen/MachineRegisterInfo.h"
2324 #include "llvm/CodeGen/SelectionDAG.h"
2425 #include "llvm/IR/DiagnosticInfo.h"
113114 computeRegisterProperties(Subtarget->getRegisterInfo());
114115
115116 setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
117 setOperationAction(ISD::JumpTable, MVTPtr, Custom);
116118
117119 for (auto T : {MVT::f32, MVT::f64}) {
118120 // Don't expand the floating-point types to constant pools.
151153 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
152154 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
153155 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);
156
157 // Expand these forms; we pattern-match the forms that we can handle in isel.
158 for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
159 for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
160 setOperationAction(Op, T, Expand);
161
162 // We have custom switch handling.
163 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
154164
155165 // WebAssembly doesn't have:
156166 // - Floating-point extending loads.
364374 return SDValue();
365375 case ISD::GlobalAddress:
366376 return LowerGlobalAddress(Op, DAG);
377 case ISD::JumpTable:
378 return LowerJumpTable(Op, DAG);
379 case ISD::BR_JT:
380 return LowerBR_JT(Op, DAG);
367381 }
368382 }
369383
381395 DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT));
382396 }
383397
398 SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
399 SelectionDAG &DAG) const {
400 // There's no need for a Wrapper node because we always incorporate a jump
401 // table operand into a SWITCH instruction, rather than ever materializing
402 // it in a register.
403 const JumpTableSDNode *JT = cast(Op);
404 return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
405 JT->getTargetFlags());
406 }
407
408 SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
409 SelectionDAG &DAG) const {
410 SDLoc DL(Op);
411 SDValue Chain = Op.getOperand(0);
412 const auto *JT = cast(Op.getOperand(1));
413 SDValue Index = Op.getOperand(2);
414 assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");
415
416 SmallVector Ops;
417 Ops.push_back(Chain);
418 Ops.push_back(Index);
419
420 MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
421 const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;
422
423 // TODO: For now, we just pick something arbitrary for a default case for now.
424 // We really want to sniff out the guard and put in the real default case (and
425 // delete the guard).
426 Ops.push_back(DAG.getBasicBlock(MBBs[0]));
427
428 // Add an operand for each case.
429 for (auto MBB : MBBs)
430 Ops.push_back(DAG.getBasicBlock(MBB));
431
432 return DAG.getNode(WebAssemblyISD::SWITCH, DL, MVT::Other, Ops);
433 }
434
384435 //===----------------------------------------------------------------------===//
385436 // WebAssembly Optimization Hooks
386437 //===----------------------------------------------------------------------===//
6868 // Custom lowering hooks.
6969 SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
7070 SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
71 SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
72 SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
7173 };
7274
7375 namespace WebAssembly {
2424 * switch: switch statement with fallthrough
2525 */
2626
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1 in {
// Conditional branch: taken when $a is nonzero.
def BRIF : I<(outs), (ins bb_op:$dst, Int32:$a),
             [(brcond Int32:$a, bb:$dst)]>;
let isBarrier = 1 in {
// Unconditional branch.
def BR : I<(outs), (ins bb_op:$dst),
           [(br bb:$dst)]>;
} // isBarrier = 1
} // isBranch = 1, isTerminator = 1, hasCtrlDep = 1

// TODO: SelectionDAG's lowering insists on using a pointer as the index for
// jump tables, so in practice we don't ever use SWITCH_I64 in wasm32 mode
// currently.
let isTerminator = 1, hasCtrlDep = 1, isBarrier = 1 in {
def SWITCH_I32 : I<(outs), (ins Int32:$index, variable_ops),
                   [(WebAssemblyswitch Int32:$index)]>;
def SWITCH_I64 : I<(outs), (ins Int64:$index, variable_ops),
                   [(WebAssemblyswitch Int64:$index)]>;
} // isTerminator = 1, hasCtrlDep = 1, isBarrier = 1

// Placemarkers to indicate the start of a block or loop scope.
def BLOCK : I<(outs), (ins bb_op:$dst), []>;
def LOOP : I<(outs), (ins bb_op:$dst), []>;
49
// One RETURN instruction per value type; the <vt> template parameter was
// stripped by the diff rendering and is restored here.
multiclass RETURN<WebAssemblyRegClass vt> {
  def RETURN_#vt : I<(outs), (ins vt:$val), [(WebAssemblyreturn vt:$val)]>;
}
3636 BuildMI(MBB, I, DL, get(WebAssembly::COPY), DestReg)
3737 .addReg(SrcReg, KillSrc ? RegState::Kill : 0);
3838 }
39
40 // Branch analysis.
41 bool WebAssemblyInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
42 MachineBasicBlock *&TBB,
43 MachineBasicBlock *&FBB,
44 SmallVectorImpl &Cond,
45 bool AllowModify) const {
46 bool HaveCond = false;
47 for (MachineInstr &MI : iterator_range(
48 MBB.getFirstInstrTerminator(), MBB.instr_end())) {
49 switch (MI.getOpcode()) {
50 default:
51 // Unhandled instruction; bail out.
52 return true;
53 case WebAssembly::BRIF:
54 if (HaveCond)
55 return true;
56 Cond.push_back(MI.getOperand(1));
57 TBB = MI.getOperand(0).getMBB();
58 HaveCond = true;
59 break;
60 case WebAssembly::BR:
61 if (!HaveCond)
62 TBB = MI.getOperand(0).getMBB();
63 else
64 FBB = MI.getOperand(0).getMBB();
65 break;
66 }
67 if (MI.isBarrier())
68 break;
69 }
70
71 return false;
72 }
73
74 unsigned WebAssemblyInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
75 MachineBasicBlock::instr_iterator I = MBB.instr_end();
76 unsigned Count = 0;
77
78 while (I != MBB.instr_begin()) {
79 --I;
80 if (I->isDebugValue())
81 continue;
82 if (!I->isTerminator())
83 break;
84 // Remove the branch.
85 I->eraseFromParent();
86 I = MBB.instr_end();
87 ++Count;
88 }
89
90 return Count;
91 }
92
93 unsigned WebAssemblyInstrInfo::InsertBranch(
94 MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
95 ArrayRef Cond, DebugLoc DL) const {
96 assert(Cond.size() <= 1);
97
98 if (Cond.empty()) {
99 if (!TBB)
100 return 0;
101
102 BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(TBB);
103 return 1;
104 }
105
106 BuildMI(&MBB, DL, get(WebAssembly::BRIF))
107 .addMBB(TBB)
108 .addOperand(Cond[0]);
109 if (!FBB)
110 return 1;
111
112 BuildMI(&MBB, DL, get(WebAssembly::BR)).addMBB(FBB);
113 return 2;
114 }
115
116 bool WebAssemblyInstrInfo::ReverseBranchCondition(
117 SmallVectorImpl &Cond) const {
118 assert(Cond.size() == 1);
119
120 // TODO: Add branch reversal here... And re-enable MachineBlockPlacementID
121 // when we do.
122
123 return true;
124 }
3636 void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
3737 DebugLoc DL, unsigned DestReg, unsigned SrcReg,
3838 bool KillSrc) const override;
39
40 bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
41 MachineBasicBlock *&FBB,
42 SmallVectorImpl &Cond,
43 bool AllowModify = false) const override;
44 unsigned RemoveBranch(MachineBasicBlock &MBB) const override;
45 unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
46 MachineBasicBlock *FBB,
47 ArrayRef Cond,
48 DebugLoc DL) const override;
49 bool
50 ReverseBranchCondition(SmallVectorImpl &Cond) const override;
3951 };
4052
4153 } // end namespace llvm
2929 SDCallSeqEnd<[SDTCisVT<0, iPTR>, SDTCisVT<1, iPTR>]>;
3030 def SDT_WebAssemblyCall0 : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
3131 def SDT_WebAssemblyCall1 : SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>;
32 def SDT_WebAssemblySwitch : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
3233 def SDT_WebAssemblyArgument : SDTypeProfile<1, 1, [SDTCisVT<1, i32>]>;
3334 def SDT_WebAssemblyReturn : SDTypeProfile<0, -1, []>;
3435 def SDT_WebAssemblyWrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
5051 def WebAssemblycall1 : SDNode<"WebAssemblyISD::CALL1",
5152 SDT_WebAssemblyCall1,
5253 [SDNPHasChain, SDNPVariadic]>;
def WebAssemblyswitch : SDNode<"WebAssemblyISD::SWITCH",
                               SDT_WebAssemblySwitch,
                               [SDNPHasChain, SDNPVariadic]>;
5357 def WebAssemblyargument : SDNode<"WebAssemblyISD::ARGUMENT",
5458 SDT_WebAssemblyArgument>;
5559 def WebAssemblyreturn : SDNode<"WebAssemblyISD::RETURN",
6872 * set_local: set the current value of a local variable
6973 */
7074
// Operand types (value-type template arguments restored after being stripped
// by the diff rendering).
def bb_op : Operand<OtherVT>;
def tjumptable_op : Operand<iPTR>;
def global : Operand<i32>;
7278
7379 //===----------------------------------------------------------------------===//
95101 def Immediate_F64 : I<(outs Float64:$res), (ins f64imm:$imm),
96102 [(set Float64:$res, fpimm:$imm)]>;
97103
// Special types of immediates. FIXME: Hard-coded as 32-bit for now.
def GLOBAL : I<(outs Int32:$dst), (ins global:$addr),
               [(set Int32:$dst, (WebAssemblywrapper tglobaladdr:$addr))]>;
def JUMP_TABLE : I<(outs Int32:$dst), (ins tjumptable_op:$addr),
                   [(set Int32:$dst, (WebAssemblywrapper tjumptable:$addr))]>;
101109
102110 //===----------------------------------------------------------------------===//
103111 // Additional sets of instructions.
158158 disablePass(&PrologEpilogCodeInserterID);
159159 // Fails with: should be run after register allocation.
160160 disablePass(&MachineCopyPropagationID);
161
162 // TODO: Until we get ReverseBranchCondition support, MachineBlockPlacement
163 // can create ugly-looking control flow.
164 disablePass(&MachineBlockPlacementID);
161165 }
162166
163167 void WebAssemblyPassConfig::addPreSched2() {}
164168
165 void WebAssemblyPassConfig::addPreEmitPass() {}
169 void WebAssemblyPassConfig::addPreEmitPass() {
170 addPass(createWebAssemblyCFGStackify());
171 }
; RUN: llc < %s -asm-verbose=false | FileCheck %s

; Test the CFG stackifier pass.

target datalayout = "e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

declare void @something()

; Test that loops are made contiguous, even in the presence of split backedges.

; CHECK-LABEL: test0
; CHECK: (loop
; CHECK: (add
; CHECK: (brif
; CHECK: (call
; CHECK: (br $BB0_1)
; CHECK: (return)
define void @test0(i32 %n) {
entry:
  br label %header

header:
  %i = phi i32 [ 0, %entry ], [ %i.next, %back ]
  %i.next = add i32 %i, 1

  %c = icmp slt i32 %i.next, %n
  br i1 %c, label %back, label %exit

exit:
  ret void

back:
  call void @something()
  br label %header
}

; Same as test0, but the branch condition is reversed.

; CHECK-LABEL: test1
; CHECK: (loop
; CHECK: (add
; CHECK: (brif
; CHECK: (call
; CHECK: (br $BB1_1)
; CHECK: (return)
define void @test1(i32 %n) {
entry:
  br label %header

header:
  %i = phi i32 [ 0, %entry ], [ %i.next, %back ]
  %i.next = add i32 %i, 1

  %c = icmp sge i32 %i.next, %n
  br i1 %c, label %exit, label %back

exit:
  ret void

back:
  call void @something()
  br label %header
}

; Test that a simple loop is handled as expected.

; CHECK-LABEL: test2
; CHECK: (block $BB2_2)
; CHECK: (brif $BB2_2 {{.*}})
; CHECK: BB2_1:
; CHECK: (brif $BB2_1 @14)
; CHECK: BB2_2:
; CHECK: (return)
define void @test2(double* nocapture %p, i32 %n) {
entry:
  %cmp.4 = icmp sgt i32 %n, 0
  br i1 %cmp.4, label %for.body.preheader, label %for.end

for.body.preheader:
  br label %for.body

for.body:
  %i.05 = phi i32 [ %inc, %for.body ], [ 0, %for.body.preheader ]
  %arrayidx = getelementptr inbounds double, double* %p, i32 %i.05
  %0 = load double, double* %arrayidx, align 8
  %mul = fmul double %0, 3.200000e+00
  store double %mul, double* %arrayidx, align 8
  %inc = add nuw nsw i32 %i.05, 1
  %exitcond = icmp eq i32 %inc, %n
  br i1 %exitcond, label %for.end.loopexit, label %for.body

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}

; CHECK-LABEL: doublediamond
; CHECK: (block $BB3_5)
; CHECK: (block $BB3_4)
; CHECK: (block $BB3_2)
; CHECK: (brif $BB3_2 @4)
; CHECK: (br $BB3_5)
; CHECK: BB3_2:
; CHECK: (brif $BB3_4 @7)
; CHECK: (br $BB3_5)
; CHECK: BB3_4:
; CHECK: BB3_5:
; CHECK: (return @3)
define i32 @doublediamond(i32 %a, i32 %b, i32* %p) {
entry:
  %c = icmp eq i32 %a, 0
  %d = icmp eq i32 %b, 0
  store volatile i32 0, i32* %p
  br i1 %c, label %true, label %false
true:
  store volatile i32 1, i32* %p
  br label %exit
false:
  store volatile i32 2, i32* %p
  br i1 %d, label %ft, label %ff
ft:
  store volatile i32 3, i32* %p
  br label %exit
ff:
  store volatile i32 4, i32* %p
  br label %exit
exit:
  store volatile i32 5, i32* %p
  ret i32 0
}

; CHECK-LABEL: triangle
; CHECK: (block $BB4_2)
; CHECK: (brif $BB4_2 @3)
; CHECK: BB4_2:
; CHECK: (return @2)
define i32 @triangle(i32* %p, i32 %a) {
entry:
  %c = icmp eq i32 %a, 0
  store volatile i32 0, i32* %p
  br i1 %c, label %true, label %exit
true:
  store volatile i32 1, i32* %p
  br label %exit
exit:
  store volatile i32 2, i32* %p
  ret i32 0
}

; CHECK-LABEL: diamond
; CHECK: (block $BB5_3)
; CHECK: (block $BB5_2)
; CHECK: (brif $BB5_2 @3)
; CHECK: (br $BB5_3)
; CHECK: BB5_2:
; CHECK: BB5_3:
; CHECK: (return @2)
define i32 @diamond(i32* %p, i32 %a) {
entry:
  %c = icmp eq i32 %a, 0
  store volatile i32 0, i32* %p
  br i1 %c, label %true, label %false
true:
  store volatile i32 1, i32* %p
  br label %exit
false:
  store volatile i32 2, i32* %p
  br label %exit
exit:
  store volatile i32 3, i32* %p
  ret i32 0
}

; CHECK-LABEL: single_block
; CHECK-NOT: br
; CHECK: (return @1)
define i32 @single_block(i32* %p) {
entry:
  store volatile i32 0, i32* %p
  ret i32 0
}

; CHECK-LABEL: minimal_loop
; CHECK-NOT: br
; CHECK: BB7_1:
; CHECK: (store_i32 @0 @2)
; CHECK: (br $BB7_1)
define i32 @minimal_loop(i32* %p) {
entry:
  store volatile i32 0, i32* %p
  br label %loop
loop:
  store volatile i32 1, i32* %p
  br label %loop
}

; CHECK-LABEL: simple_loop
; CHECK-NOT: br
; CHECK: BB8_1:
; CHECK: (loop $BB8_1)
; CHECK: (brif $BB8_1 @4)
; CHECK: (return @2)
define i32 @simple_loop(i32* %p, i32 %a) {
entry:
  %c = icmp eq i32 %a, 0
  store volatile i32 0, i32* %p
  br label %loop
loop:
  store volatile i32 1, i32* %p
  br i1 %c, label %loop, label %exit
exit:
  store volatile i32 2, i32* %p
  ret i32 0
}

; CHECK-LABEL: doubletriangle
; CHECK: (block $BB9_4)
; CHECK: (block $BB9_3)
; CHECK: (brif $BB9_4 @4)
; CHECK: (brif $BB9_3 @7)
; CHECK: BB9_3:
; CHECK: BB9_4:
; CHECK: (return @3)
define i32 @doubletriangle(i32 %a, i32 %b, i32* %p) {
entry:
  %c = icmp eq i32 %a, 0
  %d = icmp eq i32 %b, 0
  store volatile i32 0, i32* %p
  br i1 %c, label %true, label %exit
true:
  store volatile i32 2, i32* %p
  br i1 %d, label %tt, label %tf
tt:
  store volatile i32 3, i32* %p
  br label %tf
tf:
  store volatile i32 4, i32* %p
  br label %exit
exit:
  store volatile i32 5, i32* %p
  ret i32 0
}

; CHECK-LABEL: ifelse_earlyexits
; CHECK: (block $BB10_4)
; CHECK: (block $BB10_2)
; CHECK: (brif $BB10_2 @4)
; CHECK: (br $BB10_4)
; CHECK: BB10_2:
; CHECK: (brif $BB10_4 @7)
; CHECK: BB10_4:
; CHECK: (return @3)
define i32 @ifelse_earlyexits(i32 %a, i32 %b, i32* %p) {
entry:
  %c = icmp eq i32 %a, 0
  %d = icmp eq i32 %b, 0
  store volatile i32 0, i32* %p
  br i1 %c, label %true, label %false
true:
  store volatile i32 1, i32* %p
  br label %exit
false:
  store volatile i32 2, i32* %p
  br i1 %d, label %ft, label %exit
ft:
  store volatile i32 3, i32* %p
  br label %exit
exit:
  store volatile i32 4, i32* %p
  ret i32 0
}
0 ; RUN: llc < %s -asm-verbose=false | FileCheck %s
1
2 ; This test depends on branching support, which is not yet checked in.
3 ; XFAIL: *
41
52 ; Test that phis are lowered.
63
2825 ; Swap phis.
2926
3027 ; CHECK-LABEL: test1
31 ; CHECK: BB0_1:
28 ; CHECK: BB1_1:
3229 ; CHECK: (setlocal [[REG0:@.*]] [[REG1:@.*]])
3330 ; CHECK: (setlocal [[REG1]] [[REG2:@.*]])
3431 ; CHECK: (setlocal [[REG2]] [[REG0]])
; RUN: llc < %s -asm-verbose=false | FileCheck %s

; Test switch instructions.

target datalayout = "e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

declare void @foo0()
declare void @foo1()
declare void @foo2()
declare void @foo3()
declare void @foo4()
declare void @foo5()

; CHECK-LABEL: bar32
; CHECK: (block $BB0_8)
; CHECK: (block $BB0_7)
; CHECK: (block $BB0_6)
; CHECK: (block $BB0_5)
; CHECK: (block $BB0_4)
; CHECK: (block $BB0_3)
; CHECK: (block $BB0_2)
; CHECK: (switch {{.*}} $BB0_2 $BB0_2 $BB0_2 $BB0_2 $BB0_2 $BB0_2 $BB0_2 $BB0_2 $BB0_3 $BB0_3 $BB0_3 $BB0_3 $BB0_3 $BB0_3 $BB0_3 $BB0_3 $BB0_4 $BB0_4 $BB0_4 $BB0_4 $BB0_4 $BB0_4 $BB0_5 $BB0_6 $BB0_7)
; CHECK: BB0_2:
; CHECK: (setlocal {{.*}} (global $foo0))
; CHECK: BB0_3:
; CHECK: (setlocal {{.*}} (global $foo1))
; CHECK: BB0_4:
; CHECK: (setlocal {{.*}} (global $foo2))
; CHECK: BB0_5:
; CHECK: (setlocal {{.*}} (global $foo3))
; CHECK: BB0_6:
; CHECK: (setlocal {{.*}} (global $foo4))
; CHECK: BB0_7:
; CHECK: (setlocal {{.*}} (global $foo5))
; CHECK: BB0_8:
; CHECK: (return)
define void @bar32(i32 %n) {
entry:
  switch i32 %n, label %sw.epilog [
    i32 0, label %sw.bb
    i32 1, label %sw.bb
    i32 2, label %sw.bb
    i32 3, label %sw.bb
    i32 4, label %sw.bb
    i32 5, label %sw.bb
    i32 6, label %sw.bb
    i32 7, label %sw.bb.1
    i32 8, label %sw.bb.1
    i32 9, label %sw.bb.1
    i32 10, label %sw.bb.1
    i32 11, label %sw.bb.1
    i32 12, label %sw.bb.1
    i32 13, label %sw.bb.1
    i32 14, label %sw.bb.1
    i32 15, label %sw.bb.2
    i32 16, label %sw.bb.2
    i32 17, label %sw.bb.2
    i32 18, label %sw.bb.2
    i32 19, label %sw.bb.2
    i32 20, label %sw.bb.2
    i32 21, label %sw.bb.3
    i32 22, label %sw.bb.4
    i32 23, label %sw.bb.5
  ]

sw.bb:                                            ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo0()
  br label %sw.epilog

sw.bb.1:                                          ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo1()
  br label %sw.epilog

sw.bb.2:                                          ; preds = %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo2()
  br label %sw.epilog

sw.bb.3:                                          ; preds = %entry
  tail call void @foo3()
  br label %sw.epilog

sw.bb.4:                                          ; preds = %entry
  tail call void @foo4()
  br label %sw.epilog

sw.bb.5:                                          ; preds = %entry
  tail call void @foo5()
  br label %sw.epilog

sw.epilog:                                        ; preds = %entry, %sw.bb.5, %sw.bb.4, %sw.bb.3, %sw.bb.2, %sw.bb.1, %sw.bb
  ret void
}

; CHECK-LABEL: bar64
; CHECK: (block $BB1_8)
; CHECK: (block $BB1_7)
; CHECK: (block $BB1_6)
; CHECK: (block $BB1_5)
; CHECK: (block $BB1_4)
; CHECK: (block $BB1_3)
; CHECK: (block $BB1_2)
; CHECK: (switch {{.*}} $BB1_2 $BB1_2 $BB1_2 $BB1_2 $BB1_2 $BB1_2 $BB1_2 $BB1_2 $BB1_3 $BB1_3 $BB1_3 $BB1_3 $BB1_3 $BB1_3 $BB1_3 $BB1_3 $BB1_4 $BB1_4 $BB1_4 $BB1_4 $BB1_4 $BB1_4 $BB1_5 $BB1_6 $BB1_7)
; CHECK: BB1_2:
; CHECK: (setlocal {{.*}} (global $foo0))
; CHECK: BB1_3:
; CHECK: (setlocal {{.*}} (global $foo1))
; CHECK: BB1_4:
; CHECK: (setlocal {{.*}} (global $foo2))
; CHECK: BB1_5:
; CHECK: (setlocal {{.*}} (global $foo3))
; CHECK: BB1_6:
; CHECK: (setlocal {{.*}} (global $foo4))
; CHECK: BB1_7:
; CHECK: (setlocal {{.*}} (global $foo5))
; CHECK: BB1_8:
; CHECK: (return)
define void @bar64(i64 %n) {
entry:
  switch i64 %n, label %sw.epilog [
    i64 0, label %sw.bb
    i64 1, label %sw.bb
    i64 2, label %sw.bb
    i64 3, label %sw.bb
    i64 4, label %sw.bb
    i64 5, label %sw.bb
    i64 6, label %sw.bb
    i64 7, label %sw.bb.1
    i64 8, label %sw.bb.1
    i64 9, label %sw.bb.1
    i64 10, label %sw.bb.1
    i64 11, label %sw.bb.1
    i64 12, label %sw.bb.1
    i64 13, label %sw.bb.1
    i64 14, label %sw.bb.1
    i64 15, label %sw.bb.2
    i64 16, label %sw.bb.2
    i64 17, label %sw.bb.2
    i64 18, label %sw.bb.2
    i64 19, label %sw.bb.2
    i64 20, label %sw.bb.2
    i64 21, label %sw.bb.3
    i64 22, label %sw.bb.4
    i64 23, label %sw.bb.5
  ]

sw.bb:                                            ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo0()
  br label %sw.epilog

sw.bb.1:                                          ; preds = %entry, %entry, %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo1()
  br label %sw.epilog

sw.bb.2:                                          ; preds = %entry, %entry, %entry, %entry, %entry, %entry
  tail call void @foo2()
  br label %sw.epilog

sw.bb.3:                                          ; preds = %entry
  tail call void @foo3()
  br label %sw.epilog

sw.bb.4:                                          ; preds = %entry
  tail call void @foo4()
  br label %sw.epilog

sw.bb.5:                                          ; preds = %entry
  tail call void @foo5()
  br label %sw.epilog

sw.epilog:                                        ; preds = %entry, %sw.bb.5, %sw.bb.4, %sw.bb.3, %sw.bb.2, %sw.bb.1, %sw.bb
  ret void
}