llvm.org GIT mirror llvm / da781c7
[RISCV] Initial support for function calls Note that this is just enough for simple function call examples to generate working code. Support for varargs etc follows in future patches. Differential Revision: https://reviews.llvm.org/D29936 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317691 91177308-0d34-0410-b5e6-96231b3b80d8 Alex Bradbury 1 year, 11 months ago
9 changed file(s) with 269 addition(s) and 4 deletion(s). Raw diff Collapse all Expand all
2929 void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
3030
3131 bool hasFP(const MachineFunction &MF) const override;
32
33 MachineBasicBlock::iterator
34 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
35 MachineBasicBlock::iterator MI) const override {
36 return MBB.erase(MI);
37 }
3238 };
3339 }
3440 #endif
107107 default:
108108 report_fatal_error("Unsupported calling convention");
109109 case CallingConv::C:
110 case CallingConv::Fast:
110111 break;
111112 }
112113
143144 return Chain;
144145 }
145146
147 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
148 // and output parameter nodes.
149 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
150 SmallVectorImpl &InVals) const {
151 SelectionDAG &DAG = CLI.DAG;
152 SDLoc &DL = CLI.DL;
153 SmallVectorImpl &Outs = CLI.Outs;
154 SmallVectorImpl &OutVals = CLI.OutVals;
155 SmallVectorImpl &Ins = CLI.Ins;
156 SDValue Chain = CLI.Chain;
157 SDValue Callee = CLI.Callee;
158 CLI.IsTailCall = false;
159 CallingConv::ID CallConv = CLI.CallConv;
160 bool IsVarArg = CLI.IsVarArg;
161 EVT PtrVT = getPointerTy(DAG.getDataLayout());
162
163 if (IsVarArg) {
164 report_fatal_error("LowerCall with varargs not implemented");
165 }
166
167 MachineFunction &MF = DAG.getMachineFunction();
168
169 // Analyze the operands of the call, assigning locations to each operand.
170 SmallVector ArgLocs;
171 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
172 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV32);
173
174 // Get a count of how many bytes are to be pushed on the stack.
175 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
176
177 for (auto &Arg : Outs) {
178 if (!Arg.Flags.isByVal())
179 continue;
180 report_fatal_error("Passing arguments byval not yet implemented");
181 }
182
183 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
184
185 // Copy argument values to their designated locations.
186 SmallVector, 8> RegsToPass;
187 SDValue StackPtr;
188 for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
189 CCValAssign &VA = ArgLocs[I];
190 SDValue ArgValue = OutVals[I];
191
192 // Promote the value if needed.
193 // For now, only handle fully promoted arguments.
194 switch (VA.getLocInfo()) {
195 case CCValAssign::Full:
196 break;
197 default:
198 llvm_unreachable("Unknown loc info!");
199 }
200
201 if (VA.isRegLoc()) {
202 // Queue up the argument copies and emit them at the end.
203 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
204 } else {
205 assert(VA.isMemLoc() && "Argument not register or memory");
206 report_fatal_error("Passing arguments via the stack not yet implemented");
207 }
208 }
209
210 SDValue Glue;
211
212 // Build a sequence of copy-to-reg nodes, chained and glued together.
213 for (auto &Reg : RegsToPass) {
214 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
215 Glue = Chain.getValue(1);
216 }
217
218 if (isa(Callee)) {
219 Callee = lowerGlobalAddress(Callee, DAG);
220 } else if (isa(Callee)) {
221 report_fatal_error(
222 "lowerExternalSymbol, needed for lowerCall, not yet handled");
223 }
224
225 // The first call operand is the chain and the second is the target address.
226 SmallVector Ops;
227 Ops.push_back(Chain);
228 Ops.push_back(Callee);
229
230 // Add argument registers to the end of the list so that they are
231 // known live into the call.
232 for (auto &Reg : RegsToPass)
233 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
234
235 // Add a register mask operand representing the call-preserved registers.
236 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
237 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
238 assert(Mask && "Missing call preserved mask for calling convention");
239 Ops.push_back(DAG.getRegisterMask(Mask));
240
241 // Glue the call to the argument copies, if any.
242 if (Glue.getNode())
243 Ops.push_back(Glue);
244
245 // Emit the call.
246 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
247 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
248 Glue = Chain.getValue(1);
249
250 // Mark the end of the call, which is glued to the call itself.
251 Chain = DAG.getCALLSEQ_END(Chain,
252 DAG.getConstant(NumBytes, DL, PtrVT, true),
253 DAG.getConstant(0, DL, PtrVT, true),
254 Glue, DL);
255 Glue = Chain.getValue(1);
256
257 // Assign locations to each value returned by this call.
258 SmallVector RVLocs;
259 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
260 RetCCInfo.AnalyzeCallResult(Ins, RetCC_RISCV32);
261
262 // Copy all of the result registers out of their specified physreg.
263 for (auto &VA : RVLocs) {
264 // Copy the value out, gluing the copy to the end of the call sequence.
265 SDValue RetValue = DAG.getCopyFromReg(Chain, DL, VA.getLocReg(),
266 VA.getLocVT(), Glue);
267 Chain = RetValue.getValue(1);
268 Glue = RetValue.getValue(2);
269
270 InVals.push_back(Chain.getValue(0));
271 }
272
273 return Chain;
274 }
275
146276 SDValue
147277 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
148278 bool IsVarArg,
193323 break;
194324 case RISCVISD::RET_FLAG:
195325 return "RISCVISD::RET_FLAG";
326 case RISCVISD::CALL:
327 return "RISCVISD::CALL";
196328 }
197329 return nullptr;
198330 }
2323 namespace RISCVISD {
2424 enum NodeType : unsigned {
2525 FIRST_NUMBER = ISD::BUILTIN_OP_END,
26 RET_FLAG
26 RET_FLAG,
27 CALL
2728 };
2829 }
2930
5152 const SmallVectorImpl &Outs,
5253 const SmallVectorImpl &OutVals, const SDLoc &DL,
5354 SelectionDAG &DAG) const override;
55 SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
56 SmallVectorImpl &InVals) const override;
5457 bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
5558 Type *Ty) const override {
5659 return true;
2727
2828 using namespace llvm;
2929
30 RISCVInstrInfo::RISCVInstrInfo() : RISCVGenInstrInfo() {}
30 RISCVInstrInfo::RISCVInstrInfo()
31 : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP) {}
3132
3233 void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
3334 MachineBasicBlock::iterator MBBI,
1616 // RISC-V specific DAG Nodes.
1717 //===----------------------------------------------------------------------===//
1818
// Type profile for RISCVISD::CALL: no results, variadic operands, and the
// first operand (the call target) must have the native XLen type.
def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
def SDT_RISCVCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                            SDTCisVT<1, i32>]>;
def SDT_RISCVCallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                        SDTCisVT<1, i32>]>;

def Call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                   SDNPVariadic]>;
def CallSeqStart : SDNode<"ISD::CALLSEQ_START", SDT_RISCVCallSeqStart,
                          [SDNPHasChain, SDNPOutGlue]>;
def CallSeqEnd : SDNode<"ISD::CALLSEQ_END", SDT_RISCVCallSeqEnd,
                        [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def RetFlag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
                     [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
2135
2236 //===----------------------------------------------------------------------===//
2337 // Operand and SDNode transformation definitions.
339353 def PseudoBR : Pseudo<(outs), (ins simm21_lsb0:$imm20), [(br bb:$imm20)]>,
340354 PseudoInstExpansion<(JAL X0, simm21_lsb0:$imm20)>;
341355
// Indirect call through a register; expands to JALR with the return address
// written to X1 (ra).
let isCall = 1, Defs = [X1] in
def PseudoCALL : Pseudo<(outs), (ins GPR:$rs1), [(Call GPR:$rs1)]>,
                 PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;
359
// Function return; expands to JALR through X1 (ra) with the link result
// discarded into X0 (zero).
let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(RetFlag)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;
// Store patterns: byte, halfword and word stores map to SB/SH/SW.
// NOTE(review): the angle-bracket arguments were lost in extraction;
// reconstructed as <fragment, store-instr, GPR> per the StPat multiclass —
// confirm against the original RISCVInstrInfo.td.
defm : StPat<truncstorei8, SB, GPR>;
defm : StPat<truncstorei16, SH, GPR>;
defm : StPat<store, SW, GPR>;
391
/// Other pseudo-instructions

// Call-frame setup/teardown markers. Pessimistically assume the stack
// pointer (X2) will be clobbered; both pseudos are erased in
// eliminateCallFramePseudoInstr without emitting any SP adjustment.
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(CallSeqStart timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP   : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(CallSeqEnd timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]
6767 return false;
6868 MCOp = MCOperand::createReg(MO.getReg());
6969 break;
70 case MachineOperand::MO_RegisterMask:
71 // Regmasks are like implicit defs.
72 return false;
7073 case MachineOperand::MO_Immediate:
7174 MCOp = MCOperand::createImm(MO.getImm());
7275 break;
8787 unsigned RISCVRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
8888 return RISCV::X8;
8989 }
90
91 const uint32_t *
92 RISCVRegisterInfo::getCallPreservedMask(const MachineFunction & /*MF*/,
93 CallingConv::ID /*CC*/) const {
94 return CSR_RegMask;
95 }
2424
2525 RISCVRegisterInfo(unsigned HwMode);
2626
27 const uint32_t *getCallPreservedMask(const MachineFunction &MF,
28 CallingConv::ID) const override;
29
2730 const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
2831
2932 BitVector getReservedRegs(const MachineFunction &MF) const override;
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s

declare i32 @external_function(i32)

; Check that a call to an externally declared function materializes the
; callee address via lui/addi and calls through jalr, saving/restoring ra.
define i32 @test_call_external(i32 %a) nounwind {
; RV32I-LABEL: test_call_external:
; RV32I:       # BB#0:
; RV32I-NEXT:    sw ra, 12(s0)
; RV32I-NEXT:    lui a1, %hi(external_function)
; RV32I-NEXT:    addi a1, a1, %lo(external_function)
; RV32I-NEXT:    jalr ra, a1, 0
; RV32I-NEXT:    lw ra, 12(s0)
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = call i32 @external_function(i32 %a)
  ret i32 %1
}
18
; Simple leaf function used as a call target by test_call_defined below.
define i32 @defined_function(i32 %a) nounwind {
; RV32I-LABEL: defined_function:
; RV32I:       # BB#0:
; RV32I-NEXT:    addi a0, a0, 1
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = add i32 %a, 1
  ret i32 %1
}
27
; Check that a call to a function defined in the same module is lowered the
; same way as an external call (lui/addi + jalr).
define i32 @test_call_defined(i32 %a) nounwind {
; RV32I-LABEL: test_call_defined:
; RV32I:       # BB#0:
; RV32I-NEXT:    sw ra, 12(s0)
; RV32I-NEXT:    lui a1, %hi(defined_function)
; RV32I-NEXT:    addi a1, a1, %lo(defined_function)
; RV32I-NEXT:    jalr ra, a1, 0
; RV32I-NEXT:    lw ra, 12(s0)
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = call i32 @defined_function(i32 %a) nounwind
  ret i32 %1
}
40
; Check an indirect call through a function-pointer argument: the target
; address is moved out of a0 so the argument can take its place, then jalr
; jumps through the saved register.
define i32 @test_call_indirect(i32 (i32)* %a, i32 %b) nounwind {
; RV32I-LABEL: test_call_indirect:
; RV32I:       # BB#0:
; RV32I-NEXT:    sw ra, 12(s0)
; RV32I-NEXT:    addi a2, a0, 0
; RV32I-NEXT:    addi a0, a1, 0
; RV32I-NEXT:    jalr ra, a2, 0
; RV32I-NEXT:    lw ra, 12(s0)
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = call i32 %a(i32 %b)
  ret i32 %1
}
53
; Ensure that calls to fastcc functions aren't rejected. Such calls may be
; introduced when compiling with optimisation.

define fastcc i32 @fastcc_function(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: fastcc_function:
; RV32I:       # BB#0:
; RV32I-NEXT:    add a0, a0, a1
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = add i32 %a, %b
  ret i32 %1
}
65
; Check that a fastcc call lowers like a C call (fastcc is accepted and
; currently handled identically to CallingConv::C). Note the result of the
; call is unused; %a is returned via the callee-saved copy in s1.
define i32 @test_call_fastcc(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: test_call_fastcc:
; RV32I:       # BB#0:
; RV32I-NEXT:    sw ra, 12(s0)
; RV32I-NEXT:    sw s1, 8(s0)
; RV32I-NEXT:    addi s1, a0, 0
; RV32I-NEXT:    lui a0, %hi(fastcc_function)
; RV32I-NEXT:    addi a2, a0, %lo(fastcc_function)
; RV32I-NEXT:    addi a0, s1, 0
; RV32I-NEXT:    jalr ra, a2, 0
; RV32I-NEXT:    addi a0, s1, 0
; RV32I-NEXT:    lw s1, 8(s0)
; RV32I-NEXT:    lw ra, 12(s0)
; RV32I-NEXT:    jalr zero, ra, 0
  %1 = call fastcc i32 @fastcc_function(i32 %a, i32 %b)
  ret i32 %a
}