[RISCV] Codegen support for memory operations

This required the implementation of RISCVTargetInstrInfo::copyPhysReg.
Support for lowering global addresses follows in the next patch.

Differential Revision: https://reviews.llvm.org/D29934

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@317685 91177308-0d34-0410-b5e6-96231b3b80d8

Alex Bradbury, 1 year, 11 months ago
6 changed files with 240 additions and 0 deletions.
lib/Target/RISCV/RISCVISelLowering.cpp
@@ -47,6 +47,9 @@
   computeRegisterProperties(STI.getRegisterInfo());
 
   setStackPointerRegisterToSaveRestore(RISCV::X2);
+
+  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
+    setLoadExtAction(N, XLenVT, MVT::i1, Promote);
 
   // TODO: add all necessary setOperationAction calls.
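Note: marking the i1 extending-load actions as Promote makes the legalizer widen any i1 load to i8 before instruction selection, so no i1-typed memory access ever has to be matched directly. A minimal IR sketch of the effect (the function name is illustrative; per the load_sext_zext_anyext_i1 test below, the zextload comes out as a single lbu):

  ; The i1 zextload below is promoted to an i8 zextload during
  ; legalization and then selects to RV32I's lbu instruction.
  define i32 @zext_i1(i1* %p) nounwind {
    %v = load i1, i1* %p
    %r = zext i1 %v to i32
    ret i32 %r
  }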
lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -28,3 +28,15 @@
 using namespace llvm;
 
 RISCVInstrInfo::RISCVInstrInfo() : RISCVGenInstrInfo() {}
+
+void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+                                 MachineBasicBlock::iterator MBBI,
+                                 const DebugLoc &DL, unsigned DstReg,
+                                 unsigned SrcReg, bool KillSrc) const {
+  assert(RISCV::GPRRegClass.contains(DstReg, SrcReg) &&
+         "Impossible reg-to-reg copy");
+
+  BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
+      .addReg(SrcReg, getKillRegState(KillSrc))
+      .addImm(0);
+}
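RV32I has no dedicated register-move instruction; the canonical mv rd, rs is an alias for addi rd, rs, 0, which is exactly the expansion this hook emits (and why it asserts that both operands are GPRs). A small sketch of where such a copy arises (the function name is illustrative; the register assignment assumes the standard calling convention):

  ; Returning the second argument requires a physical copy a1 -> a0,
  ; which copyPhysReg materializes as: addi a0, a1, 0
  define i32 @second(i32 %a, i32 %b) nounwind {
    ret i32 %b
  }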
lib/Target/RISCV/RISCVInstrInfo.h
@@ -25,6 +25,10 @@
 
 public:
   RISCVInstrInfo();
+
+  void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
+                   const DebugLoc &DL, unsigned DstReg, unsigned SrcReg,
+                   bool KillSrc) const override;
 };
 }
 
lib/Target/RISCV/RISCVInstrInfo.td
@@ -310,3 +310,31 @@
 let isBarrier = 1, isReturn = 1, isTerminator = 1 in
 def PseudoRET : Pseudo<(outs), (ins), [(RetFlag)]>,
                 PseudoInstExpansion<(JALR X0, X1, 0)>;
+
+/// Loads
+
+multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
+  def : Pat<(LoadOp GPR:$rs1), (Inst GPR:$rs1, 0)>;
+  def : Pat<(LoadOp (add GPR:$rs1, simm12:$imm12)),
+            (Inst GPR:$rs1, simm12:$imm12)>;
+}
+
+defm : LdPat<sextloadi8, LB>;
+defm : LdPat<extloadi8, LB>;
+defm : LdPat<sextloadi16, LH>;
+defm : LdPat<extloadi16, LH>;
+defm : LdPat<load, LW>;
+defm : LdPat<zextloadi8, LBU>;
+defm : LdPat<zextloadi16, LHU>;
+
+/// Stores
+
+multiclass StPat<PatFrag StoreOp, RVInst Inst> {
+  def : Pat<(StoreOp GPR:$rs2, GPR:$rs1), (Inst GPR:$rs2, GPR:$rs1, 0)>;
+  def : Pat<(StoreOp GPR:$rs2, (add GPR:$rs1, simm12:$imm12)),
+            (Inst GPR:$rs2, GPR:$rs1, simm12:$imm12)>;
+}
+
+defm : StPat<truncstorei8, SB>;
+defm : StPat<truncstorei16, SH>;
+defm : StPat<store, SW>;
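Each defm here expands the multiclass into two anonymous selection patterns: one matching a plain base-register address (immediate 0) and one folding an add of a simm12 into the instruction's 12-bit offset field. For example, defm : LdPat<sextloadi8, LB>; (assuming the reconstructed parameter list above) is equivalent to writing:

  def : Pat<(sextloadi8 GPR:$rs1), (LB GPR:$rs1, 0)>;
  def : Pat<(sextloadi8 (add GPR:$rs1, simm12:$imm12)),
            (LB GPR:$rs1, simm12:$imm12)>;

This folding is what lets the tests below select reg+imm addressing for getelementptr offsets that fit in 12 bits.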
test/CodeGen/RISCV/mem.ll (new file)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I

; Check indexed and unindexed, sext, zext and anyext loads

define i32 @lb(i8 *%a) nounwind {
; RV32I-LABEL: lb:
; RV32I: # BB#0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lb a0, 1(a0)
; RV32I-NEXT: jalr zero, ra, 0
  %1 = getelementptr i8, i8* %a, i32 1
  %2 = load i8, i8* %1
  %3 = sext i8 %2 to i32
  ; the unused load will produce an anyext for selection
  %4 = load volatile i8, i8* %a
  ret i32 %3
}

define i32 @lh(i16 *%a) nounwind {
; RV32I-LABEL: lh:
; RV32I: # BB#0:
; RV32I-NEXT: lh a1, 0(a0)
; RV32I-NEXT: lh a0, 4(a0)
; RV32I-NEXT: jalr zero, ra, 0
  %1 = getelementptr i16, i16* %a, i32 2
  %2 = load i16, i16* %1
  %3 = sext i16 %2 to i32
  ; the unused load will produce an anyext for selection
  %4 = load volatile i16, i16* %a
  ret i32 %3
}

define i32 @lw(i32 *%a) nounwind {
; RV32I-LABEL: lw:
; RV32I: # BB#0:
; RV32I-NEXT: lw a1, 0(a0)
; RV32I-NEXT: lw a0, 12(a0)
; RV32I-NEXT: jalr zero, ra, 0
  %1 = getelementptr i32, i32* %a, i32 3
  %2 = load i32, i32* %1
  %3 = load volatile i32, i32* %a
  ret i32 %2
}

define i32 @lbu(i8 *%a) nounwind {
; RV32I-LABEL: lbu:
; RV32I: # BB#0:
; RV32I-NEXT: lbu a1, 0(a0)
; RV32I-NEXT: lbu a0, 4(a0)
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
  %1 = getelementptr i8, i8* %a, i32 4
  %2 = load i8, i8* %1
  %3 = zext i8 %2 to i32
  %4 = load volatile i8, i8* %a
  %5 = zext i8 %4 to i32
  %6 = add i32 %3, %5
  ret i32 %6
}

define i32 @lhu(i16 *%a) nounwind {
; RV32I-LABEL: lhu:
; RV32I: # BB#0:
; RV32I-NEXT: lhu a1, 0(a0)
; RV32I-NEXT: lhu a0, 10(a0)
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
  %1 = getelementptr i16, i16* %a, i32 5
  %2 = load i16, i16* %1
  %3 = zext i16 %2 to i32
  %4 = load volatile i16, i16* %a
  %5 = zext i16 %4 to i32
  %6 = add i32 %3, %5
  ret i32 %6
}

; Check indexed and unindexed stores

define void @sb(i8 *%a, i8 %b) nounwind {
; RV32I-LABEL: sb:
; RV32I: # BB#0:
; RV32I-NEXT: sb a1, 6(a0)
; RV32I-NEXT: sb a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
  store i8 %b, i8* %a
  %1 = getelementptr i8, i8* %a, i32 6
  store i8 %b, i8* %1
  ret void
}

define void @sh(i16 *%a, i16 %b) nounwind {
; RV32I-LABEL: sh:
; RV32I: # BB#0:
; RV32I-NEXT: sh a1, 14(a0)
; RV32I-NEXT: sh a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
  store i16 %b, i16* %a
  %1 = getelementptr i16, i16* %a, i32 7
  store i16 %b, i16* %1
  ret void
}

define void @sw(i32 *%a, i32 %b) nounwind {
; RV32I-LABEL: sw:
; RV32I: # BB#0:
; RV32I-NEXT: sw a1, 32(a0)
; RV32I-NEXT: sw a1, 0(a0)
; RV32I-NEXT: jalr zero, ra, 0
  store i32 %b, i32* %a
  %1 = getelementptr i32, i32* %a, i32 8
  store i32 %b, i32* %1
  ret void
}

; Check load and store to an i1 location
define i32 @load_sext_zext_anyext_i1(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1:
; RV32I: # BB#0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a0, 2(a0)
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i32
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i32
  %7 = add i32 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i32 %7
}

define i16 @load_sext_zext_anyext_i1_i16(i1 *%a) nounwind {
; RV32I-LABEL: load_sext_zext_anyext_i1_i16:
; RV32I: # BB#0:
; RV32I-NEXT: lb a1, 0(a0)
; RV32I-NEXT: lbu a1, 1(a0)
; RV32I-NEXT: lbu a0, 2(a0)
; RV32I-NEXT: sub a0, a0, a1
; RV32I-NEXT: jalr zero, ra, 0
  ; sextload i1
  %1 = getelementptr i1, i1* %a, i32 1
  %2 = load i1, i1* %1
  %3 = sext i1 %2 to i16
  ; zextload i1
  %4 = getelementptr i1, i1* %a, i32 2
  %5 = load i1, i1* %4
  %6 = zext i1 %5 to i16
  %7 = add i16 %3, %6
  ; extload i1 (anyext). Produced as the load is unused.
  %8 = load volatile i1, i1* %a
  ret i16 %7
}

; Ensure that 1 is added to the high 20 bits if bit 11 of the low part is 1
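; As a worked example of that rule: 3735928559 is 0xdeadbeef, whose low
; 12 bits (0xeef) sign-extend to -273 because bit 11 is set. The high
; part is therefore rounded up: hi20 = (0xdeadbeef + 0x800) >> 12 =
; 0xdeadc = 912092, so that (912092 << 12) + (-273) == 0xdeadbeef,
; matching the lui/addi pair in the checks below.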
define i32 @lw_sw_constant(i32 %a) nounwind {
; TODO: the addi should be folded into the lw/sw
; RV32I-LABEL: lw_sw_constant:
; RV32I: # BB#0:
; RV32I-NEXT: lui a1, 912092
; RV32I-NEXT: addi a2, a1, -273
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: sw a0, 0(a2)
; RV32I-NEXT: addi a0, a1, 0
; RV32I-NEXT: jalr zero, ra, 0
  %1 = inttoptr i32 3735928559 to i32*
  %2 = load volatile i32, i32* %1
  store i32 %a, i32* %1
  ret i32 %2
}
test/CodeGen/RISCV/wide-mem.ll (new file)
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32I

; Check load/store operations on values wider than what is natively supported

define i64 @load_i64(i64 *%a) nounwind {
; RV32I-LABEL: load_i64:
; RV32I: # BB#0:
; RV32I-NEXT: lw a2, 0(a0)
; RV32I-NEXT: lw a1, 4(a0)
; RV32I-NEXT: addi a0, a2, 0
; RV32I-NEXT: jalr zero, ra, 0
  %1 = load i64, i64* %a
  ret i64 %1
}