llvm.org GIT mirror llvm / a8e2989
ARM backend contribution from Apple. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@33353 91177308-0d34-0410-b5e6-96231b3b80d8 Evan Cheng 13 years ago
32 changed file(s) with 8883 addition(s) and 2156 deletion(s).
1919 #include
2020
2121 namespace llvm {
22 // Enums corresponding to ARM condition codes
23 namespace ARMCC {
24 enum CondCodes {
25 EQ,
26 NE,
27 CS,
28 CC,
29 MI,
30 PL,
31 VS,
32 VC,
33 HI,
34 LS,
35 GE,
36 LT,
37 GT,
38 LE,
39 AL
40 };
22
23 class ARMTargetMachine;
24 class FunctionPass;
25
26 // Enums corresponding to ARM condition codes
27 namespace ARMCC {
28 enum CondCodes {
29 EQ,
30 NE,
31 HS,
32 LO,
33 MI,
34 PL,
35 VS,
36 VC,
37 HI,
38 LS,
39 GE,
40 LT,
41 GT,
42 LE,
43 AL
44 };
45
46 inline static CondCodes getOppositeCondition(CondCodes CC){
47 switch (CC) {
48 default: assert(0 && "Unknown condition code");
49 case EQ: return NE;
50 case NE: return EQ;
51 case HS: return LO;
52 case LO: return HS;
53 case MI: return PL;
54 case PL: return MI;
55 case VS: return VC;
56 case VC: return VS;
57 case HI: return LS;
58 case LS: return HI;
59 case GE: return LT;
60 case LT: return GE;
61 case GT: return LE;
62 case LE: return GT;
63 }
4164 }
65 }
4266
43 namespace ARMShift {
44 enum ShiftTypes {
45 LSL,
46 LSR,
47 ASR,
48 ROR,
49 RRX
50 };
67 inline static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
68 switch (CC) {
69 default: assert(0 && "Unknown condition code");
70 case ARMCC::EQ: return "eq";
71 case ARMCC::NE: return "ne";
72 case ARMCC::HS: return "hs";
73 case ARMCC::LO: return "lo";
74 case ARMCC::MI: return "mi";
75 case ARMCC::PL: return "pl";
76 case ARMCC::VS: return "vs";
77 case ARMCC::VC: return "vc";
78 case ARMCC::HI: return "hi";
79 case ARMCC::LS: return "ls";
80 case ARMCC::GE: return "ge";
81 case ARMCC::LT: return "lt";
82 case ARMCC::GT: return "gt";
83 case ARMCC::LE: return "le";
84 case ARMCC::AL: return "al";
5185 }
86 }
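As a quick illustration of these two helpers (not part of the commit; a minimal sketch assuming ARM.h is on the include path):

#include "ARM.h"      // assumed include path
#include <cassert>
#include <cstring>

// Sanity-check sketch: condition-code inversion and printing compose as expected.
static void checkCondCodeHelpers() {
  using namespace llvm;
  assert(ARMCC::getOppositeCondition(ARMCC::GE) == ARMCC::LT);    // signed >= flips to <
  assert(std::strcmp(ARMCondCodeToString(ARMCC::HS), "hs") == 0); // unsigned >= prints "hs"
}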
5287
53 class FunctionPass;
54 class TargetMachine;
88 FunctionPass *createARMISelDag(ARMTargetMachine &TM);
89 FunctionPass *createARMCodePrinterPass(std::ostream &O, ARMTargetMachine &TM);
90 FunctionPass *createARMLoadStoreOptimizationPass();
91 FunctionPass *createARMConstantIslandPass();
5592
56 FunctionPass *createARMISelDag(TargetMachine &TM);
57 FunctionPass *createARMCodePrinterPass(std::ostream &OS, TargetMachine &TM);
58 FunctionPass *createARMFixMulPass();
5993 } // end namespace llvm;
6094
6195 // Defines symbolic names for ARM registers. This defines a mapping from
1717 include "../Target.td"
1818
1919 //===----------------------------------------------------------------------===//
20 // ARM Subtarget features.
21 //
22
23 def ArchV4T : SubtargetFeature<"v4t", "ARMArchVersion", "V4T",
24 "ARM v4T">;
25 def ArchV5T : SubtargetFeature<"v5t", "ARMArchVersion", "V5T",
26 "ARM v5T">;
27 def ArchV5TE : SubtargetFeature<"v5te", "ARMArchVersion", "V5TE",
28 "ARM v5TE, v5TEj, v5TExp">;
29 def ArchV6 : SubtargetFeature<"v6", "ARMArchVersion", "V6",
30 "ARM v6">;
31 def FeatureVFP2 : SubtargetFeature<"vfp2", "HasVFP2", "true",
32 "Enable VFP2 instructions ">;
33
34 //===----------------------------------------------------------------------===//
35 // ARM Processors supported.
36 //
37
38 class Proc<string Name, list<SubtargetFeature> Features>
39 : Processor<Name, NoItineraries, Features>;
40
41 // V4 Processors.
42 def : Proc<"generic", []>;
43 def : Proc<"arm8", []>;
44 def : Proc<"arm810", []>;
45 def : Proc<"strongarm", []>;
46 def : Proc<"strongarm110", []>;
47 def : Proc<"strongarm1100", []>;
48 def : Proc<"strongarm1110", []>;
49
50 // V4T Processors.
51 def : Proc<"arm7tdmi", [ArchV4T]>;
52 def : Proc<"arm7tdmi-s", [ArchV4T]>;
53 def : Proc<"arm710t", [ArchV4T]>;
54 def : Proc<"arm720t", [ArchV4T]>;
55 def : Proc<"arm9", [ArchV4T]>;
56 def : Proc<"arm9tdmi", [ArchV4T]>;
57 def : Proc<"arm920", [ArchV4T]>;
58 def : Proc<"arm920t", [ArchV4T]>;
59 def : Proc<"arm922t", [ArchV4T]>;
60 def : Proc<"arm940t", [ArchV4T]>;
61 def : Proc<"ep9312", [ArchV4T]>;
62
63 // V5T Processors.
64 def : Proc<"arm10tdmi", [ArchV5T]>;
65 def : Proc<"arm1020t", [ArchV5T]>;
66
67 // V5TE Processors.
68 def : Proc<"arm9e", [ArchV5TE]>;
69 def : Proc<"arm946e-s", [ArchV5TE]>;
70 def : Proc<"arm966e-s", [ArchV5TE]>;
71 def : Proc<"arm968e-s", [ArchV5TE]>;
72 def : Proc<"arm10e", [ArchV5TE]>;
73 def : Proc<"arm1020e", [ArchV5TE]>;
74 def : Proc<"arm1022e", [ArchV5TE]>;
75 def : Proc<"xscale", [ArchV5TE]>;
76 def : Proc<"iwmmxt", [ArchV5TE]>;
77
78 // V6 Processors.
79 def : Proc<"arm1136j-s", [ArchV6]>;
80 def : Proc<"arm1136jf-s", [ArchV6, FeatureVFP2]>;
81 def : Proc<"arm1176jz-s", [ArchV6]>;
82 def : Proc<"arm1176jzf-s", [ArchV6, FeatureVFP2]>;
83 def : Proc<"mpcorenovfp", [ArchV6]>;
84 def : Proc<"mpcore", [ArchV6, FeatureVFP2]>;
85
86 //===----------------------------------------------------------------------===//
2087 // Register File Description
2188 //===----------------------------------------------------------------------===//
2289
3097
3198 def ARMInstrInfo : InstrInfo {
3299 // Define how we want to layout our target-specific information field.
33 let TSFlagsFields = [];
34 let TSFlagsShifts = [];
100 let TSFlagsFields = ["AddrModeBits",
101 "SizeFlag",
102 "IndexModeBits",
103 "Opcode"];
104 let TSFlagsShifts = [0,
105 4,
106 7,
107 9];
35108 }
36109
37110 //===----------------------------------------------------------------------===//
0 //===- ARMAddressingModes.h - ARM Addressing Modes --------------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by Chris Lattner and is distributed under the
5 // University of Illinois Open Source License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the ARM addressing mode implementation stuff.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #ifndef LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
14 #define LLVM_TARGET_ARM_ARMADDRESSINGMODES_H
15
16 #include "llvm/CodeGen/SelectionDAGNodes.h"
17 #include "llvm/Support/MathExtras.h"
18 #include <cassert>
19
20 namespace llvm {
21
22 /// ARM_AM - ARM Addressing Mode Stuff
23 namespace ARM_AM {
24 enum ShiftOpc {
25 no_shift = 0,
26 asr,
27 lsl,
28 lsr,
29 ror,
30 rrx
31 };
32
33 enum AddrOpc {
34 add = '+', sub = '-'
35 };
36
37 static inline const char *getShiftOpcStr(ShiftOpc Op) {
38 switch (Op) {
39 default: assert(0 && "Unknown shift opc!");
40 case ARM_AM::asr: return "asr";
41 case ARM_AM::lsl: return "lsl";
42 case ARM_AM::lsr: return "lsr";
43 case ARM_AM::ror: return "ror";
44 case ARM_AM::rrx: return "rrx";
45 }
46 }
47
48 static inline ShiftOpc getShiftOpcForNode(SDOperand N) {
49 switch (N.getOpcode()) {
50 default: return ARM_AM::no_shift;
51 case ISD::SHL: return ARM_AM::lsl;
52 case ISD::SRL: return ARM_AM::lsr;
53 case ISD::SRA: return ARM_AM::asr;
54 case ISD::ROTR: return ARM_AM::ror;
55 //case ISD::ROTL: // Only if imm -> turn into ROTR.
56 // Can't handle RRX here, because it would require folding a flag into
57 // the addressing mode. :( This causes us to miss certain things.
58 //case ARMISD::RRX: return ARM_AM::rrx;
59 }
60 }
61
62 enum AMSubMode {
63 bad_am_submode = 0,
64 ia,
65 ib,
66 da,
67 db
68 };
69
70 static inline const char *getAMSubModeStr(AMSubMode Mode) {
71 switch (Mode) {
72 default: assert(0 && "Unknown addressing sub-mode!");
73 case ARM_AM::ia: return "ia";
74 case ARM_AM::ib: return "ib";
75 case ARM_AM::da: return "da";
76 case ARM_AM::db: return "db";
77 }
78 }
79
80 static inline const char *getAMSubModeAltStr(AMSubMode Mode, bool isLD) {
81 switch (Mode) {
82 default: assert(0 && "Unknown addressing sub-mode!");
83 case ARM_AM::ia: return isLD ? "fd" : "ea";
84 case ARM_AM::ib: return isLD ? "ed" : "fa";
85 case ARM_AM::da: return isLD ? "fa" : "ed";
86 case ARM_AM::db: return isLD ? "ea" : "fd";
87 }
88 }
89
90 /// rotr32 - Rotate a 32-bit unsigned value right by a specified # bits.
91 ///
92 static inline unsigned rotr32(unsigned Val, unsigned Amt) {
93 assert(Amt < 32 && "Invalid rotate amount");
94 return (Val >> Amt) | (Val << ((32-Amt)&31));
95 }
96
97 /// rotl32 - Rotate a 32-bit unsigned value left by a specified # bits.
98 ///
99 static inline unsigned rotl32(unsigned Val, unsigned Amt) {
100 assert(Amt < 32 && "Invalid rotate amount");
101 return (Val << Amt) | (Val >> ((32-Amt)&31));
102 }
103
104 //===--------------------------------------------------------------------===//
105 // Addressing Mode #1: shift_operand with registers
106 //===--------------------------------------------------------------------===//
107 //
108 // This 'addressing mode' is used for arithmetic instructions. It can
109 // represent things like:
110 // reg
111 // reg [asr|lsl|lsr|ror|rrx] reg
112 // reg [asr|lsl|lsr|ror|rrx] imm
113 //
114 // This is stored as three operands [rega, regb, opc]. The first is the base
115 // reg, the second is the shift amount (or reg0 if not present or imm). The
116 // third operand encodes the shift opcode and the imm if a reg isn't present.
117 //
118 static inline unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm) {
119 return ShOp | (Imm << 3);
120 }
121 static inline unsigned getSORegOffset(unsigned Op) {
122 return Op >> 3;
123 }
124 static inline ShiftOpc getSORegShOp(unsigned Op) {
125 return (ShiftOpc)(Op & 7);
126 }
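A minimal sketch (not part of the commit, assuming ARMAddressingModes.h is on the include path) showing the so_reg opc packing round-tripping for an "lsl #3" operand:

#include "ARMAddressingModes.h"  // assumed include path
#include <cassert>

static void checkSORegPacking() {
  using namespace llvm;
  unsigned Opc = ARM_AM::getSORegOpc(ARM_AM::lsl, 3);  // pack shift opcode + imm
  assert(ARM_AM::getSORegShOp(Opc) == ARM_AM::lsl);    // low 3 bits hold the opcode
  assert(ARM_AM::getSORegOffset(Opc) == 3);            // remaining bits hold the imm
}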
127
128 /// getSOImmValImm - Given an encoded imm field for the reg/imm form, return
129 /// the 8-bit imm value.
130 static inline unsigned getSOImmValImm(unsigned Imm) {
131 return Imm & 0xFF;
132 }
133 /// getSOImmValRot - Given an encoded imm field for the reg/imm form, return
134 /// the rotate amount.
135 static inline unsigned getSOImmValRot(unsigned Imm) {
136 return (Imm >> 8) * 2;
137 }
138
139 /// getSOImmValRotate - Try to handle Imm with an immediate shifter operand,
140 /// computing the rotate amount to use. If this immediate value cannot be
141 /// handled with a single shifter-op, determine a good rotate amount that will
142 /// take a maximal chunk of bits out of the immediate.
143 static inline unsigned getSOImmValRotate(unsigned Imm) {
144 // 8-bit (or less) immediates are trivially shifter_operands with a rotate
145 // of zero.
146 if ((Imm & ~255U) == 0) return 0;
147
148 // Use CTZ to compute the rotate amount.
149 unsigned TZ = CountTrailingZeros_32(Imm);
150
151 // Rotate amount must be even. Something like 0x200 must be rotated 8 bits,
152 // not 9.
153 unsigned RotAmt = TZ & ~1;
154
155 // If we can handle this spread, return it.
156 if ((rotr32(Imm, RotAmt) & ~255U) == 0)
157 return (32-RotAmt)&31; // HW rotates right, not left.
158
159 // For values like 0xF000000F, we should skip the first run of ones, then
160 // retry the hunt.
161 if (Imm & 1) {
162 unsigned TrailingOnes = CountTrailingZeros_32(~Imm);
163 if (TrailingOnes != 32) { // Avoid overflow on 0xFFFFFFFF
164 // Restart the search for a high-order bit after the initial sequence of
165 // ones.
166 unsigned TZ2 = CountTrailingZeros_32(Imm & ~((1 << TrailingOnes)-1));
167
168 // Rotate amount must be even.
169 unsigned RotAmt2 = TZ2 & ~1;
170
171 // If this fits, use it.
172 if (RotAmt2 != 32 && (rotr32(Imm, RotAmt2) & ~255U) == 0)
173 return (32-RotAmt2)&31; // HW rotates right, not left.
174 }
175 }
176
177 // Otherwise, we have no way to cover this span of bits with a single
178 // shifter_op immediate. Return a chunk of bits that will be useful to
179 // handle.
180 return (32-RotAmt)&31; // HW rotates right, not left.
181 }
182
183 /// getSOImmVal - Given a 32-bit immediate, if it is something that can fit
184 /// into an shifter_operand immediate operand, return the 12-bit encoding for
185 /// it. If not, return -1.
186 static inline int getSOImmVal(unsigned Arg) {
187 // 8-bit (or less) immediates are trivially shifter_operands with a rotate
188 // of zero.
189 if ((Arg & ~255U) == 0) return Arg;
190
191 unsigned RotAmt = getSOImmValRotate(Arg);
192
193 // If this cannot be handled with a single shifter_op, bail out.
194 if (rotr32(~255U, RotAmt) & Arg)
195 return -1;
196
197 // Encode this correctly.
198 return rotl32(Arg, RotAmt) | ((RotAmt>>1) << 8);
199 }
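For concreteness, a hedged worked example (not in the commit, same assumed includes as the sketch above): 0xFF00 is 0xFF rotated right by 24, so its 12-bit so_imm encoding is 0xCFF (imm8 = 0xFF, rotate field = 24/2 = 12), while 0x1FE would need an odd rotate and is rejected:

static void checkSOImmEncoding() {
  using namespace llvm;
  int Enc = ARM_AM::getSOImmVal(0xFF00);
  assert(Enc == 0xCFF);                                  // imm8=0xFF, rot field=12
  unsigned Imm = ARM_AM::getSOImmValImm((unsigned)Enc);  // 0xFF
  unsigned Rot = ARM_AM::getSOImmValRot((unsigned)Enc);  // 24
  assert(ARM_AM::rotr32(Imm, Rot) == 0xFF00);            // decoding round-trips
  assert(ARM_AM::getSOImmVal(0x1FE) == -1);              // not encodable as one so_imm
}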
200
201 /// isSOImmTwoPartVal - Return true if the specified value can be obtained by
202 /// or'ing together two SOImmVal's.
203 static inline bool isSOImmTwoPartVal(unsigned V) {
204 // If this can be handled with a single shifter_op, bail out.
205 V = rotr32(~255U, getSOImmValRotate(V)) & V;
206 if (V == 0)
207 return false;
208
209 // If this can be handled with two shifter_op's, accept.
210 V = rotr32(~255U, getSOImmValRotate(V)) & V;
211 return V == 0;
212 }
213
214 /// getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal,
215 /// return the first chunk of it.
216 static inline unsigned getSOImmTwoPartFirst(unsigned V) {
217 return rotr32(255U, getSOImmValRotate(V)) & V;
218 }
219
220 /// getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal,
221 /// return the second chunk of it.
222 static inline unsigned getSOImmTwoPartSecond(unsigned V) {
223 // Mask out the first hunk.
224 V = rotr32(~255U, getSOImmValRotate(V)) & V;
225
226 // Take what's left.
227 assert(V == (rotr32(255U, getSOImmValRotate(V)) & V));
228 return V;
229 }
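Continuing the sketch (not part of the commit): 0xFF0000FF is not a single so_imm, but it splits into two shifter-op chunks that could be materialized with, e.g., a MOV followed by an ORR:

static void checkTwoPartSplit() {
  using namespace llvm;
  unsigned V = 0xFF0000FF;
  assert(ARM_AM::getSOImmVal(V) == -1);                 // needs more than one chunk
  assert(ARM_AM::isSOImmTwoPartVal(V));                 // but two chunks are enough
  unsigned First  = ARM_AM::getSOImmTwoPartFirst(V);    // 0x000000FF
  unsigned Second = ARM_AM::getSOImmTwoPartSecond(V);   // 0xFF000000
  assert((First | Second) == V);
  assert(ARM_AM::getSOImmVal(First) != -1 && ARM_AM::getSOImmVal(Second) != -1);
}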
230
231 /// getThumbImmValShift - Try to handle Imm with a 8-bit immediate followed
232 /// by a left shift. Returns the shift amount to use.
233 static inline unsigned getThumbImmValShift(unsigned Imm) {
234 // 8-bit (or less) immediates are trivially immediate operand with a shift
235 // of zero.
236 if ((Imm & ~255U) == 0) return 0;
237
238 // Use CTZ to compute the shift amount.
239 return CountTrailingZeros_32(Imm);
240 }
241
242 /// isThumbImmShiftedVal - Return true if the specified value can be obtained
243 /// by left shifting a 8-bit immediate.
244 static inline bool isThumbImmShiftedVal(unsigned V) {
245 // If this can be handled with a left-shifted 8-bit immediate, clearing those bits leaves zero.
246 V = (~255U << getThumbImmValShift(V)) & V;
247 return V == 0;
248 }
249
250 /// getThumbImmNonShiftedVal - If V is a value that satisfies
251 /// isThumbImmShiftedVal, return the non-shifted value.
252 static inline unsigned getThumbImmNonShiftedVal(unsigned V) {
253 return V >> getThumbImmValShift(V);
254 }
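A small hedged example (not in the commit): 0x1FE0 is 0xFF shifted left by 5, so it qualifies, while 0x1FF0 spans nine significant bits and does not:

static void checkThumbImm() {
  using namespace llvm;
  assert(ARM_AM::getThumbImmValShift(0x1FE0) == 5);
  assert(ARM_AM::isThumbImmShiftedVal(0x1FE0));              // 0xFF << 5
  assert(ARM_AM::getThumbImmNonShiftedVal(0x1FE0) == 0xFF);
  assert(!ARM_AM::isThumbImmShiftedVal(0x1FF0));             // nine significant bits
}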
255
256 //===--------------------------------------------------------------------===//
257 // Addressing Mode #2
258 //===--------------------------------------------------------------------===//
259 //
260 // This is used for most simple load/store instructions.
261 //
262 // addrmode2 := reg +/- reg shop imm
263 // addrmode2 := reg +/- imm12
264 //
265 // The first operand is always a Reg. The second operand is a reg if in
266 // reg/reg form, otherwise it's reg#0. The third field encodes the operation
267 // in bit 12, the immediate in bits 0-11, and the shift op in 13-15.
268 //
269 // If this addressing mode is a frame index (before prolog/epilog insertion
270 // and code rewriting), this operand will have the form: FI#, reg0,
271 // with no shift amount for the frame offset.
272 //
273 static inline unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO) {
274 assert(Imm12 < (1 << 12) && "Imm too large!");
275 bool isSub = Opc == sub;
276 return Imm12 | ((int)isSub << 12) | (SO << 13);
277 }
278 static inline unsigned getAM2Offset(unsigned AM2Opc) {
279 return AM2Opc & ((1 << 12)-1);
280 }
281 static inline AddrOpc getAM2Op(unsigned AM2Opc) {
282 return ((AM2Opc >> 12) & 1) ? sub : add;
283 }
284 static inline ShiftOpc getAM2ShiftOpc(unsigned AM2Opc) {
285 return (ShiftOpc)(AM2Opc >> 13);
286 }
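A hedged round-trip sketch (not part of the commit) for the addrmode2 offset field, e.g. the "-reg, lsl #2" form:

static void checkAM2Packing() {
  using namespace llvm;
  unsigned Opc = ARM_AM::getAM2Opc(ARM_AM::sub, 2, ARM_AM::lsl);
  assert(ARM_AM::getAM2Offset(Opc) == 2);               // immediate in bits 0-11
  assert(ARM_AM::getAM2Op(Opc) == ARM_AM::sub);         // add/sub in bit 12
  assert(ARM_AM::getAM2ShiftOpc(Opc) == ARM_AM::lsl);   // shift opcode in bits 13-15
}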
287
288
289 //===--------------------------------------------------------------------===//
290 // Addressing Mode #3
291 //===--------------------------------------------------------------------===//
292 //
293 // This is used for sign-extending loads, and load/store-pair instructions.
294 //
295 // addrmode3 := reg +/- reg
296 // addrmode3 := reg +/- imm8
297 //
298 // The first operand is always a Reg. The second operand is a reg if in
299 // reg/reg form, otherwise it's reg#0. The third field encodes the operation
300 // in bit 8, the immediate in bits 0-7.
301
302 /// getAM3Opc - This function encodes the addrmode3 opc field.
303 static inline unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset) {
304 bool isSub = Opc == sub;
305 return ((int)isSub << 8) | Offset;
306 }
307 static inline unsigned char getAM3Offset(unsigned AM3Opc) {
308 return AM3Opc & 0xFF;
309 }
310 static inline AddrOpc getAM3Op(unsigned AM3Opc) {
311 return ((AM3Opc >> 8) & 1) ? sub : add;
312 }
313
314 //===--------------------------------------------------------------------===//
315 // Addressing Mode #4
316 //===--------------------------------------------------------------------===//
317 //
318 // This is used for load / store multiple instructions.
319 //
320 // addrmode4 := reg, <mode>
321 //
322 // The four modes are:
323 // IA - Increment after
324 // IB - Increment before
325 // DA - Decrement after
326 // DB - Decrement before
327 //
328 // If the 4th bit (writeback) is set, then the base register is updated after
329 // the memory transfer.
330
331 static inline AMSubMode getAM4SubMode(unsigned Mode) {
332 return (AMSubMode)(Mode & 0x7);
333 }
334
335 static inline unsigned getAM4ModeImm(AMSubMode SubMode, bool WB = false) {
336 return (int)SubMode | ((int)WB << 3);
337 }
338
339 static inline bool getAM4WBFlag(unsigned Mode) {
340 return (Mode >> 3) & 1;
341 }
342
343 //===--------------------------------------------------------------------===//
344 // Addressing Mode #5
345 //===--------------------------------------------------------------------===//
346 //
347 // This is used for coprocessor instructions, such as FP load/stores.
348 //
349 // addrmode5 := reg +/- imm8*4
350 //
351 // The first operand is always a Reg. The third field encodes the operation
352 // in bit 8, the immediate in bits 0-7.
353 //
354 // This can also be used for FP load/store multiple ops. The third field encodes
355 // writeback mode in bit 8, the number of registers (or 2 times the number of
356 // registers for DPR ops) in bits 0-7. In addition, bits 9-11 encode one of the
357 // following two sub-modes:
358 //
359 // IA - Increment after
360 // DB - Decrement before
361
362 /// getAM5Opc - This function encodes the addrmode5 opc field.
363 static inline unsigned getAM5Opc(AddrOpc Opc, unsigned char Offset) {
364 bool isSub = Opc == sub;
365 return ((int)isSub << 8) | Offset;
366 }
367 static inline unsigned char getAM5Offset(unsigned AM5Opc) {
368 return AM5Opc & 0xFF;
369 }
370 static inline AddrOpc getAM5Op(unsigned AM5Opc) {
371 return ((AM5Opc >> 8) & 1) ? sub : add;
372 }
373
374 /// getAM5Opc - This function encodes the addrmode5 opc field for FLDM and
375 /// FSTM instructions.
376 static inline unsigned getAM5Opc(AMSubMode SubMode, bool WB,
377 unsigned char Offset) {
378 assert((SubMode == ia || SubMode == db) &&
379 "Illegal addressing mode 5 sub-mode!");
380 return ((int)SubMode << 9) | ((int)WB << 8) | Offset;
381 }
382 static inline AMSubMode getAM5SubMode(unsigned AM5Opc) {
383 return (AMSubMode)((AM5Opc >> 9) & 0x7);
384 }
385 static inline bool getAM5WBFlag(unsigned AM5Opc) {
386 return ((AM5Opc >> 8) & 1);
387 }
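A hedged sketch (not part of the commit) of the FLDM/FSTM form of the addrmode5 field, packing a "db" sub-mode with writeback and a register-count field of 8 (i.e. 4 DPR registers):

static void checkAM5MultiPacking() {
  using namespace llvm;
  unsigned Opc = ARM_AM::getAM5Opc(ARM_AM::db, true, 8);
  assert(ARM_AM::getAM5SubMode(Opc) == ARM_AM::db);  // sub-mode in bits 9-11
  assert(ARM_AM::getAM5WBFlag(Opc));                 // writeback in bit 8
  assert(ARM_AM::getAM5Offset(Opc) == 8);            // count in bits 0-7
}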
388
389 } // end namespace ARM_AM
390 } // end namespace llvm
391
392 #endif
393
1414
1515 #define DEBUG_TYPE "asm-printer"
1616 #include "ARM.h"
17 #include "ARMInstrInfo.h"
17 #include "ARMTargetMachine.h"
18 #include "ARMAddressingModes.h"
19 #include "ARMConstantPoolValue.h"
20 #include "ARMMachineFunctionInfo.h"
1821 #include "llvm/Constants.h"
19 #include "llvm/DerivedTypes.h"
2022 #include "llvm/Module.h"
2123 #include "llvm/CodeGen/AsmPrinter.h"
24 #include "llvm/CodeGen/DwarfWriter.h"
25 #include "llvm/CodeGen/MachineDebugInfo.h"
2226 #include "llvm/CodeGen/MachineFunctionPass.h"
23 #include "llvm/CodeGen/MachineConstantPool.h"
24 #include "llvm/CodeGen/MachineInstr.h"
27 #include "llvm/CodeGen/MachineJumpTableInfo.h"
2528 #include "llvm/Target/TargetAsmInfo.h"
2629 #include "llvm/Target/TargetData.h"
2730 #include "llvm/Target/TargetMachine.h"
28 #include "llvm/Support/Mangler.h"
2931 #include "llvm/ADT/Statistic.h"
3032 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Compiler.h"
34 #include "llvm/Support/Mangler.h"
3235 #include "llvm/Support/MathExtras.h"
3336 #include
37 #include
38 #include
3439 using namespace llvm;
3540
3641 STATISTIC(EmittedInsts, "Number of machine instrs printed");
3742
3843 namespace {
39 static const char *ARMCondCodeToString(ARMCC::CondCodes CC) {
40 switch (CC) {
41 default: assert(0 && "Unknown condition code");
42 case ARMCC::EQ: return "eq";
43 case ARMCC::NE: return "ne";
44 case ARMCC::CS: return "cs";
45 case ARMCC::CC: return "cc";
46 case ARMCC::MI: return "mi";
47 case ARMCC::PL: return "pl";
48 case ARMCC::VS: return "vs";
49 case ARMCC::VC: return "vc";
50 case ARMCC::HI: return "hi";
51 case ARMCC::LS: return "ls";
52 case ARMCC::GE: return "ge";
53 case ARMCC::LT: return "lt";
54 case ARMCC::GT: return "gt";
55 case ARMCC::LE: return "le";
56 case ARMCC::AL: return "al";
57 }
58 }
59
6044 struct VISIBILITY_HIDDEN ARMAsmPrinter : public AsmPrinter {
6145 ARMAsmPrinter(std::ostream &O, TargetMachine &TM, const TargetAsmInfo *T)
62 : AsmPrinter(O, TM, T) {
63 }
46 : AsmPrinter(O, TM, T), DW(O, this, T), AFI(NULL), InCPMode(false) {
47 Subtarget = &TM.getSubtarget<ARMSubtarget>();
48 }
49
50 DwarfWriter DW;
51
52 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
53 /// make the right decision when printing asm code for different targets.
54 const ARMSubtarget *Subtarget;
55
56 /// AFI - Keep a pointer to ARMFunctionInfo for the current
57 /// MachineFunction
58 ARMFunctionInfo *AFI;
6459
6560 /// We name each basic block in a Function with a unique number, so
6661 /// that we can consistently refer to them later. This is cleared
6964 typedef std::map<const Value*, unsigned> ValueMapTy;
7065 ValueMapTy NumberForBB;
7166
67 /// Keeps the set of GlobalValues that require non-lazy-pointers for
68 /// indirect access.
69 std::set<std::string> GVNonLazyPtrs;
70
71 /// Keeps the set of external function GlobalAddresses that the asm
72 /// printer should generate stubs for.
73 std::set<std::string> FnStubs;
74
75 /// True if asm printer is printing a series of CONSTPOOL_ENTRY.
76 bool InCPMode;
77
7278 virtual const char *getPassName() const {
7379 return "ARM Assembly Printer";
7480 }
7581
76 void printAddrMode1(const MachineInstr *MI, int opNum);
77 void printAddrMode2(const MachineInstr *MI, int opNum);
78 void printAddrMode5(const MachineInstr *MI, int opNum);
79 void printOperand(const MachineInstr *MI, int opNum);
80 void printMemOperand(const MachineInstr *MI, int opNum,
81 const char *Modifier = 0);
82 void printOperand(const MachineInstr *MI, int opNum,
83 const char *Modifier = 0);
84 void printSOImmOperand(const MachineInstr *MI, int opNum);
85 void printSORegOperand(const MachineInstr *MI, int opNum);
86 void printAddrMode2Operand(const MachineInstr *MI, int OpNo);
87 void printAddrMode2OffsetOperand(const MachineInstr *MI, int OpNo);
88 void printAddrMode3Operand(const MachineInstr *MI, int OpNo);
89 void printAddrMode3OffsetOperand(const MachineInstr *MI, int OpNo);
90 void printAddrMode4Operand(const MachineInstr *MI, int OpNo,
91 const char *Modifier = 0);
92 void printAddrMode5Operand(const MachineInstr *MI, int OpNo,
93 const char *Modifier = 0);
94 void printAddrModePCOperand(const MachineInstr *MI, int OpNo,
95 const char *Modifier = 0);
96 void printThumbAddrModeRROperand(const MachineInstr *MI, int OpNo);
97 void printThumbAddrModeRI5Operand(const MachineInstr *MI, int OpNo,
98 unsigned Scale);
99 void printThumbAddrModeRI5_1Operand(const MachineInstr *MI, int OpNo);
100 void printThumbAddrModeRI5_2Operand(const MachineInstr *MI, int OpNo);
101 void printThumbAddrModeRI5_4Operand(const MachineInstr *MI, int OpNo);
102 void printThumbAddrModeSPOperand(const MachineInstr *MI, int OpNo);
82103 void printCCOperand(const MachineInstr *MI, int opNum);
104 void printPCLabel(const MachineInstr *MI, int opNum);
105 void printRegisterList(const MachineInstr *MI, int opNum);
106 void printCPInstOperand(const MachineInstr *MI, int opNum,
107 const char *Modifier);
108 void printJTBlockOperand(const MachineInstr *MI, int opNum);
109
110 virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
111 unsigned AsmVariant, const char *ExtraCode);
83112
84113 bool printInstruction(const MachineInstr *MI); // autogenerated.
114 void printMachineInstruction(const MachineInstr *MI);
85115 bool runOnMachineFunction(MachineFunction &F);
86116 bool doInitialization(Module &M);
87117 bool doFinalization(Module &M);
118
119 virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
120 printDataDirective(MCPV->getType());
121
122 ARMConstantPoolValue *ACPV = (ARMConstantPoolValue*)MCPV;
123 std::string Name = Mang->getValueName(ACPV->getGV());
124 if (ACPV->isNonLazyPointer()) {
125 GVNonLazyPtrs.insert(Name);
126 O << TAI->getPrivateGlobalPrefix() << Name << "$non_lazy_ptr";
127 } else
128 O << Name;
129 if (ACPV->getPCAdjustment() != 0)
130 O << "-(" << TAI->getPrivateGlobalPrefix() << "PC"
131 << utostr(ACPV->getLabelId())
132 << "+" << (unsigned)ACPV->getPCAdjustment() << ")";
133 O << "\n";
134 }
135
136 void getAnalysisUsage(AnalysisUsage &AU) const {
137 AU.setPreservesAll();
138 AU.addRequired<MachineDebugInfo>();
139 }
88140 };
89141 } // end of anonymous namespace
90142
96148 /// regardless of whether the function is in SSA form.
97149 ///
98150 FunctionPass *llvm::createARMCodePrinterPass(std::ostream &o,
99 TargetMachine &tm) {
151 ARMTargetMachine &tm) {
100152 return new ARMAsmPrinter(o, tm, tm.getTargetAsmInfo());
101153 }
102154
103 /// runOnMachineFunction - This uses the printMachineInstruction()
155 /// runOnMachineFunction - This uses the printInstruction()
104156 /// method to print assembly for each instruction.
105157 ///
106158 bool ARMAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
159 AFI = MF.getInfo<ARMFunctionInfo>();
160
161 if (Subtarget->isDarwin()) {
162 DW.SetDebugInfo(&getAnalysis<MachineDebugInfo>());
163 }
164
107165 SetupMachineFunction(MF);
108 O << "\n\n";
109
110 // Print out constants referenced by the function
111 EmitConstantPool(MF.getConstantPool());
112
113 const std::vector<MachineConstantPoolEntry>
114 &CP = MF.getConstantPool()->getConstants();
115 for (unsigned i = 0, e = CP.size(); i != e; ++i) {
116 MachineConstantPoolEntry CPE = CP[i];
117 if (!CPE.isMachineConstantPoolEntry()){
118 Constant *CV = CPE.Val.ConstVal;
119 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
120 if (GV->hasExternalWeakLinkage()) {
121 ExtWeakSymbols.insert(GV);
122 }
123 }
124 }
125 }
126
127 // Print out jump tables referenced by the function
128 EmitJumpTableInfo(MF.getJumpTableInfo(), MF);
129
166 O << "\n";
167
168 // NOTE: we don't print out constant pools here, they are handled as
169 // instructions.
170
171 O << "\n";
130172 // Print out labels for the function.
131173 const Function *F = MF.getFunction();
132 SwitchToTextSection(getSectionForFunction(*F).c_str(), F);
133
134174 switch (F->getLinkage()) {
135175 default: assert(0 && "Unknown linkage type!");
136176 case Function::InternalLinkage:
177 SwitchToTextSection("\t.text", F);
137178 break;
138179 case Function::ExternalLinkage:
180 SwitchToTextSection("\t.text", F);
139181 O << "\t.globl\t" << CurrentFnName << "\n";
140182 break;
141183 case Function::WeakLinkage:
142184 case Function::LinkOnceLinkage:
143 O << TAI->getWeakRefDirective() << CurrentFnName << "\n";
185 if (Subtarget->isDarwin()) {
186 SwitchToTextSection(
187 ".section __TEXT,__textcoal_nt,coalesced,pure_instructions", F);
188 O << "\t.globl\t" << CurrentFnName << "\n";
189 O << "\t.weak_definition\t" << CurrentFnName << "\n";
190 } else {
191 O << TAI->getWeakRefDirective() << CurrentFnName << "\n";
192 }
144193 break;
145194 }
146 EmitAlignment(2, F);
195
196 if (AFI->isThumbFunction()) {
197 EmitAlignment(1, F);
198 O << "\t.code\t16\n";
199 O << "\t.thumb_func\t" << CurrentFnName << "\n";
200 InCPMode = false;
201 } else
202 EmitAlignment(2, F);
203
147204 O << CurrentFnName << ":\n";
205 if (Subtarget->isDarwin()) {
206 // Emit pre-function debug information.
207 DW.BeginFunction(&MF);
208 }
148209
149210 // Print out code for the function.
150211 for (MachineFunction::const_iterator I = MF.begin(), E = MF.end();
157218 for (MachineBasicBlock::const_iterator II = I->begin(), E = I->end();
158219 II != E; ++II) {
159220 // Print the assembly for the instruction.
160 O << "\t";
161 ++EmittedInsts;
162 printInstruction(II);
163 }
221 printMachineInstruction(II);
222 }
223 }
224
225 if (TAI->hasDotTypeDotSizeDirective())
226 O << "\t.size " << CurrentFnName << ", .-" << CurrentFnName << "\n";
227
228 if (Subtarget->isDarwin()) {
229 // Emit post-function debug information.
230 DW.EndFunction();
164231 }
165232
166233 return false;
167234 }
168235
169 void ARMAsmPrinter::printAddrMode1(const MachineInstr *MI, int opNum) {
170 const MachineOperand &Arg = MI->getOperand(opNum);
171 const MachineOperand &Shift = MI->getOperand(opNum + 1);
172 const MachineOperand &ShiftType = MI->getOperand(opNum + 2);
173
174 if(Arg.isImmediate()) {
175 assert(Shift.getImmedValue() == 0);
176 printOperand(MI, opNum);
177 } else {
178 assert(Arg.isRegister());
179 printOperand(MI, opNum);
180 if(Shift.isRegister() || Shift.getImmedValue() != 0) {
181 const char *s = NULL;
182 switch(ShiftType.getImmedValue()) {
183 case ARMShift::LSL:
184 s = ", lsl ";
185 break;
186 case ARMShift::LSR:
187 s = ", lsr ";
188 break;
189 case ARMShift::ASR:
190 s = ", asr ";
191 break;
192 case ARMShift::ROR:
193 s = ", ror ";
194 break;
195 case ARMShift::RRX:
196 s = ", rrx ";
197 break;
198 }
199 O << s;
200 printOperand(MI, opNum + 1);
201 }
202 }
203 }
204
205 void ARMAsmPrinter::printAddrMode2(const MachineInstr *MI, int opNum) {
206 const MachineOperand &Arg = MI->getOperand(opNum);
207 const MachineOperand &Offset = MI->getOperand(opNum + 1);
208 assert(Offset.isImmediate());
209
210 if (Arg.isConstantPoolIndex()) {
211 assert(Offset.getImmedValue() == 0);
212 printOperand(MI, opNum);
213 } else {
214 assert(Arg.isRegister());
215 O << '[';
216 printOperand(MI, opNum);
217 O << ", ";
218 printOperand(MI, opNum + 1);
219 O << ']';
220 }
221 }
222
223 void ARMAsmPrinter::printAddrMode5(const MachineInstr *MI, int opNum) {
224 const MachineOperand &Arg = MI->getOperand(opNum);
225 const MachineOperand &Offset = MI->getOperand(opNum + 1);
226 assert(Offset.isImmediate());
227
228 if (Arg.isConstantPoolIndex()) {
229 assert(Offset.getImmedValue() == 0);
230 printOperand(MI, opNum);
231 } else {
232 assert(Arg.isRegister());
233 O << '[';
234 printOperand(MI, opNum);
235 O << ", ";
236 printOperand(MI, opNum + 1);
237 O << ']';
238 }
239 }
240
241 void ARMAsmPrinter::printOperand(const MachineInstr *MI, int opNum) {
242 const MachineOperand &MO = MI->getOperand (opNum);
243 const MRegisterInfo &RI = *TM.getRegisterInfo();
236 void ARMAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
237 const char *Modifier) {
238 const MachineOperand &MO = MI->getOperand(opNum);
244239 switch (MO.getType()) {
245240 case MachineOperand::MO_Register:
246241 if (MRegisterInfo::isPhysicalRegister(MO.getReg()))
247 O << LowercaseString (RI.get(MO.getReg()).Name);
242 O << TM.getRegisterInfo()->get(MO.getReg()).Name;
248243 else
249244 assert(0 && "not implemented");
250245 break;
251 case MachineOperand::MO_Immediate:
252 O << "#" << (int)MO.getImmedValue();
246 case MachineOperand::MO_Immediate: {
247 if (!Modifier || strcmp(Modifier, "no_hash") != 0)
248 O << "#";
249
250 O << (int)MO.getImmedValue();
253251 break;
252 }
254253 case MachineOperand::MO_MachineBasicBlock:
255254 printBasicBlockLabel(MO.getMachineBasicBlock());
256255 return;
257256 case MachineOperand::MO_GlobalAddress: {
257 bool isCallOp = Modifier && !strcmp(Modifier, "call");
258258 GlobalValue *GV = MO.getGlobal();
259259 std::string Name = Mang->getValueName(GV);
260 O << Name;
261 if (GV->hasExternalWeakLinkage()) {
260 bool isExt = (GV->isExternal() || GV->hasWeakLinkage() ||
261 GV->hasLinkOnceLinkage());
262 if (isExt && isCallOp && Subtarget->isDarwin() &&
263 TM.getRelocationModel() != Reloc::Static) {
264 O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
265 FnStubs.insert(Name);
266 } else
267 O << Name;
268
269 if (GV->hasExternalWeakLinkage())
262270 ExtWeakSymbols.insert(GV);
263 }
264 }
265271 break;
266 case MachineOperand::MO_ExternalSymbol:
267 O << TAI->getGlobalPrefix() << MO.getSymbolName();
272 }
273 case MachineOperand::MO_ExternalSymbol: {
274 bool isCallOp = Modifier && !strcmp(Modifier, "call");
275 std::string Name(TAI->getGlobalPrefix());
276 Name += MO.getSymbolName();
277 if (isCallOp && Subtarget->isDarwin() &&
278 TM.getRelocationModel() != Reloc::Static) {
279 O << TAI->getPrivateGlobalPrefix() << Name << "$stub";
280 FnStubs.insert(Name);
281 } else
282 O << Name;
268283 break;
284 }
269285 case MachineOperand::MO_ConstantPoolIndex:
270286 O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
271287 << '_' << MO.getConstantPoolIndex();
272288 break;
289 case MachineOperand::MO_JumpTableIndex:
290 O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
291 << '_' << MO.getJumpTableIndex();
292 break;
273293 default:
274294 O << ""; abort (); break;
275295 }
276296 }
277297
278 void ARMAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
279 const char *Modifier) {
280 assert(0 && "not implemented");
298 /// printSOImmOperand - SOImm is 4-bit rotate amount in bits 8-11 with 8-bit
299 /// immediate in bits 0-7.
300 void ARMAsmPrinter::printSOImmOperand(const MachineInstr *MI, int OpNum) {
301 const MachineOperand &MO = MI->getOperand(OpNum);
302 assert(MO.isImmediate() && (MO.getImmedValue() < (1 << 12)) &&
303 "Not a valid so_imm value!");
304 unsigned Imm = ARM_AM::getSOImmValImm(MO.getImmedValue());
305 unsigned Rot = ARM_AM::getSOImmValRot(MO.getImmedValue());
306
307 // Print low-level immediate formation info, per
308 // A5.1.3: "Data-processing operands - Immediate".
309 if (Rot) {
310 O << "#" << Imm << ", " << Rot;
311 // Pretty printed version.
312 O << ' ' << TAI->getCommentString() << ' ' << (int)ARM_AM::rotr32(Imm, Rot);
313 } else {
314 O << "#" << Imm;
315 }
316 }
317
318 // so_reg is a 4-operand unit corresponding to register forms of the A5.1
319 // "Addressing Mode 1 - Data-processing operands" forms. This includes:
320 // REG 0 0 - e.g. R5
321 // REG REG 0,SH_OPC - e.g. R5, ROR R3
322 // REG 0 IMM,SH_OPC - e.g. R5, LSL #3
323 void ARMAsmPrinter::printSORegOperand(const MachineInstr *MI, int Op) {
324 const MachineOperand &MO1 = MI->getOperand(Op);
325 const MachineOperand &MO2 = MI->getOperand(Op+1);
326 const MachineOperand &MO3 = MI->getOperand(Op+2);
327
328 assert(MRegisterInfo::isPhysicalRegister(MO1.getReg()));
329 O << TM.getRegisterInfo()->get(MO1.getReg()).Name;
330
331 // Print the shift opc.
332 O << ", "
333 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(MO3.getImmedValue()))
334 << " ";
335
336 if (MO2.getReg()) {
337 assert(MRegisterInfo::isPhysicalRegister(MO2.getReg()));
338 O << TM.getRegisterInfo()->get(MO2.getReg()).Name;
339 assert(ARM_AM::getSORegOffset(MO3.getImm()) == 0);
340 } else {
341 O << "#" << ARM_AM::getSORegOffset(MO3.getImm());
342 }
343 }
344
345 void ARMAsmPrinter::printAddrMode2Operand(const MachineInstr *MI, int Op) {
346 const MachineOperand &MO1 = MI->getOperand(Op);
347 const MachineOperand &MO2 = MI->getOperand(Op+1);
348 const MachineOperand &MO3 = MI->getOperand(Op+2);
349
350 if (!MO1.isRegister()) { // FIXME: This is for CP entries, but isn't right.
351 printOperand(MI, Op);
352 return;
353 }
354
355 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
356
357 if (!MO2.getReg()) {
358 if (ARM_AM::getAM2Offset(MO3.getImm())) // Don't print +0.
359 O << ", #"
360 << (char)ARM_AM::getAM2Op(MO3.getImm())
361 << ARM_AM::getAM2Offset(MO3.getImm());
362 O << "]";
363 return;
364 }
365
366 O << ", "
367 << (char)ARM_AM::getAM2Op(MO3.getImm())
368 << TM.getRegisterInfo()->get(MO2.getReg()).Name;
369
370 if (unsigned ShImm = ARM_AM::getAM2Offset(MO3.getImm()))
371 O << ", "
372 << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO3.getImmedValue()))
373 << " #" << ShImm;
374 O << "]";
375 }
376
377 void ARMAsmPrinter::printAddrMode2OffsetOperand(const MachineInstr *MI, int Op){
378 const MachineOperand &MO1 = MI->getOperand(Op);
379 const MachineOperand &MO2 = MI->getOperand(Op+1);
380
381 if (!MO1.getReg()) {
382 if (ARM_AM::getAM2Offset(MO2.getImm())) // Don't print +0.
383 O << "#"
384 << (char)ARM_AM::getAM2Op(MO2.getImm())
385 << ARM_AM::getAM2Offset(MO2.getImm());
386 return;
387 }
388
389 O << (char)ARM_AM::getAM2Op(MO2.getImm())
390 << TM.getRegisterInfo()->get(MO1.getReg()).Name;
391
392 if (unsigned ShImm = ARM_AM::getAM2Offset(MO2.getImm()))
393 O << ", "
394 << ARM_AM::getShiftOpcStr(ARM_AM::getAM2ShiftOpc(MO2.getImmedValue()))
395 << " #" << ShImm;
396 }
397
398 void ARMAsmPrinter::printAddrMode3Operand(const MachineInstr *MI, int Op) {
399 const MachineOperand &MO1 = MI->getOperand(Op);
400 const MachineOperand &MO2 = MI->getOperand(Op+1);
401 const MachineOperand &MO3 = MI->getOperand(Op+2);
402
403 assert(MRegisterInfo::isPhysicalRegister(MO1.getReg()));
404 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
405
406 if (MO2.getReg()) {
407 O << ", "
408 << (char)ARM_AM::getAM3Op(MO3.getImm())
409 << TM.getRegisterInfo()->get(MO2.getReg()).Name
410 << "]";
411 return;
412 }
413
414 if (unsigned ImmOffs = ARM_AM::getAM3Offset(MO3.getImm()))
415 O << ", #"
416 << (char)ARM_AM::getAM3Op(MO3.getImm())
417 << ImmOffs;
418 O << "]";
419 }
420
421 void ARMAsmPrinter::printAddrMode3OffsetOperand(const MachineInstr *MI, int Op){
422 const MachineOperand &MO1 = MI->getOperand(Op);
423 const MachineOperand &MO2 = MI->getOperand(Op+1);
424
425 if (MO1.getReg()) {
426 O << (char)ARM_AM::getAM3Op(MO2.getImm())
427 << TM.getRegisterInfo()->get(MO1.getReg()).Name;
428 return;
429 }
430
431 unsigned ImmOffs = ARM_AM::getAM3Offset(MO2.getImm());
432 O << "#"
433 << (char)ARM_AM::getAM3Op(MO2.getImm())
434 << ImmOffs;
435 }
436
437 void ARMAsmPrinter::printAddrMode4Operand(const MachineInstr *MI, int Op,
438 const char *Modifier) {
439 const MachineOperand &MO1 = MI->getOperand(Op);
440 const MachineOperand &MO2 = MI->getOperand(Op+1);
441 ARM_AM::AMSubMode Mode = ARM_AM::getAM4SubMode(MO2.getImm());
442 if (Modifier && strcmp(Modifier, "submode") == 0) {
443 if (MO1.getReg() == ARM::SP) {
444 bool isLDM = (MI->getOpcode() == ARM::LDM ||
445 MI->getOpcode() == ARM::LDM_RET);
446 O << ARM_AM::getAMSubModeAltStr(Mode, isLDM);
447 } else
448 O << ARM_AM::getAMSubModeStr(Mode);
449 } else {
450 printOperand(MI, Op);
451 if (ARM_AM::getAM4WBFlag(MO2.getImm()))
452 O << "!";
453 }
454 }
455
456 void ARMAsmPrinter::printAddrMode5Operand(const MachineInstr *MI, int Op,
457 const char *Modifier) {
458 const MachineOperand &MO1 = MI->getOperand(Op);
459 const MachineOperand &MO2 = MI->getOperand(Op+1);
460
461 if (!MO1.isRegister()) { // FIXME: This is for CP entries, but isn't right.
462 printOperand(MI, Op);
463 return;
464 }
465
466 assert(MRegisterInfo::isPhysicalRegister(MO1.getReg()));
467
468 if (Modifier && strcmp(Modifier, "submode") == 0) {
469 ARM_AM::AMSubMode Mode = ARM_AM::getAM5SubMode(MO2.getImm());
470 if (MO1.getReg() == ARM::SP) {
471 bool isFLDM = (MI->getOpcode() == ARM::FLDMD ||
472 MI->getOpcode() == ARM::FLDMS);
473 O << ARM_AM::getAMSubModeAltStr(Mode, isFLDM);
474 } else
475 O << ARM_AM::getAMSubModeStr(Mode);
476 return;
477 } else if (Modifier && strcmp(Modifier, "base") == 0) {
478 // Used for FSTM{D|S} and LSTM{D|S} operations.
479 O << TM.getRegisterInfo()->get(MO1.getReg()).Name;
480 if (ARM_AM::getAM5WBFlag(MO2.getImm()))
481 O << "!";
482 return;
483 }
484
485 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
486
487 if (unsigned ImmOffs = ARM_AM::getAM5Offset(MO2.getImm())) {
488 O << ", #"
489 << (char)ARM_AM::getAM5Op(MO2.getImm())
490 << ImmOffs*4;
491 }
492 O << "]";
493 }
494
495 void ARMAsmPrinter::printAddrModePCOperand(const MachineInstr *MI, int Op,
496 const char *Modifier) {
497 if (Modifier && strcmp(Modifier, "label") == 0) {
498 printPCLabel(MI, Op+1);
499 return;
500 }
501
502 const MachineOperand &MO1 = MI->getOperand(Op);
503 assert(MRegisterInfo::isPhysicalRegister(MO1.getReg()));
504 O << "[pc, +" << TM.getRegisterInfo()->get(MO1.getReg()).Name << "]";
505 }
506
507 void
508 ARMAsmPrinter::printThumbAddrModeRROperand(const MachineInstr *MI, int Op) {
509 const MachineOperand &MO1 = MI->getOperand(Op);
510 const MachineOperand &MO2 = MI->getOperand(Op+1);
511 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
512 O << ", " << TM.getRegisterInfo()->get(MO2.getReg()).Name << "]";
513 }
514
515 void
516 ARMAsmPrinter::printThumbAddrModeRI5Operand(const MachineInstr *MI, int Op,
517 unsigned Scale) {
518 const MachineOperand &MO1 = MI->getOperand(Op);
519 const MachineOperand &MO2 = MI->getOperand(Op+1);
520
521 if (!MO1.isRegister()) { // FIXME: This is for CP entries, but isn't right.
522 printOperand(MI, Op);
523 return;
524 }
525
526 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
527 if (unsigned ImmOffs = MO2.getImm()) {
528 O << ", #" << ImmOffs;
529 if (Scale > 1)
530 O << " * " << Scale;
531 }
532 O << "]";
533 }
534
535 void
536 ARMAsmPrinter::printThumbAddrModeRI5_1Operand(const MachineInstr *MI, int Op) {
537 printThumbAddrModeRI5Operand(MI, Op, 1);
538 }
539 void
540 ARMAsmPrinter::printThumbAddrModeRI5_2Operand(const MachineInstr *MI, int Op) {
541 printThumbAddrModeRI5Operand(MI, Op, 2);
542 }
543 void
544 ARMAsmPrinter::printThumbAddrModeRI5_4Operand(const MachineInstr *MI, int Op) {
545 printThumbAddrModeRI5Operand(MI, Op, 4);
546 }
547
548 void ARMAsmPrinter::printThumbAddrModeSPOperand(const MachineInstr *MI,int Op) {
549 const MachineOperand &MO1 = MI->getOperand(Op);
550 const MachineOperand &MO2 = MI->getOperand(Op+1);
551 O << "[" << TM.getRegisterInfo()->get(MO1.getReg()).Name;
552 if (unsigned ImmOffs = MO2.getImm())
553 O << ", #" << ImmOffs << " * 4";
554 O << "]";
281555 }
282556
283557 void ARMAsmPrinter::printCCOperand(const MachineInstr *MI, int opNum) {
285559 O << ARMCondCodeToString((ARMCC::CondCodes)CC);
286560 }
287561
562 void ARMAsmPrinter::printPCLabel(const MachineInstr *MI, int opNum) {
563 int Id = (int)MI->getOperand(opNum).getImmedValue();
564 O << TAI->getPrivateGlobalPrefix() << "PC" << Id;
565 }
566
567 void ARMAsmPrinter::printRegisterList(const MachineInstr *MI, int opNum) {
568 O << "{";
569 for (unsigned i = opNum, e = MI->getNumOperands(); i != e; ++i) {
570 printOperand(MI, i);
571 if (i != e-1) O << ", ";
572 }
573 O << "}";
574 }
575
576 void ARMAsmPrinter::printCPInstOperand(const MachineInstr *MI, int OpNo,
577 const char *Modifier) {
578 assert(Modifier && "This operand only works with a modifier!");
579 // There are two aspects to a CONSTANTPOOL_ENTRY operand, the label and the
580 // data itself.
581 if (!strcmp(Modifier, "label")) {
582 unsigned ID = MI->getOperand(OpNo).getImm();
583 O << TAI->getPrivateGlobalPrefix() << "CPI" << getFunctionNumber()
584 << '_' << ID << ":\n";
585 } else {
586 assert(!strcmp(Modifier, "cpentry") && "Unknown modifier for CPE");
587 unsigned CPI = MI->getOperand(OpNo).getConstantPoolIndex();
588
589 const MachineConstantPoolEntry &MCPE = // Chasing pointers is fun?
590 MI->getParent()->getParent()->getConstantPool()->getConstants()[CPI];
591
592 if (MCPE.isMachineConstantPoolEntry())
593 EmitMachineConstantPoolValue(MCPE.Val.MachineCPVal);
594 else
595 EmitGlobalConstant(MCPE.Val.ConstVal);
596 }
597 }
598
599 void ARMAsmPrinter::printJTBlockOperand(const MachineInstr *MI, int OpNo) {
600 const MachineOperand &MO1 = MI->getOperand(OpNo);
601 const MachineOperand &MO2 = MI->getOperand(OpNo+1); // Unique Id
602 unsigned JTI = MO1.getJumpTableIndex();
603 O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
604 << '_' << JTI << '_' << MO2.getImmedValue() << ":\n";
605
606 const char *JTEntryDirective = TAI->getJumpTableDirective();
607 if (!JTEntryDirective)
608 JTEntryDirective = TAI->getData32bitsDirective();
609
610 const MachineFunction *MF = MI->getParent()->getParent();
611 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
612 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
613 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
614 bool UseSet= TAI->getSetDirective() && TM.getRelocationModel() == Reloc::PIC_;
615 std::set<MachineBasicBlock*> JTSets;
616 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
617 MachineBasicBlock *MBB = JTBBs[i];
618 if (UseSet && JTSets.insert(MBB).second)
619 printSetLabel(JTI, MO2.getImmedValue(), MBB);
620
621 O << JTEntryDirective << ' ';
622 if (UseSet)
623 O << TAI->getPrivateGlobalPrefix() << getFunctionNumber()
624 << '_' << JTI << '_' << MO2.getImmedValue()
625 << "_set_" << MBB->getNumber();
626 else if (TM.getRelocationModel() == Reloc::PIC_) {
627 printBasicBlockLabel(MBB, false, false);
628 // If the arch uses custom Jump Table directives, don't calc relative to JT
629 if (!TAI->getJumpTableDirective())
630 O << '-' << TAI->getPrivateGlobalPrefix() << "JTI"
631 << getFunctionNumber() << '_' << JTI << '_' << MO2.getImmedValue();
632 } else
633 printBasicBlockLabel(MBB, false, false);
634 O << '\n';
635 }
636 }
637
638
639 bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
640 unsigned AsmVariant, const char *ExtraCode){
641 // Does this asm operand have a single letter operand modifier?
642 if (ExtraCode && ExtraCode[0]) {
643 if (ExtraCode[1] != 0) return true; // Unknown modifier.
644
645 switch (ExtraCode[0]) {
646 default: return true; // Unknown modifier.
647 case 'Q':
648 if (TM.getTargetData()->isLittleEndian())
649 break;
650 // Fallthrough
651 case 'R':
652 if (TM.getTargetData()->isBigEndian())
653 break;
654 // Fallthrough
655 case 'H': // Write second word of DI / DF reference.
656 // Verify that this operand has two consecutive registers.
657 if (!MI->getOperand(OpNo).isRegister() ||
658 OpNo+1 == MI->getNumOperands() ||
659 !MI->getOperand(OpNo+1).isRegister())
660 return true;
661 ++OpNo; // Return the high-part.
662 }
663 }
664
665 printOperand(MI, OpNo);
666 return false;
667 }
668
669 void ARMAsmPrinter::printMachineInstruction(const MachineInstr *MI) {
670 ++EmittedInsts;
671
672 if (MI->getOpcode() == ARM::CONSTPOOL_ENTRY) {
673 if (!InCPMode && AFI->isThumbFunction()) {
674 EmitAlignment(2);
675 InCPMode = true;
676 }
677 } else {
678 if (InCPMode && AFI->isThumbFunction()) {
679 EmitAlignment(1);
680 InCPMode = false;
681 }
682 O << "\t";
683 }
684
685 // Call the autogenerated instruction printer routines.
686 printInstruction(MI);
687 }
688
288689 bool ARMAsmPrinter::doInitialization(Module &M) {
289 AsmPrinter::doInitialization(M);
290 return false; // success
690 if (Subtarget->isDarwin()) {
691 // Emit initial debug information.
692 DW.BeginModule(&M);
693 }
694
695 return AsmPrinter::doInitialization(M);
291696 }
292697
293698 bool ARMAsmPrinter::doFinalization(Module &M) {
301706 if (EmitSpecialLLVMGlobal(I))
302707 continue;
303708
304 O << "\n\n";
305709 std::string name = Mang->getValueName(I);
306710 Constant *C = I->getInitializer();
307711 unsigned Size = TD->getTypeSize(C->getType());
308 unsigned Align = Log2_32(TD->getTypeAlignment(C->getType()));
712 unsigned Align = TD->getPreferredAlignmentLog(I);
309713
310714 if (C->isNullValue() &&
311715 !I->hasSection() &&
312 (I->hasLinkOnceLinkage() || I->hasInternalLinkage() ||
313 I->hasWeakLinkage())) {
314 SwitchToDataSection(".data", I);
315 if (I->hasInternalLinkage())
316 O << "\t.local " << name << "\n";
317
318 O << "\t.comm " << name << "," << Size
319 << "," << (unsigned) (1 << Align);
320 O << "\n";
716 (I->hasInternalLinkage() || I->hasWeakLinkage() ||
717 I->hasLinkOnceLinkage() ||
718 (Subtarget->isDarwin() && I->hasExternalLinkage()))) {
719 if (Size == 0) Size = 1; // .comm Foo, 0 is undefined, avoid it.
720 if (I->hasExternalLinkage()) {
721 O << "\t.globl\t" << name << "\n";
722 O << "\t.zerofill __DATA__, __common, " << name << ", "
723 << Size << ", " << Align;
724 } else {
725 SwitchToDataSection(TAI->getDataSection(), I);
726 if (TAI->getLCOMMDirective() != NULL) {
727 if (I->hasInternalLinkage()) {
728 O << TAI->getLCOMMDirective() << name << "," << Size;
729 if (Subtarget->isDarwin())
730 O << "," << Align;
731 } else
732 O << TAI->getCOMMDirective() << name << "," << Size;
733 } else {
734 if (I->hasInternalLinkage())
735 O << "\t.local\t" << name << "\n";
736 O << TAI->getCOMMDirective() << name << "," << Size;
737 if (TAI->getCOMMDirectiveTakesAlignment())
738 O << "," << (TAI->getAlignmentIsInBytes() ? (1 << Align) : Align);
739 }
740 }
741 O << "\t\t" << TAI->getCommentString() << " " << I->getName() << "\n";
742 continue;
321743 } else {
322744 switch (I->getLinkage()) {
323745 default:
324746 assert(0 && "Unknown linkage type!");
325747 break;
748 case GlobalValue::LinkOnceLinkage:
749 case GlobalValue::WeakLinkage:
750 if (Subtarget->isDarwin()) {
751 O << "\t.globl " << name << "\n"
752 << "\t.weak_definition " << name << "\n";
753 SwitchToDataSection("\t.section __DATA,__const_coal,coalesced", I);
754 } else {
755 O << "\t.section\t.llvm.linkonce.d." << name << ",\"aw\",@progbits\n"
756 << "\t.weak " << name << "\n";
757 }
758 break;
326759 case GlobalValue::ExternalLinkage:
327760 O << "\t.globl " << name << "\n";
328 break;
761 // FALL THROUGH
329762 case GlobalValue::InternalLinkage:
763 if (I->isConstant()) {
764 const ConstantArray *CVA = dyn_cast<ConstantArray>(C);
765 if (TAI->getCStringSection() && CVA && CVA->isCString()) {
766 SwitchToDataSection(TAI->getCStringSection(), I);
767 break;
768 }
769 }
770
771 if (I->hasSection() &&
772 (I->getSection() == ".ctors" ||
773 I->getSection() == ".dtors")) {
774 assert(!Subtarget->isDarwin());
775 std::string SectionName = ".section " + I->getSection();
776 SectionName += ",\"aw\",@progbits";
777 SwitchToDataSection(SectionName.c_str());
778 } else {
779 SwitchToDataSection(TAI->getDataSection(), I);
780 }
781
330782 break;
331783 }
332
333 if (I->hasSection() &&
334 (I->getSection() == ".ctors" ||
335 I->getSection() == ".dtors")) {
336 std::string SectionName = ".section " + I->getSection();
337
338 SectionName += ",\"aw\",%progbits";
339
340 SwitchToDataSection(SectionName.c_str());
341 } else {
342 SwitchToDataSection(TAI->getDataSection(), I);
343 }
344
345 EmitAlignment(Align, I);
784 }
785
786 EmitAlignment(Align, I);
787 if (TAI->hasDotTypeDotSizeDirective()) {
346788 O << "\t.type " << name << ", %object\n";
347789 O << "\t.size " << name << ", " << Size << "\n";
348 O << name << ":\n";
349 EmitGlobalConstant(C);
350 }
790 }
791 O << name << ":\n";
792
793 // If the initializer is a extern weak symbol, remember to emit the weak
794 // reference!
795 if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
796 if (GV->hasExternalWeakLinkage())
797 ExtWeakSymbols.insert(GV);
798
799 EmitGlobalConstant(C);
800 O << '\n';
801 }
802
803 if (Subtarget->isDarwin()) {
804 // Output stubs for dynamically-linked functions
805 unsigned j = 1;
806 for (std::set<std::string>::iterator i = FnStubs.begin(), e = FnStubs.end();
807 i != e; ++i, ++j) {
808 if (TM.getRelocationModel() == Reloc::PIC_)
809 SwitchToTextSection(".section __TEXT,__picsymbolstub4,symbol_stubs,"
810 "none,16", 0);
811 else
812 SwitchToTextSection(".section __TEXT,__symbol_stub4,symbol_stubs,"
813 "none,12", 0);
814
815 EmitAlignment(2);
816 O << "\t.code\t32\n";
817
818 O << "L" << *i << "$stub:\n";
819 O << "\t.indirect_symbol " << *i << "\n";
820 O << "\tldr ip, L" << *i << "$slp\n";
821 if (TM.getRelocationModel() == Reloc::PIC_) {
822 O << "L" << *i << "$scv:\n";
823 O << "\tadd ip, pc, ip\n";
824 }
825 O << "\tldr pc, [ip, #0]\n";
826 O << "L" << *i << "$slp:\n";
827 if (TM.getRelocationModel() == Reloc::PIC_)
828 O << "\t.long\tL" << *i << "$lazy_ptr-(L" << *i << "$scv+8)\n";
829 else
830 O << "\t.long\tL" << *i << "$lazy_ptr\n";
831 SwitchToDataSection(".lazy_symbol_pointer", 0);
832 O << "L" << *i << "$lazy_ptr:\n";
833 O << "\t.indirect_symbol " << *i << "\n";
834 O << "\t.long\tdyld_stub_binding_helper\n";
835 }
836 O << "\n";
837
838 // Output non-lazy-pointers for external and common global variables.
839 if (GVNonLazyPtrs.begin() != GVNonLazyPtrs.end())
840 SwitchToDataSection(".non_lazy_symbol_pointer", 0);
841 for (std::set<std::string>::iterator i = GVNonLazyPtrs.begin(),
842 e = GVNonLazyPtrs.end(); i != e; ++i) {
843 O << "L" << *i << "$non_lazy_ptr:\n";
844 O << "\t.indirect_symbol " << *i << "\n";
845 O << "\t.long\t0\n";
846 }
847
848 // Emit final debug information.
849 DW.EndModule();
850
851 // Funny Darwin hack: This flag tells the linker that no global symbols
852 // contain code that falls through to other global symbols (e.g. the obvious
853 // implementation of multiple entry points). If this doesn't occur, the
854 // linker can safely perform dead code stripping. Since LLVM never
855 // generates code that does this, it is always safe to set.
856 O << "\t.subsections_via_symbols\n";
351857 }
352858
353859 AsmPrinter::doFinalization(M);
+0
-84
lib/Target/ARM/ARMCommon.cpp
None //===-- ARMCommon.cpp - Define support functions for ARM --------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by the "Instituto Nokia de Tecnologia" and
5 // is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 //
11 //
12 //===----------------------------------------------------------------------===//
13 #include "ARMCommon.h"
14
15 static inline unsigned rotateL(unsigned x, unsigned n){
16 return ((x << n) | (x >> (32 - n)));
17 }
18
19 static inline unsigned rotateR(unsigned x, unsigned n){
20 return ((x >> n) | (x << (32 - n)));
21 }
22
23 // finds the end position of largest sequence of zeros in binary representation
24 // of 'immediate'.
25 static int findLargestZeroSequence(unsigned immediate){
26 int max_zero_pos = 0;
27 int max_zero_length = 0;
28 int zero_pos;
29 int zero_length;
30 int pos = 0;
31 int end_pos;
32
33 while ((immediate & 0x3) == 0) {
34 immediate = rotateR(immediate, 2);
35 pos+=2;
36 }
37 end_pos = pos+32;
38
39 while (pos < end_pos){
40 while (((immediate & 0x3) != 0)&&(pos < end_pos)){
41 immediate = rotateR(immediate, 2);
42 pos+=2;
43 }
44 zero_pos = pos;
45 while (((immediate & 0x3) == 0)&&(pos < end_pos)){
46 immediate = rotateR(immediate, 2);
47 pos+=2;
48 }
49 zero_length = pos - zero_pos;
50 if (zero_length > max_zero_length){
51 max_zero_length = zero_length;
52 max_zero_pos = zero_pos % 32;
53 }
54
55 }
56
57 return (max_zero_pos + max_zero_length) % 32;
58 }
59
60 std::vector<unsigned> splitImmediate(unsigned immediate){
61 std::vector<unsigned> immediatePieces;
62
63 if (immediate == 0){
64 immediatePieces.push_back(0);
65 } else {
66 int start_pos = findLargestZeroSequence(immediate);
67 unsigned immediate_tmp = rotateR(immediate, start_pos);
68 int pos = 0;
69 while (pos < 32){
70 while(((immediate_tmp&0x3) == 0)&&(pos<32)){
71 immediate_tmp = rotateR(immediate_tmp,2);
72 pos+=2;
73 }
74 if (pos < 32){
75 immediatePieces.push_back(rotateL(immediate_tmp&0xFF,
76 (start_pos + pos) % 32 ));
77 immediate_tmp = rotateR(immediate_tmp,8);
78 pos+=8;
79 }
80 }
81 }
82 return immediatePieces;
83 }
+0
-22
lib/Target/ARM/ARMCommon.h
None //===-- ARMCommon.h - Define support functions for ARM ----------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by the "Instituto Nokia de Tecnologia" and
5 // is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 //
11 //
12 //===----------------------------------------------------------------------===//
13
14 #ifndef ARM_COMMON_H
15 #define ARM_COMMON_H
16
17 #include <vector>
18
19 std::vector<unsigned> splitImmediate(unsigned immediate);
20
21 #endif
0 //===-- ARMConstantIslandPass.cpp - ARM constant islands --------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by Chris Lattner and is distributed under the
5 // University of Illinois Open Source License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains a pass that splits the constant pool up into 'islands'
10 // which are scattered through-out the function. This is required due to the
11 // limited pc-relative displacements that ARM has.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #define DEBUG_TYPE "arm-cp-islands"
16 #include "ARM.h"
17 #include "ARMInstrInfo.h"
18 #include "llvm/CodeGen/MachineConstantPool.h"
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineJumpTableInfo.h"
22 #include "llvm/Target/TargetAsmInfo.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include "llvm/Support/Compiler.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/ADT/STLExtras.h"
28 #include "llvm/ADT/Statistic.h"
29 #include <iostream>
30 using namespace llvm;
31
32 STATISTIC(NumSplit, "Number of uncond branches inserted");
33
34 namespace {
35 /// ARMConstantIslands - Due to limited pc-relative displacements, ARM
36 /// requires constant pool entries to be scattered among the instructions
37 /// inside a function. To do this, it completely ignores the normal LLVM
38 /// constant pool; instead, it places constants wherever it feels like with
39 /// special instructions.
40 ///
41 /// The terminology used in this pass includes:
42 /// Islands - Clumps of constants placed in the function.
43 /// Water - Potential places where an island could be formed.
44 /// CPE - A constant pool entry that has been placed somewhere, which
45 /// tracks a list of users.
46 class VISIBILITY_HIDDEN ARMConstantIslands : public MachineFunctionPass {
47 /// NextUID - Assign unique ID's to CPE's.
48 unsigned NextUID;
49
50 /// BBSizes - The size of each MachineBasicBlock in bytes of code, indexed
51 /// by MBB Number.
52 std::vector<unsigned> BBSizes;
53
54 /// WaterList - A sorted list of basic blocks where islands could be placed
55 /// (i.e. blocks that don't fall through to the following block, due
56 /// to a return, unreachable, or unconditional branch).
57 std::vector<MachineBasicBlock*> WaterList;
58
59 /// CPUser - One user of a constant pool, keeping the machine instruction
60 /// pointer, the constant pool being referenced, and the max displacement
61 /// allowed from the instruction to the CP.
62 struct CPUser {
63 MachineInstr *MI;
64 MachineInstr *CPEMI;
65 unsigned MaxDisp;
66 CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp)
67 : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp) {}
68 };
69
70 /// CPUsers - Keep track of all of the machine instructions that use various
71 /// constant pools and their max displacement.
72 std::vector<CPUser> CPUsers;
73
74 const TargetInstrInfo *TII;
75 const TargetAsmInfo *TAI;
76 public:
77 virtual bool runOnMachineFunction(MachineFunction &Fn);
78
79 virtual const char *getPassName() const {
80 return "ARM constant island placement pass";
81 }
82
83 private:
84 void DoInitialPlacement(MachineFunction &Fn,
85 std::vector<MachineInstr*> &CPEMIs);
86 void InitialFunctionScan(MachineFunction &Fn,
87 const std::vector<MachineInstr*> &CPEMIs);
88 void SplitBlockBeforeInstr(MachineInstr *MI);
89 bool HandleConstantPoolUser(MachineFunction &Fn, CPUser &U);
90 void UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB);
91
92 unsigned GetInstSize(MachineInstr *MI) const;
93 unsigned GetOffsetOf(MachineInstr *MI) const;
94 };
95 }
96
97 /// createARMConstantIslandPass - returns an instance of the constant island
98 /// placement pass.
99 FunctionPass *llvm::createARMConstantIslandPass() {
100 return new ARMConstantIslands();
101 }
102
103 bool ARMConstantIslands::runOnMachineFunction(MachineFunction &Fn) {
104 // If there are no constants, there is nothing to do.
105 MachineConstantPool &MCP = *Fn.getConstantPool();
106 if (MCP.isEmpty()) return false;
107
108 TII = Fn.getTarget().getInstrInfo();
109 TAI = Fn.getTarget().getTargetAsmInfo();
110
111 // Renumber all of the machine basic blocks in the function, guaranteeing that
112 // the numbers agree with the position of the block in the function.
113 Fn.RenumberBlocks();
114
115 // Perform the initial placement of the constant pool entries. To start with,
116 // we put them all at the end of the function.
117 std::vector<MachineInstr*> CPEMIs;
118 DoInitialPlacement(Fn, CPEMIs);
119
120 /// The next UID to take is the first unused one.
121 NextUID = CPEMIs.size();
122
123 // Do the initial scan of the function, building up information about the
124 // sizes of each block, the location of all the water, and finding all of the
125 // constant pool users.
126 InitialFunctionScan(Fn, CPEMIs);
127 CPEMIs.clear();
128
129 // Iteratively place constant pool entries until there is no change.
130 bool MadeChange;
131 do {
132 MadeChange = false;
133 for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
134 MadeChange |= HandleConstantPoolUser(Fn, CPUsers[i]);
135 } while (MadeChange);
136
137 BBSizes.clear();
138 WaterList.clear();
139 CPUsers.clear();
140
141 return true;
142 }
143
144 /// DoInitialPlacement - Perform the initial placement of the constant pool
145 /// entries. To start with, we put them all at the end of the function.
146 void ARMConstantIslands::DoInitialPlacement(MachineFunction &Fn,
147 std::vector<MachineInstr*> &CPEMIs){
148 // Create the basic block to hold the CPE's.
149 MachineBasicBlock *BB = new MachineBasicBlock();
150 Fn.getBasicBlockList().push_back(BB);
151
152 // Add all of the constants from the constant pool to the end block, use an
153 // identity mapping of CPI's to CPE's.
154 const std::vector<MachineConstantPoolEntry> &CPs =
155 Fn.getConstantPool()->getConstants();
156
157 const TargetData &TD = *Fn.getTarget().getTargetData();
158 for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
159 unsigned Size = TD.getTypeSize(CPs[i].getType());
160 // Verify that all constant pool entries are a multiple of 4 bytes. If not,
161 // we would have to pad them out or something so that instructions stay
162 // aligned.
163 assert((Size & 3) == 0 && "CP Entry not multiple of 4 bytes!");
164 MachineInstr *CPEMI =
165 BuildMI(BB, TII->get(ARM::CONSTPOOL_ENTRY))
166 .addImm(i).addConstantPoolIndex(i).addImm(Size);
167 CPEMIs.push_back(CPEMI);
168 DEBUG(std::cerr << "Moved CPI#" << i << " to end of function as #"
169 << i << "\n");
170 }
171 }
172
173 /// BBHasFallthrough - Return true if the specified basic block can fall through
174 /// into the block immediately after it.
175 static bool BBHasFallthrough(MachineBasicBlock *MBB) {
176 // Get the next machine basic block in the function.
177 MachineFunction::iterator MBBI = MBB;
178 if (next(MBBI) == MBB->getParent()->end()) // Can't fall off end of function.
179 return false;
180
181 MachineBasicBlock *NextBB = next(MBBI);
182 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
183 E = MBB->succ_end(); I != E; ++I)
184 if (*I == NextBB)
185 return true;
186
187 return false;
188 }
189
190 /// InitialFunctionScan - Do the initial scan of the function, building up
191 /// information about the sizes of each block, the location of all the water,
192 /// and finding all of the constant pool users.
193 void ARMConstantIslands::InitialFunctionScan(MachineFunction &Fn,
194 const std::vector<MachineInstr*> &CPEMIs) {
195 for (MachineFunction::iterator MBBI = Fn.begin(), E = Fn.end();
196 MBBI != E; ++MBBI) {
197 MachineBasicBlock &MBB = *MBBI;
198
199 // If this block doesn't fall through into the next MBB, then this is
200 // 'water' where a constant pool island could be placed.
201 if (!BBHasFallthrough(&MBB))
202 WaterList.push_back(&MBB);
203
204 unsigned MBBSize = 0;
205 for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
206 I != E; ++I) {
207 // Add instruction size to MBBSize.
208 MBBSize += GetInstSize(I);
209
210 // Scan the instructions for constant pool operands.
211 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
212 if (I->getOperand(op).isConstantPoolIndex()) {
213 // We found one. The addressing mode tells us the max displacement
214 // from the PC that this instruction permits.
215 unsigned MaxOffs = 0;
216
217 // Basic size info comes from the TSFlags field.
218 unsigned TSFlags = I->getInstrDescriptor()->TSFlags;
219 switch (TSFlags & ARMII::AddrModeMask) {
220 default:
221 // Constant pool entries can reach anything.
222 if (I->getOpcode() == ARM::CONSTPOOL_ENTRY)
223 continue;
224 assert(0 && "Unknown addressing mode for CP reference!");
225 case ARMII::AddrMode1: // AM1: 8 bits << 2
226 MaxOffs = 1 << (8+2); // Taking the address of a CP entry.
227 break;
228 case ARMII::AddrMode2:
229 MaxOffs = 1 << 12; // +-offset_12
230 break;
231 case ARMII::AddrMode3:
232 MaxOffs = 1 << 8; // +-offset_8
233 break;
234 // addrmode4 has no immediate offset.
235 case ARMII::AddrMode5:
236 MaxOffs = 1 << (8+2); // +-(offset_8*4)
237 break;
238 case ARMII::AddrModeT1:
239 MaxOffs = 1 << 5;
240 break;
241 case ARMII::AddrModeT2:
242 MaxOffs = 1 << (5+1);
243 break;
244 case ARMII::AddrModeT4:
245 MaxOffs = 1 << (5+2);
246 break;
247 }
248
249 // Remember that this is a user of a CP entry.
250 MachineInstr *CPEMI =CPEMIs[I->getOperand(op).getConstantPoolIndex()];
251 CPUsers.push_back(CPUser(I, CPEMI, MaxOffs));
252
253 // Instructions can only use one CP entry, don't bother scanning the
254 // rest of the operands.
255 break;
256 }
257 }
258 BBSizes.push_back(MBBSize);
259 }
260 }
261
262 /// FIXME: Works around a gcc miscompilation with -fstrict-aliasing
263 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
264 unsigned JTI) DISABLE_INLINE;
265 static unsigned getNumJTEntries(const std::vector<MachineJumpTableEntry> &JT,
266 unsigned JTI) {
267 return JT[JTI].MBBs.size();
268 }
269
270 /// GetInstSize - Return the size of the specified MachineInstr.
271 ///
272 unsigned ARMConstantIslands::GetInstSize(MachineInstr *MI) const {
273 // Basic size info comes from the TSFlags field.
274 unsigned TSFlags = MI->getInstrDescriptor()->TSFlags;
275
276 switch ((TSFlags & ARMII::SizeMask) >> ARMII::SizeShift) {
277 default:
278 // If this machine instr is an inline asm, measure it.
279 if (MI->getOpcode() == ARM::INLINEASM)
280 return TAI->getInlineAsmLength(MI->getOperand(0).getSymbolName());
281 assert(0 && "Unknown or unset size field for instr!");
282 break;
283 case ARMII::Size8Bytes: return 8; // Arm instruction x 2.
284 case ARMII::Size4Bytes: return 4; // Arm instruction.
285 case ARMII::Size2Bytes: return 2; // Thumb instruction.
286 case ARMII::SizeSpecial: {
287 switch (MI->getOpcode()) {
288 case ARM::CONSTPOOL_ENTRY:
289 // If this machine instr is a constant pool entry, its size is recorded as
290 // operand #2.
291 return MI->getOperand(2).getImm();
292 case ARM::BR_JTr:
293 case ARM::BR_JTm:
294 case ARM::BR_JTadd: {
295 // These are jumptable branches, i.e. a branch followed by an inlined
296 // jumptable. The size is 4 + 4 * number of entries.
297 unsigned JTI = MI->getOperand(MI->getNumOperands()-2).getJumpTableIndex();
298 const MachineFunction *MF = MI->getParent()->getParent();
299 MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
300 const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
301 assert(JTI < JT.size());
302 return getNumJTEntries(JT, JTI) * 4 + 4;
303 }
304 default:
305 // Otherwise, pseudo-instruction sizes are zero.
306 return 0;
307 }
308 }
309 }
310 }
311
312 /// GetOffsetOf - Return the current offset of the specified machine instruction
313 /// from the start of the function. This offset changes as stuff is moved
314 /// around inside the function.
315 unsigned ARMConstantIslands::GetOffsetOf(MachineInstr *MI) const {
316 MachineBasicBlock *MBB = MI->getParent();
317
318 // The offset is composed of two things: the sum of the sizes of all MBB's
319 // before this instruction's block, and the offset from the start of the block
320 // it is in.
321 unsigned Offset = 0;
322
323 // Sum block sizes before MBB.
324 for (unsigned BB = 0, e = MBB->getNumber(); BB != e; ++BB)
325 Offset += BBSizes[BB];
326
327 // Sum instructions before MI in MBB.
328 for (MachineBasicBlock::iterator I = MBB->begin(); ; ++I) {
329 assert(I != MBB->end() && "Didn't find MI in its own basic block?");
330 if (&*I == MI) return Offset;
331 Offset += GetInstSize(I);
332 }
333 }
334
335 /// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
336 /// ID.
337 static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
338 const MachineBasicBlock *RHS) {
339 return LHS->getNumber() < RHS->getNumber();
340 }
341
342 /// UpdateForInsertedWaterBlock - When a block is newly inserted into the
343 /// machine function, it upsets all of the block numbers. Renumber the blocks
344 /// and update the arrays that parallel this numbering.
345 void ARMConstantIslands::UpdateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
346 // Renumber the MBB's to keep them consecutive.
347 NewBB->getParent()->RenumberBlocks(NewBB);
348
349 // Insert a size into BBSizes to align it properly with the (newly
350 // renumbered) block numbers.
351 BBSizes.insert(BBSizes.begin()+NewBB->getNumber(), 0);
352
353 // Next, update WaterList. Specifically, we need to add NewBB as having
354 // available water after it.
355 std::vector<MachineBasicBlock*>::iterator IP =
356 std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
357 CompareMBBNumbers);
358 WaterList.insert(IP, NewBB);
359 }
360
361
362 /// Split the basic block containing MI into two blocks, which are joined by
363 /// an unconditional branch. Update datastructures and renumber blocks to
364 /// account for this change.
365 void ARMConstantIslands::SplitBlockBeforeInstr(MachineInstr *MI) {
366 MachineBasicBlock *OrigBB = MI->getParent();
367
368 // Create a new MBB for the code after the OrigBB.
369 MachineBasicBlock *NewBB = new MachineBasicBlock(OrigBB->getBasicBlock());
370 MachineFunction::iterator MBBI = OrigBB; ++MBBI;
371 OrigBB->getParent()->getBasicBlockList().insert(MBBI, NewBB);
372
373 // Splice the instructions starting with MI over to NewBB.
374 NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());
375
376 // Add an unconditional branch from OrigBB to NewBB.
377 BuildMI(OrigBB, TII->get(ARM::B)).addMBB(NewBB);
378 NumSplit++;
379
380 // Update the CFG. All succs of OrigBB are now succs of NewBB.
381 while (!OrigBB->succ_empty()) {
382 MachineBasicBlock *Succ = *OrigBB->succ_begin();
383 OrigBB->removeSuccessor(Succ);
384 NewBB->addSuccessor(Succ);
385
386 // This pass should be run after register allocation, so there should be no
387 // PHI nodes to update.
388 assert((Succ->empty() || Succ->begin()->getOpcode() != TargetInstrInfo::PHI)
389 && "PHI nodes should be eliminated by now!");
390 }
391
392 // OrigBB branches to NewBB.
393 OrigBB->addSuccessor(NewBB);
394
395 // Update internal data structures to account for the newly inserted MBB.
396 UpdateForInsertedWaterBlock(NewBB);
397
398 // Figure out how large NewBB is.
399 unsigned NewBBSize = 0;
400 for (MachineBasicBlock::iterator I = NewBB->begin(), E = NewBB->end();
401 I != E; ++I)
402 NewBBSize += GetInstSize(I);
403
404 // Set the size of NewBB in BBSizes.
405 BBSizes[NewBB->getNumber()] = NewBBSize;
406
407 // We removed instructions from OrigBB, subtract that off from its size.
408 // Add 4 to the block to count the unconditional branch we added to it.
409 BBSizes[OrigBB->getNumber()] -= NewBBSize-4;
410 }
411
412 /// HandleConstantPoolUser - Analyze the specified user, checking to see if it
413 /// is out-of-range. If so, pick up the constant pool value and move it to a
414 /// location that is in range.
415 bool ARMConstantIslands::HandleConstantPoolUser(MachineFunction &Fn, CPUser &U){
416 MachineInstr *UserMI = U.MI;
417 MachineInstr *CPEMI = U.CPEMI;
418
419 unsigned UserOffset = GetOffsetOf(UserMI);
420 unsigned CPEOffset = GetOffsetOf(CPEMI);
421
422 DEBUG(std::cerr << "User of CPE#" << CPEMI->getOperand(0).getImm()
423 << " max delta=" << U.MaxDisp
424 << " at offset " << int(UserOffset-CPEOffset) << "\t"
425 << *UserMI);
426
427 // Check to see if the CPE is already in-range.
428 if (UserOffset < CPEOffset) {
429 // User before the CPE.
430 if (CPEOffset-UserOffset <= U.MaxDisp)
431 return false;
432 } else {
433 if (UserOffset-CPEOffset <= U.MaxDisp)
434 return false;
435 }
436
437
438 // Solution guaranteed to work: split the user's MBB right before the user and
439 // insert a clone of the CPE into the newly created water.
440
441 // If the user isn't at the start of its MBB, or if there is a fall-through
442 // into the user's MBB, split the MBB before the User.
443 MachineBasicBlock *UserMBB = UserMI->getParent();
444 if (&UserMBB->front() != UserMI ||
445 UserMBB == &Fn.front() || // entry MBB of function.
446 BBHasFallthrough(prior(MachineFunction::iterator(UserMBB)))) {
447 // TODO: Search for the best place to split the code. In practice, using
448 // loop nesting information to insert these guys outside of loops would be
449 // sufficient.
450 SplitBlockBeforeInstr(UserMI);
451
452 // UserMI's BB may have changed.
453 UserMBB = UserMI->getParent();
454 }
455
456 // Okay, we know we can put an island before UserMBB now, do it!
457 MachineBasicBlock *NewIsland = new MachineBasicBlock();
458 Fn.getBasicBlockList().insert(UserMBB, NewIsland);
459
460 // Update internal data structures to account for the newly inserted MBB.
461 UpdateForInsertedWaterBlock(NewIsland);
462
463 // Now that we have an island to add the CPE to, clone the original CPE and
464 // add it to the island.
465 unsigned ID = NextUID++;
466 unsigned CPI = CPEMI->getOperand(1).getConstantPoolIndex();
467 unsigned Size = CPEMI->getOperand(2).getImm();
468
469 // Build a new CPE for this user.
470 U.CPEMI = BuildMI(NewIsland, TII->get(ARM::CONSTPOOL_ENTRY))
471 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
472
473 // Increase the size of the island block to account for the new entry.
474 BBSizes[NewIsland->getNumber()] += Size;
475
476 // Finally, change the CPI in the instruction operand to be ID.
477 for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
478 if (UserMI->getOperand(i).isConstantPoolIndex()) {
479 UserMI->getOperand(i).setConstantPoolIndex(ID);
480 break;
481 }
482
483 DEBUG(std::cerr << " Moved CPE to #" << ID << " CPI=" << CPI << "\t"
484 << *UserMI);
485
486
487 return true;
488 }
489
0 //===- ARMConstantPoolValue.cpp - ARM constantpool value --------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by Evan Cheng and is distributed under the
5 // University of Illinois Open Source License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ARM specific constantpool value class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "ARMConstantPoolValue.h"
14 #include "llvm/ADT/FoldingSet.h"
15 #include "llvm/GlobalValue.h"
16 using namespace llvm;
17
18 ARMConstantPoolValue::ARMConstantPoolValue(GlobalValue *gv, unsigned id,
19 bool isNonLazy, unsigned char PCAdj)
20 : MachineConstantPoolValue((const Type*)gv->getType()),
21 GV(gv), LabelId(id), isNonLazyPtr(isNonLazy), PCAdjust(PCAdj) {}
22
23 int ARMConstantPoolValue::getExistingMachineCPValue(MachineConstantPool *CP,
24 unsigned Alignment) {
25 unsigned AlignMask = (1 << Alignment)-1;
26 const std::vector<MachineConstantPoolEntry> Constants = CP->getConstants();
27 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
28 if (Constants[i].isMachineConstantPoolEntry() &&
29 (Constants[i].Offset & AlignMask) == 0) {
30 ARMConstantPoolValue *CPV =
31 (ARMConstantPoolValue *)Constants[i].Val.MachineCPVal;
32 if (CPV->GV == GV && CPV->LabelId == LabelId &&
33 CPV->isNonLazyPtr == isNonLazyPtr)
34 return i;
35 }
36 }
37
38 return -1;
39 }
40
41 void
42 ARMConstantPoolValue::AddSelectionDAGCSEId(FoldingSetNodeID &ID) {
43 ID.AddPointer(GV);
44 ID.AddInteger(LabelId);
45 ID.AddInteger((unsigned)isNonLazyPtr);
46 ID.AddInteger(PCAdjust);
47 }
48
49 void ARMConstantPoolValue::print(std::ostream &O) const {
50 O << GV->getName();
51 if (isNonLazyPtr) O << "$non_lazy_ptr";
52 if (PCAdjust != 0) O << "-(LPIC" << LabelId << "+"
53 << (unsigned)PCAdjust << ")";
54 }
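For illustration only, this is roughly the text the print method above emits for a hypothetical non-lazy-pointer entry referencing a global named foo, with label id 3 and the ARM pc adjustment of 8 (all values assumed, not taken from the patch):

#include <cstdio>

int main() {
  const char *Name = "foo";             // hypothetical global value name
  unsigned LabelId = 3, PCAdjust = 8;   // hypothetical field values
  // Matches the format string built by ARMConstantPoolValue::print above.
  std::printf("%s$non_lazy_ptr-(LPIC%u+%u)\n", Name, LabelId, PCAdjust);
  return 0;
}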
0 //===- ARMConstantPoolValue.h - ARM constantpool value ----------*- C++ -*-===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file was developed by Evan Cheng and is distributed under the
5 // University of Illinois Open Source License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the ARM specific constantpool value class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #ifndef LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
14 #define LLVM_TARGET_ARM_CONSTANTPOOLVALUE_H
15
16 #include "llvm/CodeGen/MachineConstantPool.h"
17
18 namespace llvm {
19
20 /// ARMConstantPoolValue - ARM specific constantpool value. This is used to
21 /// represent PC relative displacement between the address of the load
22 /// instruction and the global value being loaded, i.e. (&GV-(LPIC+8)).
23 class ARMConstantPoolValue : public MachineConstantPoolValue {
24 GlobalValue *GV; // GlobalValue being loaded.
25 unsigned LabelId; // Label id of the load.
26 bool isNonLazyPtr; // True if loading a Mac OS X non_lazy_ptr stub.
27 unsigned char PCAdjust; // Extra adjustment if constantpool is pc relative.
28 // 8 for ARM, 4 for Thumb.
29
30 public:
31 ARMConstantPoolValue(GlobalValue *gv, unsigned id, bool isNonLazy = false,
32 unsigned char PCAdj = 0);
33
34 GlobalValue *getGV() const { return GV; }
35 unsigned getLabelId() const { return LabelId; }
36 bool isNonLazyPointer() const { return isNonLazyPtr; }
37 unsigned char getPCAdjustment() const { return PCAdjust; }
38
39 virtual int getExistingMachineCPValue(MachineConstantPool *CP,
40 unsigned Alignment);
41
42 virtual void AddSelectionDAGCSEId(FoldingSetNodeID &ID);
43
44 virtual void print(std::ostream &O) const;
45 };
46
47 }
48
49 #endif
1616
1717 #include "ARM.h"
1818 #include "llvm/Target/TargetFrameInfo.h"
19 #include "llvm/Target/TargetMachine.h"
19 #include "ARMSubtarget.h"
2020
2121 namespace llvm {
2222
23 class ARMFrameInfo: public TargetFrameInfo {
24
23 class ARMFrameInfo : public TargetFrameInfo {
2524 public:
26 ARMFrameInfo()
27 : TargetFrameInfo(TargetFrameInfo::StackGrowsDown, 8, 0) {
25 ARMFrameInfo(const ARMSubtarget &ST)
26 : TargetFrameInfo(StackGrowsDown, ST.getStackAlignment(), 0) {
2827 }
29
3028 };
3129
3230 } // End llvm namespace
1111 //===----------------------------------------------------------------------===//
1212
1313 #include "ARM.h"
14 #include "ARMISelLowering.h"
1415 #include "ARMTargetMachine.h"
15 #include "ARMCommon.h"
16 #include "ARMAddressingModes.h"
1617 #include "llvm/CallingConv.h"
18 #include "llvm/Constants.h"
1719 #include "llvm/DerivedTypes.h"
1820 #include "llvm/Function.h"
19 #include "llvm/Constants.h"
2021 #include "llvm/Intrinsics.h"
21 #include "llvm/ADT/VectorExtras.h"
2222 #include "llvm/CodeGen/MachineFrameInfo.h"
2323 #include "llvm/CodeGen/MachineFunction.h"
2424 #include "llvm/CodeGen/MachineInstrBuilder.h"
2727 #include "llvm/CodeGen/SSARegMap.h"
2828 #include "llvm/Target/TargetLowering.h"
2929 #include "llvm/Support/Debug.h"
30 #include "llvm/Support/MathExtras.h"
31 #include <vector>
30 #include <iostream>
3231 using namespace llvm;
33
34 namespace {
35 class ARMTargetLowering : public TargetLowering {
36 int VarArgsFrameIndex; // FrameIndex for start of varargs area.
37 public:
38 ARMTargetLowering(TargetMachine &TM);
39 virtual SDOperand LowerOperation(SDOperand Op, SelectionDAG &DAG);
40 virtual const char *getTargetNodeName(unsigned Opcode) const;
41 std::vector<unsigned>
42 getRegClassForInlineAsmConstraint(const std::string &Constraint,
43 MVT::ValueType VT) const;
44 };
45
46 }
47
48 ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
49 : TargetLowering(TM) {
50 addRegisterClass(MVT::i32, ARM::IntRegsRegisterClass);
51 addRegisterClass(MVT::f32, ARM::FPRegsRegisterClass);
52 addRegisterClass(MVT::f64, ARM::DFPRegsRegisterClass);
53
54 setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
55
56 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
57 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
58
59 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
60 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
61
62 setOperationAction(ISD::RET, MVT::Other, Custom);
63 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
64 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
65
66 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
67 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
68 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
69
70 setOperationAction(ISD::SELECT, MVT::i32, Expand);
71 setOperationAction(ISD::SELECT, MVT::f32, Expand);
72 setOperationAction(ISD::SELECT, MVT::f64, Expand);
73
74 setOperationAction(ISD::SETCC, MVT::i32, Expand);
75 setOperationAction(ISD::SETCC, MVT::f32, Expand);
76 setOperationAction(ISD::SETCC, MVT::f64, Expand);
77
78 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
79 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
80 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
81
82 setOperationAction(ISD::MEMMOVE, MVT::Other, Expand);
83 setOperationAction(ISD::MEMSET, MVT::Other, Expand);
84 setOperationAction(ISD::MEMCPY, MVT::Other, Expand);
85
86 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
87 setOperationAction(ISD::BRIND, MVT::Other, Expand);
88 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
89 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
90 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
91
92 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
93
94 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
95 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
96 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
97 setOperationAction(ISD::SDIV, MVT::i32, Expand);
98 setOperationAction(ISD::UDIV, MVT::i32, Expand);
99 setOperationAction(ISD::SREM, MVT::i32, Expand);
100 setOperationAction(ISD::UREM, MVT::i32, Expand);
101
102 setOperationAction(ISD::VASTART, MVT::Other, Custom);
103 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
104 setOperationAction(ISD::VAEND, MVT::Other, Expand);
105 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
106
107 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
108 setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
109
110 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
111 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
112
113 setStackPointerRegisterToSaveRestore(ARM::R13);
114
115 setSchedulingPreference(SchedulingForRegPressure);
116 computeRegisterProperties();
117 }
118
119 namespace llvm {
120 namespace ARMISD {
121 enum NodeType {
122 // Start the numbering where the builtin ops and target ops leave off.
123 FIRST_NUMBER = ISD::BUILTIN_OP_END+ARM::INSTRUCTION_LIST_END,
124 /// CALL - A direct function call.
125 CALL,
126
127 /// Return with a flag operand.
128 RET_FLAG,
129
130 CMP,
131
132 SELECT,
133
134 BR,
135
136 FSITOS,
137 FTOSIS,
138
139 FSITOD,
140 FTOSID,
141
142 FUITOS,
143 FTOUIS,
144
145 FUITOD,
146 FTOUID,
147
148 FMRRD,
149
150 FMDRR,
151
152 FMSTAT
153 };
154 }
155 }
156
157 /// DAGFPCCToARMCC - Convert a DAG fp condition code to an ARM CC
158 // Unordered = !N & !Z & C & V = V
159 // Ordered = N | Z | !C | !V = N | Z | !V
160 static std::vector<unsigned> DAGFPCCToARMCC(ISD::CondCode CC) {
161 switch (CC) {
162 default:
163 assert(0 && "Unknown fp condition code!");
164 // SETOEQ = (N | Z | !V) & Z = Z = EQ
165 case ISD::SETEQ:
166 case ISD::SETOEQ: return make_vector<unsigned>(ARMCC::EQ, 0);
167 // SETOGT = (N | Z | !V) & !N & !Z = !V &!N &!Z = (N = V) & !Z = GT
168 case ISD::SETGT:
169 case ISD::SETOGT: return make_vector<unsigned>(ARMCC::GT, 0);
170 // SETOGE = (N | Z | !V) & !N = (Z | !V) & !N = !V & !N = GE
171 case ISD::SETGE:
172 case ISD::SETOGE: return make_vector<unsigned>(ARMCC::GE, 0);
173 // SETOLT = (N | Z | !V) & N = N = MI
174 case ISD::SETLT:
175 case ISD::SETOLT: return make_vector<unsigned>(ARMCC::MI, 0);
176 // SETOLE = (N | Z | !V) & (N | Z) = N | Z = !C | Z = LS
177 case ISD::SETLE:
178 case ISD::SETOLE: return make_vector<unsigned>(ARMCC::LS, 0);
179 // SETONE = OGT | OLT
180 case ISD::SETONE: return make_vector<unsigned>(ARMCC::GT, ARMCC::MI, 0);
181 // SETO = N | Z | !V = Z | !V = !V = VC
182 case ISD::SETO: return make_vector<unsigned>(ARMCC::VC, 0);
183 // SETUO = V = VS
184 case ISD::SETUO: return make_vector<unsigned>(ARMCC::VS, 0);
185 // SETUEQ = V | Z (need two instructions) = EQ/VS
186 case ISD::SETUEQ: return make_vector<unsigned>(ARMCC::EQ, ARMCC::VS, 0);
187 // SETUGT = V | (!Z & !N) = !Z & !N = !Z & C = HI
188 case ISD::SETUGT: return make_vector<unsigned>(ARMCC::HI, 0);
189 // SETUGE = V | !N = !N = PL
190 case ISD::SETUGE: return make_vector<unsigned>(ARMCC::PL, 0);
191 // SETULT = V | N = LT
192 case ISD::SETULT: return make_vector<unsigned>(ARMCC::LT, 0);
193 // SETULE = V | Z | N = LE
194 case ISD::SETULE: return make_vector<unsigned>(ARMCC::LE, 0);
195 // SETUNE = V | !Z = !Z = NE
196 case ISD::SETNE:
197 case ISD::SETUNE: return make_vector<unsigned>(ARMCC::NE, 0);
198 }
199 }
200
201 /// DAGIntCCToARMCC - Convert a DAG integer condition code to an ARM CC
202 static std::vector<unsigned> DAGIntCCToARMCC(ISD::CondCode CC) {
203 switch (CC) {
204 default:
205 assert(0 && "Unknown integer condition code!");
206 case ISD::SETEQ: return make_vector<unsigned>(ARMCC::EQ, 0);
207 case ISD::SETNE: return make_vector<unsigned>(ARMCC::NE, 0);
208 case ISD::SETLT: return make_vector<unsigned>(ARMCC::LT, 0);
209 case ISD::SETLE: return make_vector<unsigned>(ARMCC::LE, 0);
210 case ISD::SETGT: return make_vector<unsigned>(ARMCC::GT, 0);
211 case ISD::SETGE: return make_vector<unsigned>(ARMCC::GE, 0);
212 case ISD::SETULT: return make_vector<unsigned>(ARMCC::CC, 0);
213 case ISD::SETULE: return make_vector<unsigned>(ARMCC::LS, 0);
214 case ISD::SETUGT: return make_vector<unsigned>(ARMCC::HI, 0);
215 case ISD::SETUGE: return make_vector<unsigned>(ARMCC::CS, 0);
216 }
217 }
218
219 std::vector<unsigned> ARMTargetLowering::
220 getRegClassForInlineAsmConstraint(const std::string &Constraint,
221 MVT::ValueType VT) const {
222 if (Constraint.size() == 1) {
223 // FIXME: handling only r regs
224 switch (Constraint[0]) {
225 default: break; // Unknown constraint letter
226
227 case 'r': // GENERAL_REGS
228 case 'R': // LEGACY_REGS
229 if (VT == MVT::i32)
230 return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
231 ARM::R4, ARM::R5, ARM::R6, ARM::R7,
232 ARM::R8, ARM::R9, ARM::R10, ARM::R11,
233 ARM::R12, ARM::R13, ARM::R14, 0);
234 break;
235
236 }
237 }
238
239 return std::vector<unsigned>();
240 }
241
242 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
243 switch (Opcode) {
244 default: return 0;
245 case ARMISD::CALL: return "ARMISD::CALL";
246 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
247 case ARMISD::SELECT: return "ARMISD::SELECT";
248 case ARMISD::CMP: return "ARMISD::CMP";
249 case ARMISD::BR: return "ARMISD::BR";
250 case ARMISD::FSITOS: return "ARMISD::FSITOS";
251 case ARMISD::FTOSIS: return "ARMISD::FTOSIS";
252 case ARMISD::FSITOD: return "ARMISD::FSITOD";
253 case ARMISD::FTOSID: return "ARMISD::FTOSID";
254 case ARMISD::FUITOS: return "ARMISD::FUITOS";
255 case ARMISD::FTOUIS: return "ARMISD::FTOUIS";
256 case ARMISD::FUITOD: return "ARMISD::FUITOD";
257 case ARMISD::FTOUID: return "ARMISD::FTOUID";
258 case ARMISD::FMRRD: return "ARMISD::FMRRD";
259 case ARMISD::FMDRR: return "ARMISD::FMDRR";
260 case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
261 }
262 }
263
264 class ArgumentLayout {
265 std::vector<bool> is_reg;
266 std::vector<unsigned> pos;
267 std::vector<MVT::ValueType> types;
268 public:
269 ArgumentLayout(const std::vector<MVT::ValueType> &Types) {
270 types = Types;
271
272 unsigned RegNum = 0;
273 unsigned StackOffset = 0;
274 for(std::vector<MVT::ValueType>::const_iterator I = Types.begin();
275 I != Types.end();
276 ++I) {
277 MVT::ValueType VT = *I;
278 assert(VT == MVT::i32 || VT == MVT::f32 || VT == MVT::f64);
279 unsigned size = MVT::getSizeInBits(VT)/32;
280
281 RegNum = ((RegNum + size - 1) / size) * size;
282 if (RegNum < 4) {
283 pos.push_back(RegNum);
284 is_reg.push_back(true);
285 RegNum += size;
286 } else {
287 unsigned bytes = size * 32/8;
288 StackOffset = ((StackOffset + bytes - 1) / bytes) * bytes;
289 pos.push_back(StackOffset);
290 is_reg.push_back(false);
291 StackOffset += bytes;
292 }
293 }
294 }
295 unsigned getRegisterNum(unsigned argNum) {
296 assert(isRegister(argNum));
297 return pos[argNum];
298 }
299 unsigned getOffset(unsigned argNum) {
300 assert(isOffset(argNum));
301 return pos[argNum];
302 }
303 unsigned isRegister(unsigned argNum) {
304 assert(argNum < is_reg.size());
305 return is_reg[argNum];
306 }
307 unsigned isOffset(unsigned argNum) {
308 return !isRegister(argNum);
309 }
310 MVT::ValueType getType(unsigned argNum) {
311 assert(argNum < types.size());
312 return types[argNum];
313 }
314 unsigned getStackSize(void) {
315 int last = is_reg.size() - 1;
316 if (last < 0)
317 return 0;
318 if (isRegister(last))
319 return 0;
320 return getOffset(last) + MVT::getSizeInBits(getType(last))/8;
321 }
322 int lastRegArg(void) {
323 int size = is_reg.size();
324 int last = 0;
325 while(last < size && isRegister(last))
326 last++;
327 last--;
328 return last;
329 }
330 int lastRegNum(void) {
331 int l = lastRegArg();
332 if (l < 0)
333 return -1;
334 unsigned r = getRegisterNum(l);
335 MVT::ValueType t = getType(l);
336 assert(t == MVT::i32 || t == MVT::f32 || t == MVT::f64);
337 if (t == MVT::f64)
338 return r + 1;
339 return r;
340 }
341 };
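A standalone sketch (illustrative, not part of the patch) of the layout rule ArgumentLayout encodes: i32/f32 arguments take one of r0-r3, f64 takes an aligned register pair, and anything left over goes to the stack with matching alignment. For a hypothetical call f(int, double, int) this assigns r0, r2/r3, and stack offset 0:

#include <cstdio>
#include <vector>

enum ArgKind { I32, F32, F64 };

int main() {
  // Hypothetical signature f(int, double, int).
  std::vector<ArgKind> Args;
  Args.push_back(I32); Args.push_back(F64); Args.push_back(I32);

  unsigned RegNum = 0, StackOffset = 0;
  for (unsigned i = 0; i != Args.size(); ++i) {
    unsigned size = (Args[i] == F64) ? 2 : 1;        // size in 32-bit words
    RegNum = ((RegNum + size - 1) / size) * size;    // pair-align f64
    if (RegNum < 4) {
      if (size == 2)
        std::printf("arg %u -> r%u/r%u\n", i, RegNum, RegNum + 1);
      else
        std::printf("arg %u -> r%u\n", i, RegNum);
      RegNum += size;
    } else {
      unsigned bytes = size * 4;
      StackOffset = ((StackOffset + bytes - 1) / bytes) * bytes;
      std::printf("arg %u -> stack offset %u\n", i, StackOffset);
      StackOffset += bytes;
    }
  }
  // Prints: arg 0 -> r0, arg 1 -> r2/r3, arg 2 -> stack offset 0.
  return 0;
}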
342
343 // This transforms an ISD::CALL node into a
344 // callseq_start <- ARMISD::CALL <- callseq_end
345 // chain
346 static SDOperand LowerCALL(SDOperand Op, SelectionDAG &DAG) {
347 SDOperand Chain = Op.getOperand(0);
348 unsigned CallConv = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
349 assert((CallConv == CallingConv::C ||
350 CallConv == CallingConv::Fast)
351 && "unknown calling convention");
352 SDOperand Callee = Op.getOperand(4);
353 unsigned NumOps = (Op.getNumOperands() - 5) / 2;
354 SDOperand StackPtr = DAG.getRegister(ARM::R13, MVT::i32);
355 static const unsigned regs[] = {
356 ARM::R0, ARM::R1, ARM::R2, ARM::R3
357 };
358
359 std::vector<MVT::ValueType> Types;
360 for (unsigned i = 0; i < NumOps; ++i) {
361 MVT::ValueType VT = Op.getOperand(5+2*i).getValueType();
362 Types.push_back(VT);
363 }
364 ArgumentLayout Layout(Types);
365
366 unsigned NumBytes = Layout.getStackSize();
367
368 Chain = DAG.getCALLSEQ_START(Chain,
369 DAG.getConstant(NumBytes, MVT::i32));
370
371 //Build a sequence of stores
372 std::vector<SDOperand> MemOpChains;
373 for (unsigned i = Layout.lastRegArg() + 1; i < NumOps; ++i) {
374 SDOperand Arg = Op.getOperand(5+2*i);
375 unsigned ArgOffset = Layout.getOffset(i);
376 SDOperand PtrOff = DAG.getConstant(ArgOffset, StackPtr.getValueType());
377 PtrOff = DAG.getNode(ISD::ADD, MVT::i32, StackPtr, PtrOff);
378 MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
379 }
380 if (!MemOpChains.empty())
381 Chain = DAG.getNode(ISD::TokenFactor, MVT::Other,
382 &MemOpChains[0], MemOpChains.size());
383
384 // If the callee is a GlobalAddress node (quite common, every direct call is)
385 // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
386 // Likewise ExternalSymbol -> TargetExternalSymbol.
387 assert(Callee.getValueType() == MVT::i32);
388 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
389 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
390 else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
391 Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);
392
393 // If this is a direct call, pass the chain and the callee.
394 assert (Callee.Val);
395 std::vector<SDOperand> Ops;
396 Ops.push_back(Chain);
397 Ops.push_back(Callee);
398
399 // Build a sequence of copy-to-reg nodes chained together with token chain
400 // and flag operands which copy the outgoing args into the appropriate regs.
401 SDOperand InFlag;
402 for (int i = 0, e = Layout.lastRegArg(); i <= e; ++i) {
403 SDOperand Arg = Op.getOperand(5+2*i);
404 unsigned RegNum = Layout.getRegisterNum(i);
405 unsigned Reg1 = regs[RegNum];
406 MVT::ValueType VT = Layout.getType(i);
407 assert(VT == Arg.getValueType());
408 assert(VT == MVT::i32 || VT == MVT::f32 || VT == MVT::f64);
409
410 // Add argument register to the end of the list so that it is known live
411 // into the call.
412 Ops.push_back(DAG.getRegister(Reg1, MVT::i32));
413 if (VT == MVT::f64) {
414 unsigned Reg2 = regs[RegNum + 1];
415 SDOperand SDReg1 = DAG.getRegister(Reg1, MVT::i32);
416 SDOperand SDReg2 = DAG.getRegister(Reg2, MVT::i32);
417
418 Ops.push_back(DAG.getRegister(Reg2, MVT::i32));
419 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag);
420 SDOperand Ops[] = {Chain, SDReg1, SDReg2, Arg, InFlag};
421 Chain = DAG.getNode(ARMISD::FMRRD, VTs, Ops, InFlag.Val ? 5 : 4);
422 } else {
423 if (VT == MVT::f32)
424 Arg = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Arg);
425 Chain = DAG.getCopyToReg(Chain, Reg1, Arg, InFlag);
426 }
427 InFlag = Chain.getValue(1);
428 }
429
430 std::vector<MVT::ValueType> NodeTys;
431 NodeTys.push_back(MVT::Other); // Returns a chain
432 NodeTys.push_back(MVT::Flag); // Returns a flag for retval copy to use.
433
434 unsigned CallOpc = ARMISD::CALL;
435 if (InFlag.Val)
436 Ops.push_back(InFlag);
437 Chain = DAG.getNode(CallOpc, NodeTys, &Ops[0], Ops.size());
438 InFlag = Chain.getValue(1);
439
440 std::vector<SDOperand> ResultVals;
441 NodeTys.clear();
442
443 // If the call has results, copy the values out of the ret val registers.
444 MVT::ValueType VT = Op.Val->getValueType(0);
445 if (VT != MVT::Other) {
446 assert(VT == MVT::i32 || VT == MVT::f32 || VT == MVT::f64);
447
448 SDOperand Value1 = DAG.getCopyFromReg(Chain, ARM::R0, MVT::i32, InFlag);
449 Chain = Value1.getValue(1);
450 InFlag = Value1.getValue(2);
451 NodeTys.push_back(VT);
452 if (VT == MVT::i32) {
453 ResultVals.push_back(Value1);
454 if (Op.Val->getValueType(1) == MVT::i32) {
455 SDOperand Value2 = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32, InFlag);
456 Chain = Value2.getValue(1);
457 ResultVals.push_back(Value2);
458 NodeTys.push_back(VT);
459 }
460 }
461 if (VT == MVT::f32) {
462 SDOperand Value = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, Value1);
463 ResultVals.push_back(Value);
464 }
465 if (VT == MVT::f64) {
466 SDOperand Value2 = DAG.getCopyFromReg(Chain, ARM::R1, MVT::i32, InFlag);
467 Chain = Value2.getValue(1);
468 SDOperand Value = DAG.getNode(ARMISD::FMDRR, MVT::f64, Value1, Value2);
469 ResultVals.push_back(Value);
470 }
471 }
472
473 Chain = DAG.getNode(ISD::CALLSEQ_END, MVT::Other, Chain,
474 DAG.getConstant(NumBytes, MVT::i32));
475 NodeTys.push_back(MVT::Other);
476
477 if (ResultVals.empty())
478 return Chain;
479
480 ResultVals.push_back(Chain);
481 SDOperand Res = DAG.getNode(ISD::MERGE_VALUES, NodeTys, &ResultVals[0],
482 ResultVals.size());
483 return Res.getValue(Op.ResNo);
484 }
485
486 static SDOperand LowerRET(SDOperand Op, SelectionDAG &DAG) {
487 SDOperand Copy;
488 SDOperand Chain = Op.getOperand(0);
489 SDOperand R0 = DAG.getRegister(ARM::R0, MVT::i32);
490 SDOperand R1 = DAG.getRegister(ARM::R1, MVT::i32);
491
492 switch(Op.getNumOperands()) {
493 default:
494 assert(0 && "Do not know how to return this many arguments!");
495 abort();
496 case 1: {
497 SDOperand LR = DAG.getRegister(ARM::R14, MVT::i32);
498 return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Chain);
499 }
500 case 3: {
501 SDOperand Val = Op.getOperand(1);
502 assert(Val.getValueType() == MVT::i32 ||
503 Val.getValueType() == MVT::f32 ||
504 Val.getValueType() == MVT::f64);
505
506 if (Val.getValueType() == MVT::f64) {
507 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag);
508 SDOperand Ops[] = {Chain, R0, R1, Val};
509 Copy = DAG.getNode(ARMISD::FMRRD, VTs, Ops, 4);
510 } else {
511 if (Val.getValueType() == MVT::f32)
512 Val = DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Val);
513 Copy = DAG.getCopyToReg(Chain, R0, Val, SDOperand());
514 }
515
516 if (DAG.getMachineFunction().liveout_empty()) {
517 DAG.getMachineFunction().addLiveOut(ARM::R0);
518 if (Val.getValueType() == MVT::f64)
519 DAG.getMachineFunction().addLiveOut(ARM::R1);
520 }
521 break;
522 }
523 case 5:
524 Copy = DAG.getCopyToReg(Chain, ARM::R1, Op.getOperand(3), SDOperand());
525 Copy = DAG.getCopyToReg(Copy, ARM::R0, Op.getOperand(1), Copy.getValue(1));
526 // If we haven't noted the R0+R1 are live out, do so now.
527 if (DAG.getMachineFunction().liveout_empty()) {
528 DAG.getMachineFunction().addLiveOut(ARM::R0);
529 DAG.getMachineFunction().addLiveOut(ARM::R1);
530 }
531 break;
532 }
533
534 //We must use RET_FLAG instead of BRIND because BRIND doesn't have a flag
535 return DAG.getNode(ARMISD::RET_FLAG, MVT::Other, Copy, Copy.getValue(1));
536 }
537
538 static SDOperand LowerConstantPool(SDOperand Op, SelectionDAG &DAG) {
539 MVT::ValueType PtrVT = Op.getValueType();
540 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
541 Constant *C = CP->getConstVal();
542 SDOperand CPI = DAG.getTargetConstantPool(C, PtrVT, CP->getAlignment());
543
544 return CPI;
545 }
546
547 SDOperand LegalizeImmediate(uint32_t immediate, SelectionDAG &DAG,
548 bool canReturnConstant){
549 SDOperand Shift = DAG.getTargetConstant(0, MVT::i32);
550 SDOperand ShiftType = DAG.getTargetConstant(ARMShift::LSL, MVT::i32);
551 std::vector<unsigned> immediatePieces = splitImmediate(immediate);
552 if (immediatePieces.size()>1){
553 unsigned movInst = ARM::MOV;
554 unsigned orInst = ARM::ORR;
555 SDNode *node;
556 //try mvn
557 std::vector<unsigned> immediateNegPieces = splitImmediate(~immediate);
558 if (immediatePieces.size() > immediateNegPieces.size()) {
559 //use mvn/eor
560 movInst = ARM::MVN;
561 orInst = ARM::EOR;
562 immediatePieces = immediateNegPieces;
563 }
564 SDOperand n = DAG.getTargetConstant(immediatePieces[0], MVT::i32);
565 node = DAG.getTargetNode(movInst, MVT::i32, n, Shift, ShiftType);
566 std::vector<unsigned>::iterator it;
567 for (it=immediatePieces.begin()+1; it != immediatePieces.end(); ++it){
568 n = DAG.getTargetConstant(*it, MVT::i32);
569 SDOperand ops[] = {SDOperand(node, 0), n, Shift, ShiftType};
570 node = DAG.getTargetNode(orInst, MVT::i32, ops, 4);
571 }
572 return SDOperand(node, 0);
573 } else {
574 if (canReturnConstant)
575 return DAG.getTargetConstant(immediate, MVT::i32);
576 else {
577 SDOperand n = DAG.getTargetConstant(immediate, MVT::i32);
578 SDNode *node = DAG.getTargetNode(ARM::MOV, MVT::i32, n, Shift,
579 ShiftType);
580 return SDOperand(node, 0);
581 }
582 }
583 }
584
585 static SDOperand LowerConstantFP(SDOperand Op, SelectionDAG &DAG) {
586 MVT::ValueType VT = Op.getValueType();
587 SDOperand Shift = DAG.getTargetConstant(0, MVT::i32);
588 SDOperand ShiftType = DAG.getTargetConstant(ARMShift::LSL, MVT::i32);
589 SDNode *node;
590 switch (VT) {
591 default: assert(0 && "VT!=f32 && VT!=f64");
592 case MVT::f32: {
593 float val = cast<ConstantFPSDNode>(Op)->getValue();
594 uint32_t i32_val = FloatToBits(val);
595 SDOperand c = LegalizeImmediate(i32_val, DAG, false);
596 node = DAG.getTargetNode(ARM::FMSR, MVT::f32, c);
597 break;
598 }
599 case MVT::f64: {
600 double val = cast<ConstantFPSDNode>(Op)->getValue();
601 uint64_t i64_val = DoubleToBits(val);
602 SDOperand hi = LegalizeImmediate(Hi_32(i64_val), DAG, false);
603 SDOperand lo = LegalizeImmediate(Lo_32(i64_val), DAG, false);
604 node = DAG.getTargetNode(ARM::FMDRR, MVT::f64, lo, hi);
605 break;
606 }
607 }
608 return SDOperand(node, 0);
609 }
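LowerConstantFP above relies on reinterpreting the FP constant's bit pattern as an integer (the FloatToBits/DoubleToBits helpers from MathExtras.h), materializing that integer with MOV/ORR, and moving it into a VFP register with FMSR or FMDRR. A minimal, self-contained illustration of the bit-cast step; floatToBits here is a stand-in, not the LLVM helper, and assumes 32-bit float and unsigned:

#include <cstdio>
#include <cstring>

// Stand-in for LLVM's FloatToBits: reinterpret the f32 bit pattern as i32.
static unsigned floatToBits(float F) {
  unsigned Bits;
  std::memcpy(&Bits, &F, sizeof(Bits));   // well-defined way to type-pun
  return Bits;
}

int main() {
  std::printf("bits(1.0f) = 0x%08X\n", floatToBits(1.0f));   // 0x3F800000
  return 0;
}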
610
611 static SDOperand LowerGlobalAddress(SDOperand Op,
612 SelectionDAG &DAG) {
613 GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
614 int alignment = 2;
615 SDOperand CPAddr = DAG.getConstantPool(GV, MVT::i32, alignment);
616 return DAG.getLoad(MVT::i32, DAG.getEntryNode(), CPAddr, NULL, 0);
617 }
618
619 static SDOperand LowerVASTART(SDOperand Op, SelectionDAG &DAG,
620 unsigned VarArgsFrameIndex) {
621 // vastart just stores the address of the VarArgsFrameIndex slot into the
622 // memory location argument.
623 MVT::ValueType PtrVT = DAG.getTargetLoweringInfo().getPointerTy();
624 SDOperand FR = DAG.getFrameIndex(VarArgsFrameIndex, PtrVT);
625 SrcValueSDNode *SV = cast<SrcValueSDNode>(Op.getOperand(2));
626 return DAG.getStore(Op.getOperand(0), FR, Op.getOperand(1), SV->getValue(),
627 SV->getOffset());
628 }
629
630 static SDOperand LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG,
631 int &VarArgsFrameIndex) {
632 MachineFunction &MF = DAG.getMachineFunction();
633 MachineFrameInfo *MFI = MF.getFrameInfo();
634 SSARegMap *RegMap = MF.getSSARegMap();
635 unsigned NumArgs = Op.Val->getNumValues()-1;
636 SDOperand Root = Op.getOperand(0);
637 bool isVarArg = cast<ConstantSDNode>(Op.getOperand(2))->getValue() != 0;
638 static const unsigned REGS[] = {
639 ARM::R0, ARM::R1, ARM::R2, ARM::R3
640 };
641
642 std::vector<MVT::ValueType> Types(Op.Val->value_begin(), Op.Val->value_end() - 1);
643 ArgumentLayout Layout(Types);
644
645 std::vector<SDOperand> ArgValues;
646 for (unsigned ArgNo = 0; ArgNo < NumArgs; ++ArgNo) {
647 MVT::ValueType VT = Types[ArgNo];
648
649 SDOperand Value;
650 if (Layout.isRegister(ArgNo)) {
651 assert(VT == MVT::i32 || VT == MVT::f32 || VT == MVT::f64);
652 unsigned RegNum = Layout.getRegisterNum(ArgNo);
653 unsigned Reg1 = REGS[RegNum];
654 unsigned VReg1 = RegMap->createVirtualRegister(&ARM::IntRegsRegClass);
655 SDOperand Value1 = DAG.getCopyFromReg(Root, VReg1, MVT::i32);
656 MF.addLiveIn(Reg1, VReg1);
657 if (VT == MVT::f64) {
658 unsigned Reg2 = REGS[RegNum + 1];
659 unsigned VReg2 = RegMap->createVirtualRegister(&ARM::IntRegsRegClass);
660 SDOperand Value2 = DAG.getCopyFromReg(Root, VReg2, MVT::i32);
661 MF.addLiveIn(Reg2, VReg2);
662 Value = DAG.getNode(ARMISD::FMDRR, MVT::f64, Value1, Value2);
663 } else {
664 Value = Value1;
665 if (VT == MVT::f32)
666 Value = DAG.getNode(ISD::BIT_CONVERT, VT, Value);
667 }
668 } else {
669 // If the argument is actually used, emit a load from the right stack
670 // slot.
671 if (!Op.Val->hasNUsesOfValue(0, ArgNo)) {
672 unsigned Offset = Layout.getOffset(ArgNo);
673 unsigned Size = MVT::getSizeInBits(VT)/8;
674 int FI = MFI->CreateFixedObject(Size, Offset);
675 SDOperand FIN = DAG.getFrameIndex(FI, VT);
676 Value = DAG.getLoad(VT, Root, FIN, NULL, 0);
677 } else {
678 Value = DAG.getNode(ISD::UNDEF, VT);
679 }
680 }
681 ArgValues.push_back(Value);
682 }
683
684 unsigned NextRegNum = Layout.lastRegNum() + 1;
685
686 if (isVarArg) {
687 // If this function is vararg, we must store the remaining
688 // registers so that they can be accessed with va_start.
689 VarArgsFrameIndex = MFI->CreateFixedObject(MVT::getSizeInBits(MVT::i32)/8,
690 -16 + NextRegNum * 4);
691
692 SmallVector<SDOperand, 4> MemOps;
693 for (unsigned RegNo = NextRegNum; RegNo < 4; ++RegNo) {
694 int RegOffset = - (4 - RegNo) * 4;
695 int FI = MFI->CreateFixedObject(MVT::getSizeInBits(MVT::i32)/8,
696 RegOffset);
697 SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
698
699 unsigned VReg = RegMap->createVirtualRegister(&ARM::IntRegsRegClass);
700 MF.addLiveIn(REGS[RegNo], VReg);
701
702 SDOperand Val = DAG.getCopyFromReg(Root, VReg, MVT::i32);
703 SDOperand Store = DAG.getStore(Val.getValue(1), Val, FIN, NULL, 0);
704 MemOps.push_back(Store);
705 }
706 Root = DAG.getNode(ISD::TokenFactor, MVT::Other,&MemOps[0],MemOps.size());
707 }
708
709 ArgValues.push_back(Root);
710
711 // Return the new list of results.
712 std::vector<MVT::ValueType> RetVT(Op.Val->value_begin(),
713 Op.Val->value_end());
714 return DAG.getNode(ISD::MERGE_VALUES, RetVT, &ArgValues[0], ArgValues.size());
715 }
716
717 static SDOperand GetCMP(ISD::CondCode CC, SDOperand LHS, SDOperand RHS,
718 SelectionDAG &DAG) {
719 MVT::ValueType vt = LHS.getValueType();
720 assert(vt == MVT::i32 || vt == MVT::f32 || vt == MVT::f64);
721
722 SDOperand Cmp = DAG.getNode(ARMISD::CMP, MVT::Flag, LHS, RHS);
723
724 if (vt != MVT::i32)
725 Cmp = DAG.getNode(ARMISD::FMSTAT, MVT::Flag, Cmp);
726 return Cmp;
727 }
728
729 static std::vector<SDOperand> GetARMCC(ISD::CondCode CC, MVT::ValueType vt,
730 SelectionDAG &DAG) {
731 assert(vt == MVT::i32 || vt == MVT::f32 || vt == MVT::f64);
732 std::vector<unsigned> vcc;
733 if (vt == MVT::i32)
734 vcc = DAGIntCCToARMCC(CC);
735 else
736 vcc = DAGFPCCToARMCC(CC);
737
738 std::vector<unsigned>::iterator it;
739 std::vector<SDOperand> result;
740 for( it = vcc.begin(); it != vcc.end(); it++ )
741 result.push_back(DAG.getConstant(*it,MVT::i32));
742 return result;
743 }
744
745 static bool isUInt8Immediate(uint32_t x) {
746 return x < (1 << 8);
747 }
748
749 static uint32_t rotateL(uint32_t x) {
750 uint32_t bit31 = (x & (1 << 31)) >> 31;
751 uint32_t t = x << 1;
752 return t | bit31;
753 }
754
755 static bool isRotInt8Immediate(uint32_t x) {
756 int r;
757 for (r = 0; r < 16; r++) {
758 if (isUInt8Immediate(x))
759 return true;
760 x = rotateL(rotateL(x));
761 }
762 return false;
763 }
764
765 static void LowerCMP(SDOperand &Cmp, std::vector<SDOperand> &ARMCC,
766 SDOperand LHS, SDOperand RHS, ISD::CondCode CC,
767 SelectionDAG &DAG) {
768 MVT::ValueType vt = LHS.getValueType();
769 if (vt == MVT::i32) {
770 assert(!isa<ConstantSDNode>(LHS));
771 if (ConstantSDNode *SD_C = dyn_cast<ConstantSDNode>(RHS.Val)) {
772 uint32_t C = SD_C->getValue();
773
774 uint32_t NC;
775 switch(CC) {
776 default:
777 NC = C; break;
778 case ISD::SETLT:
779 case ISD::SETULT:
780 case ISD::SETGE:
781 case ISD::SETUGE:
782 NC = C - 1; break;
783 case ISD::SETLE:
784 case ISD::SETULE:
785 case ISD::SETGT:
786 case ISD::SETUGT:
787 NC = C + 1; break;
788 }
789
790 ISD::CondCode NCC;
791 switch(CC) {
792 default:
793 NCC = CC; break;
794 case ISD::SETLT:
795 NCC = ISD::SETLE; break;
796 case ISD::SETULT:
797 NCC = ISD::SETULE; break;
798 case ISD::SETGE:
799 NCC = ISD::SETGT; break;
800 case ISD::SETUGE:
801 NCC = ISD::SETUGT; break;
802 case ISD::SETLE:
803 NCC = ISD::SETLT; break;
804 case ISD::SETULE:
805 NCC = ISD::SETULT; break;
806 case ISD::SETGT:
807 NCC = ISD::SETGE; break;
808 case ISD::SETUGT:
809 NCC = ISD::SETUGE; break;
810 }
811
812 if (!isRotInt8Immediate(C) && isRotInt8Immediate(NC)) {
813 RHS = DAG.getConstant(NC, MVT::i32);
814 CC = NCC;
815 }
816 }
817 }
818 Cmp = GetCMP(CC, LHS, RHS, DAG);
819 ARMCC = GetARMCC(CC, vt, DAG);
820 }
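As an example of the rewrite LowerCMP performs (values chosen for illustration): an i32 compare x < 0x101 cannot encode 0x101 as a rotated 8-bit immediate, but the equivalent x <= 0x100 can, so the constant and condition code are adjusted together. A standalone check mirroring isRotInt8Immediate above, assuming 32-bit unsigned:

#include <cstdio>

// Mirrors rotateL/isUInt8Immediate/isRotInt8Immediate above, standalone.
static bool isRotInt8Imm(unsigned X) {
  for (int R = 0; R < 16; ++R) {
    if (X < 256)
      return true;
    X = (X << 2) | (X >> 30);   // rotate left by two bits
  }
  return false;
}

int main() {
  std::printf("0x101 encodable: %d\n", (int)isRotInt8Imm(0x101));   // 0
  std::printf("0x100 encodable: %d\n", (int)isRotInt8Imm(0x100));   // 1
  return 0;
}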
821
822 static SDOperand LowerSELECT_CC(SDOperand Op, SelectionDAG &DAG) {
823 SDOperand LHS = Op.getOperand(0);
824 SDOperand RHS = Op.getOperand(1);
825 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
826 SDOperand TrueVal = Op.getOperand(2);
827 SDOperand FalseVal = Op.getOperand(3);
828 SDOperand Cmp;
829 std::vector<SDOperand> ARMCC;
830 LowerCMP(Cmp, ARMCC, LHS, RHS, CC, DAG);
831
832 SDOperand Aux = FalseVal;
833 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Flag);
834 std::vector<SDOperand>::iterator it;
835 for (it = ARMCC.begin(); it != ARMCC.end(); ++it){
836 SDOperand Flag = it == ARMCC.begin() ? Cmp : Aux.getValue(1);
837 SDOperand Ops[] = {TrueVal, Aux, *it, Flag};
838 Aux = DAG.getNode(ARMISD::SELECT, VTs, Ops, 4);
839 }
840 return Aux;
841 }
842
843 static SDOperand LowerBR_CC(SDOperand Op, SelectionDAG &DAG) {
844 SDOperand Chain = Op.getOperand(0);
845 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
846 SDOperand LHS = Op.getOperand(2);
847 SDOperand RHS = Op.getOperand(3);
848 SDOperand Dest = Op.getOperand(4);
849 SDOperand Cmp;
850 std::vector<SDOperand> ARMCC;
851 LowerCMP(Cmp, ARMCC, LHS, RHS, CC, DAG);
852
853 SDOperand Aux = Chain;
854 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Flag);
855 std::vector<SDOperand>::iterator it;
856 for (it = ARMCC.begin(); it != ARMCC.end(); it++){
857 SDOperand Flag = it == ARMCC.begin() ? Cmp : Aux.getValue(1);
858 SDOperand Ops[] = {Aux, Dest, *it, Flag};
859 Aux = DAG.getNode(ARMISD::BR, VTs, Ops, 4);
860 }
861 return Aux;
862 }
863
864 static SDOperand LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
865 SDOperand IntVal = Op.getOperand(0);
866 assert(IntVal.getValueType() == MVT::i32);
867 MVT::ValueType vt = Op.getValueType();
868 assert(vt == MVT::f32 ||
869 vt == MVT::f64);
870
871 SDOperand Tmp = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, IntVal);
872 ARMISD::NodeType op = vt == MVT::f32 ? ARMISD::FSITOS : ARMISD::FSITOD;
873 return DAG.getNode(op, vt, Tmp);
874 }
875
876 static SDOperand LowerFP_TO_SINT(SDOperand Op, SelectionDAG &DAG) {
877 assert(Op.getValueType() == MVT::i32);
878 SDOperand FloatVal = Op.getOperand(0);
879 MVT::ValueType vt = FloatVal.getValueType();
880 assert(vt == MVT::f32 || vt == MVT::f64);
881
882 ARMISD::NodeType op = vt == MVT::f32 ? ARMISD::FTOSIS : ARMISD::FTOSID;
883 SDOperand Tmp = DAG.getNode(op, MVT::f32, FloatVal);
884 return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Tmp);
885 }
886
887 static SDOperand LowerUINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
888 SDOperand IntVal = Op.getOperand(0);
889 assert(IntVal.getValueType() == MVT::i32);
890 MVT::ValueType vt = Op.getValueType();
891 assert(vt == MVT::f32 ||
892 vt == MVT::f64);
893
894 SDOperand Tmp = DAG.getNode(ISD::BIT_CONVERT, MVT::f32, IntVal);
895 ARMISD::NodeType op = vt == MVT::f32 ? ARMISD::FUITOS : ARMISD::FUITOD;
896 return DAG.getNode(op, vt, Tmp);
897 }
898
899 static SDOperand LowerFP_TO_UINT(SDOperand Op, SelectionDAG &DAG) {
900 assert(Op.getValueType() == MVT::i32);
901 SDOperand FloatVal = Op.getOperand(0);
902 MVT::ValueType vt = FloatVal.getValueType();
903 assert(vt == MVT::f32 || vt == MVT::f64);
904
905 ARMISD::NodeType op = vt == MVT::f32 ? ARMISD::FTOUIS : ARMISD::FTOUID;
906 SDOperand Tmp = DAG.getNode(op, MVT::f32, FloatVal);
907 return DAG.getNode(ISD::BIT_CONVERT, MVT::i32, Tmp);
908 }
909
910 SDOperand ARMTargetLowering::LowerOperation(SDOperand Op, SelectionDAG &DAG) {
911 switch (Op.getOpcode()) {
912 default:
913 assert(0 && "Should not custom lower this!");
914 abort();
915 case ISD::ConstantPool:
916 return LowerConstantPool(Op, DAG);
917 case ISD::ConstantFP:
918 return LowerConstantFP(Op, DAG);
919 case ISD::GlobalAddress:
920 return LowerGlobalAddress(Op, DAG);
921 case ISD::FP_TO_SINT:
922 return LowerFP_TO_SINT(Op, DAG);
923 case ISD::SINT_TO_FP:
924 return LowerSINT_TO_FP(Op, DAG);
925 case ISD::FP_TO_UINT:
926 return LowerFP_TO_UINT(Op, DAG);
927 case ISD::UINT_TO_FP:
928 return LowerUINT_TO_FP(Op, DAG);
929 case ISD::FORMAL_ARGUMENTS:
930 return LowerFORMAL_ARGUMENTS(Op, DAG, VarArgsFrameIndex);
931 case ISD::CALL:
932 return LowerCALL(Op, DAG);
933 case ISD::RET:
934 return LowerRET(Op, DAG);
935 case ISD::SELECT_CC:
936 return LowerSELECT_CC(Op, DAG);
937 case ISD::BR_CC:
938 return LowerBR_CC(Op, DAG);
939 case ISD::VASTART:
940 return LowerVASTART(Op, DAG, VarArgsFrameIndex);
941 }
942 }
943
944 //===----------------------------------------------------------------------===//
945 // Instruction Selector Implementation
946 //===----------------------------------------------------------------------===//
94732
94833 //===--------------------------------------------------------------------===//
94934 /// ARMDAGToDAGISel - ARM specific code to select ARM machine
95338 class ARMDAGToDAGISel : public SelectionDAGISel {
95439 ARMTargetLowering Lowering;
95540
41 /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
42 /// make the right decision when generating code for different targets.
43 const ARMSubtarget *Subtarget;
44
95645 public:
957 ARMDAGToDAGISel(TargetMachine &TM)
958 : SelectionDAGISel(Lowering), Lowering(TM) {
959 }
960
46 ARMDAGToDAGISel(ARMTargetMachine &TM)
47 : SelectionDAGISel(Lowering), Lowering(TM),
48 Subtarget(&TM.getSubtarget<ARMSubtarget>()) {
49 }
50
51 virtual const char *getPassName() const {
52 return "ARM Instruction Selection";
53 }
54
96155 SDNode *Select(SDOperand Op);
96256 virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);
963 bool SelectAddrMode1(SDOperand Op, SDOperand N, SDOperand &Arg,
964 SDOperand &Shift, SDOperand &ShiftType);
965 bool SelectAddrMode1a(SDOperand Op, SDOperand N, SDOperand &Arg,
966 SDOperand &Shift, SDOperand &ShiftType);
967 bool SelectAddrMode2(SDOperand Op, SDOperand N, SDOperand &Arg,
57 bool SelectAddrMode2(SDOperand Op, SDOperand N, SDOperand &Base,
58 SDOperand &Offset, SDOperand &Opc);
59 bool SelectAddrMode2Offset(SDOperand Op, SDOperand N,
60 SDOperand &Offset, SDOperand &Opc);
61 bool SelectAddrMode3(SDOperand Op, SDOperand N, SDOperand &Base,
62 SDOperand &Offset, SDOperand &Opc);
63 bool SelectAddrMode3Offset(SDOperand Op, SDOperand N,
64 SDOperand &Offset, SDOperand &Opc);
65 bool SelectAddrMode5(SDOperand Op, SDOperand N, SDOperand &Base,
96866 SDOperand &Offset);
969 bool SelectAddrMode5(SDOperand Op, SDOperand N, SDOperand &Arg,
970 SDOperand &Offset);
971
67
68 bool SelectAddrModePC(SDOperand Op, SDOperand N, SDOperand &Offset,
69 SDOperand &Label);
70
71 bool SelectThumbAddrModeRR(SDOperand Op, SDOperand N, SDOperand &Base,
72 SDOperand &Offset);
73 bool SelectThumbAddrModeRI5_1(SDOperand Op, SDOperand N, SDOperand &Base,
74 SDOperand &Offset);
75 bool SelectThumbAddrModeRI5_2(SDOperand Op, SDOperand N, SDOperand &Base,
76 SDOperand &Offset);
77 bool SelectThumbAddrModeRI5_4(SDOperand Op, SDOperand N, SDOperand &Base,
78 SDOperand &Offset);
79 bool SelectThumbAddrModeSP(SDOperand Op, SDOperand N, SDOperand &Base,
80 SDOperand &Offset);
81
82 bool SelectShifterOperandReg(SDOperand Op, SDOperand N, SDOperand &A,
83 SDOperand &B, SDOperand &C);
84
97285 // Include the pieces autogenerated from the target description.
97386 #include "ARMGenDAGISel.inc"
97487 };
88 }
97589
97690 void ARMDAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
97791 DEBUG(BB->dump());
98296 ScheduleAndEmitDAG(DAG);
98397 }
98498
985 static bool isInt12Immediate(SDNode *N, short &Imm) {
986 if (N->getOpcode() != ISD::Constant)
99 bool ARMDAGToDAGISel::SelectAddrMode2(SDOperand Op, SDOperand N,
100 SDOperand &Base, SDOperand &Offset,
101 SDOperand &Opc) {
102 if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::SUB) {
103 Base = N;
104 if (N.getOpcode() == ISD::FrameIndex) {
105 int FI = cast<FrameIndexSDNode>(N)->getIndex();
106 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
107 } else if (N.getOpcode() == ARMISD::Wrapper) {
108 Base = N.getOperand(0);
109 }
110 Offset = CurDAG->getRegister(0, MVT::i32);
111 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, 0,
112 ARM_AM::no_shift),
113 MVT::i32);
114 return true;
115 }
116
117 // Match simple R +/- imm12 operands.
118 if (N.getOpcode() == ISD::ADD)
119 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
120 int RHSC = (int)RHS->getValue();
121 if (RHSC >= 0 && RHSC < 0x1000) { // 12 bits.
122 Base = N.getOperand(0);
123 Offset = CurDAG->getRegister(0, MVT::i32);
124 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::add, RHSC,
125 ARM_AM::no_shift),
126 MVT::i32);
127 return true;
128 } else if (RHSC < 0 && RHSC > -0x1000) {
129 Base = N.getOperand(0);
130 Offset = CurDAG->getRegister(0, MVT::i32);
131 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(ARM_AM::sub, -RHSC,
132 ARM_AM::no_shift),
133 MVT::i32);
134 return true;
135 }
136 }
137
138 // Otherwise this is R +/- [possibly shifted] R
139 ARM_AM::AddrOpc AddSub = N.getOpcode() == ISD::ADD ? ARM_AM::add:ARM_AM::sub;
140 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(1));
141 unsigned ShAmt = 0;
142
143 Base = N.getOperand(0);
144 Offset = N.getOperand(1);
145
146 if (ShOpcVal != ARM_AM::no_shift) {
147 // Check to see if the RHS of the shift is a constant; if not, we can't fold
148 // it.
149 if (ConstantSDNode *Sh =
150 dyn_cast<ConstantSDNode>(N.getOperand(1).getOperand(1))) {
151 ShAmt = Sh->getValue();
152 Offset = N.getOperand(1).getOperand(0);
153 } else {
154 ShOpcVal = ARM_AM::no_shift;
155 }
156 }
157
158 // Try matching (R shl C) + (R).
159 if (N.getOpcode() == ISD::ADD && ShOpcVal == ARM_AM::no_shift) {
160 ShOpcVal = ARM_AM::getShiftOpcForNode(N.getOperand(0));
161 if (ShOpcVal != ARM_AM::no_shift) {
162 // Check to see if the RHS of the shift is a constant; if not, we can't
163 // fold it.
164 if (ConstantSDNode *Sh =
165 dyn_cast<ConstantSDNode>(N.getOperand(0).getOperand(1))) {
166 ShAmt = Sh->getValue();
167 Offset = N.getOperand(0).getOperand(0);
168 Base = N.getOperand(1);
169 } else {
170 ShOpcVal = ARM_AM::no_shift;
171 }
172 }
173 }
174
175 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
176 MVT::i32);
177 return true;
178 }
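SelectAddrMode2 therefore folds a constant right-hand side only when it fits the 12-bit offset field, choosing the hardware add or sub form by sign; anything else falls back to a (possibly shifted) register offset. A standalone sketch of that classification, written here purely for illustration and not part of the patch:

#include <cstdio>

// Illustrative only: mirrors the range checks in SelectAddrMode2 above.
// Returns true if Offset fits the AM2 [reg, #+/-imm12] form.
static bool classifyAM2Imm(int Offset, bool &IsSub, unsigned &Imm12) {
  if (Offset >= 0 && Offset < 0x1000) {          // 12 bits, add form
    IsSub = false; Imm12 = (unsigned)Offset;  return true;
  }
  if (Offset < 0 && Offset > -0x1000) {          // 12 bits, sub form
    IsSub = true;  Imm12 = (unsigned)-Offset; return true;
  }
  return false;                                  // needs a register offset
}

int main() {
  const int Tests[] = { 0, 4095, 4096, -1, -4095, -4096 };
  for (int T : Tests) {
    bool IsSub; unsigned Imm;
    if (classifyAM2Imm(T, IsSub, Imm))
      std::printf("%6d -> [r, #%c%u]\n", T, IsSub ? '-' : '+', Imm);
    else
      std::printf("%6d -> register (possibly shifted) offset\n", T);
  }
  return 0;
}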
179
180 bool ARMDAGToDAGISel::SelectAddrMode2Offset(SDOperand Op, SDOperand N,
181 SDOperand &Offset, SDOperand &Opc) {
182 unsigned Opcode = Op.getOpcode();
183 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
184 ? cast<LoadSDNode>(Op)->getAddressingMode()
185 : cast<StoreSDNode>(Op)->getAddressingMode();
186 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
187 ? ARM_AM::add : ARM_AM::sub;
188 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
189 int Val = (int)C->getValue();
190 if (Val >= 0 && Val < 0x1000) { // 12 bits.
191 Offset = CurDAG->getRegister(0, MVT::i32);
192 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, Val,
193 ARM_AM::no_shift),
194 MVT::i32);
195 return true;
196 }
197 }
198
199 Offset = N;
200 ARM_AM::ShiftOpc ShOpcVal = ARM_AM::getShiftOpcForNode(N);
201 unsigned ShAmt = 0;
202 if (ShOpcVal != ARM_AM::no_shift) {
203 // Check to see if the RHS of the shift is a constant; if not, we can't fold
204 // it.
205 if (ConstantSDNode *Sh = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
206 ShAmt = Sh->getValue();
207 Offset = N.getOperand(0);
208 } else {
209 ShOpcVal = ARM_AM::no_shift;
210 }
211 }
212
213 Opc = CurDAG->getTargetConstant(ARM_AM::getAM2Opc(AddSub, ShAmt, ShOpcVal),
214 MVT::i32);
215 return true;
216 }
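Both addr-mode-2 selectors hand the generated matcher a single target constant built by ARM_AM::getAM2Opc, which encodes the add/sub direction, the 12-bit immediate (or shift amount, for register offsets), and the shift kind. The exact bit layout lives in the patch's addressing-mode header; the sketch below is a hypothetical stand-in that packs the same three fields, with the field positions chosen only for illustration.

#include <cassert>
#include <cstdio>

// Hypothetical packing, for illustration only; field positions are an
// assumption, not a copy of ARM_AM::getAM2Opc.
enum AddrOpc  { add, sub };
enum ShiftOpc { no_shift, asr, lsl, lsr, ror, rrx };

static unsigned packAM2Opc(AddrOpc Dir, unsigned Imm12, ShiftOpc SO) {
  assert(Imm12 < (1u << 12) && "offset/shift amount must fit in 12 bits");
  return Imm12 | ((Dir == sub ? 1u : 0u) << 12) | ((unsigned)SO << 13);
}

int main() {
  // [r, #-8]        : sub direction, immediate 8, no shift
  std::printf("0x%05x\n", packAM2Opc(sub, 8, no_shift));
  // [r, +r, lsl #2] : add direction, shift amount 2, LSL
  std::printf("0x%05x\n", packAM2Opc(add, 2, lsl));
  return 0;
}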
217
218
219 bool ARMDAGToDAGISel::SelectAddrMode3(SDOperand Op, SDOperand N,
220 SDOperand &Base, SDOperand &Offset,
221 SDOperand &Opc) {
222 if (N.getOpcode() == ISD::SUB) {
223 // X - C is canonicalized to X + -C, no need to handle it here.
224 Base = N.getOperand(0);
225 Offset = N.getOperand(1);
226 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, 0),MVT::i32);
227 return true;
228 }
229
230 if (N.getOpcode() != ISD::ADD) {
231 Base = N;
232 if (N.getOpcode() == ISD::FrameIndex) {
233 int FI = cast<FrameIndexSDNode>(N)->getIndex();
234 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
235 }
236 Offset = CurDAG->getRegister(0, MVT::i32);
237 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0),MVT::i32);
238 return true;
239 }
240
241 // If the RHS is +/- imm8, fold into addr mode.
242 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
243 int RHSC = (int)RHS->getValue();
244 if (RHSC >= 0 && RHSC < 256) {
245 Base = N.getOperand(0);
246 Offset = CurDAG->getRegister(0, MVT::i32);
247 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, RHSC),
248 MVT::i32);
249 return true;
250 } else if (RHSC < 0 && RHSC > -256) { // note -256 itself isn't allowed.
251 Base = N.getOperand(0);
252 Offset = CurDAG->getRegister(0, MVT::i32);
253 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::sub, -RHSC),
254 MVT::i32);
255 return true;
256 }
257 }
258
259 Base = N.getOperand(0);
260 Offset = N.getOperand(1);
261 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(ARM_AM::add, 0), MVT::i32);
262 return true;
263 }
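Addressing mode 3, used by the halfword and signed-byte memory operations this patch adds, carries only an 8-bit offset, so the fold window shrinks to (-256, 256), with -256 itself excluded as the comment above notes. A short illustrative counterpart to the sketch shown after SelectAddrMode2 (again, not part of the patch):

#include <cstdio>

// Illustrative only: the +/-imm8 window tested by SelectAddrMode3 above.
static bool fitsAM3Offset(int Offset, bool &IsSub, unsigned &Imm8) {
  if (Offset >= 0 && Offset < 256)  { IsSub = false; Imm8 = (unsigned)Offset;  return true; }
  if (Offset < 0  && Offset > -256) { IsSub = true;  Imm8 = (unsigned)-Offset; return true; }
  return false;  // fall back to a plain register offset
}

int main() {
  bool IsSub; unsigned Imm;
  std::printf("255:  %d\n", fitsAM3Offset(255, IsSub, Imm));   // 1 (add form)
  std::printf("-255: %d\n", fitsAM3Offset(-255, IsSub, Imm));  // 1 (sub form)
  std::printf("-256: %d\n", fitsAM3Offset(-256, IsSub, Imm));  // 0 (excluded)
  return 0;
}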
264
265 bool ARMDAGToDAGISel::SelectAddrMode3Offset(SDOperand Op, SDOperand N,
266 SDOperand &Offset, SDOperand &Opc) {
267 unsigned Opcode = Op.getOpcode();
268 ISD::MemIndexedMode AM = (Opcode == ISD::LOAD)
269 ? cast<LoadSDNode>(Op)->getAddressingMode()
270 : cast<StoreSDNode>(Op)->getAddressingMode();
271 ARM_AM::AddrOpc AddSub = (AM == ISD::PRE_INC || AM == ISD::POST_INC)
272 ? ARM_AM::add : ARM_AM::sub;
273 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
274 int Val = (int)C->getValue();
275 if (Val >= 0 && Val < 256) {
276 Offset = CurDAG->getRegister(0, MVT::i32);
277 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, Val), MVT::i32);
278 return true;
279 }
280 }
281
282 Offset = N;
283 Opc = CurDAG->getTargetConstant(ARM_AM::getAM3Opc(AddSub, 0), MVT::i32);
284 return true;
285 }
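The two *Offset selectors serve pre- and post-indexed loads and stores: they look at the node's MemIndexedMode only to pick the add or sub direction, then try to fold the increment as an immediate. A toy sketch of that direction test, with enum names chosen to mirror ISD::MemIndexedMode for readability (this is not LLVM code):

#include <cstdio>

// Names mirror ISD::MemIndexedMode; illustrative only.
enum MemIndexedMode { UNINDEXED, PRE_INC, PRE_DEC, POST_INC, POST_DEC };

// PRE_INC/POST_INC fold as the add form, the decrement modes as sub,
// matching the ternary used in both *Offset selectors above.
static bool isAddIndexing(MemIndexedMode AM) {
  return AM == PRE_INC || AM == POST_INC;
}

int main() {
  // e.g. "ldr r0, [r1], #4" is a post-increment: add direction, offset 4.
  std::printf("POST_INC -> %s\n", isAddIndexing(POST_INC) ? "add" : "sub");
  std::printf("PRE_DEC  -> %s\n", isAddIndexing(PRE_DEC)  ? "add" : "sub");
  return 0;
}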
286
287
288 bool ARMDAGToDAGISel::SelectAddrMode5(SDOperand Op, SDOperand N,
289 SDOperand &Base, SDOperand &Offset) {
290 if (N.getOpcode() != ISD::ADD) {
291 Base = N;
292 if (N.getOpcode() == ISD::FrameIndex) {
293 int FI = cast<FrameIndexSDNode>(N)->getIndex();
294 Base = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());