llvm.org GIT mirror llvm / f865ea8
Revert r79127. It was causing compilation errors. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@79135 91177308-0d34-0410-b5e6-96231b3b80d8 Bill Wendling 10 years ago
7 changed file(s) with 33 addition(s) and 76 deletion(s). Raw diff Collapse all Expand all
636636 unsigned getMaxStoresPerMemmove() const { return maxStoresPerMemmove; }
637637
638638 /// This function returns true if the target allows unaligned memory accesses.
639 /// of the specified type. This is used, for example, in situations where an
640 /// array copy/move/set is converted to a sequence of store operations. Its
641 /// use helps to ensure that such replacements don't generate code that causes
642 /// an alignment error (trap) on the target machine.
639 /// This is used, for example, in situations where an array copy/move/set is
640 /// converted to a sequence of store operations. Its use helps to ensure that
641 /// such replacements don't generate code that causes an alignment error
642 /// (trap) on the target machine.
643643 /// @brief Determine if the target supports unaligned memory accesses.
644 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
645 return false;
644 bool allowsUnalignedMemoryAccesses() const {
645 return allowUnalignedMemoryAccesses;
646646 }
647647
648648 /// This function returns true if the target would benefit from code placement
17561756 /// @brief Specify maximum bytes of store instructions per memmove call.
17571757 unsigned maxStoresPerMemmove;
17581758
1759 /// This field specifies whether the target machine permits unaligned memory
1760 /// accesses. This is used, for example, to determine the size of store
1761 /// operations when copying small arrays and other similar tasks.
1762 /// @brief Indicate whether the target permits unaligned memory accesses.
1763 bool allowUnalignedMemoryAccesses;
1764
17591765 /// This field specifies whether the target can benefit from code placement
17601766 /// optimization.
17611767 bool benefitFromCodePlacementOpt;
31203120 return SDValue();
31213121 }
31223122
3123 bool ARMTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
3124 if (!Subtarget->hasV6Ops())
3125 // Pre-v6 does not support unaligned mem access.
3126 return false;
3127 else if (!Subtarget->hasV6Ops()) {
3128 // v6 may or may not support unaligned mem access.
3129 if (!Subtarget->isTargetDarwin())
3130 return false;
3131 }
3132
3133 switch (VT.getSimpleVT().SimpleTy) {
3134 default:
3135 return false;
3136 case MVT::i8:
3137 case MVT::i16:
3138 case MVT::i32:
3139 return true;
3140 // FIXME: VLD1 etc with standard alignment is legal.
3141 }
3142 }
3143
31443123 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
31453124 if (V < 0)
31463125 return false;
165165 virtual MachineBasicBlock *EmitInstrWithCustomInserter(MachineInstr *MI,
166166 MachineBasicBlock *MBB) const;
167167
168 /// allowsUnalignedMemoryAccesses - Returns true if the target allows
169 /// unaligned memory accesses of the specified type.
170 /// FIXME: Add getOptimalMemOpType to implement memcpy with NEON?
171 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const;
172
173168 /// isLegalAddressingMode - Return true if the addressing mode represented
174169 /// by AM is legal for this target, for a load/store of the specified type.
175170 virtual bool isLegalAddressingMode(const AddrMode &AM, const Type *Ty)const;
197192 APInt &KnownOne,
198193 const SelectionDAG &DAG,
199194 unsigned Depth) const;
200
201
202195 ConstraintType getConstraintType(const std::string &Constraint) const;
203196 std::pair
204197 getRegForInlineAsmConstraint(const std::string &Constraint,
956956 maxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
957957 maxStoresPerMemcpy = 16; // For @llvm.memcpy -> sequence of stores
958958 maxStoresPerMemmove = 3; // For @llvm.memmove -> sequence of stores
959 allowUnalignedMemoryAccesses = true; // x86 supports it!
959960 setPrefLoopAlignment(16);
960961 benefitFromCodePlacementOpt = true;
961962 }
388388 /// and store operations as a result of memset, memcpy, and memmove
389389 /// lowering. It returns EVT::iAny if SelectionDAG should be responsible for
390390 /// determining it.
391 virtual EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
392 bool isSrcConst, bool isSrcStr,
393 SelectionDAG &DAG) const;
394
395 /// allowsUnalignedMemoryAccesses - Returns true if the target allows
396 /// unaligned memory accesses of the specified type.
397 virtual bool allowsUnalignedMemoryAccesses(EVT VT) const {
398 return true;
399 }
391 virtual
392 EVT getOptimalMemOpType(uint64_t Size, unsigned Align,
393 bool isSrcConst, bool isSrcStr,
394 SelectionDAG &DAG) const;
400395
401396 /// LowerOperation - Provide custom lowering hooks for some operations.
402397 ///
366366 LowerLOAD(SDValue Op, SelectionDAG &DAG)
367367 {
368368 LoadSDNode *LD = cast(Op);
369 assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
370 "Unexpected extension type");
369 assert(LD->getExtensionType() == ISD::NON_EXTLOAD && "Unexpected extension type");
371370 assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
372 if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
371 if (allowsUnalignedMemoryAccesses()) {
373372 return SDValue();
374373 }
375374 unsigned ABIAlignment = getTargetData()->
465464 StoreSDNode *ST = cast(Op);
466465 assert(!ST->isTruncatingStore() && "Unexpected store type");
467466 assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
468 if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
467 if (allowsUnalignedMemoryAccesses()) {
469468 return SDValue();
470469 }
471470 unsigned ABIAlignment = getTargetData()->
10481047 case ISD::STORE: {
10491048 // Replace unaligned store of unaligned load with memmove.
10501049 StoreSDNode *ST = cast(N);
1051 if (!DCI.isBeforeLegalize() ||
1052 allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
1050 if (!DCI.isBeforeLegalize() || allowsUnalignedMemoryAccesses() ||
10531051 ST->isVolatile() || ST->isIndexed()) {
10541052 break;
10551053 }
None ; RUN: llvm-as < %s | llc -march=arm | FileCheck %s -check-prefix=GENERIC
1 ; RUN: llvm-as < %s | llc -mtriple=armv6-apple-darwin | FileCheck %s -check-prefix=DARWIN_V6
2 ; RUN: llvm-as < %s | llc -march=arm -mattr=+v7a | FileCheck %s -check-prefix=V7
0 ; RUN: llvm-as < %s | \
1 ; RUN: llc -march=arm -o %t -f
2 ; RUN: grep ldrb %t | count 4
3 ; RUN: grep strb %t | count 4
34
4 ; rdar://7113725
55
6 define arm_apcscc void @t(i8* nocapture %a, i8* nocapture %b) nounwind {
6 %struct.p = type <{ i8, i32 }>
7 @t = global %struct.p <{ i8 1, i32 10 }> ; <%struct.p*> [#uses=1]
8 @u = weak global %struct.p zeroinitializer ; <%struct.p*> [#uses=1]
9
10 define i32 @main() {
711 entry:
8 ; GENERIC: t:
9 ; GENERIC: ldrb r2
10 ; GENERIC: ldrb r3
11 ; GENERIC: ldrb r12
12 ; GENERIC: ldrb r1
13 ; GENERIC: strb r1
14 ; GENERIC: strb r12
15 ; GENERIC: strb r3
16 ; GENERIC: strb r2
17
18 ; DARWIN_V6: t:
19 ; DARWIN_V6: ldr r1
20 ; DARWIN_V6: str r1
21
22 ; V7: t:
23 ; V7: ldr r1
24 ; V7: str r1
25 %__src1.i = bitcast i8* %b to i32* ; [#uses=1]
26 %__dest2.i = bitcast i8* %a to i32* ; [#uses=1]
27 %tmp.i = load i32* %__src1.i, align 1 ; [#uses=1]
28 store i32 %tmp.i, i32* %__dest2.i, align 1
29 ret void
12 %tmp3 = load i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1 ; [#uses=2]
13 store i32 %tmp3, i32* getelementptr (%struct.p* @u, i32 0, i32 1), align 1
14 ret i32 %tmp3
3015 }