llvm.org GIT mirror llvm / f3c13c8
Expand unaligned loads/stores when the target doesn't support them. (PR1548) git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@40682 91177308-0d34-0410-b5e6-96231b3b80d8 Lauro Ramos Venancio 12 years ago
2 changed file(s) with 156 addition(s) and 3 deletion(s). Raw diff Collapse all Expand all
549549 return Result;
550550 }
551551
552 /// ExpandUnalignedStore - Expands an unaligned store to 2 half-size stores.
553 static
554 SDOperand ExpandUnalignedStore(StoreSDNode *ST, SelectionDAG &DAG,
555 TargetLowering &TLI) {
556 assert(MVT::isInteger(ST->getStoredVT()) &&
557 "Non integer unaligned stores not implemented.");
558 int SVOffset = ST->getSrcValueOffset();
559 SDOperand Chain = ST->getChain();
560 SDOperand Ptr = ST->getBasePtr();
561 SDOperand Val = ST->getValue();
562 MVT::ValueType VT = Val.getValueType();
563 // Get the half-size VT
564 MVT::ValueType NewStoredVT = ST->getStoredVT() - 1;
565 int NumBits = MVT::getSizeInBits(NewStoredVT);
566 int Alignment = ST->getAlignment();
567 int IncrementSize = NumBits / 8;
568
569 // Divide the stored value in two parts.
570 SDOperand ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
571 SDOperand Lo = Val;
572 SDOperand Hi = DAG.getNode(ISD::SRL, VT, Val, ShiftAmount);
573
574 // Store the two parts
575 SDOperand Store1, Store2;
576 Store1 = DAG.getTruncStore(Chain, TLI.isLittleEndian()?Lo:Hi, Ptr,
577 ST->getSrcValue(), SVOffset, NewStoredVT,
578 ST->isVolatile(), Alignment);
579 Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
580 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
581 Store2 = DAG.getTruncStore(Chain, TLI.isLittleEndian()?Hi:Lo, Ptr,
582 ST->getSrcValue(), SVOffset + IncrementSize,
583 NewStoredVT, ST->isVolatile(), Alignment);
584
585 return DAG.getNode(ISD::TokenFactor, MVT::Other, Store1, Store2);
586 }
587
588 /// ExpandUnalignedLoad - Expands an unaligned load to 2 half-size loads.
589 static
590 SDOperand ExpandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG,
591 TargetLowering &TLI) {
592 assert(MVT::isInteger(LD->getLoadedVT()) &&
593 "Non integer unaligned loads not implemented.");
594 int SVOffset = LD->getSrcValueOffset();
595 SDOperand Chain = LD->getChain();
596 SDOperand Ptr = LD->getBasePtr();
597 MVT::ValueType VT = LD->getValueType(0);
598 MVT::ValueType NewLoadedVT = LD->getLoadedVT() - 1;
599 int NumBits = MVT::getSizeInBits(NewLoadedVT);
600 int Alignment = LD->getAlignment();
601 int IncrementSize = NumBits / 8;
602 ISD::LoadExtType HiExtType = LD->getExtensionType();
603
604 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD.
605 if (HiExtType == ISD::NON_EXTLOAD)
606 HiExtType = ISD::ZEXTLOAD;
607
608 // Load the value in two parts
609 SDOperand Lo, Hi;
610 if (TLI.isLittleEndian()) {
611 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, Chain, Ptr, LD->getSrcValue(),
612 SVOffset, NewLoadedVT, LD->isVolatile(), Alignment);
613 Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
614 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
615 Hi = DAG.getExtLoad(HiExtType, VT, Chain, Ptr, LD->getSrcValue(),
616 SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
617 Alignment);
618 } else {
619 Hi = DAG.getExtLoad(HiExtType, VT, Chain, Ptr, LD->getSrcValue(), SVOffset,
620 NewLoadedVT,LD->isVolatile(), Alignment);
621 Ptr = DAG.getNode(ISD::ADD, Ptr.getValueType(), Ptr,
622 DAG.getConstant(IncrementSize, TLI.getPointerTy()));
623 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, VT, Chain, Ptr, LD->getSrcValue(),
624 SVOffset + IncrementSize, NewLoadedVT, LD->isVolatile(),
625 Alignment);
626 }
627
628 // aggregate the two parts
629 SDOperand ShiftAmount = DAG.getConstant(NumBits, TLI.getShiftAmountTy());
630 SDOperand Result = DAG.getNode(ISD::SHL, VT, Hi, ShiftAmount);
631 Result = DAG.getNode(ISD::OR, VT, Result, Lo);
632
633 SDOperand TF = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
634 Hi.getValue(1));
635
636 SDOperand Ops[] = { Result, TF };
637 return DAG.getNode(ISD::MERGE_VALUES, DAG.getVTList(VT, MVT::Other), Ops, 2);
638 }
552639
553640 /// LegalizeOp - We know that the specified value has a legal type, and
554641 /// that its operands are legal. Now ensure that the operation itself
15061593
15071594 switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
15081595 default: assert(0 && "This action is not supported yet!");
1509 case TargetLowering::Legal: break;
1596 case TargetLowering::Legal:
1597 // If this is an unaligned load and the target doesn't support it,
1598 // expand it.
1599 if (!TLI.allowsUnalignedMemoryAccesses()) {
1600 unsigned ABIAlignment = TLI.getTargetData()->
1601 getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
1602 if (LD->getAlignment() < ABIAlignment){
1603 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
1604 TLI);
1605 Tmp3 = Result.getOperand(0);
1606 Tmp4 = Result.getOperand(1);
1607 LegalizeOp(Tmp3);
1608 LegalizeOp(Tmp4);
1609 }
1610 }
1611 break;
15101612 case TargetLowering::Custom:
15111613 Tmp1 = TLI.LowerOperation(Tmp3, DAG);
15121614 if (Tmp1.Val) {
15591661 if (Tmp3.Val) {
15601662 Tmp1 = LegalizeOp(Tmp3);
15611663 Tmp2 = LegalizeOp(Tmp3.getValue(1));
1664 }
1665 } else {
1666 // If this is an unaligned load and the target doesn't support it,
1667 // expand it.
1668 if (!TLI.allowsUnalignedMemoryAccesses()) {
1669 unsigned ABIAlignment = TLI.getTargetData()->
1670 getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
1671 if (LD->getAlignment() < ABIAlignment){
1672 Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
1673 TLI);
1674 Tmp1 = Result.getOperand(0);
1675 Tmp2 = Result.getOperand(1);
1676 LegalizeOp(Tmp1);
1677 LegalizeOp(Tmp2);
1678 }
15621679 }
15631680 }
15641681 break;
18091926 MVT::ValueType VT = Tmp3.getValueType();
18101927 switch (TLI.getOperationAction(ISD::STORE, VT)) {
18111928 default: assert(0 && "This action is not supported yet!");
1812 case TargetLowering::Legal: break;
1929 case TargetLowering::Legal:
1930 // If this is an unaligned store and the target doesn't support it,
1931 // expand it.
1932 if (!TLI.allowsUnalignedMemoryAccesses()) {
1933 unsigned ABIAlignment = TLI.getTargetData()->
1934 getABITypeAlignment(MVT::getTypeForValueType(ST->getStoredVT()));
1935 if (ST->getAlignment() < ABIAlignment)
1936 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
1937 TLI);
1938 }
1939 break;
18131940 case TargetLowering::Custom:
18141941 Tmp1 = TLI.LowerOperation(Result, DAG);
18151942 if (Tmp1.Val) Result = Tmp1;
19222049 MVT::ValueType StVT = cast<StoreSDNode>(Result.Val)->getStoredVT();
19232050 switch (TLI.getStoreXAction(StVT)) {
19242051 default: assert(0 && "This action is not supported yet!");
1925 case TargetLowering::Legal: break;
2052 case TargetLowering::Legal:
2053 // If this is an unaligned store and the target doesn't support it,
2054 // expand it.
2055 if (!TLI.allowsUnalignedMemoryAccesses()) {
2056 unsigned ABIAlignment = TLI.getTargetData()->
2057 getABITypeAlignment(MVT::getTypeForValueType(ST->getStoredVT()));
2058 if (ST->getAlignment() < ABIAlignment)
2059 Result = ExpandUnalignedStore(cast<StoreSDNode>(Result.Val), DAG,
2060 TLI);
2061 }
2062 break;
19262063 case TargetLowering::Custom:
19272064 Tmp1 = TLI.LowerOperation(Result, DAG);
19282065 if (Tmp1.Val) Result = Tmp1;
0 ; RUN: llvm-as < %s | \
1 ; RUN: llc -march=arm -o %t -f
2 ; RUN: grep -c ldrb %t | grep 4
3 ; RUN: grep -c strb %t | grep 4
4
5
6 %struct.p = type <{ i8, i32 }>
7 @t = global %struct.p <{ i8 1, i32 10 }> ; <%struct.p*> [#uses=1]
8 @u = weak global %struct.p zeroinitializer ; <%struct.p*> [#uses=1]
9
; Copy the align-1 i32 field of @t into the matching field of @u and return
; the loaded value.  Both accesses are misaligned (packed struct), so on
; targets without unaligned memory support they must be legalized into
; byte-sized loads/stores.
define i32 @main() {
entry:
        %field = load i32* getelementptr (%struct.p* @t, i32 0, i32 1), align 1         ; <i32> [#uses=2]
        store i32 %field, i32* getelementptr (%struct.p* @u, i32 0, i32 1), align 1
        ret i32 %field
}