llvm.org GIT mirror llvm / 876dd97
ARM: recommit r237590: allow jump tables to be placed as constant islands. The original version didn't properly account for the base register being modified before the final jump, so caused miscompilations in Chromium and LLVM. I've fixed this and tested with an LLVM self-host (I don't have the means to build & test Chromium). The general idea remains the same: in pathological cases jump tables can be too far away from the instructions referencing them (like other constants) so they need to be movable. Should fix PR23627. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@238680 91177308-0d34-0410-b5e6-96231b3b80d8 Tim Northover 4 years ago
12 changed file(s) with 535 addition(s) and 253 deletion(s). Raw diff Collapse all Expand all
921921 OutStreamer->EmitValue(Expr, Size);
922922 }
923923
924 void ARMAsmPrinter::EmitJumpTable(const MachineInstr *MI) {
925 unsigned Opcode = MI->getOpcode();
926 int OpNum = 1;
927 if (Opcode == ARM::BR_JTadd)
928 OpNum = 2;
929 else if (Opcode == ARM::BR_JTm)
930 OpNum = 3;
931
932 const MachineOperand &MO1 = MI->getOperand(OpNum);
924 void ARMAsmPrinter::EmitJumpTableAddrs(const MachineInstr *MI) {
925 const MachineOperand &MO1 = MI->getOperand(1);
933926 unsigned JTI = MO1.getIndex();
927
928 // Make sure the Thumb jump table is 4-byte aligned. This will be a nop for
929 // ARM mode tables.
930 EmitAlignment(2);
934931
935932 // Emit a label for the jump table.
936933 MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
971968 OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
972969 }
973970
974 void ARMAsmPrinter::EmitJump2Table(const MachineInstr *MI) {
975 unsigned Opcode = MI->getOpcode();
976 int OpNum = (Opcode == ARM::t2BR_JT) ? 2 : 1;
977 const MachineOperand &MO1 = MI->getOperand(OpNum);
971 void ARMAsmPrinter::EmitJumpTableInsts(const MachineInstr *MI) {
972 const MachineOperand &MO1 = MI->getOperand(1);
978973 unsigned JTI = MO1.getIndex();
979974
980975 MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
984979 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
985980 const std::vector &JT = MJTI->getJumpTables();
986981 const std::vector &JTBBs = JT[JTI].MBBs;
987 unsigned OffsetWidth = 4;
988 if (MI->getOpcode() == ARM::t2TBB_JT) {
989 OffsetWidth = 1;
990 // Mark the jump table as data-in-code.
991 OutStreamer->EmitDataRegion(MCDR_DataRegionJT8);
992 } else if (MI->getOpcode() == ARM::t2TBH_JT) {
993 OffsetWidth = 2;
994 // Mark the jump table as data-in-code.
995 OutStreamer->EmitDataRegion(MCDR_DataRegionJT16);
996 }
997982
998983 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
999984 MachineBasicBlock *MBB = JTBBs[i];
1000985 const MCExpr *MBBSymbolExpr = MCSymbolRefExpr::create(MBB->getSymbol(),
1001986 OutContext);
1002987 // If this isn't a TBB or TBH, the entries are direct branch instructions.
1003 if (OffsetWidth == 4) {
1004 EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2B)
988 EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2B)
1005989 .addExpr(MBBSymbolExpr)
1006990 .addImm(ARMCC::AL)
1007991 .addReg(0));
1008 continue;
1009 }
992 }
993 }
994
995 void ARMAsmPrinter::EmitJumpTableTBInst(const MachineInstr *MI,
996 unsigned OffsetWidth) {
997 assert((OffsetWidth == 1 || OffsetWidth == 2) && "invalid tbb/tbh width");
998 const MachineOperand &MO1 = MI->getOperand(1);
999 unsigned JTI = MO1.getIndex();
1000
1001 MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
1002 OutStreamer->EmitLabel(JTISymbol);
1003
1004 // Emit each entry of the table.
1005 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1006 const std::vector &JT = MJTI->getJumpTables();
1007 const std::vector &JTBBs = JT[JTI].MBBs;
1008
1009 // Mark the jump table as data-in-code.
1010 OutStreamer->EmitDataRegion(OffsetWidth == 1 ? MCDR_DataRegionJT8
1011 : MCDR_DataRegionJT16);
1012
1013 for (auto MBB : JTBBs) {
1014 const MCExpr *MBBSymbolExpr = MCSymbolRefExpr::create(MBB->getSymbol(),
1015 OutContext);
10101016 // Otherwise it's an offset from the dispatch instruction. Construct an
10111017 // MCExpr for the entry. We want a value of the form:
1012 // (BasicBlockAddr - TableBeginAddr) / 2
1018 // (BasicBlockAddr - TBBInstAddr + 4) / 2
10131019 //
10141020 // For example, a TBB table with entries jumping to basic blocks BB0 and BB1
10151021 // would look like:
10161022 // LJTI_0_0:
1017 // .byte (LBB0 - LJTI_0_0) / 2
1018 // .byte (LBB1 - LJTI_0_0) / 2
1019 const MCExpr *Expr =
1020 MCBinaryExpr::createSub(MBBSymbolExpr,
1021 MCSymbolRefExpr::create(JTISymbol, OutContext),
1022 OutContext);
1023 // .byte (LBB0 - (LCPI0_0 + 4)) / 2
1024 // .byte (LBB1 - (LCPI0_0 + 4)) / 2
1025 // where LCPI0_0 is a label defined just before the TBB instruction using
1026 // this table.
1027 MCSymbol *TBInstPC = GetCPISymbol(MI->getOperand(0).getImm());
1028 const MCExpr *Expr = MCBinaryExpr::createAdd(
1029 MCSymbolRefExpr::create(TBInstPC, OutContext),
1030 MCConstantExpr::create(4, OutContext), OutContext);
1031 Expr = MCBinaryExpr::createSub(MBBSymbolExpr, Expr, OutContext);
10231032 Expr = MCBinaryExpr::createDiv(Expr, MCConstantExpr::create(2, OutContext),
10241033 OutContext);
10251034 OutStreamer->EmitValue(Expr, OffsetWidth);
10271036 // Mark the end of jump table data-in-code region. 32-bit offsets use
10281037 // actual branch instructions here, so we don't mark those as a data-region
10291038 // at all.
1030 if (OffsetWidth != 4)
1031 OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
1039 OutStreamer->EmitDataRegion(MCDR_DataRegionEnd);
1040
1041 // Make sure the next instruction is 2-byte aligned.
1042 EmitAlignment(1);
10321043 }
10331044
10341045 void ARMAsmPrinter::EmitUnwindingInstruction(const MachineInstr *MI) {
15001511 EmitGlobalConstant(MCPE.Val.ConstVal);
15011512 return;
15021513 }
1514 case ARM::JUMPTABLE_ADDRS:
1515 EmitJumpTableAddrs(MI);
1516 return;
1517 case ARM::JUMPTABLE_INSTS:
1518 EmitJumpTableInsts(MI);
1519 return;
1520 case ARM::JUMPTABLE_TBB:
1521 case ARM::JUMPTABLE_TBH:
1522 EmitJumpTableTBInst(MI, MI->getOpcode() == ARM::JUMPTABLE_TBB ? 1 : 2);
1523 return;
15031524 case ARM::t2BR_JT: {
15041525 // Lower and emit the instruction itself, then the jump table following it.
15051526 EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::tMOVr)
15081529 // Add predicate operands.
15091530 .addImm(ARMCC::AL)
15101531 .addReg(0));
1511
1512 // Output the data for the jump table itself
1513 EmitJump2Table(MI);
1514 return;
1515 }
1516 case ARM::t2TBB_JT: {
1517 // Lower and emit the instruction itself, then the jump table following it.
1518 EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2TBB)
1519 .addReg(ARM::PC)
1520 .addReg(MI->getOperand(0).getReg())
1521 // Add predicate operands.
1522 .addImm(ARMCC::AL)
1523 .addReg(0));
1524
1525 // Output the data for the jump table itself
1526 EmitJump2Table(MI);
1527 // Make sure the next instruction is 2-byte aligned.
1528 EmitAlignment(1);
1529 return;
1530 }
1532 return;
1533 }
1534 case ARM::t2TBB_JT:
15311535 case ARM::t2TBH_JT: {
1532 // Lower and emit the instruction itself, then the jump table following it.
1533 EmitToStreamer(*OutStreamer, MCInstBuilder(ARM::t2TBH)
1534 .addReg(ARM::PC)
1535 .addReg(MI->getOperand(0).getReg())
1536 // Add predicate operands.
1537 .addImm(ARMCC::AL)
1538 .addReg(0));
1539
1540 // Output the data for the jump table itself
1541 EmitJump2Table(MI);
1536 unsigned Opc = MI->getOpcode() == ARM::t2TBB_JT ? ARM::t2TBB : ARM::t2TBH;
1537 // Lower and emit the PC label, then the instruction itself.
1538 OutStreamer->EmitLabel(GetCPISymbol(MI->getOperand(3).getImm()));
1539 EmitToStreamer(*OutStreamer, MCInstBuilder(Opc)
1540 .addReg(MI->getOperand(0).getReg())
1541 .addReg(MI->getOperand(1).getReg())
1542 // Add predicate operands.
1543 .addImm(ARMCC::AL)
1544 .addReg(0));
15421545 return;
15431546 }
15441547 case ARM::tBR_JTr:
15581561 if (Opc == ARM::MOVr)
15591562 TmpInst.addOperand(MCOperand::createReg(0));
15601563 EmitToStreamer(*OutStreamer, TmpInst);
1561
1562 // Make sure the Thumb jump table is 4-byte aligned.
1563 if (Opc == ARM::tMOVr)
1564 EmitAlignment(2);
1565
1566 // Output the data for the jump table itself
1567 EmitJumpTable(MI);
15681564 return;
15691565 }
15701566 case ARM::BR_JTm: {
15881584 TmpInst.addOperand(MCOperand::createImm(ARMCC::AL));
15891585 TmpInst.addOperand(MCOperand::createReg(0));
15901586 EmitToStreamer(*OutStreamer, TmpInst);
1591
1592 // Output the data for the jump table itself
1593 EmitJumpTable(MI);
15941587 return;
15951588 }
15961589 case ARM::BR_JTadd: {
16051598 .addReg(0)
16061599 // Add 's' bit operand (always reg0 for this)
16071600 .addReg(0));
1608
1609 // Output the data for the jump table itself
1610 EmitJumpTable(MI);
16111601 return;
16121602 }
16131603 case ARM::SPACE:
7070 void emitInlineAsmEnd(const MCSubtargetInfo &StartInfo,
7171 const MCSubtargetInfo *EndInfo) const override;
7272
73 void EmitJumpTable(const MachineInstr *MI);
74 void EmitJump2Table(const MachineInstr *MI);
73 void EmitJumpTableAddrs(const MachineInstr *MI);
74 void EmitJumpTableInsts(const MachineInstr *MI);
75 void EmitJumpTableTBInst(const MachineInstr *MI, unsigned OffsetWidth);
7576 void EmitInstruction(const MachineInstr *MI) override;
7677 bool runOnMachineFunction(MachineFunction &F) override;
7778
626626 case ARM::t2MOVi32imm:
627627 return 8;
628628 case ARM::CONSTPOOL_ENTRY:
629 case ARM::JUMPTABLE_INSTS:
630 case ARM::JUMPTABLE_ADDRS:
631 case ARM::JUMPTABLE_TBB:
632 case ARM::JUMPTABLE_TBH:
629633 // If this machine instr is a constant pool entry, its size is recorded as
630634 // operand #2.
631635 return MI->getOperand(2).getImm();
640644 case ARM::t2Int_eh_sjlj_setjmp:
641645 case ARM::t2Int_eh_sjlj_setjmp_nofp:
642646 return 12;
643 case ARM::BR_JTr:
644 case ARM::BR_JTm:
645 case ARM::BR_JTadd:
646 case ARM::tBR_JTr:
647 case ARM::t2BR_JT:
648 case ARM::t2TBB_JT:
649 case ARM::t2TBH_JT: {
650 // These are jumptable branches, i.e. a branch followed by an inlined
651 // jumptable. The size is 4 + 4 * number of entries. For TBB, each
652 // entry is one byte; TBH two byte each.
653 unsigned EntrySize = (Opc == ARM::t2TBB_JT)
654 ? 1 : ((Opc == ARM::t2TBH_JT) ? 2 : 4);
655 unsigned NumOps = MCID.getNumOperands();
656 MachineOperand JTOP =
657 MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
658 unsigned JTI = JTOP.getIndex();
659 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
660 assert(MJTI != nullptr);
661 const std::vector &JT = MJTI->getJumpTables();
662 assert(JTI < JT.size());
663 // Thumb instructions are 2 byte aligned, but JT entries are 4 byte
664 // 4 aligned. The assembler / linker may add 2 byte padding just before
665 // the JT entries. The size does not include this padding; the
666 // constant islands pass does separate bookkeeping for it.
667 // FIXME: If we know the size of the function is less than (1 << 16) *2
668 // bytes, we can use 16-bit entries instead. Then there won't be an
669 // alignment issue.
670 unsigned InstSize = (Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT) ? 2 : 4;
671 unsigned NumEntries = JT[JTI].MBBs.size();
672 if (Opc == ARM::t2TBB_JT && (NumEntries & 1))
673 // Make sure the instruction that follows TBB is 2-byte aligned.
674 // FIXME: Constant island pass should insert an "ALIGN" instruction
675 // instead.
676 ++NumEntries;
677 return NumEntries * EntrySize + InstSize;
678 }
679647 case ARM::SPACE:
680648 return MI->getOperand(1).getImm();
681649 }
179179 MachineInstr *MI;
180180 MachineInstr *CPEMI;
181181 MachineBasicBlock *HighWaterMark;
182 private:
183182 unsigned MaxDisp;
184 public:
185183 bool NegOk;
186184 bool IsSoImm;
187185 bool KnownAlignment;
215213 };
216214
217215 /// CPEntries - Keep track of all of the constant pool entry machine
218 /// instructions. For each original constpool index (i.e. those that
219 /// existed upon entry to this pass), it keeps a vector of entries.
220 /// Original elements are cloned as we go along; the clones are
221 /// put in the vector of the original element, but have distinct CPIs.
216 /// instructions. For each original constpool index (i.e. those that existed
217 /// upon entry to this pass), it keeps a vector of entries. Original
218 /// elements are cloned as we go along; the clones are put in the vector of
219 /// the original element, but have distinct CPIs.
220 ///
221 /// The first half of CPEntries contains generic constants, the second half
222 /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
223 /// which vector it will be in here.
222224 std::vector > CPEntries;
225
226 /// Maps a JT index to the offset in CPEntries containing copies of that
227 /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
228 DenseMap JumpTableEntryIndices;
229
230 /// Maps a JT index to the LEA that actually uses the index to calculate its
231 /// base address.
232 DenseMap JumpTableUserIndices;
223233
224234 /// ImmBranch - One per immediate branch, keeping the machine instruction
225235 /// pointer, conditional or unconditional, the max displacement,
268278 }
269279
270280 private:
271 void doInitialPlacement(std::vector*> &CPEMIs);
281 void doInitialConstPlacement(std::vector*> &CPEMIs);
282 void doInitialJumpTablePlacement(std::vector &CPEMIs);
272283 bool BBHasFallthrough(MachineBasicBlock *MBB);
273284 CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
274285 unsigned getCPELogAlign(const MachineInstr *CPEMI);
278289 void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
279290 void adjustBBOffsetsAfter(MachineBasicBlock *BB);
280291 bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
292 unsigned getCombinedIndex(const MachineInstr *CPEMI);
281293 int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
282294 bool findAvailableWater(CPUser&U, unsigned UserOffset,
283295 water_iterator &WaterIter);
300312 bool optimizeThumb2Instructions();
301313 bool optimizeThumb2Branches();
302314 bool reorderThumb2JumpTables();
303 unsigned removeDeadDefinitions(MachineInstr *MI, unsigned BaseReg,
304 unsigned IdxReg);
315 bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
316 unsigned &DeadSize, bool &CanDeleteLEA,
317 bool &BaseRegKill);
305318 bool optimizeThumb2JumpTables();
306319 MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
307320 MachineBasicBlock *JTBB);
412425 // we put them all at the end of the function.
413426 std::vector CPEMIs;
414427 if (!MCP->isEmpty())
415 doInitialPlacement(CPEMIs);
428 doInitialConstPlacement(CPEMIs);
429
430 if (MF->getJumpTableInfo())
431 doInitialJumpTablePlacement(CPEMIs);
416432
417433 /// The next UID to take is the first unused one.
418434 AFI->initPICLabelUId(CPEMIs.size());
477493 for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
478494 for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
479495 const CPEntry & CPE = CPEntries[i][j];
480 AFI->recordCPEClone(i, CPE.CPI);
496 if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
497 AFI->recordCPEClone(i, CPE.CPI);
481498 }
482499 }
483500
487504 WaterList.clear();
488505 CPUsers.clear();
489506 CPEntries.clear();
507 JumpTableEntryIndices.clear();
508 JumpTableUserIndices.clear();
490509 ImmBranches.clear();
491510 PushPopMIs.clear();
492511 T2JumpTables.clear();
494513 return MadeChange;
495514 }
496515
497 /// doInitialPlacement - Perform the initial placement of the constant pool
498 /// entries. To start with, we put them all at the end of the function.
516 /// \brief Perform the initial placement of the regular constant pool entries.
517 /// To start with, we put them all at the end of the function.
499518 void
500 ARMConstantIslands::doInitialPlacement(std::vector &CPEMIs) {
519 ARMConstantIslands::doInitialConstPlacement(std::vector &CPEMIs) {
501520 // Create the basic block to hold the CPE's.
502521 MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
503522 MF->push_back(BB);
553572 << Size << ", align = " << Align <<'\n');
554573 }
555574 DEBUG(BB->dump());
575 }
576
577 /// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH
578 /// instructions can be made more efficient if the jump table immediately
579 /// follows the instruction, it's best to place them immediately next to their
580 /// jumps to begin with. In almost all cases they'll never be moved from that
581 /// position.
582 void ARMConstantIslands::doInitialJumpTablePlacement(
583 std::vector &CPEMIs) {
584 unsigned i = CPEntries.size();
585 auto MJTI = MF->getJumpTableInfo();
586 const std::vector &JT = MJTI->getJumpTables();
587
588 MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
589 for (MachineBasicBlock &MBB : *MF) {
590 auto MI = MBB.getLastNonDebugInstr();
591
592 unsigned JTOpcode;
593 switch (MI->getOpcode()) {
594 default:
595 continue;
596 case ARM::BR_JTadd:
597 case ARM::BR_JTr:
598 case ARM::tBR_JTr:
599 case ARM::BR_JTm:
600 JTOpcode = ARM::JUMPTABLE_ADDRS;
601 break;
602 case ARM::t2BR_JT:
603 JTOpcode = ARM::JUMPTABLE_INSTS;
604 break;
605 case ARM::t2TBB_JT:
606 JTOpcode = ARM::JUMPTABLE_TBB;
607 break;
608 case ARM::t2TBH_JT:
609 JTOpcode = ARM::JUMPTABLE_TBH;
610 break;
611 }
612
613 unsigned NumOps = MI->getDesc().getNumOperands();
614 MachineOperand JTOp =
615 MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
616 unsigned JTI = JTOp.getIndex();
617 unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
618 MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
619 MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
620 MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
621 DebugLoc(), TII->get(JTOpcode))
622 .addImm(i++)
623 .addJumpTableIndex(JTI)
624 .addImm(Size);
625 CPEMIs.push_back(CPEMI);
626 CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
627 JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
628 if (!LastCorrectlyNumberedBB)
629 LastCorrectlyNumberedBB = &MBB;
630 }
631
632 // If we did anything then we need to renumber the subsequent blocks.
633 if (LastCorrectlyNumberedBB)
634 MF->RenumberBlocks(LastCorrectlyNumberedBB);
556635 }
557636
558637 /// BBHasFallthrough - Return true if the specified basic block can fallthrough
594673 /// getCPELogAlign - Returns the required alignment of the constant pool entry
595674 /// represented by CPEMI. Alignment is measured in log2(bytes) units.
596675 unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
597 assert(CPEMI && CPEMI->getOpcode() == ARM::CONSTPOOL_ENTRY);
598
599 unsigned CPI = CPEMI->getOperand(1).getIndex();
676 switch (CPEMI->getOpcode()) {
677 case ARM::CONSTPOOL_ENTRY:
678 break;
679 case ARM::JUMPTABLE_TBB:
680 return 0;
681 case ARM::JUMPTABLE_TBH:
682 case ARM::JUMPTABLE_INSTS:
683 return 1;
684 case ARM::JUMPTABLE_ADDRS:
685 return 2;
686 default:
687 llvm_unreachable("unknown constpool entry kind");
688 }
689
690 unsigned CPI = getCombinedIndex(CPEMI);
600691 assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
601692 unsigned Align = MCP->getConstants()[CPI].getAlignment();
602693 assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
705796 if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
706797 PushPopMIs.push_back(I);
707798
708 if (Opc == ARM::CONSTPOOL_ENTRY)
799 if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
800 Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
801 Opc == ARM::JUMPTABLE_TBH)
709802 continue;
710803
711804 // Scan the instructions for constant pool operands.
712805 for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op)
713 if (I->getOperand(op).isCPI()) {
806 if (I->getOperand(op).isCPI() || I->getOperand(op).isJTI()) {
714807 // We found one. The addressing mode tells us the max displacement
715808 // from the PC that this instruction permits.
716809
726819
727820 // Taking the address of a CP entry.
728821 case ARM::LEApcrel:
822 case ARM::LEApcrelJT:
729823 // This takes a SoImm, which is 8 bit immediate rotated. We'll
730824 // pretend the maximum offset is 255 * 4. Since each instruction
731825 // 4 byte wide, this is always correct. We'll check for other
736830 IsSoImm = true;
737831 break;
738832 case ARM::t2LEApcrel:
833 case ARM::t2LEApcrelJT:
739834 Bits = 12;
740835 NegOk = true;
741836 break;
742837 case ARM::tLEApcrel:
838 case ARM::tLEApcrelJT:
743839 Bits = 8;
744840 Scale = 4;
745841 break;
767863
768864 // Remember that this is a user of a CP entry.
769865 unsigned CPI = I->getOperand(op).getIndex();
866 if (I->getOperand(op).isJTI()) {
867 JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
868 CPI = JumpTableEntryIndices[CPI];
869 }
870
770871 MachineInstr *CPEMI = CPEMIs[CPI];
771872 unsigned MaxOffs = ((1 << Bits)-1) * Scale;
772873 CPUsers.push_back(CPUser(I, CPEMI, MaxOffs, NegOk, IsSoImm));
11001201 return false;
11011202 }
11021203
1204 unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
1205 if (CPEMI->getOperand(1).isCPI())
1206 return CPEMI->getOperand(1).getIndex();
1207
1208 return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
1209 }
1210
11031211 /// LookForCPEntryInRange - see if the currently referenced CPE is in range;
11041212 /// if not, see if an in-range clone of the CPE is in range, and if so,
11051213 /// change the data structures so the user references the clone. Returns:
11191227 }
11201228
11211229 // No. Look for previously created clones of the CPE that are in range.
1122 unsigned CPI = CPEMI->getOperand(1).getIndex();
1230 unsigned CPI = getCombinedIndex(CPEMI);
11231231 std::vector &CPEs = CPEntries[CPI];
11241232 for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
11251233 // We already tried this one
13641472 CPUser &U = CPUsers[CPUserIndex];
13651473 MachineInstr *UserMI = U.MI;
13661474 MachineInstr *CPEMI = U.CPEMI;
1367 unsigned CPI = CPEMI->getOperand(1).getIndex();
1475 unsigned CPI = getCombinedIndex(CPEMI);
13681476 unsigned Size = CPEMI->getOperand(2).getImm();
13691477 // Compute this only once, it's expensive.
13701478 unsigned UserOffset = getUserOffset(U);
14281536 // Update internal data structures to account for the newly inserted MBB.
14291537 updateForInsertedWaterBlock(NewIsland);
14301538
1431 // Decrement the old entry, and remove it if refcount becomes 0.
1432 decrementCPEReferenceCount(CPI, CPEMI);
1433
14341539 // Now that we have an island to add the CPE to, clone the original CPE and
14351540 // add it to the island.
14361541 U.HighWaterMark = NewIsland;
1437 U.CPEMI = BuildMI(NewIsland, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
1438 .addImm(ID).addConstantPoolIndex(CPI).addImm(Size);
1542 U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
1543 .addImm(ID).addOperand(CPEMI->getOperand(1)).addImm(Size);
14391544 CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
14401545 ++NumCPEs;
1546
1547 // Decrement the old entry, and remove it if refcount becomes 0.
1548 decrementCPEReferenceCount(CPI, CPEMI);
14411549
14421550 // Mark the basic block as aligned as required by the const-pool entry.
14431551 NewIsland->setAlignment(getCPELogAlign(U.CPEMI));
18431951 return MadeChange;
18441952 }
18451953
1846 /// If we've formed a TBB or TBH instruction, the base register is now
1847 /// redundant. In most cases, the instructions defining it will now be dead and
1848 /// can be tidied up. This function removes them if so, and returns the number
1849 /// of bytes saved.
1850 unsigned ARMConstantIslands::removeDeadDefinitions(MachineInstr *MI,
1851 unsigned BaseReg,
1852 unsigned IdxReg) {
1853 unsigned BytesRemoved = 0;
1854 MachineBasicBlock *MBB = MI->getParent();
1855
1856 // Scan backwards to find the instruction that defines the base
1857 // register. Due to post-RA scheduling, we can't count on it
1858 // immediately preceding the branch instruction.
1859 MachineBasicBlock::iterator PrevI = MI;
1860 MachineBasicBlock::iterator B = MBB->begin();
1861 while (PrevI != B && !PrevI->definesRegister(BaseReg))
1862 --PrevI;
1863
1864 // If for some reason we didn't find it, we can't do anything, so
1865 // just skip this one.
1866 if (!PrevI->definesRegister(BaseReg) || PrevI->hasUnmodeledSideEffects() ||
1867 PrevI->mayStore())
1868 return BytesRemoved;
1869
1870 MachineInstr *AddrMI = PrevI;
1871 unsigned NewBaseReg = BytesRemoved;
1872
1873 // Examine the instruction that calculates the jumptable entry address. Make
1874 // sure it only defines the base register and kills any uses other than the
1875 // index register. We also need precisely one use to trace backwards to
1876 // (hopefully) the LEA.
1877 for (unsigned k = 0, eee = AddrMI->getNumOperands(); k != eee; ++k) {
1878 const MachineOperand &MO = AddrMI->getOperand(k);
1879 if (!MO.isReg() || !MO.getReg())
1880 continue;
1881 if (MO.isDef() && MO.getReg() != BaseReg)
1882 return BytesRemoved;
1883
1884 if (MO.isUse() && MO.getReg() != IdxReg) {
1885 if (!MO.isKill() || (NewBaseReg != 0 && NewBaseReg != MO.getReg()))
1886 return BytesRemoved;
1887 NewBaseReg = MO.getReg();
1888 }
1889 }
1890
1891 // Want to continue searching for AddrMI, but there are 2 problems: AddrMI is
1892 // going away soon, and even decrementing once may be invalid.
1893 if (PrevI != B)
1894 PrevI = std::prev(PrevI);
1895
1896 DEBUG(dbgs() << "remove addr: " << *AddrMI);
1897 BytesRemoved += TII->GetInstSizeInBytes(AddrMI);
1898 AddrMI->eraseFromParent();
1899
1900 // Now scan back again to find the tLEApcrel or t2LEApcrelJT instruction
1901 // that gave us the initial base register definition.
1902 for (; PrevI != B && !PrevI->definesRegister(NewBaseReg); --PrevI)
1903 ;
1904
1905 // The instruction should be a tLEApcrel or t2LEApcrelJT; we want
1906 // to delete it as well.
1907 MachineInstr *LeaMI = PrevI;
1908 if ((LeaMI->getOpcode() != ARM::tLEApcrelJT &&
1909 LeaMI->getOpcode() != ARM::t2LEApcrelJT) ||
1910 LeaMI->getOperand(0).getReg() != NewBaseReg)
1911 return BytesRemoved;
1912
1913 DEBUG(dbgs() << "remove lea: " << *LeaMI);
1914 BytesRemoved += TII->GetInstSizeInBytes(LeaMI);
1915 LeaMI->eraseFromParent();
1916 return BytesRemoved;
1954 /// \brief Returns true if \p I is a t2ADDrs computing \p EntryReg from \p BaseReg,
1955 /// i.e. the simple jump-table entry-address calculation we know how to remove.
1955 static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
1956 unsigned BaseReg) {
1957 if (I.getOpcode() != ARM::t2ADDrs)
1958 return false;
1959
1960 if (I.getOperand(0).getReg() != EntryReg)
1961 return false;
1962
1963 if (I.getOperand(1).getReg() != BaseReg)
1964 return false;
1965
1966 // FIXME: what about CC and IdxReg?
1967 return true;
1968 }
1969
1970 /// \brief While trying to form a TBB/TBH instruction, we may (if the table
1971 /// doesn't immediately follow the BR_JT) need access to the start of the
1972 /// jump-table. We know one instruction that produces such a register; this
1973 /// function works out whether that definition can be preserved to the BR_JT,
1974 /// possibly by removing an intervening addition (which is usually needed to
1975 /// calculate the actual entry to jump to).
1976 bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
1977 MachineInstr *LEAMI,
1978 unsigned &DeadSize,
1979 bool &CanDeleteLEA,
1980 bool &BaseRegKill) {
1981 if (JumpMI->getParent() != LEAMI->getParent())
1982 return false;
1983
1984 // Now we hope that we have at least these instructions in the basic block:
1985 // BaseReg = t2LEA ...
1986 // [...]
1987 // EntryReg = t2ADDrs BaseReg, ...
1988 // [...]
1989 // t2BR_JT EntryReg
1990 //
1991 // We have to be very conservative about what we recognise here though. The
1992 // main perturbing factors to watch out for are:
1993 // + Spills at any point in the chain: not direct problems but we would
1994 // expect a blocking Def of the spilled register so in practice what we
1995 // can do is limited.
1996 // + EntryReg == BaseReg: this is the one situation we should allow a Def
1997 // of BaseReg, but only if the t2ADDrs can be removed.
1998 // + Some instruction other than t2ADDrs computing the entry. Not seen in
1999 // the wild, but we should be careful.
2000 unsigned EntryReg = JumpMI->getOperand(0).getReg();
2001 unsigned BaseReg = LEAMI->getOperand(0).getReg();
2002
2003 CanDeleteLEA = true;
2004 BaseRegKill = false;
2005 MachineInstr *RemovableAdd = nullptr;
2006 MachineBasicBlock::iterator I(LEAMI);
2007 for (++I; &*I != JumpMI; ++I) {
2008 if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
2009 RemovableAdd = &*I;
2010 break;
2011 }
2012
2013 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2014 const MachineOperand &MO = I->getOperand(K);
2015 if (!MO.isReg() || !MO.getReg())
2016 continue;
2017 if (MO.isDef() && MO.getReg() == BaseReg)
2018 return false;
2019 if (MO.isUse() && MO.getReg() == BaseReg) {
2020 BaseRegKill = BaseRegKill || MO.isKill();
2021 CanDeleteLEA = false;
2022 }
2023 }
2024 }
2025
2026 if (!RemovableAdd)
2027 return true;
2028
2029 // Check the add really is removable, and that nothing else in the block
2030 // clobbers BaseReg.
2031 for (++I; &*I != JumpMI; ++I) {
2032 for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
2033 const MachineOperand &MO = I->getOperand(K);
2034 if (!MO.isReg() || !MO.getReg())
2035 continue;
2036 if (MO.isDef() && MO.getReg() == BaseReg)
2037 return false;
2038 if (MO.isUse() && MO.getReg() == EntryReg)
2039 RemovableAdd = nullptr;
2040 }
2041 }
2042
2043 if (RemovableAdd) {
2044 RemovableAdd->eraseFromParent();
2045 DeadSize += 4;
2046 } else if (BaseReg == EntryReg) {
2047 // The add wasn't removable, but clobbered the base for the TBB. So we can't
2048 // preserve it.
2049 return false;
2050 }
2051
2052 // We reached the end of the block without seeing another definition of
2053 // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
2054 // used in the TBB/TBH if necessary.
2055 return true;
2056 }
2057
2058 /// \brief Returns whether CPEMI is the first instruction in the block
2059 /// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
2060 /// we can switch the first register to PC and usually remove the address
2061 /// calculation that preceded it.
2062 static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
2063 MachineFunction::iterator MBB = JTMI->getParent();
2064 MachineFunction *MF = MBB->getParent();
2065 ++MBB;
2066
2067 return MBB != MF->end() && MBB->begin() != MBB->end() &&
2068 &*MBB->begin() == CPEMI;
19172069 }
19182070
19192071 /// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
19542106 break;
19552107 }
19562108
1957 if (ByteOk || HalfWordOk) {
1958 MachineBasicBlock *MBB = MI->getParent();
1959 unsigned BaseReg = MI->getOperand(0).getReg();
1960 bool BaseRegKill = MI->getOperand(0).isKill();
1961 if (!BaseRegKill)
1962 continue;
1963 unsigned IdxReg = MI->getOperand(1).getReg();
1964 bool IdxRegKill = MI->getOperand(1).isKill();
1965
1966 DEBUG(dbgs() << "Shrink JT: " << *MI);
1967 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
1968 MachineBasicBlock::iterator MI_JT = MI;
1969 MachineInstr *NewJTMI =
2109 if (!ByteOk && !HalfWordOk)
2110 continue;
2111
2112 MachineBasicBlock *MBB = MI->getParent();
2113 if (!MI->getOperand(0).isKill()) // FIXME: needed now?
2114 continue;
2115 unsigned IdxReg = MI->getOperand(1).getReg();
2116 bool IdxRegKill = MI->getOperand(1).isKill();
2117
2118 CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
2119 unsigned DeadSize = 0;
2120 bool CanDeleteLEA = false;
2121 bool BaseRegKill = false;
2122 bool PreservedBaseReg =
2123 preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);
2124
2125 if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
2126 continue;
2127
2128 DEBUG(dbgs() << "Shrink JT: " << *MI);
2129 MachineInstr *CPEMI = User.CPEMI;
2130 unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
2131 MachineBasicBlock::iterator MI_JT = MI;
2132 MachineInstr *NewJTMI =
19702133 BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
1971 .addReg(IdxReg, getKillRegState(IdxRegKill))
1972 .addJumpTableIndex(JTI, JTOP.getTargetFlags());
1973 DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
1974 // FIXME: Insert an "ALIGN" instruction to ensure the next instruction
1975 // is 2-byte aligned. For now, asm printer will fix it up.
1976 unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
1977 unsigned OrigSize = TII->GetInstSizeInBytes(MI);
1978 unsigned DeadSize = removeDeadDefinitions(MI, BaseReg, IdxReg);
1979 MI->eraseFromParent();
1980
1981 int delta = OrigSize - NewSize + DeadSize;
1982 BBInfo[MBB->getNumber()].Size -= delta;
1983 adjustBBOffsetsAfter(MBB);
1984
1985 ++NumTBs;
1986 MadeChange = true;
1987 }
2134 .addReg(User.MI->getOperand(0).getReg(),
2135 getKillRegState(BaseRegKill))
2136 .addReg(IdxReg, getKillRegState(IdxRegKill))
2137 .addJumpTableIndex(JTI, JTOP.getTargetFlags())
2138 .addImm(CPEMI->getOperand(0).getImm());
2139 DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);
2140
2141 unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
2142 CPEMI->setDesc(TII->get(JTOpc));
2143
2144 if (jumpTableFollowsTB(MI, User.CPEMI)) {
2145 NewJTMI->getOperand(0).setReg(ARM::PC);
2146 NewJTMI->getOperand(0).setIsKill(false);
2147
2148 if (CanDeleteLEA) {
2149 User.MI->eraseFromParent();
2150 DeadSize += 4;
2151
2152 // The LEA was eliminated, the TBB instruction becomes the only new user
2153 // of the jump table.
2154 User.MI = NewJTMI;
2155 User.MaxDisp = 4;
2156 User.NegOk = false;
2157 User.IsSoImm = false;
2158 User.KnownAlignment = false;
2159 } else {
2160 // The LEA couldn't be eliminated, so we must add another CPUser to
2161 // record the TBB or TBH use.
2162 int CPEntryIdx = JumpTableEntryIndices[JTI];
2163 auto &CPEs = CPEntries[CPEntryIdx];
2164 auto Entry = std::find_if(CPEs.begin(), CPEs.end(), [&](CPEntry &E) {
2165 return E.CPEMI == User.CPEMI;
2166 });
2167 ++Entry->RefCount;
2168 CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
2169 }
2170 }
2171
2172 unsigned NewSize = TII->GetInstSizeInBytes(NewJTMI);
2173 unsigned OrigSize = TII->GetInstSizeInBytes(MI);
2174 MI->eraseFromParent();
2175
2176 int Delta = OrigSize - NewSize + DeadSize;
2177 BBInfo[MBB->getNumber()].Size -= Delta;
2178 adjustBBOffsetsAfter(MBB);
2179
2180 ++NumTBs;
2181 MadeChange = true;
19882182 }
19892183
19902184 return MadeChange;
18331833 PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
18341834 i32imm:$size), NoItinerary, []>;
18351835
1836 /// A jumptable consisting of direct 32-bit addresses of the destination basic
1837 /// blocks (either absolute, or relative to the start of the jump-table in PIC
1838 /// mode). Used mostly in ARM and Thumb-1 modes.
1839 def JUMPTABLE_ADDRS :
1840 PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
1841 i32imm:$size), NoItinerary, []>;
1842
1843 /// A jumptable consisting of 32-bit jump instructions. Used for Thumb-2 tables
1844 /// that cannot be optimised to use TBB or TBH.
1845 def JUMPTABLE_INSTS :
1846 PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
1847 i32imm:$size), NoItinerary, []>;
1848
1849 /// A jumptable consisting of 8-bit unsigned integers representing offsets from
1850 /// a TBB instruction.
1851 def JUMPTABLE_TBB :
1852 PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
1853 i32imm:$size), NoItinerary, []>;
1854
1855 /// A jumptable consisting of 16-bit unsigned integers representing offsets from
1856 /// a TBH instruction.
1857 def JUMPTABLE_TBH :
1858 PseudoInst<(outs), (ins cpinst_operand:$instid, cpinst_operand:$cpidx,
1859 i32imm:$size), NoItinerary, []>;
1860
1861
18361862 // FIXME: Marking these as hasSideEffects is necessary to prevent machine DCE
18371863 // from removing one half of the matched pairs. That breaks PEI, which assumes
18381864 // these will always be in pairs, and asserts if it finds otherwise. Better way?
22312257 [(br bb:$target)], (Bcc br_target:$target, (ops 14, zero_reg))>,
22322258 Sched<[WriteBr]>;
22332259
2234 let isNotDuplicable = 1, isIndirectBranch = 1 in {
2260 let Size = 4, isNotDuplicable = 1, isIndirectBranch = 1 in {
22352261 def BR_JTr : ARMPseudoInst<(outs),
22362262 (ins GPR:$target, i32imm:$jt),
22372263 0, IIC_Br,
525525 0, IIC_Br,
526526 [(ARMbrjt tGPR:$target, tjumptable:$jt)]>,
527527 Sched<[WriteBrTbl]> {
528 let Size = 2;
528529 list Predicates = [IsThumb, IsThumb1Only];
529530 }
530531 }
35303530 let AsmMatchConverter = "cvtThumbBranches";
35313531 }
35323532
3533 let isNotDuplicable = 1, isIndirectBranch = 1 in {
3533 let Size = 4, isNotDuplicable = 1, isIndirectBranch = 1 in {
35343534 def t2BR_JT : t2PseudoInst<(outs),
35353535 (ins GPR:$target, GPR:$index, i32imm:$jt),
35363536 0, IIC_Br,
35373537 [(ARMbr2jt GPR:$target, GPR:$index, tjumptable:$jt)]>,
35383538 Sched<[WriteBr]>;
35393539
3540 // FIXME: Add a non-pc based case that can be predicated.
3540 // FIXME: Add a case that can be predicated.
35413541 def t2TBB_JT : t2PseudoInst<(outs),
3542 (ins GPR:$index, i32imm:$jt), 0, IIC_Br, []>,
3542 (ins GPR:$base, GPR:$index, i32imm:$jt, i32imm:$pclbl), 0, IIC_Br, []>,
35433543 Sched<[WriteBr]>;
35443544
35453545 def t2TBH_JT : t2PseudoInst<(outs),
3546 (ins GPR:$index, i32imm:$jt), 0, IIC_Br, []>,
3546 (ins GPR:$base, GPR:$index, i32imm:$jt, i32imm:$pclbl), 0, IIC_Br, []>,
35473547 Sched<[WriteBr]>;
35483548
35493549 def t2TBB : T2I<(outs), (ins addrmode_tbb:$addr), IIC_Br,
0 ; RUN: llc -mtriple=thumbv7s-apple-ios8.0 -o - %s | FileCheck %s
1
2 declare void @foo(double)
3 declare i32 @llvm.arm.space(i32, i32)
4
5 ; The constpool entry used to call @foo should be directly between where we want
6 ; the tbb and its table. Fortunately, the flow is simple enough that we can
7 ; eliminate the entry calculation (ADD) and use the ADR as the base.
8 ;
9 ; I'm hoping this won't be fragile, but if it does break the most likely fix is
10 ; adjusting the @llvm.arm.space call slightly. If this happens too many times
11 ; the test should probably be removed.
12 define i32 @test_jumptable_not_adjacent(i1 %tst, i32 %sw, i32 %l) {
13 ; CHECK-LABEL: test_jumptable_not_adjacent:
14 ; CHECK: vldr {{d[0-9]+}}, [[DBL_CONST:LCPI[0-9]+_[0-9]+]]
15 ; [...]
16 ; CHECK: adr.w r[[BASE:[0-9]+]], [[JUMP_TABLE:LJTI[0-9]+_[0-9]+]]
17 ; CHECK-NOT: r[[BASE]]
18
19 ; CHECK: [[TBB_KEY:LCPI[0-9]+_[0-9]+]]:
20 ; CHECK-NEXT: tbb [r[[BASE]], {{r[0-9]+}}]
21
22 ; CHECK: [[DBL_CONST]]:
23 ; CHECK: .long
24 ; CHECK: .long
25 ; CHECK: [[JUMP_TABLE]]:
26 ; CHECK: .byte (LBB{{[0-9]+}}_{{[0-9]+}}-([[TBB_KEY]]+4)
27
28 br label %complex
29
30 complex:
31 call void @foo(double 12345.0)
32 call i32 @llvm.arm.space(i32 970, i32 undef)
33 switch i32 %sw, label %second [ i32 0, label %other
34 i32 1, label %third
35 i32 2, label %end
36 i32 3, label %other ]
37
38 second:
39 ret i32 43
40 third:
41 ret i32 0
42
43 other:
44 call void @bar()
45 unreachable
46
47 end:
48 ret i32 42
49 }
50
51 declare void @bar()
0 ; RUN: llc -mtriple=armv7-apple-ios8.0 -o - %s | FileCheck %s
1
2 %BigInt = type i5500
3
4 define %BigInt @test_moved_jumptable(i1 %tst, i32 %sw, %BigInt %l) {
5 ; CHECK-LABEL: test_moved_jumptable:
6
7 ; CHECK: adr {{r[0-9]+}}, [[JUMP_TABLE:LJTI[0-9]+_[0-9]+]]
8 ; CHECK: b [[SKIP_TABLE:LBB[0-9]+_[0-9]+]]
9
10 ; CHECK: [[JUMP_TABLE]]:
11 ; CHECK: .data_region jt32
12 ; CHECK: .long LBB{{[0-9]+_[0-9]+}}-[[JUMP_TABLE]]
13
14 ; CHECK: [[SKIP_TABLE]]:
15 ; CHECK: add pc, {{r[0-9]+}}, {{r[0-9]+}}
16 br i1 %tst, label %simple, label %complex
17
18 simple:
19 br label %end
20
21 complex:
22 switch i32 %sw, label %simple [ i32 0, label %other
23 i32 1, label %third
24 i32 5, label %end
25 i32 6, label %other ]
26
27 third:
28 ret %BigInt 0
29
30 other:
31 call void @bar()
32 unreachable
33
34 end:
35 %val = phi %BigInt [ %l, %complex ], [ -1, %simple ]
36 ret %BigInt %val
37 }
38
39 declare void @bar()
11
22 ; test that we print the label of a bb that is only used in a jump table.
33
4 ; CHECK: .long LBB0_2
5 ; CHECK: LBB0_2:
4 ; CHECK: .long [[JUMPTABLE_DEST:LBB[0-9]+_[0-9]+]]
5 ; CHECK: [[JUMPTABLE_DEST]]:
66
77 define i32 @calculate() {
88 entry:
0 ; RUN: llc < %s -mtriple=thumbv7-linux-gnueabihf -O1 %s -o - | FileCheck %s
11
22 ; CHECK-LABEL: test_jump_table:
3 ; CHECK: b .LBB
3 ; CHECK: b{{.*}} .LBB
44 ; CHECK-NOT: tbh
55
66 define i32 @test_jump_table(i32 %x, float %in) {
1313
1414 declare noalias i8* @calloc(i32, i32) nounwind
1515
16 ; Jump tables are not anchored next to the TBB/TBH any more. Make sure the
17 ; correct address is still calculated (i.e. via a PC-relative symbol *at* the
18 ; TBB/TBH).
1619 define i32 @main(i32 %argc, i8** nocapture %argv) nounwind {
1720 ; CHECK-LABEL: main:
18 ; CHECK: tbb
21 ; CHECK-NOT: adr {{r[0-9]+}}, LJTI
22 ; CHECK: [[PCREL_ANCHOR:LCPI[0-9]+_[0-9]+]]:
23 ; CHECK-NEXT: tbb [pc, {{r[0-9]+}}]
24
25 ; CHECK: LJTI0_0:
26 ; CHECK-NEXT: .data_region jt8
27 ; CHECK-NEXT: .byte (LBB{{[0-9]+_[0-9]+}}-([[PCREL_ANCHOR]]+4))/2
28
1929 entry:
2030 br label %bb42.i
2131