LLVM 19.0.0git
MachineIRBuilder.cpp
Go to the documentation of this file.
1//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9/// This file implements the MachineIRBuidler class.
10//===----------------------------------------------------------------------===//
21
22using namespace llvm;
23
25 State.MF = &MF;
26 State.MBB = nullptr;
27 State.MRI = &MF.getRegInfo();
28 State.TII = MF.getSubtarget().getInstrInfo();
29 State.DL = DebugLoc();
30 State.PCSections = nullptr;
31 State.MMRA = nullptr;
33 State.Observer = nullptr;
34}
35
36//------------------------------------------------------------------------------
37// Build instruction variants.
38//------------------------------------------------------------------------------
39
42 getTII().get(Opcode));
43}
44
46 getMBB().insert(getInsertPt(), MIB);
47 recordInsertion(MIB);
48 return MIB;
49}
50
53 const MDNode *Expr) {
54 assert(isa<DILocalVariable>(Variable) && "not a variable");
55 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
56 assert(
57 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
58 "Expected inlined-at fields to agree");
59 return insertInstr(BuildMI(getMF(), getDL(),
60 getTII().get(TargetOpcode::DBG_VALUE),
61 /*IsIndirect*/ false, Reg, Variable, Expr));
62}
63
66 const MDNode *Expr) {
67 assert(isa<DILocalVariable>(Variable) && "not a variable");
68 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
69 assert(
70 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
71 "Expected inlined-at fields to agree");
72 return insertInstr(BuildMI(getMF(), getDL(),
73 getTII().get(TargetOpcode::DBG_VALUE),
74 /*IsIndirect*/ true, Reg, Variable, Expr));
75}
76
78 const MDNode *Variable,
79 const MDNode *Expr) {
80 assert(isa<DILocalVariable>(Variable) && "not a variable");
81 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
82 assert(
83 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
84 "Expected inlined-at fields to agree");
85 return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
86 .addFrameIndex(FI)
87 .addImm(0)
88 .addMetadata(Variable)
89 .addMetadata(Expr));
90}
91
93 const MDNode *Variable,
94 const MDNode *Expr) {
95 assert(isa<DILocalVariable>(Variable) && "not a variable");
96 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
97 assert(
98 cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
99 "Expected inlined-at fields to agree");
100 auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
101
102 auto *NumericConstant = [&] () -> const Constant* {
103 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
104 if (CE->getOpcode() == Instruction::IntToPtr)
105 return CE->getOperand(0);
106 return &C;
107 }();
108
109 if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
110 if (CI->getBitWidth() > 64)
111 MIB.addCImm(CI);
112 else
113 MIB.addImm(CI->getZExtValue());
114 } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
115 MIB.addFPImm(CFP);
116 } else if (isa<ConstantPointerNull>(NumericConstant)) {
117 MIB.addImm(0);
118 } else {
119 // Insert $noreg if we didn't find a usable constant and had to drop it.
120 MIB.addReg(Register());
121 }
122
123 MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
124 return insertInstr(MIB);
125}
126
128 assert(isa<DILabel>(Label) && "not a label");
129 assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
130 "Expected inlined-at fields to agree");
131 auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
132
133 return MIB.addMetadata(Label);
134}
135
137 const SrcOp &Size,
138 Align Alignment) {
139 assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
140 auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
141 Res.addDefToMIB(*getMRI(), MIB);
142 Size.addSrcToMIB(MIB);
143 MIB.addImm(Alignment.value());
144 return MIB;
145}
146
148 int Idx) {
149 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
150 auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
151 Res.addDefToMIB(*getMRI(), MIB);
152 MIB.addFrameIndex(Idx);
153 return MIB;
154}
155
157 const GlobalValue *GV) {
158 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
159 assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
160 GV->getType()->getAddressSpace() &&
161 "address space mismatch");
162
163 auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
164 Res.addDefToMIB(*getMRI(), MIB);
165 MIB.addGlobalAddress(GV);
166 return MIB;
167}
168
170 unsigned Idx) {
171 assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
172 auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
173 Res.addDefToMIB(*getMRI(), MIB);
174 MIB.addConstantPoolIndex(Idx);
175 return MIB;
176}
177
179 unsigned JTI) {
180 return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
181 .addJumpTableIndex(JTI);
182}
183
184void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
185 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
186 assert((Res == Op0) && "type mismatch");
187}
188
190 const LLT Op1) {
191 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
192 assert((Res == Op0 && Res == Op1) && "type mismatch");
193}
194
195void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
196 const LLT Op1) {
197 assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
198 assert((Res == Op0) && "type mismatch");
199}
200
203 const SrcOp &Op1, std::optional<unsigned> Flags) {
204 assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
205 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
206 assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() && "invalid offset type");
207
208 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
209}
210
211std::optional<MachineInstrBuilder>
213 const LLT ValueTy, uint64_t Value) {
214 assert(Res == 0 && "Res is a result argument");
215 assert(ValueTy.isScalar() && "invalid offset type");
216
217 if (Value == 0) {
218 Res = Op0;
219 return std::nullopt;
220 }
221
223 auto Cst = buildConstant(ValueTy, Value);
224 return buildPtrAdd(Res, Op0, Cst.getReg(0));
225}
226
228 const SrcOp &Op0,
229 uint32_t NumBits) {
230 LLT PtrTy = Res.getLLTTy(*getMRI());
231 LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
232 Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
233 buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
234 return buildPtrMask(Res, Op0, MaskReg);
235}
236
239 const SrcOp &Op0) {
240 LLT ResTy = Res.getLLTTy(*getMRI());
241 LLT Op0Ty = Op0.getLLTTy(*getMRI());
242
243 assert(ResTy.isVector() && "Res non vector type");
244
246 if (Op0Ty.isVector()) {
247 assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
248 "Different vector element types");
249 assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
250 "Op0 has more elements");
251 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
252
253 for (auto Op : Unmerge.getInstr()->defs())
254 Regs.push_back(Op.getReg());
255 } else {
256 assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
257 "Op0 has more size");
258 Regs.push_back(Op0.getReg());
259 }
260 Register Undef =
261 buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
262 unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
263 for (unsigned i = 0; i < NumberOfPadElts; ++i)
264 Regs.push_back(Undef);
265 return buildMergeLikeInstr(Res, Regs);
266}
267
270 const SrcOp &Op0) {
271 LLT ResTy = Res.getLLTTy(*getMRI());
272 LLT Op0Ty = Op0.getLLTTy(*getMRI());
273
274 assert(Op0Ty.isVector() && "Non vector type");
275 assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
276 (ResTy.isVector() &&
277 (ResTy.getElementType() == Op0Ty.getElementType()))) &&
278 "Different vector element types");
279 assert(
280 (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
281 "Op0 has fewer elements");
282
283 auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
284 if (ResTy.isScalar())
285 return buildCopy(Res, Unmerge.getReg(0));
287 for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
288 Regs.push_back(Unmerge.getReg(i));
289 return buildMergeLikeInstr(Res, Regs);
290}
291
293 return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
294}
295
297 assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
298 return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
299}
300
302 unsigned JTI,
303 Register IndexReg) {
304 assert(getMRI()->getType(TablePtr).isPointer() &&
305 "Table reg must be a pointer");
306 return buildInstr(TargetOpcode::G_BRJT)
307 .addUse(TablePtr)
309 .addUse(IndexReg);
310}
311
313 const SrcOp &Op) {
314 return buildInstr(TargetOpcode::COPY, Res, Op);
315}
316
318 const ConstantInt &Val) {
319 LLT Ty = Res.getLLTTy(*getMRI());
320 LLT EltTy = Ty.getScalarType();
321 assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
322 "creating constant with the wrong size");
323
324 assert(!Ty.isScalableVector() &&
325 "unexpected scalable vector in buildConstant");
326
327 if (Ty.isFixedVector()) {
328 auto Const = buildInstr(TargetOpcode::G_CONSTANT)
329 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
330 .addCImm(&Val);
331 return buildSplatBuildVector(Res, Const);
332 }
333
334 auto Const = buildInstr(TargetOpcode::G_CONSTANT);
335 Const->setDebugLoc(DebugLoc());
336 Res.addDefToMIB(*getMRI(), Const);
337 Const.addCImm(&Val);
338 return Const;
339}
340
342 int64_t Val) {
345 ConstantInt *CI = ConstantInt::get(IntN, Val, true);
346 return buildConstant(Res, *CI);
347}
348
350 const ConstantFP &Val) {
351 LLT Ty = Res.getLLTTy(*getMRI());
352 LLT EltTy = Ty.getScalarType();
353
355 == EltTy.getSizeInBits() &&
356 "creating fconstant with the wrong size");
357
358 assert(!Ty.isPointer() && "invalid operand type");
359
360 assert(!Ty.isScalableVector() &&
361 "unexpected scalable vector in buildFConstant");
362
363 if (Ty.isFixedVector()) {
364 auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
365 .addDef(getMRI()->createGenericVirtualRegister(EltTy))
366 .addFPImm(&Val);
367
368 return buildSplatBuildVector(Res, Const);
369 }
370
371 auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
372 Const->setDebugLoc(DebugLoc());
373 Res.addDefToMIB(*getMRI(), Const);
374 Const.addFPImm(&Val);
375 return Const;
376}
377
379 const APInt &Val) {
380 ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
381 return buildConstant(Res, *CI);
382}
383
385 double Val) {
386 LLT DstTy = Res.getLLTTy(*getMRI());
387 auto &Ctx = getMF().getFunction().getContext();
388 auto *CFP =
389 ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
390 return buildFConstant(Res, *CFP);
391}
392
394 const APFloat &Val) {
395 auto &Ctx = getMF().getFunction().getContext();
396 auto *CFP = ConstantFP::get(Ctx, Val);
397 return buildFConstant(Res, *CFP);
398}
399
401 MachineBasicBlock &Dest) {
402 assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");
403
404 auto MIB = buildInstr(TargetOpcode::G_BRCOND);
405 Tst.addSrcToMIB(MIB);
406 MIB.addMBB(&Dest);
407 return MIB;
408}
409
412 MachinePointerInfo PtrInfo, Align Alignment,
414 const AAMDNodes &AAInfo) {
415 MMOFlags |= MachineMemOperand::MOLoad;
416 assert((MMOFlags & MachineMemOperand::MOStore) == 0);
417
418 LLT Ty = Dst.getLLTTy(*getMRI());
419 MachineMemOperand *MMO =
420 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
421 return buildLoad(Dst, Addr, *MMO);
422}
423
425 const DstOp &Res,
426 const SrcOp &Addr,
427 MachineMemOperand &MMO) {
428 assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
429 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
430
431 auto MIB = buildInstr(Opcode);
432 Res.addDefToMIB(*getMRI(), MIB);
433 Addr.addSrcToMIB(MIB);
434 MIB.addMemOperand(&MMO);
435 return MIB;
436}
437
439 const DstOp &Dst, const SrcOp &BasePtr,
440 MachineMemOperand &BaseMMO, int64_t Offset) {
441 LLT LoadTy = Dst.getLLTTy(*getMRI());
442 MachineMemOperand *OffsetMMO =
443 getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);
444
445 if (Offset == 0) // This may be a size or type changing load.
446 return buildLoad(Dst, BasePtr, *OffsetMMO);
447
448 LLT PtrTy = BasePtr.getLLTTy(*getMRI());
449 LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
450 auto ConstOffset = buildConstant(OffsetTy, Offset);
451 auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
452 return buildLoad(Dst, Ptr, *OffsetMMO);
453}
454
456 const SrcOp &Addr,
457 MachineMemOperand &MMO) {
458 assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
459 assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
460
461 auto MIB = buildInstr(TargetOpcode::G_STORE);
462 Val.addSrcToMIB(MIB);
463 Addr.addSrcToMIB(MIB);
464 MIB.addMemOperand(&MMO);
465 return MIB;
466}
467
470 MachinePointerInfo PtrInfo, Align Alignment,
472 const AAMDNodes &AAInfo) {
473 MMOFlags |= MachineMemOperand::MOStore;
474 assert((MMOFlags & MachineMemOperand::MOLoad) == 0);
475
476 LLT Ty = Val.getLLTTy(*getMRI());
477 MachineMemOperand *MMO =
478 getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
479 return buildStore(Val, Addr, *MMO);
480}
481
483 const SrcOp &Op) {
484 return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
485}
486
488 const SrcOp &Op) {
489 return buildInstr(TargetOpcode::G_SEXT, Res, Op);
490}
491
493 const SrcOp &Op,
494 std::optional<unsigned> Flags) {
495 return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
496}
497
498unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
499 const auto *TLI = getMF().getSubtarget().getTargetLowering();
500 switch (TLI->getBooleanContents(IsVec, IsFP)) {
502 return TargetOpcode::G_SEXT;
504 return TargetOpcode::G_ZEXT;
505 default:
506 return TargetOpcode::G_ANYEXT;
507 }
508}
509
511 const SrcOp &Op,
512 bool IsFP) {
513 unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
514 return buildInstr(ExtOp, Res, Op);
515}
516
518 const SrcOp &Op,
519 bool IsVector,
520 bool IsFP) {
521 const auto *TLI = getMF().getSubtarget().getTargetLowering();
522 switch (TLI->getBooleanContents(IsVector, IsFP)) {
524 return buildSExtInReg(Res, Op, 1);
526 return buildZExtInReg(Res, Op, 1);
528 return buildCopy(Res, Op);
529 }
530
531 llvm_unreachable("unexpected BooleanContent");
532}
533
535 const DstOp &Res,
536 const SrcOp &Op) {
537 assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
538 TargetOpcode::G_SEXT == ExtOpc) &&
539 "Expecting Extending Opc");
540 assert(Res.getLLTTy(*getMRI()).isScalar() ||
541 Res.getLLTTy(*getMRI()).isVector());
542 assert(Res.getLLTTy(*getMRI()).isScalar() ==
543 Op.getLLTTy(*getMRI()).isScalar());
544
545 unsigned Opcode = TargetOpcode::COPY;
546 if (Res.getLLTTy(*getMRI()).getSizeInBits() >
547 Op.getLLTTy(*getMRI()).getSizeInBits())
548 Opcode = ExtOpc;
549 else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
550 Op.getLLTTy(*getMRI()).getSizeInBits())
551 Opcode = TargetOpcode::G_TRUNC;
552 else
553 assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));
554
555 return buildInstr(Opcode, Res, Op);
556}
557
559 const SrcOp &Op) {
560 return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
561}
562
564 const SrcOp &Op) {
565 return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
566}
567
569 const SrcOp &Op) {
570 return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
571}
572
574 const SrcOp &Op,
575 int64_t ImmOp) {
576 LLT ResTy = Res.getLLTTy(*getMRI());
577 auto Mask = buildConstant(
578 ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
579 return buildAnd(Res, Op, Mask);
580}
581
583 const SrcOp &Src) {
584 LLT SrcTy = Src.getLLTTy(*getMRI());
585 LLT DstTy = Dst.getLLTTy(*getMRI());
586 if (SrcTy == DstTy)
587 return buildCopy(Dst, Src);
588
589 unsigned Opcode;
590 if (SrcTy.isPointer() && DstTy.isScalar())
591 Opcode = TargetOpcode::G_PTRTOINT;
592 else if (DstTy.isPointer() && SrcTy.isScalar())
593 Opcode = TargetOpcode::G_INTTOPTR;
594 else {
595 assert(!SrcTy.isPointer() && !DstTy.isPointer() && "n G_ADDRCAST yet");
596 Opcode = TargetOpcode::G_BITCAST;
597 }
598
599 return buildInstr(Opcode, Dst, Src);
600}
601
603 const SrcOp &Src,
604 uint64_t Index) {
605 LLT SrcTy = Src.getLLTTy(*getMRI());
606 LLT DstTy = Dst.getLLTTy(*getMRI());
607
608#ifndef NDEBUG
609 assert(SrcTy.isValid() && "invalid operand type");
610 assert(DstTy.isValid() && "invalid operand type");
611 assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
612 "extracting off end of register");
613#endif
614
615 if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
616 assert(Index == 0 && "insertion past the end of a register");
617 return buildCast(Dst, Src);
618 }
619
620 auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
621 Dst.addDefToMIB(*getMRI(), Extract);
622 Src.addSrcToMIB(Extract);
623 Extract.addImm(Index);
624 return Extract;
625}
626
628 return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
629}
630
632 ArrayRef<Register> Ops) {
633 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
634 // we need some temporary storage for the DstOp objects. Here we use a
635 // sufficiently large SmallVector to not go through the heap.
636 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
637 assert(TmpVec.size() > 1);
638 return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
639}
640
643 ArrayRef<Register> Ops) {
644 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
645 // we need some temporary storage for the DstOp objects. Here we use a
646 // sufficiently large SmallVector to not go through the heap.
647 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
648 assert(TmpVec.size() > 1);
649 return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
650}
651
654 std::initializer_list<SrcOp> Ops) {
655 assert(Ops.size() > 1);
656 return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
657}
658
659unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
660 ArrayRef<SrcOp> SrcOps) const {
661 if (DstOp.getLLTTy(*getMRI()).isVector()) {
662 if (SrcOps[0].getLLTTy(*getMRI()).isVector())
663 return TargetOpcode::G_CONCAT_VECTORS;
664 return TargetOpcode::G_BUILD_VECTOR;
665 }
666
667 return TargetOpcode::G_MERGE_VALUES;
668}
669
671 const SrcOp &Op) {
672 // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
673 // we need some temporary storage for the DstOp objects. Here we use a
674 // sufficiently large SmallVector to not go through the heap.
675 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
676 assert(TmpVec.size() > 1);
677 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
678}
679
681 const SrcOp &Op) {
682 unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
683 SmallVector<DstOp, 8> TmpVec(NumReg, Res);
684 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
685}
686
688 const SrcOp &Op) {
689 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
690 // we need some temporary storage for the DstOp objects. Here we use a
691 // sufficiently large SmallVector to not go through the heap.
692 SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
693 assert(TmpVec.size() > 1);
694 return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
695}
696
698 ArrayRef<Register> Ops) {
699 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
700 // we need some temporary storage for the DstOp objects. Here we use a
701 // sufficiently large SmallVector to not go through the heap.
702 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
703 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
704}
705
708 ArrayRef<APInt> Ops) {
709 SmallVector<SrcOp> TmpVec;
710 TmpVec.reserve(Ops.size());
711 LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
712 for (const auto &Op : Ops)
713 TmpVec.push_back(buildConstant(EltTy, Op));
714 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
715}
716
718 const SrcOp &Src) {
719 SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
720 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
721}
722
725 ArrayRef<Register> Ops) {
726 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
727 // we need some temporary storage for the DstOp objects. Here we use a
728 // sufficiently large SmallVector to not go through the heap.
729 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
730 if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
731 Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
732 return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
733 return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
734}
735
737 const SrcOp &Src) {
738 LLT DstTy = Res.getLLTTy(*getMRI());
739 assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
740 "Expected Src to match Dst elt ty");
741 auto UndefVec = buildUndef(DstTy);
742 auto Zero = buildConstant(LLT::scalar(64), 0);
743 auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
744 SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
745 return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
746}
747
749 const SrcOp &Src) {
750 assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
751 "Expected Src to match Dst elt ty");
752 return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
753}
754
756 const SrcOp &Src1,
757 const SrcOp &Src2,
758 ArrayRef<int> Mask) {
759 LLT DstTy = Res.getLLTTy(*getMRI());
760 LLT Src1Ty = Src1.getLLTTy(*getMRI());
761 LLT Src2Ty = Src2.getLLTTy(*getMRI());
762 assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
763 Mask.size());
764 assert(DstTy.getElementType() == Src1Ty.getElementType() &&
765 DstTy.getElementType() == Src2Ty.getElementType());
766 (void)DstTy;
767 (void)Src1Ty;
768 (void)Src2Ty;
769 ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
770 return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
771 .addShuffleMask(MaskAlloc);
772}
773
776 // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
777 // we need some temporary storage for the DstOp objects. Here we use a
778 // sufficiently large SmallVector to not go through the heap.
779 SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
780 return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
781}
782
784 const SrcOp &Src,
785 const SrcOp &Op,
786 unsigned Index) {
787 assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
788 Res.getLLTTy(*getMRI()).getSizeInBits() &&
789 "insertion past the end of a register");
790
791 if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
792 Op.getLLTTy(*getMRI()).getSizeInBits()) {
793 return buildCast(Res, Op);
794 }
795
796 return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
797}
798
800 unsigned MinElts) {
801
804 ConstantInt *CI = ConstantInt::get(IntN, MinElts);
805 return buildVScale(Res, *CI);
806}
807
809 const ConstantInt &MinElts) {
810 auto VScale = buildInstr(TargetOpcode::G_VSCALE);
811 VScale->setDebugLoc(DebugLoc());
812 Res.addDefToMIB(*getMRI(), VScale);
813 VScale.addCImm(&MinElts);
814 return VScale;
815}
816
818 const APInt &MinElts) {
819 ConstantInt *CI =
820 ConstantInt::get(getMF().getFunction().getContext(), MinElts);
821 return buildVScale(Res, *CI);
822}
823
824static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
825 if (HasSideEffects && IsConvergent)
826 return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
827 if (HasSideEffects)
828 return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
829 if (IsConvergent)
830 return TargetOpcode::G_INTRINSIC_CONVERGENT;
831 return TargetOpcode::G_INTRINSIC;
832}
833
836 ArrayRef<Register> ResultRegs,
837 bool HasSideEffects, bool isConvergent) {
838 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
839 for (unsigned ResultReg : ResultRegs)
840 MIB.addDef(ResultReg);
841 MIB.addIntrinsicID(ID);
842 return MIB;
843}
844
847 ArrayRef<Register> ResultRegs) {
848 auto Attrs = Intrinsic::getAttributes(getContext(), ID);
849 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
850 bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
851 return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
852}
853
856 bool HasSideEffects,
857 bool isConvergent) {
858 auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
859 for (DstOp Result : Results)
860 Result.addDefToMIB(*getMRI(), MIB);
861 MIB.addIntrinsicID(ID);
862 return MIB;
863}
864
867 auto Attrs = Intrinsic::getAttributes(getContext(), ID);
868 bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
869 bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
870 return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
875 std::optional<unsigned> Flags) {
876 return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
877}
878
881 std::optional<unsigned> Flags) {
882 return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
884
886 const DstOp &Res,
887 const SrcOp &Op0,
888 const SrcOp &Op1) {
889 return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
890}
891
893 const DstOp &Res,
894 const SrcOp &Op0,
895 const SrcOp &Op1,
896 std::optional<unsigned> Flags) {
897
898 return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
899}
900
903 const SrcOp &Op0, const SrcOp &Op1,
904 std::optional<unsigned> Flags) {
905
906 return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
907}
908
910 const SrcOp &Src0,
911 const SrcOp &Src1,
912 unsigned Idx) {
913 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
914 {Src0, Src1, uint64_t(Idx)});
915}
916
918 const SrcOp &Src,
919 unsigned Idx) {
920 return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
921 {Src, uint64_t(Idx)});
922}
923
926 const SrcOp &Elt, const SrcOp &Idx) {
927 return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
928}
929
932 const SrcOp &Idx) {
933 return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
934}
935
937 const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
938 const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
939#ifndef NDEBUG
940 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
941 LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
942 LLT AddrTy = Addr.getLLTTy(*getMRI());
943 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
944 LLT NewValTy = NewVal.getLLTTy(*getMRI());
945 assert(OldValResTy.isScalar() && "invalid operand type");
946 assert(SuccessResTy.isScalar() && "invalid operand type");
947 assert(AddrTy.isPointer() && "invalid operand type");
948 assert(CmpValTy.isValid() && "invalid operand type");
949 assert(NewValTy.isValid() && "invalid operand type");
950 assert(OldValResTy == CmpValTy && "type mismatch");
951 assert(OldValResTy == NewValTy && "type mismatch");
952#endif
953
954 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
955 OldValRes.addDefToMIB(*getMRI(), MIB);
956 SuccessRes.addDefToMIB(*getMRI(), MIB);
957 Addr.addSrcToMIB(MIB);
958 CmpVal.addSrcToMIB(MIB);
959 NewVal.addSrcToMIB(MIB);
960 MIB.addMemOperand(&MMO);
961 return MIB;
962}
963
966 const SrcOp &CmpVal, const SrcOp &NewVal,
967 MachineMemOperand &MMO) {
968#ifndef NDEBUG
969 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
970 LLT AddrTy = Addr.getLLTTy(*getMRI());
971 LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
972 LLT NewValTy = NewVal.getLLTTy(*getMRI());
973 assert(OldValResTy.isScalar() && "invalid operand type");
974 assert(AddrTy.isPointer() && "invalid operand type");
975 assert(CmpValTy.isValid() && "invalid operand type");
976 assert(NewValTy.isValid() && "invalid operand type");
977 assert(OldValResTy == CmpValTy && "type mismatch");
978 assert(OldValResTy == NewValTy && "type mismatch");
979#endif
980
981 auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
982 OldValRes.addDefToMIB(*getMRI(), MIB);
983 Addr.addSrcToMIB(MIB);
984 CmpVal.addSrcToMIB(MIB);
985 NewVal.addSrcToMIB(MIB);
986 MIB.addMemOperand(&MMO);
987 return MIB;
988}
989
991 unsigned Opcode, const DstOp &OldValRes,
992 const SrcOp &Addr, const SrcOp &Val,
993 MachineMemOperand &MMO) {
994
995#ifndef NDEBUG
996 LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
997 LLT AddrTy = Addr.getLLTTy(*getMRI());
998 LLT ValTy = Val.getLLTTy(*getMRI());
999 assert(OldValResTy.isScalar() && "invalid operand type");
1000 assert(AddrTy.isPointer() && "invalid operand type");
1001 assert(ValTy.isValid() && "invalid operand type");
1002 assert(OldValResTy == ValTy && "type mismatch");
1003 assert(MMO.isAtomic() && "not atomic mem operand");
1004#endif
1005
1006 auto MIB = buildInstr(Opcode);
1007 OldValRes.addDefToMIB(*getMRI(), MIB);
1008 Addr.addSrcToMIB(MIB);
1009 Val.addSrcToMIB(MIB);
1010 MIB.addMemOperand(&MMO);
1011 return MIB;
1012}
1013
1016 Register Val, MachineMemOperand &MMO) {
1017 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
1018 MMO);
1019}
1022 Register Val, MachineMemOperand &MMO) {
1023 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
1024 MMO);
1025}
1028 Register Val, MachineMemOperand &MMO) {
1029 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
1030 MMO);
1031}
1034 Register Val, MachineMemOperand &MMO) {
1035 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
1036 MMO);
1037}
1040 Register Val, MachineMemOperand &MMO) {
1041 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
1042 MMO);
1043}
1045 Register Addr,
1046 Register Val,
1047 MachineMemOperand &MMO) {
1048 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
1049 MMO);
1050}
1053 Register Val, MachineMemOperand &MMO) {
1054 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
1055 MMO);
1056}
1059 Register Val, MachineMemOperand &MMO) {
1060 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
1061 MMO);
1062}
1065 Register Val, MachineMemOperand &MMO) {
1066 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
1067 MMO);
1068}
1071 Register Val, MachineMemOperand &MMO) {
1072 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
1073 MMO);
1074}
1077 Register Val, MachineMemOperand &MMO) {
1078 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
1079 MMO);
1080}
1081
1084 const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1085 MachineMemOperand &MMO) {
1086 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
1087 MMO);
1088}
1089
1091MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
1092 MachineMemOperand &MMO) {
1093 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
1094 MMO);
1095}
1096
1099 const SrcOp &Val, MachineMemOperand &MMO) {
1100 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
1101 MMO);
1102}
1103
1106 const SrcOp &Val, MachineMemOperand &MMO) {
1107 return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
1108 MMO);
1109}
1110
1112MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1113 return buildInstr(TargetOpcode::G_FENCE)
1114 .addImm(Ordering)
1115 .addImm(Scope);
1116}
1117
1119 unsigned RW,
1120 unsigned Locality,
1121 unsigned CacheType,
1122 MachineMemOperand &MMO) {
1123 auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1124 Addr.addSrcToMIB(MIB);
1125 MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1126 MIB.addMemOperand(&MMO);
1127 return MIB;
1128}
1129
1132#ifndef NDEBUG
1133 assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1134#endif
1135
1136 return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1137}
1138
1139void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1140 bool IsExtend) {
1141#ifndef NDEBUG
1142 if (DstTy.isVector()) {
1143 assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1144 assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1145 "different number of elements in a trunc/ext");
1146 } else
1147 assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1148
1149 if (IsExtend)
1151 "invalid narrowing extend");
1152 else
1154 "invalid widening trunc");
1155#endif
1156}
1157
1158void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1159 const LLT Op0Ty, const LLT Op1Ty) {
1160#ifndef NDEBUG
1161 assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1162 "invalid operand type");
1163 assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1164 if (ResTy.isScalar() || ResTy.isPointer())
1165 assert(TstTy.isScalar() && "type mismatch");
1166 else
1167 assert((TstTy.isScalar() ||
1168 (TstTy.isVector() &&
1169 TstTy.getElementCount() == Op0Ty.getElementCount())) &&
1170 "type mismatch");
1171#endif
1172}
1173
1176 ArrayRef<SrcOp> SrcOps,
1177 std::optional<unsigned> Flags) {
1178 switch (Opc) {
1179 default:
1180 break;
1181 case TargetOpcode::G_SELECT: {
1182 assert(DstOps.size() == 1 && "Invalid select");
1183 assert(SrcOps.size() == 3 && "Invalid select");
1185 DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1186 SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1187 break;
1188 }
1189 case TargetOpcode::G_FNEG:
1190 case TargetOpcode::G_ABS:
1191 // All these are unary ops.
1192 assert(DstOps.size() == 1 && "Invalid Dst");
1193 assert(SrcOps.size() == 1 && "Invalid Srcs");
1194 validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1195 SrcOps[0].getLLTTy(*getMRI()));
1196 break;
1197 case TargetOpcode::G_ADD:
1198 case TargetOpcode::G_AND:
1199 case TargetOpcode::G_MUL:
1200 case TargetOpcode::G_OR:
1201 case TargetOpcode::G_SUB:
1202 case TargetOpcode::G_XOR:
1203 case TargetOpcode::G_UDIV:
1204 case TargetOpcode::G_SDIV:
1205 case TargetOpcode::G_UREM:
1206 case TargetOpcode::G_SREM:
1207 case TargetOpcode::G_SMIN:
1208 case TargetOpcode::G_SMAX:
1209 case TargetOpcode::G_UMIN:
1210 case TargetOpcode::G_UMAX:
1211 case TargetOpcode::G_UADDSAT:
1212 case TargetOpcode::G_SADDSAT:
1213 case TargetOpcode::G_USUBSAT:
1214 case TargetOpcode::G_SSUBSAT: {
1215 // All these are binary ops.
1216 assert(DstOps.size() == 1 && "Invalid Dst");
1217 assert(SrcOps.size() == 2 && "Invalid Srcs");
1218 validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1219 SrcOps[0].getLLTTy(*getMRI()),
1220 SrcOps[1].getLLTTy(*getMRI()));
1221 break;
1222 }
1223 case TargetOpcode::G_SHL:
1224 case TargetOpcode::G_ASHR:
1225 case TargetOpcode::G_LSHR:
1226 case TargetOpcode::G_USHLSAT:
1227 case TargetOpcode::G_SSHLSAT: {
1228 assert(DstOps.size() == 1 && "Invalid Dst");
1229 assert(SrcOps.size() == 2 && "Invalid Srcs");
1230 validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1231 SrcOps[0].getLLTTy(*getMRI()),
1232 SrcOps[1].getLLTTy(*getMRI()));
1233 break;
1234 }
1235 case TargetOpcode::G_SEXT:
1236 case TargetOpcode::G_ZEXT:
1237 case TargetOpcode::G_ANYEXT:
1238 assert(DstOps.size() == 1 && "Invalid Dst");
1239 assert(SrcOps.size() == 1 && "Invalid Srcs");
1240 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1241 SrcOps[0].getLLTTy(*getMRI()), true);
1242 break;
1243 case TargetOpcode::G_TRUNC:
1244 case TargetOpcode::G_FPTRUNC: {
1245 assert(DstOps.size() == 1 && "Invalid Dst");
1246 assert(SrcOps.size() == 1 && "Invalid Srcs");
1247 validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
1248 SrcOps[0].getLLTTy(*getMRI()), false);
1249 break;
1250 }
1251 case TargetOpcode::G_BITCAST: {
1252 assert(DstOps.size() == 1 && "Invalid Dst");
1253 assert(SrcOps.size() == 1 && "Invalid Srcs");
1254 assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1255 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
1256 break;
1257 }
1258 case TargetOpcode::COPY:
1259 assert(DstOps.size() == 1 && "Invalid Dst");
1260 // If the caller wants to add a subreg source it has to be done separately
1261 // so we may not have any SrcOps at this point yet.
1262 break;
1263 case TargetOpcode::G_FCMP:
1264 case TargetOpcode::G_ICMP: {
1265 assert(DstOps.size() == 1 && "Invalid Dst Operands");
1266 assert(SrcOps.size() == 3 && "Invalid Src Operands");
1267 // For F/ICMP, the first src operand is the predicate, followed by
1268 // the two comparands.
1269 assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
1270 "Expecting predicate");
1271 assert([&]() -> bool {
1272 CmpInst::Predicate Pred = SrcOps[0].getPredicate();
1273 return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
1274 : CmpInst::isFPPredicate(Pred);
1275 }() && "Invalid predicate");
1276 assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1277 "Type mismatch");
1278 assert([&]() -> bool {
1279 LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
1280 LLT DstTy = DstOps[0].getLLTTy(*getMRI());
1281 if (Op0Ty.isScalar() || Op0Ty.isPointer())
1282 return DstTy.isScalar();
1283 else
1284 return DstTy.isVector() &&
1285 DstTy.getElementCount() == Op0Ty.getElementCount();
1286 }() && "Type Mismatch");
1287 break;
1288 }
1289 case TargetOpcode::G_UNMERGE_VALUES: {
1290 assert(!DstOps.empty() && "Invalid trivial sequence");
1291 assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
1292 assert(llvm::all_of(DstOps,
1293 [&, this](const DstOp &Op) {
1294 return Op.getLLTTy(*getMRI()) ==
1295 DstOps[0].getLLTTy(*getMRI());
1296 }) &&
1297 "type mismatch in output list");
1298 assert((TypeSize::ScalarTy)DstOps.size() *
1299 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1300 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1301 "input operands do not cover output register");
1302 break;
1303 }
1304 case TargetOpcode::G_MERGE_VALUES: {
1305 assert(SrcOps.size() >= 2 && "invalid trivial sequence");
1306 assert(DstOps.size() == 1 && "Invalid Dst");
1307 assert(llvm::all_of(SrcOps,
1308 [&, this](const SrcOp &Op) {
1309 return Op.getLLTTy(*getMRI()) ==
1310 SrcOps[0].getLLTTy(*getMRI());
1311 }) &&
1312 "type mismatch in input list");
1313 assert((TypeSize::ScalarTy)SrcOps.size() *
1314 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1315 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1316 "input operands do not cover output register");
1317 assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
1318 "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
1319 break;
1320 }
1321 case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
1322 assert(DstOps.size() == 1 && "Invalid Dst size");
1323 assert(SrcOps.size() == 2 && "Invalid Src size");
1324 assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1325 assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
1326 DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
1327 "Invalid operand type");
1328 assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
1329 assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
1330 DstOps[0].getLLTTy(*getMRI()) &&
1331 "Type mismatch");
1332 break;
1333 }
1334 case TargetOpcode::G_INSERT_VECTOR_ELT: {
1335 assert(DstOps.size() == 1 && "Invalid dst size");
1336 assert(SrcOps.size() == 3 && "Invalid src size");
1337 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1338 SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
1339 assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
1340 SrcOps[1].getLLTTy(*getMRI()) &&
1341 "Type mismatch");
1342 assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
1343 assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
1344 SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
1345 "Type mismatch");
1346 break;
1347 }
1348 case TargetOpcode::G_BUILD_VECTOR: {
1349 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1350 "Must have at least 2 operands");
1351 assert(DstOps.size() == 1 && "Invalid DstOps");
1352 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1353 "Res type must be a vector");
1354 assert(llvm::all_of(SrcOps,
1355 [&, this](const SrcOp &Op) {
1356 return Op.getLLTTy(*getMRI()) ==
1357 SrcOps[0].getLLTTy(*getMRI());
1358 }) &&
1359 "type mismatch in input list");
1360 assert((TypeSize::ScalarTy)SrcOps.size() *
1361 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1362 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1363 "input scalars do not exactly cover the output vector register");
1364 break;
1365 }
1366 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1367 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1368 "Must have at least 2 operands");
1369 assert(DstOps.size() == 1 && "Invalid DstOps");
1370 assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
1371 "Res type must be a vector");
1372 assert(llvm::all_of(SrcOps,
1373 [&, this](const SrcOp &Op) {
1374 return Op.getLLTTy(*getMRI()) ==
1375 SrcOps[0].getLLTTy(*getMRI());
1376 }) &&
1377 "type mismatch in input list");
1378 break;
1379 }
1380 case TargetOpcode::G_CONCAT_VECTORS: {
1381 assert(DstOps.size() == 1 && "Invalid DstOps");
1382 assert((!SrcOps.empty() || SrcOps.size() < 2) &&
1383 "Must have at least 2 operands");
1384 assert(llvm::all_of(SrcOps,
1385 [&, this](const SrcOp &Op) {
1386 return (Op.getLLTTy(*getMRI()).isVector() &&
1387 Op.getLLTTy(*getMRI()) ==
1388 SrcOps[0].getLLTTy(*getMRI()));
1389 }) &&
1390 "type mismatch in input list");
1391 assert((TypeSize::ScalarTy)SrcOps.size() *
1392 SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
1393 DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
1394 "input vectors do not exactly cover the output vector register");
1395 break;
1396 }
1397 case TargetOpcode::G_UADDE: {
1398 assert(DstOps.size() == 2 && "Invalid no of dst operands");
1399 assert(SrcOps.size() == 3 && "Invalid no of src operands");
1400 assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1401 assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
1402 (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
1403 "Invalid operand");
1404 assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
1405 assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
1406 "type mismatch");
1407 break;
1408 }
1409 }
1410
1411 auto MIB = buildInstr(Opc);
1412 for (const DstOp &Op : DstOps)
1413 Op.addDefToMIB(*getMRI(), MIB);
1414 for (const SrcOp &Op : SrcOps)
1415 Op.addSrcToMIB(MIB);
1416 if (Flags)
1417 MIB->setFlags(*Flags);
1418 return MIB;
1419}
Function Alias Analysis Results
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Addr
uint64_t Size
static Function * getFunction(Constant *C)
Definition: Evaluator.cpp:236
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent)
This file declares the MachineIRBuilder class.
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static unsigned getScalarSizeInBits(Type *Ty)
static SymbolRef::Type getType(const Symbol *Sym)
Definition: TapiFile.cpp:40
This file describes how to lower LLVM code to machine code.
const fltSemantics & getSemantics() const
Definition: APFloat.h:1303
Class for arbitrary precision integers.
Definition: APInt.h:76
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition: APInt.h:284
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
iterator end() const
Definition: ArrayRef.h:154
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:165
iterator begin() const
Definition: ArrayRef.h:153
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:160
The address of a basic block.
Definition: Constants.h:889
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition: InstrTypes.h:993
bool isFPPredicate() const
Definition: InstrTypes.h:1122
bool isIntPredicate() const
Definition: InstrTypes.h:1123
ConstantFP - Floating Point Values [float, double].
Definition: Constants.h:268
const APFloat & getValueAPF() const
Definition: Constants.h:311
This is the shared class of boolean and integer constants.
Definition: Constants.h:80
unsigned getBitWidth() const
getBitWidth - Return the scalar bitwidth of this constant.
Definition: Constants.h:148
This is an important base class in LLVM.
Definition: Constant.h:41
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
void addDefToMIB(MachineRegisterInfo &MRI, MachineInstrBuilder &MIB) const
LLT getLLTTy(const MachineRegisterInfo &MRI) const
Register getReg() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:358
PointerType * getType() const
Global values are always pointers.
Definition: GlobalValue.h:294
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
constexpr bool isScalableVector() const
Returns true if the LLT is a scalable vector.
Definition: LowLevelType.h:182
constexpr unsigned getScalarSizeInBits() const
Definition: LowLevelType.h:267
constexpr bool isScalar() const
Definition: LowLevelType.h:146
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
constexpr bool isValid() const
Definition: LowLevelType.h:145
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
Definition: LowLevelType.h:159
constexpr bool isVector() const
Definition: LowLevelType.h:148
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
Definition: LowLevelType.h:193
constexpr bool isPointer() const
Definition: LowLevelType.h:149
constexpr LLT getElementType() const
Returns the vector's element type. Only valid for vector types.
Definition: LowLevelType.h:290
constexpr ElementCount getElementCount() const
Definition: LowLevelType.h:184
constexpr bool isFixedVector() const
Returns true if the LLT is a fixed vector.
Definition: LowLevelType.h:178
constexpr LLT getScalarType() const
Definition: LowLevelType.h:208
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:63
Metadata node.
Definition: Metadata.h:1067
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
MachineInstrBundleIterator< MachineInstr > iterator
ArrayRef< int > allocateShuffleMask(ArrayRef< int > Mask)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
MachineInstrBuilder buildLoadFromOffset(const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO, int64_t Offset)
Helper to create a load from a constant offset given a base address.
MachineInstrBuilder buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMIN Addr, Val, MMO.
MachineInstrBuilder buildBoolExtInReg(const DstOp &Res, const SrcOp &Op, bool IsVector, bool IsFP)
MachineInstrBuilder insertInstr(MachineInstrBuilder MIB)
Insert an existing instruction at the insertion point.
MachineInstrBuilder buildAtomicRMWXor(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XOR Addr, Val, MMO.
MachineInstrBuilder buildGlobalValue(const DstOp &Res, const GlobalValue *GV)
Build and insert Res = G_GLOBAL_VALUE GV.
MachineInstrBuilder buildBr(MachineBasicBlock &Dest)
Build and insert G_BR Dest.
std::optional< MachineInstrBuilder > materializePtrAdd(Register &Res, Register Op0, const LLT ValueTy, uint64_t Value)
Materialize and insert Res = G_PTR_ADD Op0, (G_CONSTANT Value)
LLVMContext & getContext() const
MachineInstrBuilder buildUndef(const DstOp &Res)
Build and insert Res = IMPLICIT_DEF.
MachineInstrBuilder buildConstantPool(const DstOp &Res, unsigned Idx)
Build and insert Res = G_CONSTANT_POOL Idx.
MachineInstrBuilder buildJumpTable(const LLT PtrTy, unsigned JTI)
Build and insert Res = G_JUMP_TABLE JTI.
MachineInstrBuilder buildBoolExt(const DstOp &Res, const SrcOp &Op, bool IsFP)
MachineInstrBuilder buildUnmerge(ArrayRef< LLT > Res, const SrcOp &Op)
Build and insert Res0, ... = G_UNMERGE_VALUES Op.
MachineInstrBuilder buildFence(unsigned Ordering, unsigned Scope)
Build and insert G_FENCE Ordering, Scope.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildAtomicRMWAnd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_AND Addr, Val, MMO.
MachineInstrBuilder buildZExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and inserts Res = G_AND Op, LowBitsSet(ImmOp) Since there is no G_ZEXT_INREG like G_SEXT_INREG,...
MachineInstrBuilder buildAtomicRMWMin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MIN Addr, Val, MMO.
MachineInstrBuilder buildExtract(const DstOp &Res, const SrcOp &Src, uint64_t Index)
Build and insert Res0, ... = G_EXTRACT Src, Idx0.
MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0, const SrcOp &Src1, unsigned Index)
Build and insert Res = G_INSERT_SUBVECTOR Src0, Src1, Idx.
MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0, const SrcOp &Src1)
Build and insert Res = G_AND Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildCast(const DstOp &Dst, const SrcOp &Src)
Build and insert an appropriate cast between two registers of equal size.
const TargetInstrInfo & getTII()
MachineInstrBuilder buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FADD Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWNand(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_NAND Addr, Val, MMO.
MachineInstrBuilder buildAnyExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildSExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildSExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_SEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildShuffleSplat(const DstOp &Res, const SrcOp &Src)
Build and insert a vector splat of a scalar Src using a G_INSERT_VECTOR_ELT and G_SHUFFLE_VECTOR idio...
MachineInstrBuilder buildZExt(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_ZEXT Op.
MachineInstrBuilder buildConcatVectors(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_CONCAT_VECTORS Op0, ...
MachineInstrBuilder buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_<Opcode> Addr, Val, MMO.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MDNode * getPCSections()
Get the current instruction's PC sections metadata.
MachineInstrBuilder buildVScale(const DstOp &Res, unsigned MinElts)
Build and insert Res = G_VSCALE MinElts.
MachineInstrBuilder buildSplatBuildVector(const DstOp &Res, const SrcOp &Src)
Build and insert Res = G_BUILD_VECTOR with Src replicated to fill the number of elements.
MachineInstrBuilder buildIndirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in me...
unsigned getBoolExtOp(bool IsVec, bool IsFP) const
MachineInstrBuilder buildAtomicRMWUmax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMAX Addr, Val, MMO.
MachineInstrBuilder buildBuildVector(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ...
MachineInstrBuilder buildConstDbgValue(const Constant &C, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instructions specifying that Variable is given by C (suitably modified b...
void recordInsertion(MachineInstr *InsertedInstr) const
MachineInstrBuilder buildBrCond(const SrcOp &Tst, MachineBasicBlock &Dest)
Build and insert G_BRCOND Tst, Dest.
MachineInstrBuilder buildMergeLikeInstr(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ... or Res = G_BUILD_VECTOR Op0, ... or Res = G_CONCAT_VEC...
MachineInstrBuilder buildExtractVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Idx)
Build and insert Res = G_EXTRACT_VECTOR_ELT Val, Idx.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildPtrAdd(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_PTR_ADD Op0, Op1.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildBuildVectorTrunc(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_BUILD_VECTOR_TRUNC Op0, ...
virtual MachineInstrBuilder buildFConstant(const DstOp &Res, const ConstantFP &Val)
Build and insert Res = G_FCONSTANT Val.
MachineInstrBuilder buildStore(const SrcOp &Val, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert G_STORE Val, Addr, MMO.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPadVectorWithUndefElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, .....
void validateSelectOp(const LLT ResTy, const LLT TstTy, const LLT Op0Ty, const LLT Op1Ty)
MachineInstrBuilder buildFrameIndex(const DstOp &Res, int Idx)
Build and insert Res = G_FRAME_INDEX Idx.
MachineInstrBuilder buildDirectDbgValue(Register Reg, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in Re...
const DebugLoc & getDL()
Getter for DebugLoc.
MachineInstrBuilder buildBuildVectorConstant(const DstOp &Res, ArrayRef< APInt > Ops)
Build and insert Res = G_BUILD_VECTOR Op0, ... where each OpN is built with G_CONSTANT.
MachineInstrBuilder buildAtomicRMWUmin(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_UMIN Addr, Val, MMO.
void validateBinaryOp(const LLT Res, const LLT Op0, const LLT Op1)
void validateShiftOp(const LLT Res, const LLT Op0, const LLT Op1)
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildDbgLabel(const MDNode *Label)
Build and insert a DBG_LABEL instruction specifying that Label is given.
MachineInstrBuilder buildBrJT(Register TablePtr, unsigned JTI, Register IndexReg)
Build and insert G_BRJT TablePtr, JTI, IndexReg.
MachineInstrBuilder buildInsert(const DstOp &Res, const SrcOp &Src, const SrcOp &Op, unsigned Index)
MachineInstrBuilder buildDynStackAlloc(const DstOp &Res, const SrcOp &Size, Align Alignment)
Build and insert Res = G_DYN_STACKALLOC Size, Align.
MachineInstrBuilder buildFIDbgValue(int FI, const MDNode *Variable, const MDNode *Expr)
Build and insert a DBG_VALUE instruction expressing the fact that the associated Variable lives in the stack slot specified by FI (suitably modified by Expr).
MachineInstrBuilder buildExtOrTrunc(unsigned ExtOpc, const DstOp &Res, const SrcOp &Op)
Build and insert Res = ExtOpc Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes of Res and Op.
MachineInstrBuilder buildAtomicRMWSub(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_SUB Addr, Val, MMO.
MachineInstrBuilder buildMergeValues(const DstOp &Res, ArrayRef< Register > Ops)
Build and insert Res = G_MERGE_VALUES Op0, ...
MachineInstrBuilder buildTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_TRUNC Op.
MachineInstrBuilder buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FMAX Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWOr(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_OR Addr, Val, MMO.
const MachineBasicBlock & getMBB() const
Getter for the basic block we currently build.
MachineInstrBuilder buildInsertVectorElement(const DstOp &Res, const SrcOp &Val, const SrcOp &Elt, const SrcOp &Idx)
Build and insert Res = G_INSERT_VECTOR_ELT Val, Elt, Idx.
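The extract/insert vector element builders take the index as a SrcOp, so it is usually materialized with buildConstant first. A minimal, hedged sketch building on the earlier examples; the names are hypothetical and the s64 index width is just an assumption (it is target-dependent in practice).

// Hypothetical helper: read element 0 of a <4 x s32> vector and write
// Elt back into the same lane.
static void swapLaneZero(MachineIRBuilder &B, Register Vec, Register Elt) {
  const LLT V4S32 = LLT::fixed_vector(4, 32);
  const LLT S32 = LLT::scalar(32);
  auto Idx = B.buildConstant(LLT::scalar(64), 0);
  auto Old = B.buildExtractVectorElement(S32, Vec, Idx);       // G_EXTRACT_VECTOR_ELT
  auto New = B.buildInsertVectorElement(V4S32, Vec, Elt, Idx); // G_INSERT_VECTOR_ELT
  (void)Old;
  (void)New;
}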
MachineInstrBuilder buildAnyExt(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ANYEXT Op.
MachineInstrBuilder buildAtomicCmpXchgWithSuccess(const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def>, SuccessRes<def> = G_ATOMIC_CMPXCHG_WITH_SUCCESS Addr, CmpVal, NewVal, MMO.
MachineInstrBuilder buildDeleteTrailingVectorElements(const DstOp &Res, const SrcOp &Op0)
Build and insert a, b, ..., x, y, z = G_UNMERGE_VALUES Op0 Res = G_BUILD_VECTOR a, b, ..., x.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildAtomicRMWAdd(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_ADD Addr, Val, MMO.
MachineInstrBuilder buildFPTrunc(const DstOp &Res, const SrcOp &Op, std::optional< unsigned > Flags=std::nullopt)
Build and insert Res = G_FPTRUNC Op.
MachineInstrBuilder buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMIC_CMPXCHG Addr, CmpVal, NewVal, MMO.
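For the atomic builders, the ordering lives on the MachineMemOperand rather than on the instruction itself. Below is a hedged sketch of a sequentially consistent 32-bit compare-and-swap, again with hypothetical names; the long getMachineMemOperand overload used here (AA info, ranges, sync scope, success/failure orderings) has defaulted parameters that are spelled out only for clarity.

// Hypothetical helper: OldVal = G_ATOMIC_CMPXCHG Addr, Cmp, New with a
// seq_cst MachineMemOperand.
static Register cmpXchgSeqCst(MachineIRBuilder &B, Register Addr, Register Cmp,
                              Register New) {
  const LLT S32 = LLT::scalar(32);
  MachineMemOperand *MMO = B.getMF().getMachineMemOperand(
      MachinePointerInfo(),
      MachineMemOperand::MOLoad | MachineMemOperand::MOStore, S32, Align(4),
      AAMDNodes(), /*Ranges=*/nullptr, SyncScope::System,
      AtomicOrdering::SequentiallyConsistent,
      AtomicOrdering::SequentiallyConsistent);
  return B.buildAtomicCmpXchg(S32, Addr, Cmp, New, *MMO).getReg(0);
}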
MachineInstrBuilder buildShuffleVector(const DstOp &Res, const SrcOp &Src1, const SrcOp &Src2, ArrayRef< int > Mask)
Build and insert Res = G_SHUFFLE_VECTOR Src1, Src2, Mask.
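A tiny sketch of buildShuffleVector, assuming two <4 x s32> inputs and hypothetical names; mask indices 0-3 select from the first source and 4-7 from the second.

// Hypothetical helper: interleave the low halves of two <4 x s32> vectors.
static Register interleaveLow(MachineIRBuilder &B, Register A, Register C) {
  const LLT V4S32 = LLT::fixed_vector(4, 32);
  int Mask[] = {0, 4, 1, 5};
  return B.buildShuffleVector(V4S32, A, C, Mask).getReg(0);
}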
void validateTruncExt(const LLT Dst, const LLT Src, bool IsExtend)
MachineInstrBuilder buildInstrNoInsert(unsigned Opcode)
Build but don't insert <empty> = Opcode <empty>.
MachineInstrBuilder buildPtrMask(const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert Res = G_PTRMASK Op0, Op1.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
void validateUnaryOp(const LLT Res, const LLT Op0)
MachineInstrBuilder buildBlockAddress(Register Res, const BlockAddress *BA)
Build and insert Res = G_BLOCK_ADDR BA.
MDNode * getMMRAMetadata()
Get the current instruction's MMRA metadata.
MachineInstrBuilder buildAtomicRMWMax(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_MAX Addr, Val, MMO.
MachineInstrBuilder buildPrefetch(const SrcOp &Addr, unsigned RW, unsigned Locality, unsigned CacheType, MachineMemOperand &MMO)
Build and insert G_PREFETCH Addr, RW, Locality, CacheType.
MachineInstrBuilder buildExtractSubvector(const DstOp &Res, const SrcOp &Src, unsigned Index)
Build and insert Res = G_EXTRACT_SUBVECTOR Src, Index.
MachineInstrBuilder buildBrIndirect(Register Tgt)
Build and insert G_BRINDIRECT Tgt.
MachineInstrBuilder buildSplatVector(const DstOp &Res, const SrcOp &Val)
Build and insert Res = G_SPLAT_VECTOR Val.
MachineInstrBuilder buildLoadInstr(unsigned Opcode, const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = <opcode> Addr, MMO.
void setMF(MachineFunction &MF)
MachineInstrBuilder buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_FSUB Addr, Val, MMO.
MachineInstrBuilder buildAtomicRMWXchg(Register OldValRes, Register Addr, Register Val, MachineMemOperand &MMO)
Build and insert OldValRes<def> = G_ATOMICRMW_XCHG Addr, Val, MMO.
MachineInstrBuilder buildMaskLowPtrBits(const DstOp &Res, const SrcOp &Op0, uint32_t NumBits)
Build and insert Res = G_PTRMASK Op0, G_CONSTANT ~((1 << NumBits) - 1), i.e. clear the low NumBits bits of Op0.
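Clearing the low bits rounds a pointer down to a 2^NumBits-byte boundary. A hedged sketch, with a hypothetical name and a 64-bit address space 0 assumed:

// Hypothetical helper: align Ptr down to a 16-byte boundary via G_PTRMASK.
static Register alignDown16(MachineIRBuilder &B, Register Ptr) {
  const LLT P0 = LLT::pointer(0, 64);
  return B.buildMaskLowPtrBits(P0, Ptr, /*NumBits=*/4).getReg(0);
}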
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
MachineInstrBuilder buildFCmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_FCMP Pred, Op0, Op1.
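buildFCmp pairs naturally with buildBrCond (listed earlier) when lowering a conditional branch. A minimal sketch; the s1 condition type and the names are assumptions.

#include "llvm/IR/InstrTypes.h" // CmpInst::Predicate

// Hypothetical helper: branch to TrueBB when LHS < RHS (ordered float compare).
static void brIfLess(MachineIRBuilder &B, Register LHS, Register RHS,
                     MachineBasicBlock &TrueBB) {
  auto Cond = B.buildFCmp(CmpInst::FCMP_OLT, LLT::scalar(1), LHS, RHS);
  B.buildBrCond(Cond, TrueBB); // G_BRCOND Cond, TrueBB
}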
MachineInstrBuilder buildSExtInReg(const DstOp &Res, const SrcOp &Op, int64_t ImmOp)
Build and insert Res = G_SEXT_INREG Op, ImmOp.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCImm(const ConstantInt *Val) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addBlockAddress(const BlockAddress *BA, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addFPImm(const ConstantFP *Val) const
const MachineInstrBuilder & addJumpTableIndex(unsigned Idx, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher, false otherwise.
Flags
Flags values. These may be or'd together.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
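createGenericVirtualRegister is how a pre-named destination register is made before handing it to one of the build* methods, for example buildCopy. A hedged sketch with hypothetical names:

#include "llvm/CodeGen/MachineRegisterInfo.h"

// Hypothetical helper: create a named generic vreg and COPY Src into it.
static Register copyToNamedVReg(MachineIRBuilder &B, Register Src) {
  MachineRegisterInfo &MRI = *B.getMRI();
  Register Dst = MRI.createGenericVirtualRegister(LLT::scalar(32), "tmp");
  B.buildCopy(Dst, Src);
  return Dst;
}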
unsigned getAddressSpace() const
Return the address space of the Pointer type.
Definition: DerivedTypes.h:679
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
size_t size() const
Definition: SmallVector.h:91
void reserve(size_type N)
Definition: SmallVector.h:676
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
LLT getLLTTy(const MachineRegisterInfo &MRI) const
void addSrcToMIB(MachineInstrBuilder &MIB) const
Register getReg() const
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetLowering * getTargetLowering() const
LLVM Value Representation.
Definition: Value.h:74
static constexpr bool isKnownLT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:215
static constexpr bool isKnownGT(const FixedOrScalableQuantity &LHS, const FixedOrScalableQuantity &RHS)
Definition: TypeSize.h:222
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
Definition: CallingConv.h:34
AttributeList getAttributes(LLVMContext &C, ID id)
Return the attributes for an intrinsic.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:456
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1722
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
DWARFExpression::Operation Op
APFloat getAPFloatFromSize(double Val, unsigned Size)
Returns an APFloat from Val converted to the appropriate size.
Definition: Utils.cpp:633
A collection of metadata nodes that might be associated with a memory access used by the alias-analysis infrastructure.
Definition: Metadata.h:760
static unsigned getSizeInBits(const fltSemantics &Sem)
Returns the size of the floating point number (in bits) in the given semantics.
Definition: APFloat.cpp:331
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
uint64_t value() const
This is a hole in the type system and should not be abused.
Definition: Alignment.h:85
MachineFunction * MF
MachineFunction under construction.
MDNode * MMRA
MMRA Metadata to be set on any instruction we create.
DebugLoc DL
Debug location to be set to any instruction we create.
const TargetInstrInfo * TII
Information used to access the description of the opcodes.
MDNode * PCSections
PC sections metadata to be set to any instruction we create.
MachineBasicBlock::iterator II
MachineRegisterInfo * MRI
Information used to verify types are consistent and to create virtual registers.
GISelChangeObserver * Observer
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.