llvm.org GIT mirror llvm / 81856f7
Try again at r138809 (make DSE more aggressive in removing dead stores at the end of a function), now with fewer cases of deleting stores whose values feed a later memcpy. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@139150 91177308-0d34-0410-b5e6-96231b3b80d8 Owen Anderson 8 years ago
3 changed file(s) with 192 addition(s) and 79 deletion(s). Raw diff Collapse all Expand all
5151 AA = &getAnalysis();
5252 MD = &getAnalysis();
5353 DominatorTree &DT = getAnalysis();
54
54
5555 bool Changed = false;
5656 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
5757 // Only check non-dead blocks. Dead blocks may have strange pointer
5858 // cycles that will confuse alias analysis.
5959 if (DT.isReachableFromEntry(I))
6060 Changed |= runOnBasicBlock(*I);
61
61
6262 AA = 0; MD = 0;
6363 return Changed;
6464 }
65
65
6666 bool runOnBasicBlock(BasicBlock &BB);
6767 bool HandleFree(CallInst *F);
6868 bool handleEndBlock(BasicBlock &BB);
104104 MemoryDependenceAnalysis &MD,
105105 SmallPtrSet *ValueSet = 0) {
106106 SmallVector NowDeadInsts;
107
107
108108 NowDeadInsts.push_back(I);
109109 --NumFastOther;
110
110
111111 // Before we touch this instruction, remove it from memdep!
112112 do {
113113 Instruction *DeadInst = NowDeadInsts.pop_back_val();
114114 ++NumFastOther;
115
115
116116 // This instruction is dead, zap it, in stages. Start by removing it from
117117 // MemDep, which needs to know the operands and needs it to be in the
118118 // function.
119119 MD.removeInstruction(DeadInst);
120
120
121121 for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
122122 Value *Op = DeadInst->getOperand(op);
123123 DeadInst->setOperand(op, 0);
124
124
125125 // If this operand just became dead, add it to the NowDeadInsts list.
126126 if (!Op->use_empty()) continue;
127
127
128128 if (Instruction *OpI = dyn_cast(Op))
129129 if (isInstructionTriviallyDead(OpI))
130130 NowDeadInsts.push_back(OpI);
131131 }
132
132
133133 DeadInst->eraseFromParent();
134
134
135135 if (ValueSet) ValueSet->erase(DeadInst);
136136 } while (!NowDeadInsts.empty());
137137 }
162162 getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
163163 if (StoreInst *SI = dyn_cast(Inst))
164164 return AA.getLocation(SI);
165
165
166166 if (MemIntrinsic *MI = dyn_cast(Inst)) {
167167 // memcpy/memmove/memset.
168168 AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
173173 return AliasAnalysis::Location();
174174 return Loc;
175175 }
176
176
177177 IntrinsicInst *II = dyn_cast(Inst);
178178 if (II == 0) return AliasAnalysis::Location();
179
179
180180 switch (II->getIntrinsicID()) {
181181 default: return AliasAnalysis::Location(); // Unhandled intrinsic.
182182 case Intrinsic::init_trampoline:
184184 // that we should use the size of the pointee type. This isn't valid for
185185 // init.trampoline, which writes more than an i8.
186186 if (AA.getTargetData() == 0) return AliasAnalysis::Location();
187
187
188188 // FIXME: We don't know the size of the trampoline, so we can't really
189189 // handle it here.
190190 return AliasAnalysis::Location(II->getArgOperand(0));
197197
198198 /// getLocForRead - Return the location read by the specified "hasMemoryWrite"
199199 /// instruction if any.
200 static AliasAnalysis::Location
200 static AliasAnalysis::Location
201201 getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
202202 assert(hasMemoryWrite(Inst) && "Unknown instruction case");
203
203
204204 // The only instructions that both read and write are the mem transfer
205205 // instructions (memcpy/memmove).
206206 if (MemTransferInst *MTI = dyn_cast(Inst))
215215 // Don't remove volatile/atomic stores.
216216 if (StoreInst *SI = dyn_cast(I))
217217 return SI->isUnordered();
218
218
219219 IntrinsicInst *II = cast(I);
220220 switch (II->getIntrinsicID()) {
221221 default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
226226 case Intrinsic::init_trampoline:
227227 // Always safe to remove init_trampoline.
228228 return true;
229
229
230230 case Intrinsic::memset:
231231 case Intrinsic::memmove:
232232 case Intrinsic::memcpy:
254254 const TargetData *TD = AA.getTargetData();
255255 if (TD == 0)
256256 return AliasAnalysis::UnknownSize;
257
257
258258 if (AllocaInst *A = dyn_cast(V)) {
259259 // Get size information for the alloca
260260 if (ConstantInt *C = dyn_cast(A->getArraySize()))
261261 return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
262262 return AliasAnalysis::UnknownSize;
263263 }
264
264
265265 assert(isa(V) && "Expected AllocaInst or Argument!");
266266 PointerType *PT = cast(V->getType());
267267 return TD->getTypeAllocSize(PT->getElementType());
286286 AliasAnalysis &AA) {
287287 const Value *P1 = Earlier.Ptr->stripPointerCasts();
288288 const Value *P2 = Later.Ptr->stripPointerCasts();
289
289
290290 // If the start pointers are the same, we just have to compare sizes to see if
291291 // the later store was larger than the earlier store.
292292 if (P1 == P2) {
301301 return Later.Ptr->getType() == Earlier.Ptr->getType();
302302 return false;
303303 }
304
304
305305 // Make sure that the Later size is >= the Earlier size.
306306 if (Later.Size < Earlier.Size)
307307 return false;
308308 return true;
309309 }
310
310
311311 // Otherwise, we have to have size information, and the later store has to be
312312 // larger than the earlier one.
313313 if (Later.Size == AliasAnalysis::UnknownSize ||
314314 Earlier.Size == AliasAnalysis::UnknownSize ||
315315 Later.Size <= Earlier.Size || AA.getTargetData() == 0)
316316 return false;
317
317
318318 // Check to see if the later store is to the entire object (either a global,
319319 // an alloca, or a byval argument). If so, then it clearly overwrites any
320320 // other store to the same object.
321321 const TargetData &TD = *AA.getTargetData();
322
322
323323 const Value *UO1 = GetUnderlyingObject(P1, &TD),
324324 *UO2 = GetUnderlyingObject(P2, &TD);
325
325
326326 // If we can't resolve the same pointers to the same object, then we can't
327327 // analyze them at all.
328328 if (UO1 != UO2)
329329 return false;
330
330
331331 // If the "Later" store is to a recognizable object, get its size.
332332 if (isObjectPointerWithTrustworthySize(UO2)) {
333333 uint64_t ObjectSize =
335335 if (ObjectSize == Later.Size)
336336 return true;
337337 }
338
338
339339 // Okay, we have stores to two completely different pointers. Try to
340340 // decompose the pointer into a "base + constant_offset" form. If the base
341341 // pointers are equal, then we can reason about the two stores.
342342 int64_t EarlierOff = 0, LaterOff = 0;
343343 const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
344344 const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);
345
345
346346 // If the base pointers still differ, we have two completely different stores.
347347 if (BP1 != BP2)
348348 return false;
349349
350350 // The later store completely overlaps the earlier store if:
351 //
351 //
352352 // 1. Both start at the same offset and the later one's size is greater than
353353 // or equal to the earlier one's, or
354354 //
355355 // |--earlier--|
356356 // |-- later --|
357 //
357 //
358358 // 2. The earlier store has an offset greater than the later offset, but which
359359 // still lies completely within the later store.
360360 //
372372
373373 /// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
374374 /// memory region into an identical pointer) then it doesn't actually make its
375 /// input dead in the traditional sense. Consider this case:
375 /// input dead in the traditional sense. Consider this case:
376376 ///
377377 /// memcpy(A <- B)
378378 /// memcpy(A <- A)
390390 // location read.
391391 AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
392392 if (InstReadLoc.Ptr == 0) return false; // Not a reading instruction.
393
393
394394 // If the read and written loc obviously don't alias, it isn't a read.
395395 if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;
396
396
397397 // Okay, 'Inst' may copy over itself. However, we can still remove a the
398398 // DepWrite instruction if we can prove that it reads from the same location
399399 // as Inst. This handles useful cases like:
403403 // aliases, so removing the first memcpy is safe (assuming it writes <= #
404404 // bytes as the second one.
405405 AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);
406
406
407407 if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
408408 return false;
409
409
410410 // If DepWrite doesn't read memory or if we can't prove it is a must alias,
411411 // then it can't be considered dead.
412412 return true;
419419
420420 bool DSE::runOnBasicBlock(BasicBlock &BB) {
421421 bool MadeChange = false;
422
422
423423 // Do a top-down walk on the BB.
424424 for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
425425 Instruction *Inst = BBI++;
426
426
427427 // Handle 'free' calls specially.
428428 if (CallInst *F = isFreeCall(Inst)) {
429429 MadeChange |= HandleFree(F);
430430 continue;
431431 }
432
432
433433 // If we find something that writes memory, get its memory dependence.
434434 if (!hasMemoryWrite(Inst))
435435 continue;
436436
437437 MemDepResult InstDep = MD->getDependency(Inst);
438
438
439439 // Ignore any store where we can't find a local dependence.
440440 // FIXME: cross-block DSE would be fun. :)
441441 if (InstDep.isNonLocal() || InstDep.isUnknown())
442442 continue;
443
443
444444 // If we're storing the same value back to a pointer that we just
445445 // loaded from, then the store can be removed.
446446 if (StoreInst *SI = dyn_cast(Inst)) {
449449 SI->getOperand(0) == DepLoad && isRemovable(SI)) {
450450 DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n "
451451 << "LOAD: " << *DepLoad << "\n STORE: " << *SI << '\n');
452
452
453453 // DeleteDeadInstruction can delete the current instruction. Save BBI
454454 // in case we need it.
455455 WeakVH NextInst(BBI);
456
456
457457 DeleteDeadInstruction(SI, *MD);
458
458
459459 if (NextInst == 0) // Next instruction deleted.
460460 BBI = BB.begin();
461461 else if (BBI != BB.begin()) // Revisit this instruction if possible.
466466 }
467467 }
468468 }
469
469
470470 // Figure out what location is being stored to.
471471 AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);
472472
473473 // If we didn't get a useful location, fail.
474474 if (Loc.Ptr == 0)
475475 continue;
476
476
477477 while (!InstDep.isNonLocal() && !InstDep.isUnknown()) {
478478 // Get the memory clobbered by the instruction we depend on. MemDep will
479479 // skip any instructions that 'Loc' clearly doesn't interact with. If we
495495 !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
496496 DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: "
497497 << *DepWrite << "\n KILLER: " << *Inst << '\n');
498
498
499499 // Delete the store and now-dead instructions that feed it.
500500 DeleteDeadInstruction(DepWrite, *MD);
501501 ++NumFastStores;
502502 MadeChange = true;
503
503
504504 // DeleteDeadInstruction can delete the current instruction in loop
505505 // cases, reset BBI.
506506 BBI = Inst;
508508 --BBI;
509509 break;
510510 }
511
511
512512 // If this is a may-aliased store that is clobbering the store value, we
513513 // can keep searching past it for another must-aliased pointer that stores
514514 // to the same location. For example, in:
518518 // we can remove the first store to P even though we don't know if P and Q
519519 // alias.
520520 if (DepWrite == &BB.front()) break;
521
521
522522 // Can't look past this instruction if it might read 'Loc'.
523523 if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
524524 break;
525
525
526526 InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
527527 }
528528 }
529
529
530530 // If this block ends in a return, unwind, or unreachable, all allocas are
531531 // dead at its end, which means stores to them are also dead.
532532 if (BB.getTerminator()->getNumSuccessors() == 0)
533533 MadeChange |= handleEndBlock(BB);
534
534
535535 return MadeChange;
536536 }
537537
546546 Instruction *Dependency = Dep.getInst();
547547 if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
548548 return MadeChange;
549
549
550550 Value *DepPointer =
551551 GetUnderlyingObject(getStoredPointerOperand(Dependency));
552552
553553 // Check for aliasing.
554554 if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
555555 return MadeChange;
556
556
557557 // DCE instructions only used to calculate that store
558558 DeleteDeadInstruction(Dependency, *MD);
559559 ++NumFastStores;
566566 // free(s);
567567 Dep = MD->getDependency(F);
568568 };
569
569
570570 return MadeChange;
571571 }
572572
578578 /// ret void
579579 bool DSE::handleEndBlock(BasicBlock &BB) {
580580 bool MadeChange = false;
581
581
582582 // Keep track of all of the stack objects that are dead at the end of the
583583 // function.
584584 SmallPtrSet DeadStackObjects;
585
585
586586 // Find all of the alloca'd pointers in the entry block.
587587 BasicBlock *Entry = BB.getParent()->begin();
588588 for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
589589 if (AllocaInst *AI = dyn_cast(I))
590590 DeadStackObjects.insert(AI);
591
591
592592 // Treat byval arguments the same, stores to them are dead at the end of the
593593 // function.
594594 for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
595595 AE = BB.getParent()->arg_end(); AI != AE; ++AI)
596596 if (AI->hasByValAttr())
597597 DeadStackObjects.insert(AI);
598
598
599599 // Scan the basic block backwards
600600 for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
601601 --BBI;
602
602
603603 // If we find a store, check to see if it points into a dead stack value.
604604 if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
605605 // See through pointer-to-pointer bitcasts
608608 // Stores to stack values are valid candidates for removal.
609609 if (DeadStackObjects.count(Pointer)) {
610610 Instruction *Dead = BBI++;
611
611
612612 DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
613613 << *Dead << "\n Object: " << *Pointer << '\n');
614
614
615615 // DCE instructions only used to calculate that store.
616616 DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
617617 ++NumFastStores;
619619 continue;
620620 }
621621 }
622
622
623623 // Remove any dead non-memory-mutating instructions.
624624 if (isInstructionTriviallyDead(BBI)) {
625625 Instruction *Inst = BBI++;
628628 MadeChange = true;
629629 continue;
630630 }
631
631
632632 if (AllocaInst *A = dyn_cast(BBI)) {
633633 DeadStackObjects.erase(A);
634634 continue;
635635 }
636
636
637637 if (CallSite CS = cast(BBI)) {
638638 // If this call does not access memory, it can't be loading any of our
639639 // pointers.
640640 if (AA->doesNotAccessMemory(CS))
641641 continue;
642
642
643643 // If the call might load from any of our allocas, then any store above
644644 // the call is live.
645645 SmallVector LiveAllocas;
646646 for (SmallPtrSet::iterator I = DeadStackObjects.begin(),
647647 E = DeadStackObjects.end(); I != E; ++I) {
648648 // See if the call site touches it.
649 AliasAnalysis::ModRefResult A =
649 AliasAnalysis::ModRefResult A =
650650 AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));
651
651
652652 if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
653653 LiveAllocas.push_back(*I);
654654 }
655
655
656656 for (SmallVector::iterator I = LiveAllocas.begin(),
657657 E = LiveAllocas.end(); I != E; ++I)
658658 DeadStackObjects.erase(*I);
659
659
660660 // If all of the allocas were clobbered by the call then we're not going
661661 // to find anything else to process.
662662 if (DeadStackObjects.empty())
663663 return MadeChange;
664
664
665665 continue;
666666 }
667667
668668 AliasAnalysis::Location LoadedLoc;
669
669
670670 // If we encounter a use of the pointer, it is no longer considered dead
671671 if (LoadInst *L = dyn_cast(BBI)) {
672672 if (!L->isUnordered()) // Be conservative with atomic/volatile load
676676 LoadedLoc = AA->getLocation(V);
677677 } else if (MemTransferInst *MTI = dyn_cast(BBI)) {
678678 LoadedLoc = AA->getLocationForSource(MTI);
679 } else if (!BBI->mayReadOrWriteMemory()) {
680 // Instruction doesn't touch memory.
679 } else if (!BBI->mayReadFromMemory()) {
680 // Instruction doesn't read memory. Note that stores that weren't removed
681 // above will hit this case.
681682 continue;
682683 } else {
683684 // Unknown inst; assume it clobbers everything.
693694 if (DeadStackObjects.empty())
694695 break;
695696 }
696
697
697698 return MadeChange;
698699 }
699700
707708 // A constant can't be in the dead pointer set.
708709 if (isa(UnderlyingPointer))
709710 return;
710
711
711712 // If the kill pointer can be easily reduced to an alloca, don't bother doing
712713 // extraneous AA queries.
713714 if (isa(UnderlyingPointer) || isa(UnderlyingPointer)) {
714715 DeadStackObjects.erase(const_cast(UnderlyingPointer));
715716 return;
716717 }
717
718
718719 SmallVector NowLive;
719720 for (SmallPtrSet::iterator I = DeadStackObjects.begin(),
720721 E = DeadStackObjects.end(); I != E; ++I) {
0 ; RUN: opt -dse -S < %s | FileCheck %s
1
2 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
3 target triple = "x86_64-apple-darwin"
4
5 %"class.std::auto_ptr" = type { i32* }
6
7 ; CHECK: @_Z3foov
; Regression test for end-of-block DSE: the store of null into the local
; alloca %temp.lvalue is dead once the function returns, so DSE should
; delete it, while the stores through the sret result must survive.
8 define void @_Z3foov(%"class.std::auto_ptr"* noalias nocapture sret %agg.result) uwtable ssp {
9 _ZNSt8auto_ptrIiED1Ev.exit:
; %temp.lvalue is a stack object that dies at 'ret', making trailing
; stores into it removable.
10 %temp.lvalue = alloca %"class.std::auto_ptr", align 8
11 call void @_Z3barv(%"class.std::auto_ptr"* sret %temp.lvalue)
12 %_M_ptr.i.i = getelementptr inbounds %"class.std::auto_ptr"* %temp.lvalue, i64 0, i32 0
; The loaded value is forwarded to %agg.result below, so the load itself
; is live; only the null store into the dead alloca should go away.
13 %tmp.i.i = load i32** %_M_ptr.i.i, align 8, !tbaa !0
14 ; CHECK-NOT: store i32* null
; This store writes only to %temp.lvalue and nothing reads it afterwards
; (the next store targets %agg.result) — DSE must remove it.
15 store i32* null, i32** %_M_ptr.i.i, align 8, !tbaa !0
16 %_M_ptr.i.i4 = getelementptr inbounds %"class.std::auto_ptr"* %agg.result, i64 0, i32 0
; Store into the caller-visible sret slot: NOT removable.
17 store i32* %tmp.i.i, i32** %_M_ptr.i.i4, align 8, !tbaa !0
18 ; CHECK: ret void
19 ret void
20 }
21
22 declare void @_Z3barv(%"class.std::auto_ptr"* sret)
23
24 !0 = metadata !{metadata !"any pointer", metadata !1}
25 !1 = metadata !{metadata !"omnipotent char", metadata !2}
26 !2 = metadata !{metadata !"Simple C/C++ TBAA", null}
0 ; RUN: opt -dse -S < %s | FileCheck %s
1 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-f128:128:128-n8:16:32:64"
2 target triple = "x86_64-unknown-linux-gnu"
3
4 %struct.pair.162 = type { %struct.BasicBlock*, i32, [4 x i8] }
5 %struct.BasicBlock = type { %struct.Value, %struct.ilist_node.24, %struct.iplist.22, %struct.Function* }
6 %struct.Value = type { i32 (...)**, i8, i8, i16, %struct.Type*, %struct.Use*, %struct.StringMapEntry* }
7 %struct.Type = type { %struct.LLVMContext*, i8, [3 x i8], i32, {}* }
8 %struct.LLVMContext = type { %struct.LLVMContextImpl* }
9 %struct.LLVMContextImpl = type opaque
10 %struct.Use = type { %struct.Value*, %struct.Use*, %struct.PointerIntPair }
11 %struct.PointerIntPair = type { i64 }
12 %struct.StringMapEntry = type opaque
13 %struct.ilist_node.24 = type { %struct.ilist_half_node.23, %struct.BasicBlock* }
14 %struct.ilist_half_node.23 = type { %struct.BasicBlock* }
15 %struct.iplist.22 = type { %struct.ilist_traits.21, %struct.Instruction* }
16 %struct.ilist_traits.21 = type { %struct.ilist_half_node.25 }
17 %struct.ilist_half_node.25 = type { %struct.Instruction* }
18 %struct.Instruction = type { [52 x i8], %struct.ilist_node.26, %struct.BasicBlock*, %struct.DebugLoc }
19 %struct.ilist_node.26 = type { %struct.ilist_half_node.25, %struct.Instruction* }
20 %struct.DebugLoc = type { i32, i32 }
21 %struct.Function = type { %struct.GlobalValue, %struct.ilist_node.14, %struct.iplist.4, %struct.iplist, %struct.ValueSymbolTable*, %struct.AttrListPtr }
22 %struct.GlobalValue = type <{ [52 x i8], [4 x i8], %struct.Module*, i8, i16, [5 x i8], %struct.basic_string }>
23 %struct.Module = type { %struct.LLVMContext*, %struct.iplist.20, %struct.iplist.16, %struct.iplist.12, %struct.vector.2, %struct.ilist, %struct.basic_string, %struct.ValueSymbolTable*, %struct.OwningPtr, %struct.basic_string, %struct.basic_string, %struct.basic_string, i8* }
24 %struct.iplist.20 = type { %struct.ilist_traits.19, %struct.GlobalVariable* }
25 %struct.ilist_traits.19 = type { %struct.ilist_node.18 }
26 %struct.ilist_node.18 = type { %struct.ilist_half_node.17, %struct.GlobalVariable* }
27 %struct.ilist_half_node.17 = type { %struct.GlobalVariable* }
28 %struct.GlobalVariable = type { %struct.GlobalValue, %struct.ilist_node.18, i8, [7 x i8] }
29 %struct.iplist.16 = type { %struct.ilist_traits.15, %struct.Function* }
30 %struct.ilist_traits.15 = type { %struct.ilist_node.14 }
31 %struct.ilist_node.14 = type { %struct.ilist_half_node.13, %struct.Function* }
32 %struct.ilist_half_node.13 = type { %struct.Function* }
33 %struct.iplist.12 = type { %struct.ilist_traits.11, %struct.GlobalAlias* }
34 %struct.ilist_traits.11 = type { %struct.ilist_node.10 }
35 %struct.ilist_node.10 = type { %struct.ilist_half_node.9, %struct.GlobalAlias* }
36 %struct.ilist_half_node.9 = type { %struct.GlobalAlias* }
37 %struct.GlobalAlias = type { %struct.GlobalValue, %struct.ilist_node.10 }
38 %struct.vector.2 = type { %struct._Vector_base.1 }
39 %struct._Vector_base.1 = type { %struct._Vector_impl.0 }
40 %struct._Vector_impl.0 = type { %struct.basic_string*, %struct.basic_string*, %struct.basic_string* }
41 %struct.basic_string = type { %struct._Alloc_hider }
42 %struct._Alloc_hider = type { i8* }
43 %struct.ilist = type { %struct.iplist.8 }
44 %struct.iplist.8 = type { %struct.ilist_traits.7, %struct.NamedMDNode* }
45 %struct.ilist_traits.7 = type { %struct.ilist_node.6 }
46 %struct.ilist_node.6 = type { %struct.ilist_half_node.5, %struct.NamedMDNode* }
47 %struct.ilist_half_node.5 = type { %struct.NamedMDNode* }
48 %struct.NamedMDNode = type { %struct.ilist_node.6, %struct.basic_string, %struct.Module*, i8* }
49 %struct.ValueSymbolTable = type opaque
50 %struct.OwningPtr = type { %struct.GVMaterializer* }
51 %struct.GVMaterializer = type opaque
52 %struct.iplist.4 = type { %struct.ilist_traits.3, %struct.BasicBlock* }
53 %struct.ilist_traits.3 = type { %struct.ilist_half_node.23 }
54 %struct.iplist = type { %struct.ilist_traits, %struct.Argument* }
55 %struct.ilist_traits = type { %struct.ilist_half_node }
56 %struct.ilist_half_node = type { %struct.Argument* }
57 %struct.Argument = type { %struct.Value, %struct.ilist_node, %struct.Function* }
58 %struct.ilist_node = type { %struct.ilist_half_node, %struct.Argument* }
59 %struct.AttrListPtr = type { %struct.AttributeListImpl* }
60 %struct.AttributeListImpl = type opaque
61
62 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind
63
64 ; CHECK: _ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_
65 ; CHECK: store
66 ; CHECK: ret void
; Regression test: stores into the local %memtmp look dead at end-of-block
; (the alloca dies at 'ret'), but the second memcpy below READS %memtmp to
; copy it into %__b. DSE must therefore keep at least one store (the
; '; CHECK: store' above), i.e. not delete stores whose values are consumed
; by a later memcpy.
67 define void @_ZSt9iter_swapIPSt4pairIPN4llvm10BasicBlockEjES5_EvT_T0_(%struct.pair.162* %__a, %struct.pair.162* %__b) nounwind uwtable inlinehint {
68 entry:
; Temporary pair used to implement swap(__a, __b) via memtmp = *__a.
69 %memtmp = alloca %struct.pair.162, align 8
; Copy field 0 (the BasicBlock*) of *__a into memtmp.
70 %0 = getelementptr inbounds %struct.pair.162* %memtmp, i64 0, i32 0
71 %1 = getelementptr inbounds %struct.pair.162* %__a, i64 0, i32 0
72 %2 = load %struct.BasicBlock** %1, align 8
73 store %struct.BasicBlock* %2, %struct.BasicBlock** %0, align 8
; Copy field 1 (the i32) of *__a into memtmp.
74 %3 = getelementptr inbounds %struct.pair.162* %memtmp, i64 0, i32 1
75 %4 = getelementptr inbounds %struct.pair.162* %__a, i64 0, i32 1
76 %5 = load i32* %4, align 4
77 store i32 %5, i32* %3, align 8
; *__a = *__b (12 bytes: pointer + i32, padding excluded).
78 %6 = bitcast %struct.pair.162* %__a to i8*
79 %7 = bitcast %struct.pair.162* %__b to i8*
80 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %6, i8* %7, i64 12, i32 1, i1 false)
; *__b = memtmp — this memcpy reads %memtmp, so the stores at 73/77 are
; live despite %memtmp being a dead stack object at 'ret'.
81 %8 = bitcast %struct.pair.162* %memtmp to i8*
82 call void @llvm.memcpy.p0i8.p0i8.i64(i8* %7, i8* %8, i64 12, i32 1, i1 false)
83 ret void
84 }