llvm.org GIT mirror: llvm commit 92b6b15

Revert r312154 "Re-enable "[MachineCopyPropagation] Extend pass to do COPY source forwarding""

It caused PR34387: Assertion failed: (RegNo < NumRegs && "Attempting to access record for invalid register number!")

> Issues identified by buildbots addressed since original review:
> - Fixed ARMLoadStoreOptimizer bug exposed by this change in r311907.
> - The pass no longer forwards COPYs to physical register uses, since
>   doing so can break code that implicitly relies on the physical
>   register number of the use.
> - The pass no longer forwards COPYs to undef uses, since doing so
>   can break the machine verifier by creating LiveRanges that don't
>   end on a use (since the undef operand is not considered a use).
>
> [MachineCopyPropagation] Extend pass to do COPY source forwarding
>
> This change extends MachineCopyPropagation to do COPY source forwarding.
>
> This change also extends the MachineCopyPropagation pass to be able to
> be run during register allocation, after physical registers have been
> assigned, but before the virtual registers have been re-written, which
> allows it to remove virtual register COPY LiveIntervals that become dead
> through the forwarding of all of their uses.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@312178 91177308-0d34-0410-b5e6-96231b3b80d8

Hans Wennborg, 2 years ago
78 changed file(s) with 501 addition(s) and 1044 deletion(s).
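To make the reverted change concrete, the forwarding rewrite at the heart of the patch can be summarized with a minimal machine-IR sketch (illustrative only; the vreg names are invented):

    %vreg1 = COPY %vreg0
    ...                      ; physical reg assigned to %vreg0 not clobbered here
    ... = OP %vreg1          ; rewritten to: ... = OP %vreg0

If every use of %vreg1 is rewritten this way, the COPY's live interval becomes dead and the COPY itself is deleted. The assertion reported in PR34387 fired inside this machinery, so the hunks below remove it wholesale.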
277277 /// MachineSinking - This pass performs sinking on machine instructions.
278278 extern char &MachineSinkingID;
279279
280 /// MachineCopyPropagationPreRegRewrite - This pass performs copy propagation
281 /// on machine instructions after register allocation but before virtual
282 /// register re-writing.
283 extern char &MachineCopyPropagationPreRegRewriteID;
284
285280 /// MachineCopyPropagation - This pass performs copy propagation on
286281 /// machine instructions.
287282 extern char &MachineCopyPropagationID;
231231 void initializeMachineCSEPass(PassRegistry&);
232232 void initializeMachineCombinerPass(PassRegistry&);
233233 void initializeMachineCopyPropagationPass(PassRegistry&);
234 void initializeMachineCopyPropagationPreRegRewritePass(PassRegistry&);
235234 void initializeMachineDominanceFrontierPass(PassRegistry&);
236235 void initializeMachineDominatorTreePass(PassRegistry&);
237236 void initializeMachineFunctionPrinterPassPass(PassRegistry&);
5252 initializeMachineCSEPass(Registry);
5353 initializeMachineCombinerPass(Registry);
5454 initializeMachineCopyPropagationPass(Registry);
55 initializeMachineCopyPropagationPreRegRewritePass(Registry);
5655 initializeMachineDominatorTreePass(Registry);
5756 initializeMachineFunctionPrinterPassPass(Registry);
5857 initializeMachineLICMPass(Registry);
66 //
77 //===----------------------------------------------------------------------===//
88 //
9 // This is a simple MachineInstr-level copy forwarding pass. It may be run at
10 // two places in the codegen pipeline:
11 // - After register allocation but before virtual registers have been remapped
12 // to physical registers.
13 // - After physical register remapping.
14 //
15 // The optimizations done vary slightly based on whether virtual registers are
16 // still present. In both cases, this pass forwards the source of COPYs to the
17 // users of their destinations when doing so is legal. For example:
18 //
19 // %vreg1 = COPY %vreg0
20 // ...
21 // ... = OP %vreg1
22 //
23 // If
24 // - the physical register assigned to %vreg0 has not been clobbered by the
25 // time of the use of %vreg1
26 // - the register class constraints are satisfied
27 // - the COPY def is the only value that reaches OP
28 // then this pass replaces the above with:
29 //
30 // %vreg1 = COPY %vreg0
31 // ...
32 // ... = OP %vreg0
33 //
34 // and updates the relevant state required by VirtRegMap (e.g. LiveIntervals).
35 // COPYs whose LiveIntervals become dead as a result of this forwarding (i.e. if
36 // all uses of %vreg1 are changed to %vreg0) are removed.
37 //
38 // When being run with only physical registers, this pass will also remove some
39 // redundant COPYs. For example:
40 //
41 // %R1 = COPY %R0
42 // ... // No clobber of %R1
43 // %R0 = COPY %R1 <<< Removed
44 //
45 // or
46 //
47 // %R1 = COPY %R0
48 // ... // No clobber of %R0
49 // %R1 = COPY %R0 <<< Removed
9 // This is an extremely simple MachineInstr-level copy propagation pass.
5010 //
5111 //===----------------------------------------------------------------------===//
5212
53 #include "LiveDebugVariables.h"
5413 #include "llvm/ADT/DenseMap.h"
5514 #include "llvm/ADT/STLExtras.h"
5615 #include "llvm/ADT/SetVector.h"
5716 #include "llvm/ADT/SmallVector.h"
5817 #include "llvm/ADT/Statistic.h"
5918 #include "llvm/ADT/iterator_range.h"
60 #include "llvm/CodeGen/LiveRangeEdit.h"
61 #include "llvm/CodeGen/LiveStackAnalysis.h"
6219 #include "llvm/CodeGen/MachineBasicBlock.h"
6320 #include "llvm/CodeGen/MachineFunction.h"
6421 #include "llvm/CodeGen/MachineFunctionPass.h"
6522 #include "llvm/CodeGen/MachineInstr.h"
6623 #include "llvm/CodeGen/MachineOperand.h"
6724 #include "llvm/CodeGen/MachineRegisterInfo.h"
68 #include "llvm/CodeGen/Passes.h"
69 #include "llvm/CodeGen/VirtRegMap.h"
7025 #include "llvm/MC/MCRegisterInfo.h"
7126 #include "llvm/Pass.h"
7227 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/DebugCounter.h"
7428 #include "llvm/Support/raw_ostream.h"
7529 #include "llvm/Target/TargetInstrInfo.h"
7630 #include "llvm/Target/TargetRegisterInfo.h"
8337 #define DEBUG_TYPE "machine-cp"
8438
8539 STATISTIC(NumDeletes, "Number of dead copies deleted");
86 STATISTIC(NumCopyForwards, "Number of copy uses forwarded");
87 DEBUG_COUNTER(FwdCounter, "machine-cp-fwd",
88 "Controls which register COPYs are forwarded");
8940
9041 namespace {
9142
9344 using SourceMap = DenseMap<unsigned, RegList>;
9445 using Reg2MIMap = DenseMap<unsigned, MachineInstr *>;
9546
96 class MachineCopyPropagation : public MachineFunctionPass,
97 private LiveRangeEdit::Delegate {
47 class MachineCopyPropagation : public MachineFunctionPass {
9848 const TargetRegisterInfo *TRI;
9949 const TargetInstrInfo *TII;
100 MachineRegisterInfo *MRI;
101 MachineFunction *MF;
102 SlotIndexes *Indexes;
103 LiveIntervals *LIS;
104 const VirtRegMap *VRM;
105 // True if this pass being run before virtual registers are remapped to
106 // physical ones.
107 bool PreRegRewrite;
108 bool NoSubRegLiveness;
109
110 protected:
111 MachineCopyPropagation(char &ID, bool PreRegRewrite)
112 : MachineFunctionPass(ID), PreRegRewrite(PreRegRewrite) {}
50 const MachineRegisterInfo *MRI;
11351
11452 public:
11553 static char ID; // Pass identification, replacement for typeid
11654
117 MachineCopyPropagation() : MachineCopyPropagation(ID, false) {
55 MachineCopyPropagation() : MachineFunctionPass(ID) {
11856 initializeMachineCopyPropagationPass(*PassRegistry::getPassRegistry());
11957 }
12058
12159 void getAnalysisUsage(AnalysisUsage &AU) const override {
122 if (PreRegRewrite) {
123 AU.addRequired<SlotIndexes>();
124 AU.addPreserved<SlotIndexes>();
125 AU.addRequired<LiveIntervals>();
126 AU.addPreserved<LiveIntervals>();
127 AU.addRequired<VirtRegMap>();
128 AU.addPreserved<VirtRegMap>();
129 AU.addPreserved<LiveDebugVariables>();
130 AU.addPreserved<LiveStacks>();
131 }
13260 AU.setPreservesCFG();
13361 MachineFunctionPass::getAnalysisUsage(AU);
13462 }
13664 bool runOnMachineFunction(MachineFunction &MF) override;
13765
13866 MachineFunctionProperties getRequiredProperties() const override {
139 if (PreRegRewrite)
140 return MachineFunctionProperties()
141 .set(MachineFunctionProperties::Property::NoPHIs)
142 .set(MachineFunctionProperties::Property::TracksLiveness);
14367 return MachineFunctionProperties().set(
14468 MachineFunctionProperties::Property::NoVRegs);
14569 }
14973 void ReadRegister(unsigned Reg);
15074 void CopyPropagateBlock(MachineBasicBlock &MBB);
15175 bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
152 unsigned getPhysReg(unsigned Reg, unsigned SubReg);
153 unsigned getPhysReg(const MachineOperand &Opnd) {
154 return getPhysReg(Opnd.getReg(), Opnd.getSubReg());
155 }
156 unsigned getFullPhysReg(const MachineOperand &Opnd) {
157 return getPhysReg(Opnd.getReg(), 0);
158 }
159 void forwardUses(MachineInstr &MI);
160 bool isForwardableRegClassCopy(const MachineInstr &Copy,
161 const MachineInstr &UseI);
162 std::tuple<unsigned, unsigned, bool>
163 checkUseSubReg(const MachineOperand &CopySrc, const MachineOperand &MOUse);
164 bool hasImplicitOverlap(const MachineInstr &MI, const MachineOperand &Use);
165 void narrowRegClass(const MachineInstr &MI, const MachineOperand &MOUse,
166 unsigned NewUseReg, unsigned NewUseSubReg);
167 void updateForwardedCopyLiveInterval(const MachineInstr &Copy,
168 const MachineInstr &UseMI,
169 unsigned OrigUseReg,
170 unsigned NewUseReg,
171 unsigned NewUseSubReg);
172 /// LiveRangeEdit callback for eliminateDeadDefs().
173 void LRE_WillEraseInstruction(MachineInstr *MI) override;
17476
17577 /// Candidates for deletion.
17678 SmallSetVector<MachineInstr *, 8> MaybeDeadCopies;
18789 bool Changed;
18890 };
18991
190 class MachineCopyPropagationPreRegRewrite : public MachineCopyPropagation {
191 public:
192 static char ID; // Pass identification, replacement for typeid
193 MachineCopyPropagationPreRegRewrite()
194 : MachineCopyPropagation(ID, true) {
195 initializeMachineCopyPropagationPreRegRewritePass(*PassRegistry::getPassRegistry());
196 }
197 };
19892 } // end anonymous namespace
19993
20094 char MachineCopyPropagation::ID = 0;
20397
20498 INITIALIZE_PASS(MachineCopyPropagation, DEBUG_TYPE,
20599 "Machine Copy Propagation Pass", false, false)
206
207 /// We have two separate passes that are very similar, the only difference being
208 /// where they are meant to be run in the pipeline. This is done for several
209 /// reasons:
210 /// - the two passes have different dependencies
211 /// - some targets want to disable the later run of this pass, but not the
212 /// earlier one (e.g. NVPTX and WebAssembly)
213 /// - it allows for easier debugging via llc
214
215 char MachineCopyPropagationPreRegRewrite::ID = 0;
216 char &llvm::MachineCopyPropagationPreRegRewriteID = MachineCopyPropagationPreRegRewrite::ID;
217
218 INITIALIZE_PASS_BEGIN(MachineCopyPropagationPreRegRewrite,
219 "machine-cp-prerewrite",
220 "Machine Copy Propagation Pre-Register Rewrite Pass",
221 false, false)
222 INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
223 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
224 INITIALIZE_PASS_DEPENDENCY(VirtRegMap)
225 INITIALIZE_PASS_END(MachineCopyPropagationPreRegRewrite,
226 "machine-cp-prerewrite",
227 "Machine Copy Propagation Pre-Register Rewrite Pass", false,
228 false)
229100
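The separate pass name also gives a direct way to exercise the pre-rewrite run on MIR in isolation, e.g. (an illustrative invocation, assuming llc's standard -run-pass plumbing):

    llc -x mir -run-pass=machine-cp-prerewrite input.mir -o -

which is what the "easier debugging via llc" point above refers to.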
230101 /// Remove any entry in \p Map where the register is a subregister or equal to
231102 /// a register contained in \p Regs.
267138 }
268139
269140 void MachineCopyPropagation::ReadRegister(unsigned Reg) {
270 // We don't track MaybeDeadCopies when running pre-VirtRegRewriter.
271 if (PreRegRewrite)
272 return;
273
274141 // If 'Reg' is defined by a copy, the copy is no longer a candidate
275142 // for elimination.
276143 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
302169 return SubIdx == TRI->getSubRegIndex(PreviousDef, Def);
303170 }
304171
305 /// Return the physical register assigned to \p Reg if it is a virtual register,
306 /// otherwise just return the physical reg from the operand itself.
307 ///
308 /// If \p SubReg is 0 then return the full physical register assigned to the
309 /// virtual register ignoring subregs. If we aren't tracking sub-reg liveness
310 /// then we need to use this to be more conservative with clobbers by killing
311 /// all super reg and their sub reg COPYs as well. This is to prevent COPY
312 /// forwarding in cases like the following:
313 ///
314 /// %vreg2 = COPY %vreg1:sub1
315 /// %vreg3 = COPY %vreg1:sub0
316 /// ... = OP1 %vreg2
317 /// ... = OP2 %vreg3
318 ///
319 /// After forwarding %vreg2 (assuming this is the last use of %vreg1) and
320 /// VirtRegRewriter adding kill markers, we have:
321 ///
322 /// %vreg3 = COPY %vreg1:sub0
323 /// ... = OP1 %vreg1:sub1
324 /// ... = OP2 %vreg3
325 ///
326 /// If %vreg3 is assigned to a sub-reg of %vreg1, then after rewriting we have:
327 ///
328 /// ... = OP1 R0:sub1, R0
329 /// ... = OP2 R0:sub0
330 ///
331 /// and the use of R0 by OP2 will not have a valid definition.
332 unsigned MachineCopyPropagation::getPhysReg(unsigned Reg, unsigned SubReg) {
333
334 // Physical registers cannot have subregs.
335 if (!TargetRegisterInfo::isVirtualRegister(Reg))
336 return Reg;
337
338 assert(PreRegRewrite && "Unexpected virtual register encountered");
339 Reg = VRM->getPhys(Reg);
340 if (SubReg && !NoSubRegLiveness)
341 Reg = TRI->getSubReg(Reg, SubReg);
342 return Reg;
343 }
344
345172 /// Remove instruction \p Copy if there exists a previous copy that copies the
346173 /// register \p Src to the register \p Def; This may happen indirectly by
347174 /// copying the super registers.
379206 return true;
380207 }
381208
382
383 /// Decide whether we should forward the destination of \param Copy to its use
384 /// in \param UseI based on the register class of the Copy operands. Same-class
385 /// COPYs are always accepted by this function, but cross-class COPYs are only
386 /// accepted if they are forwarded to another COPY with the operand register
387 /// classes reversed. For example:
388 ///
389 /// RegClassA = COPY RegClassB // Copy parameter
390 /// ...
391 /// RegClassB = COPY RegClassA // UseI parameter
392 ///
393 /// which after forwarding becomes
394 ///
395 /// RegClassA = COPY RegClassB
396 /// ...
397 /// RegClassB = COPY RegClassB
398 ///
399 /// so we have reduced the number of cross-class COPYs and potentially
400 /// introduced a nop COPY that can be removed.
401 bool MachineCopyPropagation::isForwardableRegClassCopy(
402 const MachineInstr &Copy, const MachineInstr &UseI) {
403 auto isCross = [&](const MachineOperand &Dst, const MachineOperand &Src) {
404 unsigned DstReg = Dst.getReg();
405 unsigned SrcPhysReg = getPhysReg(Src);
406 const TargetRegisterClass *DstRC;
407 if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
408 DstRC = MRI->getRegClass(DstReg);
409 unsigned DstSubReg = Dst.getSubReg();
410 if (DstSubReg)
411 SrcPhysReg = TRI->getMatchingSuperReg(SrcPhysReg, DstSubReg, DstRC);
412 } else
413 DstRC = TRI->getMinimalPhysRegClass(DstReg);
414
415 return !DstRC->contains(SrcPhysReg);
416 };
417
418 const MachineOperand &CopyDst = Copy.getOperand(0);
419 const MachineOperand &CopySrc = Copy.getOperand(1);
420
421 if (!isCross(CopyDst, CopySrc))
422 return true;
423
424 if (!UseI.isCopy())
425 return false;
426
427 assert(getFullPhysReg(UseI.getOperand(1)) == getFullPhysReg(CopyDst));
428 return !isCross(UseI.getOperand(0), CopySrc);
429 }
430
431 /// Check that the subregs on the copy source operand (\p CopySrc) and the use
432 /// operand to be forwarded to (\p MOUse) are compatible with doing the
433 /// forwarding. Also computes the new register and subregister to be used in
434 /// the forwarded-to instruction.
435 std::tuple<unsigned, unsigned, bool> MachineCopyPropagation::checkUseSubReg(
436 const MachineOperand &CopySrc, const MachineOperand &MOUse) {
437 unsigned NewUseReg = CopySrc.getReg();
438 unsigned NewUseSubReg;
439
440 if (TargetRegisterInfo::isPhysicalRegister(NewUseReg)) {
441 // If MOUse carries a subreg index, we need to apply it to the new
442 // physical reg we're going to replace it with.
443 if (MOUse.getSubReg())
444 NewUseReg = TRI->getSubReg(NewUseReg, MOUse.getSubReg());
445 // If the original use subreg isn't valid on the new src reg, we can't
446 // forward it here.
447 if (!NewUseReg)
448 return std::make_tuple(0, 0, false);
449 NewUseSubReg = 0;
450 } else {
451 // %v1 = COPY %v2:sub1
452 // USE %v1:sub2
453 // The new use is %v2:sub1:sub2
454 NewUseSubReg =
455 TRI->composeSubRegIndices(CopySrc.getSubReg(), MOUse.getSubReg());
456 // Check that NewUseSubReg is valid on NewUseReg
457 if (NewUseSubReg &&
458 !TRI->getSubClassWithSubReg(MRI->getRegClass(NewUseReg), NewUseSubReg))
459 return std::make_tuple(0, 0, false);
460 }
461
462 return std::make_tuple(NewUseReg, NewUseSubReg, true);
463 }
464
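To make the two branches above concrete (invented registers; ARM-flavoured subreg indices):

    ; Physical source: %v1 = COPY %D0, use is %v1:ssub_0
    ;   NewUseReg    = TRI->getSubReg(D0, ssub_0)   ; = S0; reject if no such subreg
    ;   NewUseSubReg = 0
    ; Virtual source: %v1 = COPY %v2:sub1, use is %v1:sub2
    ;   NewUseReg    = %v2
    ;   NewUseSubReg = composeSubRegIndices(sub1, sub2)  ; reject if no subclass
    ;                                                    ; of %v2's class has it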
465 /// Check that \p MI does not have implicit uses that overlap with its \p Use
466 /// operand (the register being replaced), since these can sometimes be
467 /// implicitly tied to other operands. For example, on AMDGPU:
468 ///
469 /// V_MOVRELS_B32_e32 %VGPR2, %M0, %EXEC, %VGPR2_VGPR3_VGPR4_VGPR5
470 ///
471 /// the %VGPR2 is implicitly tied to the larger reg operand, but we have no
472 /// way of knowing we need to update the latter when updating the former.
473 bool MachineCopyPropagation::hasImplicitOverlap(const MachineInstr &MI,
474 const MachineOperand &Use) {
475 if (!TargetRegisterInfo::isPhysicalRegister(Use.getReg()))
476 return false;
477
478 for (const MachineOperand &MIUse : MI.uses())
479 if (&MIUse != &Use && MIUse.isReg() && MIUse.isImplicit() &&
480 TRI->regsOverlap(Use.getReg(), MIUse.getReg()))
481 return true;
482
483 return false;
484 }
485
486 /// Narrow the register class of the forwarded vreg so it matches any
487 /// instruction constraints. \p MI is the instruction being forwarded to. \p
488 /// MOUse is the operand being replaced in \p MI (which hasn't yet been updated
489 /// at the time this function is called). \p NewUseReg and \p NewUseSubReg are
490 /// what the \p MOUse will be changed to after forwarding.
491 ///
492 /// If we are forwarding
493 /// A:RCA = COPY B:RCB
494 /// into
495 /// ... = OP A:RCA
496 ///
497 /// then we need to narrow the register class of B so that it is a subclass
498 /// of RCA so that it meets the instruction register class constraints.
499 void MachineCopyPropagation::narrowRegClass(const MachineInstr &MI,
500 const MachineOperand &MOUse,
501 unsigned NewUseReg,
502 unsigned NewUseSubReg) {
503 if (!TargetRegisterInfo::isVirtualRegister(NewUseReg))
504 return;
505
506 // Make sure the virtual reg class allows the subreg.
507 if (NewUseSubReg) {
508 const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
509 const TargetRegisterClass *NewUseRC =
510 TRI->getSubClassWithSubReg(CurUseRC, NewUseSubReg);
511 if (CurUseRC != NewUseRC) {
512 DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
513 << " to " << TRI->getRegClassName(NewUseRC) << "\n");
514 MRI->setRegClass(NewUseReg, NewUseRC);
515 }
516 }
517
518 unsigned MOUseOpNo = &MOUse - &MI.getOperand(0);
519 const TargetRegisterClass *InstRC =
520 TII->getRegClass(MI.getDesc(), MOUseOpNo, TRI, *MF);
521 if (InstRC) {
522 const TargetRegisterClass *CurUseRC = MRI->getRegClass(NewUseReg);
523 if (NewUseSubReg)
524 InstRC = TRI->getMatchingSuperRegClass(CurUseRC, InstRC, NewUseSubReg);
525 if (!InstRC->hasSubClassEq(CurUseRC)) {
526 const TargetRegisterClass *NewUseRC =
527 TRI->getCommonSubClass(InstRC, CurUseRC);
528 DEBUG(dbgs() << "MCP: Setting regclass of " << PrintReg(NewUseReg, TRI)
529 << " to " << TRI->getRegClassName(NewUseRC) << "\n");
530 MRI->setRegClass(NewUseReg, NewUseRC);
531 }
532 }
533 }
534
535 /// Update the LiveInterval information to reflect the destination of \p Copy
536 /// being forwarded to a use in \p UseMI. \p OrigUseReg is the register being
537 /// forwarded through. It should be the destination register of \p Copy and has
538 /// already been replaced in \p UseMI at the point this function is called. \p
539 /// NewUseReg and \p NewUseSubReg are the register and subregister being
540 /// forwarded. They should be the source register of the \p Copy and should be
541 /// the value of the \p UseMI operand being forwarded at the point this function
542 /// is called.
543 void MachineCopyPropagation::updateForwardedCopyLiveInterval(
544 const MachineInstr &Copy, const MachineInstr &UseMI, unsigned OrigUseReg,
545 unsigned NewUseReg, unsigned NewUseSubReg) {
546
547 assert(TRI->isSubRegisterEq(getPhysReg(OrigUseReg, 0),
548 getFullPhysReg(Copy.getOperand(0))) &&
549 "OrigUseReg mismatch");
550 assert(TRI->isSubRegisterEq(getFullPhysReg(Copy.getOperand(1)),
551 getPhysReg(NewUseReg, 0)) &&
552 "NewUseReg mismatch");
553
554 // Extend live range starting from COPY early-clobber slot, since that
555 // is where the original src live range ends.
556 SlotIndex CopyUseIdx =
557 Indexes->getInstructionIndex(Copy).getRegSlot(true /*=EarlyClobber*/);
558 SlotIndex UseIdx = Indexes->getInstructionIndex(UseMI).getRegSlot();
559 if (TargetRegisterInfo::isVirtualRegister(NewUseReg)) {
560 LiveInterval &LI = LIS->getInterval(NewUseReg);
561 LI.extendInBlock(CopyUseIdx, UseIdx);
562 LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(NewUseSubReg);
563 for (auto &S : LI.subranges())
564 if ((S.LaneMask & UseMask).any() && S.find(CopyUseIdx))
565 S.extendInBlock(CopyUseIdx, UseIdx);
566 } else {
567 assert(NewUseSubReg == 0 && "Unexpected subreg on physical register!");
568 for (MCRegUnitIterator UI(NewUseReg, TRI); UI.isValid(); ++UI) {
569 LiveRange &LR = LIS->getRegUnit(*UI);
570 LR.extendInBlock(CopyUseIdx, UseIdx);
571 }
572 }
573
574 if (!TargetRegisterInfo::isVirtualRegister(OrigUseReg))
575 return;
576
577 LiveInterval &LI = LIS->getInterval(OrigUseReg);
578
579 // Can happen for undef uses.
580 if (LI.empty())
581 return;
582
583 SlotIndex UseIndex = Indexes->getInstructionIndex(UseMI);
584 const LiveRange::Segment *UseSeg = LI.getSegmentContaining(UseIndex);
585
586 // Only shrink if forwarded use is the end of a segment.
587 if (UseSeg->end != UseIndex.getRegSlot())
588 return;
589
590 SmallVector<MachineInstr *, 8> DeadInsts;
591 LIS->shrinkToUses(&LI, &DeadInsts);
592 if (!DeadInsts.empty()) {
593 SmallVector<unsigned, 8> NewRegs;
594 LiveRangeEdit(nullptr, NewRegs, *MF, *LIS, nullptr, this)
595 .eliminateDeadDefs(DeadInsts);
596 }
597 }
598
599 void MachineCopyPropagation::LRE_WillEraseInstruction(MachineInstr *MI) {
600 // Remove this COPY from further consideration for forwarding.
601 ClobberRegister(getFullPhysReg(MI->getOperand(0)));
602 Changed = true;
603 }
604
605 /// Look for available copies whose destination register is used by \p MI and
606 /// replace the use in \p MI with the copy's source register.
607 void MachineCopyPropagation::forwardUses(MachineInstr &MI) {
608 // We can't generally forward uses after virtual registers have been renamed
609 // because some targets generate code that has implicit dependencies on the
610 // physical register numbers. For example, in PowerPC, when spilling
611 // condition code registers, the following code pattern is generated:
612 //
613 // %CR7 = COPY %CR0
614 // %R6 = MFOCRF %CR7
615 // %R6 = RLWINM %R6, 29, 31, 31
616 //
617 // where the shift amount in the RLWINM instruction depends on the source
618 // register number of the MFOCRF instruction. If we were to forward %CR0 to
619 // the MFOCRF instruction, the shift amount would no longer be correct.
620 //
621 // FIXME: It may be possible to define a target hook that checks the register
622 // class or user opcode and allows some cases, but prevents cases like the
623 // above from being broken to enable later register copy forwarding.
624 if (!PreRegRewrite)
625 return;
626
627 if (AvailCopyMap.empty())
628 return;
629
630 // Look for non-tied explicit vreg uses that have an active COPY
631 // instruction that defines the physical register allocated to them.
632 // Replace the vreg with the source of the active COPY.
633 for (MachineOperand &MOUse : MI.explicit_uses()) {
634 // Don't forward into undef use operands since doing so can cause problems
635 // with the machine verifier, since it doesn't treat undef reads as reads,
636 // so we can end up with a live range that ends on an undef read, leading to
637 // an error that the live range doesn't end on a read of the live range
638 // register.
639 if (!MOUse.isReg() || MOUse.isTied() || MOUse.isUndef())
640 continue;
641
642 unsigned UseReg = MOUse.getReg();
643 if (!UseReg)
644 continue;
645
646 // See comment above check for !PreRegRewrite regarding forwarding changing
647 // physical registers.
648 if (!TargetRegisterInfo::isVirtualRegister(UseReg))
649 continue;
650
651 UseReg = VRM->getPhys(UseReg);
652
653 // Don't forward COPYs via non-allocatable regs since they can have
654 // non-standard semantics.
655 if (!MRI->isAllocatable(UseReg))
656 continue;
657
658 auto CI = AvailCopyMap.find(UseReg);
659 if (CI == AvailCopyMap.end())
660 continue;
661
662 MachineInstr &Copy = *CI->second;
663 MachineOperand &CopyDst = Copy.getOperand(0);
664 MachineOperand &CopySrc = Copy.getOperand(1);
665
666 // Don't forward COPYs that are already NOPs due to register assignment.
667 if (getPhysReg(CopyDst) == getPhysReg(CopySrc))
668 continue;
669
670 // FIXME: Don't handle partial uses of wider COPYs yet.
671 if (CopyDst.getSubReg() != 0 || UseReg != getPhysReg(CopyDst))
672 continue;
673
674 // Don't forward COPYs of non-allocatable regs unless they are constant.
675 unsigned CopySrcReg = CopySrc.getReg();
676 if (TargetRegisterInfo::isPhysicalRegister(CopySrcReg) &&
677 !MRI->isAllocatable(CopySrcReg) && !MRI->isConstantPhysReg(CopySrcReg))
678 continue;
679
680 if (!isForwardableRegClassCopy(Copy, MI))
681 continue;
682
683 unsigned NewUseReg, NewUseSubReg;
684 bool SubRegOK;
685 std::tie(NewUseReg, NewUseSubReg, SubRegOK) =
686 checkUseSubReg(CopySrc, MOUse);
687 if (!SubRegOK)
688 continue;
689
690 if (hasImplicitOverlap(MI, MOUse))
691 continue;
692
693 if (!DebugCounter::shouldExecute(FwdCounter))
694 continue;
695
696 DEBUG(dbgs() << "MCP: Replacing "
697 << PrintReg(MOUse.getReg(), TRI, MOUse.getSubReg())
698 << "\n with "
699 << PrintReg(NewUseReg, TRI, CopySrc.getSubReg())
700 << "\n in "
701 << MI
702 << " from "
703 << Copy);
704
705 narrowRegClass(MI, MOUse, NewUseReg, NewUseSubReg);
706
707 unsigned OrigUseReg = MOUse.getReg();
708 MOUse.setReg(NewUseReg);
709 MOUse.setSubReg(NewUseSubReg);
710
711 DEBUG(dbgs() << "MCP: After replacement: " << MI << "\n");
712
713 if (PreRegRewrite)
714 updateForwardedCopyLiveInterval(Copy, MI, OrigUseReg, NewUseReg,
715 NewUseSubReg);
716 else
717 for (MachineInstr &KMI :
718 make_range(Copy.getIterator(), std::next(MI.getIterator())))
719 KMI.clearRegisterKills(NewUseReg, TRI);
720
721 ++NumCopyForwards;
722 Changed = true;
723 }
724 }
725
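Tying forwardUses to the live-interval update above, the intended net effect when all uses of a COPY destination get forwarded is (sketch with invented vregs):

    %vreg1 = COPY %vreg0      ; becomes a dead def; deleted by
                              ; LiveRangeEdit::eliminateDeadDefs via the
                              ; LRE_WillEraseInstruction callback
    ... = OP1 %vreg1          ; rewritten to OP1 %vreg0
    ... = OP2 %vreg1          ; rewritten to OP2 %vreg0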
726209 void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
727210 DEBUG(dbgs() << "MCP: CopyPropagateBlock " << MBB.getName() << "\n");
728211
731214 ++I;
732215
733216 if (MI->isCopy()) {
734 unsigned Def = getPhysReg(MI->getOperand(0));
735 unsigned Src = getPhysReg(MI->getOperand(1));
217 unsigned Def = MI->getOperand(0).getReg();
218 unsigned Src = MI->getOperand(1).getReg();
219
220 assert(!TargetRegisterInfo::isVirtualRegister(Def) &&
221 !TargetRegisterInfo::isVirtualRegister(Src) &&
222 "MachineCopyPropagation should be run after register allocation!");
736223
737224 // The two copies cancel out and the source of the first copy
738225 // hasn't been overridden, eliminate the second one. e.g.
749236 // %ECX = COPY %EAX
750237 // =>
751238 // %ECX = COPY %EAX
752 if (!PreRegRewrite)
753 if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
754 continue;
755
756 forwardUses(*MI);
757
758 // Src may have been changed by forwardUses()
759 Src = getPhysReg(MI->getOperand(1));
760 unsigned DefClobber = getFullPhysReg(MI->getOperand(0));
761 unsigned SrcClobber = getFullPhysReg(MI->getOperand(1));
239 if (eraseIfRedundant(*MI, Def, Src) || eraseIfRedundant(*MI, Src, Def))
240 continue;
762241
763242 // If Src is defined by a previous copy, the previous copy cannot be
764243 // eliminated.
775254 DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
776255
777256 // Copy is now a candidate for deletion.
778 // Only look for dead COPYs if we're not running just before
779 // VirtRegRewriter, since presumably these COPYs will have already been
780 // removed.
781 if (!PreRegRewrite && !MRI->isReserved(Def))
257 if (!MRI->isReserved(Def))
782258 MaybeDeadCopies.insert(MI);
783259
784260 // If 'Def' is previously source of another copy, then this earlier copy's
788264 // %xmm2 = copy %xmm0
789265 // ...
790266 // %xmm2 = copy %xmm9
791 ClobberRegister(DefClobber);
267 ClobberRegister(Def);
792268 for (const MachineOperand &MO : MI->implicit_operands()) {
793269 if (!MO.isReg() || !MO.isDef())
794270 continue;
795 unsigned Reg = getFullPhysReg(MO);
271 unsigned Reg = MO.getReg();
796272 if (!Reg)
797273 continue;
798274 ClobberRegister(Reg);
807283
808284 // Remember source that's copied to Def. Once it's clobbered, then
809285 // it's no longer available for copy propagation.
810 RegList &DestList = SrcMap[SrcClobber];
811 if (!is_contained(DestList, DefClobber))
812 DestList.push_back(DefClobber);
286 RegList &DestList = SrcMap[Src];
287 if (!is_contained(DestList, Def))
288 DestList.push_back(Def);
813289
814290 continue;
815291 }
816
817 // Clobber any earlyclobber regs first.
818 for (const MachineOperand &MO : MI->operands())
819 if (MO.isReg() && MO.isEarlyClobber()) {
820 unsigned Reg = getFullPhysReg(MO);
821 // If we have a tied earlyclobber, that means it is also read by this
822 // instruction, so we need to make sure we don't remove it as dead
823 // later.
824 if (MO.isTied())
825 ReadRegister(Reg);
826 ClobberRegister(Reg);
827 }
828
829 forwardUses(*MI);
830292
831293 // Not a copy.
832294 SmallVector<unsigned, 2> Defs;
836298 RegMask = &MO;
837299 if (!MO.isReg())
838300 continue;
839 unsigned Reg = getFullPhysReg(MO);
301 unsigned Reg = MO.getReg();
840302 if (!Reg)
841303 continue;
842304
843 if (MO.isDef() && !MO.isEarlyClobber()) {
305 assert(!TargetRegisterInfo::isVirtualRegister(Reg) &&
306 "MachineCopyPropagation should be run after register allocation!");
307
308 if (MO.isDef()) {
844309 Defs.push_back(Reg);
845310 continue;
846311 } else if (MO.readsReg())
897362 // since we don't want to trust live-in lists.
898363 if (MBB.succ_empty()) {
899364 for (MachineInstr *MaybeDead : MaybeDeadCopies) {
900 DEBUG(dbgs() << "MCP: Removing copy due to no live-out succ: ";
901 MaybeDead->dump());
902365 assert(!MRI->isReserved(MaybeDead->getOperand(0).getReg()));
903366 MaybeDead->eraseFromParent();
904367 Changed = true;
921384 TRI = MF.getSubtarget().getRegisterInfo();
922385 TII = MF.getSubtarget().getInstrInfo();
923386 MRI = &MF.getRegInfo();
924 this->MF = &MF;
925 if (PreRegRewrite) {
926 Indexes = &getAnalysis<SlotIndexes>();
927 LIS = &getAnalysis<LiveIntervals>();
928 VRM = &getAnalysis<VirtRegMap>();
929 }
930 NoSubRegLiveness = !MRI->subRegLivenessEnabled();
931387
932388 for (MachineBasicBlock &MBB : MF)
933389 CopyPropagateBlock(MBB);
8787 cl::desc("Disable Codegen Prepare"));
8888 static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
8989 cl::desc("Disable Copy Propagation pass"));
90 static cl::opt<bool> DisableCopyPropPreRegRewrite("disable-copyprop-prerewrite", cl::Hidden,
91 cl::desc("Disable Copy Propagation Pre-Register Re-write pass"));
9290 static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
9391 cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
9492 static cl::opt<bool> EnableImplicitNullChecks(
248246
249247 if (StandardID == &MachineCopyPropagationID)
250248 return applyDisable(TargetID, DisableCopyProp);
251
252 if (StandardID == &MachineCopyPropagationPreRegRewriteID)
253 return applyDisable(TargetID, DisableCopyPropPreRegRewrite);
254249
255250 return TargetID;
256251 }
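Both runs of the pass end up independently controllable from the llc command line through applyDisable (illustrative invocations):

    llc -disable-copyprop ...             # skip the post-rewrite run
    llc -disable-copyprop-prerewrite ...  # skip the pre-rewrite run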
10601055 // Allow targets to change the register assignments before rewriting.
10611056 addPreRewrite();
10621057
1063 // Copy propagate to forward register uses and try to eliminate COPYs that
1064 // were not coalesced.
1065 addPass(&MachineCopyPropagationPreRegRewriteID);
1066
10671058 // Finally rewrite virtual registers.
10681059 addPass(&VirtRegRewriterID);
10691060
88 ; CHECK-LABEL: halfword:
99 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
1010 ; CHECK: ldrh [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #1]
11 ; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
12 ; CHECK: strh [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #1]
11 ; CHECK: strh [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #1]
1312 %shr81 = lshr i32 %xor72, 9
1413 %conv82 = zext i32 %shr81 to i64
1514 %idxprom83 = and i64 %conv82, 255
2423 ; CHECK-LABEL: word:
2524 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
2625 ; CHECK: ldr [[REG1:w[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #2]
27 ; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
28 ; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #2]
26 ; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #2]
2927 %shr81 = lshr i32 %xor72, 9
3028 %conv82 = zext i32 %shr81 to i64
3129 %idxprom83 = and i64 %conv82, 255
4038 ; CHECK-LABEL: doubleword:
4139 ; CHECK: ubfx [[REG:x[0-9]+]], x1, #9, #8
4240 ; CHECK: ldr [[REG1:x[0-9]+]], [{{.*}}[[REG2:x[0-9]+]], [[REG]], lsl #3]
43 ; CHECK: mov [[REG3:x[0-9]+]], [[REG2]]
44 ; CHECK: str [[REG1]], [{{.*}}[[REG3]], [[REG]], lsl #3]
41 ; CHECK: str [[REG1]], [{{.*}}[[REG2]], [[REG]], lsl #3]
4542 %shr81 = lshr i32 %xor72, 9
4643 %conv82 = zext i32 %shr81 to i64
4744 %idxprom83 = and i64 %conv82, 255
77 ; CHECK: add.2d v[[REG:[0-9]+]], v0, v1
88 ; CHECK: add d[[REG3:[0-9]+]], d[[REG]], d1
99 ; CHECK: sub d[[REG2:[0-9]+]], d[[REG]], d1
10 ; CHECK-NOT: fmov
10 ; Without advanced copy optimization, we end up with cross register
11 ; bank copies that cannot be coalesced.
12 ; CHECK-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
13 ; With advanced copy optimization, we end up with just one copy
14 ; to insert the computed high part into the V register.
15 ; CHECK-OPT-NOT: fmov
1116 ; CHECK: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
12 ; CHECK-NOT: fmov
17 ; CHECK-NOOPT: fmov d0, [[COPY_REG3]]
18 ; CHECK-OPT-NOT: fmov
1319 ; CHECK: ins.d v0[1], [[COPY_REG2]]
1420 ; CHECK-NEXT: ret
1521 ;
1723 ; GENERIC: add v[[REG:[0-9]+]].2d, v0.2d, v1.2d
1824 ; GENERIC: add d[[REG3:[0-9]+]], d[[REG]], d1
1925 ; GENERIC: sub d[[REG2:[0-9]+]], d[[REG]], d1
20 ; GENERIC-NOT: fmov
26 ; GENERIC-NOOPT: fmov [[COPY_REG3:x[0-9]+]], d[[REG3]]
27 ; GENERIC-OPT-NOT: fmov
2128 ; GENERIC: fmov [[COPY_REG2:x[0-9]+]], d[[REG2]]
22 ; GENERIC-NOT: fmov
29 ; GENERIC-NOOPT: fmov d0, [[COPY_REG3]]
30 ; GENERIC-OPT-NOT: fmov
2331 ; GENERIC: ins v0.d[1], [[COPY_REG2]]
2432 ; GENERIC-NEXT: ret
2533 %add = add <2 x i64> %a, %b
33 define i32 @t(i32 %a, i32 %b, i32 %c, i32 %d) nounwind ssp {
44 entry:
55 ; CHECK-LABEL: t:
6 ; CHECK: mov [[REG2:x[0-9]+]], x3
7 ; CHECK: mov [[REG1:x[0-9]+]], x2
8 ; CHECK: mov x0, x2
9 ; CHECK: mov x1, x3
6 ; CHECK: mov x0, [[REG1:x[0-9]+]]
7 ; CHECK: mov x1, [[REG2:x[0-9]+]]
108 ; CHECK: bl _foo
119 ; CHECK: mov x0, [[REG1]]
1210 ; CHECK: mov x1, [[REG2]]
488488
489489 ; CHECK-COMMON-LABEL: test_phi:
490490 ; CHECK-COMMON: mov x[[PTR:[0-9]+]], x0
491 ; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x0]
491 ; CHECK-COMMON: ldr h[[AB:[0-9]+]], [x[[PTR]]]
492492 ; CHECK-COMMON: [[LOOP:LBB[0-9_]+]]:
493493 ; CHECK-COMMON: mov.16b v[[R:[0-9]+]], v[[AB]]
494494 ; CHECK-COMMON: ldr h[[AB]], [x[[PTR]]]
1616 %val = zext i1 %test to i32
1717 ; CHECK: cset {{[xw][0-9]+}}, ne
1818
19 ; CHECK: mov [[RHSCOPY:w[0-9]+]], [[RHS]]
20 ; CHECK: mov [[LHSCOPY:w[0-9]+]], [[LHS]]
21
2219 store i32 %val, i32* @var
2320
2421 call void @bar()
2724 ; Currently, the comparison is emitted again. An MSR/MRS pair would also be
2825 ; acceptable, but assuming the call preserves NZCV is not.
2926 br i1 %test, label %iftrue, label %iffalse
30 ; CHECK: cmp [[LHSCOPY]], [[RHSCOPY]]
27 ; CHECK: cmp [[LHS]], [[RHS]]
3128 ; CHECK: b.eq
3229
3330 iftrue:
77 define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg) {
88 ;CHECK-LABEL: test
99 entry:
10 ; A53: mov [[DATA:w[0-9]+]], w1
1011 ; A53: str q{{[0-9]+}}, {{.*}}
1112 ; A53: str q{{[0-9]+}}, {{.*}}
12 ; A53: str w1, {{.*}}
13 ; A53: str [[DATA]], {{.*}}
1314
1415 %0 = bitcast %struct1* %fde to i8*
1516 tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 40, i32 8, i1 false)
66 define void @test(i32 %px) {
77 ; CHECK_LABEL: test:
88 ; CHECK_LABEL: %entry
9 ; CHECK: subs [[REG0:w[0-9]+]],
10 ; CHECK: csel {{w[0-9]+}}, wzr, [[REG0]]
9 ; CHECK: subs
10 ; CHECK-NEXT: csel
1111 entry:
1212 %sub = add nsw i32 %px, -1
1313 %cmp = icmp slt i32 %px, 1
546546 ; GCN: s_mov_b32 s5, s32
547547 ; GCN: s_add_u32 s32, s32, 0x300
548548
549 ; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-57-9][0-9]*]], s14
550 ; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-68-9][0-9]*]], s15
551 ; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-79][0-9]*]], s16
549 ; GCN-DAG: s_mov_b32 [[SAVE_X:s[0-9]+]], s14
550 ; GCN-DAG: s_mov_b32 [[SAVE_Y:s[0-9]+]], s15
551 ; GCN-DAG: s_mov_b32 [[SAVE_Z:s[0-9]+]], s16
552552 ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[6:7]
553553 ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[8:9]
554554 ; GCN-DAG: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, s[10:11]
555555
556 ; GCN-DAG: s_mov_b32 s6, s14
557 ; GCN-DAG: s_mov_b32 s7, s15
558 ; GCN-DAG: s_mov_b32 s8, s16
556 ; GCN-DAG: s_mov_b32 s6, [[SAVE_X]]
557 ; GCN-DAG: s_mov_b32 s7, [[SAVE_Y]]
558 ; GCN-DAG: s_mov_b32 s8, [[SAVE_Z]]
559559 ; GCN: s_swappc_b64
560560
561561 ; GCN: buffer_store_dword v{{[0-9]+}}, off, s[0:3], s5 offset:4
11 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
22
33 ; GCN-LABEL: {{^}}vgpr:
4 ; GCN-DAG: v_mov_b32_e32 v1, v0
5 ; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
6 ; GCN: s_waitcnt expcnt(0)
7 ; GCN: v_add_f32_e32 v0, 1.0, v0
4 ; GCN: v_mov_b32_e32 v1, v0
5 ; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
6 ; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
7 ; GCN: s_waitcnt expcnt(0)
88 ; GCN-NOT: s_endpgm
99 define amdgpu_vs { float, float } @vgpr([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
1010 bb:
203203 }
204204
205205 ; GCN-LABEL: {{^}}both:
206 ; GCN-DAG: exp mrt0 v0, v0, v0, v0 done vm
207 ; GCN-DAG: v_mov_b32_e32 v1, v0
206 ; GCN: v_mov_b32_e32 v1, v0
207 ; GCN-DAG: exp mrt0 v1, v1, v1, v1 done vm
208 ; GCN-DAG: v_add_f32_e32 v0, 1.0, v1
209 ; GCN-DAG: s_add_i32 s0, s3, 2
208210 ; GCN-DAG: s_mov_b32 s1, s2
209 ; GCN: s_waitcnt expcnt(0)
210 ; GCN: v_add_f32_e32 v0, 1.0, v0
211 ; GCN-DAG: s_add_i32 s0, s3, 2
212 ; GCN-DAG: s_mov_b32 s2, s3
211 ; GCN: s_mov_b32 s2, s3
212 ; GCN: s_waitcnt expcnt(0)
213213 ; GCN-NOT: s_endpgm
214214 define amdgpu_vs { float, i32, float, i32, i32 } @both([9 x <16 x i8>] addrspace(2)* byval %arg, i32 inreg %arg1, i32 inreg %arg2, float %arg3) #0 {
215215 bb:
286286
287287 %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
288288 %oldval = extractvalue { i32, i1 } %pair, 0
289 ; CHECK-ARMV7: mov r[[ADDR:[0-9]+]], r0
290 ; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r0]
289 ; CHECK-ARMV7: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
291290 ; CHECK-ARMV7: cmp [[OLDVAL]], r1
292291 ; CHECK-ARMV7: bne [[FAIL_BB:\.?LBB[0-9]+_[0-9]+]]
293292 ; CHECK-ARMV7: dmb ish
305304 ; CHECK-ARMV7: dmb ish
306305 ; CHECK-ARMV7: bx lr
307306
308 ; CHECK-T2: mov r[[ADDR:[0-9]+]], r0
309 ; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r0]
307 ; CHECK-T2: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
310308 ; CHECK-T2: cmp [[OLDVAL]], r1
311309 ; CHECK-T2: bne [[FAIL_BB:\.?LBB.*]]
312310 ; CHECK-T2: dmb ish
181181 ; CHECK-APPLE: beq
182182 ; CHECK-APPLE: mov r0, #16
183183 ; CHECK-APPLE: malloc
184 ; CHECK-APPLE: strb r{{.*}}, [r0, #8]
184 ; CHECK-APPLE: strb r{{.*}}, [{{.*}}[[ID]], #8]
185185 ; CHECK-APPLE: ble
186186 ; CHECK-APPLE: mov r8, [[ID]]
187187
164164 ; MMR3: subu16 $5, $[[T19]], $[[T20]]
165165
166166 ; MMR6: move $[[T0:[0-9]+]], $7
167 ; MMR6: sw $7, 8($sp)
167 ; MMR6: sw $[[T0]], 8($sp)
168168 ; MMR6: move $[[T1:[0-9]+]], $5
169169 ; MMR6: sw $4, 12($sp)
170170 ; MMR6: lw $[[T2:[0-9]+]], 48($sp)
1313 ret double %r
1414
1515 ; CHECK: @foo3
16 ; CHECK: fmr [[REG:[0-9]+]], [[REG2:[0-9]+]]
17 ; CHECK: xsnmsubadp [[REG]], {{[0-9]+}}, [[REG2]]
16 ; CHECK: xsnmsubadp [[REG:[0-9]+]], {{[0-9]+}}, [[REG]]
1817 ; CHECK: xsmaddmdp
1918 ; CHECK: xsmaddadp
2019 }
7474
7575 ; CHECK-DAG: mr [[REG:[0-9]+]], 3
7676 ; CHECK-DAG: li 0, 1076
77 ; CHECK-DAG: stw 3,
77 ; CHECK: stw [[REG]],
7878
7979 ; CHECK: #APP
8080 ; CHECK: sc
2222 ;CHECK-LABEL: straight_test:
2323 ; test1 may have been merged with entry
2424 ;CHECK: mr [[TAGREG:[0-9]+]], 3
25 ;CHECK: andi. {{[0-9]+}}, [[TAGREG:[0-9]+]], 1
25 ;CHECK: andi. {{[0-9]+}}, [[TAGREG]], 1
2626 ;CHECK-NEXT: bc 12, 1, .[[OPT1LABEL:[_0-9A-Za-z]+]]
2727 ;CHECK-NEXT: # %test2
2828 ;CHECK-NEXT: rlwinm. {{[0-9]+}}, [[TAGREG]], 0, 30, 30
234234
235235 ; CHECK-LABEL: test_load_add_i32
236236 ; CHECK: membar
237 ; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
238 ; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
239 ; CHECK: cas [%o0], [[V]], [[V2]]
237 ; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
238 ; CHECK: cas [%o0], [[V]], [[U]]
240239 ; CHECK: membar
241240 define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
242241 entry:
597597 define i32 @b_to_bx(i32 %value) {
598598 ; CHECK-LABEL: b_to_bx:
599599 ; DISABLE: push {r7, lr}
600 ; CHECK: cmp r0, #49
600 ; CHECK: cmp r1, #49
601601 ; CHECK-NEXT: bgt [[ELSE_LABEL:LBB[0-9_]+]]
602602 ; ENABLE: push {r7, lr}
603603
66 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
77 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %ecx
88 ; CHECK-NEXT: movl %ecx, %edx
9 ; CHECK-NEXT: imull %ecx, %edx
9 ; CHECK-NEXT: imull %edx, %edx
1010 ; CHECK-NEXT: imull %eax, %ecx
1111 ; CHECK-NEXT: imull %eax, %eax
1212 ; CHECK-NEXT: addl %edx, %eax
105105 ; CHECK-DAG: movl %edx, %[[r1:[^ ]*]]
106106 ; CHECK-DAG: movl 8(%ebp), %[[r2:[^ ]*]]
107107 ; CHECK-DAG: movl %[[r2]], 4(%esp)
108 ; CHECK-DAG: movl %edx, (%esp)
108 ; CHECK-DAG: movl %[[r1]], (%esp)
109109 ; CHECK: movl %esp, %[[reg:[^ ]*]]
110110 ; CHECK: pushl %[[reg]]
111111 ; CHECK: calll _addrof_i64
406406 ; SSE2-NEXT: pand %xmm0, %xmm2
407407 ; SSE2-NEXT: packuswb %xmm1, %xmm2
408408 ; SSE2-NEXT: packuswb %xmm10, %xmm2
409 ; SSE2-NEXT: movdqa %xmm2, %xmm1
409410 ; SSE2-NEXT: psrld $1, %xmm4
410411 ; SSE2-NEXT: psrld $1, %xmm12
411412 ; SSE2-NEXT: pand %xmm0, %xmm12
442443 ; SSE2-NEXT: movdqu %xmm7, (%rax)
443444 ; SSE2-NEXT: movdqu %xmm11, (%rax)
444445 ; SSE2-NEXT: movdqu %xmm13, (%rax)
445 ; SSE2-NEXT: movdqu %xmm2, (%rax)
446 ; SSE2-NEXT: movdqu %xmm1, (%rax)
446447 ; SSE2-NEXT: retq
447448 ;
448449 ; AVX1-LABEL: avg_v64i8:
1111 ; CHECK-NEXT: movq %rdx, %r14
1212 ; CHECK-NEXT: movq %rsi, %r15
1313 ; CHECK-NEXT: movq %rdi, %rbx
14 ; CHECK-NEXT: vmovaps (%rdi), %ymm0
14 ; CHECK-NEXT: vmovaps (%rbx), %ymm0
1515 ; CHECK-NEXT: vmovups %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill
16 ; CHECK-NEXT: vmovaps (%rsi), %ymm1
16 ; CHECK-NEXT: vmovaps (%r15), %ymm1
1717 ; CHECK-NEXT: vmovups %ymm1, {{[0-9]+}}(%rsp) # 32-byte Spill
18 ; CHECK-NEXT: vmovaps (%rdx), %ymm2
18 ; CHECK-NEXT: vmovaps (%r14), %ymm2
1919 ; CHECK-NEXT: vmovups %ymm2, (%rsp) # 32-byte Spill
2020 ; CHECK-NEXT: callq dummy
2121 ; CHECK-NEXT: vmovups {{[0-9]+}}(%rsp), %ymm0 # 32-byte Reload
88 ; CHECK-NEXT: pushq %rbx
99 ; CHECK-NEXT: subq $112, %rsp
1010 ; CHECK-NEXT: movq %rdi, %rbx
11 ; CHECK-NEXT: vmovups (%rdi), %zmm0
11 ; CHECK-NEXT: vmovups (%rbx), %zmm0
1212 ; CHECK-NEXT: vmovups %zmm0, (%rsp) ## 64-byte Spill
1313 ; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %zmm1
14 ; CHECK-NEXT: vmovaps %zmm1, (%rdi)
14 ; CHECK-NEXT: vmovaps %zmm1, (%rbx)
1515 ; CHECK-NEXT: callq _Print__512
1616 ; CHECK-NEXT: vmovups (%rsp), %zmm0 ## 64-byte Reload
1717 ; CHECK-NEXT: callq _Print__512
465465 ; KNL_X32-NEXT: movl %edi, (%esp)
466466 ; KNL_X32-NEXT: calll _test11
467467 ; KNL_X32-NEXT: movl %eax, %ebx
468 ; KNL_X32-NEXT: movzbl %al, %eax
468 ; KNL_X32-NEXT: movzbl %bl, %eax
469469 ; KNL_X32-NEXT: movl %eax, {{[0-9]+}}(%esp)
470470 ; KNL_X32-NEXT: movl %esi, {{[0-9]+}}(%esp)
471471 ; KNL_X32-NEXT: movl %edi, (%esp)
11701170 ; KNL-NEXT: kmovw %esi, %k0
11711171 ; KNL-NEXT: kshiftlw $7, %k0, %k2
11721172 ; KNL-NEXT: kshiftrw $15, %k2, %k2
1173 ; KNL-NEXT: kmovw %k2, %eax
11731174 ; KNL-NEXT: kshiftlw $6, %k0, %k0
11741175 ; KNL-NEXT: kshiftrw $15, %k0, %k0
11751176 ; KNL-NEXT: kmovw %k0, %ecx
11821183 ; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
11831184 ; KNL-NEXT: kshiftlw $1, %k0, %k0
11841185 ; KNL-NEXT: kshiftrw $1, %k0, %k0
1185 ; KNL-NEXT: kshiftlw $7, %k2, %k1
1186 ; KNL-NEXT: kmovw %eax, %k1
1187 ; KNL-NEXT: kshiftlw $7, %k1, %k1
11861188 ; KNL-NEXT: korw %k1, %k0, %k1
11871189 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
11881190 ; KNL-NEXT: vpmovqw %zmm0, %xmm0
11941196 ; SKX-NEXT: kmovd %esi, %k1
11951197 ; SKX-NEXT: kshiftlw $7, %k1, %k2
11961198 ; SKX-NEXT: kshiftrw $15, %k2, %k2
1199 ; SKX-NEXT: kmovd %k2, %eax
11971200 ; SKX-NEXT: kshiftlw $6, %k1, %k1
11981201 ; SKX-NEXT: kshiftrw $15, %k1, %k1
1202 ; SKX-NEXT: kmovd %k1, %ecx
11991203 ; SKX-NEXT: vpmovm2q %k0, %zmm0
1200 ; SKX-NEXT: vpmovm2q %k1, %zmm1
1204 ; SKX-NEXT: kmovd %ecx, %k0
1205 ; SKX-NEXT: vpmovm2q %k0, %zmm1
12011206 ; SKX-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
12021207 ; SKX-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
12031208 ; SKX-NEXT: vpmovq2m %zmm2, %k0
12041209 ; SKX-NEXT: kshiftlb $1, %k0, %k0
12051210 ; SKX-NEXT: kshiftrb $1, %k0, %k0
1206 ; SKX-NEXT: kshiftlb $7, %k2, %k1
1211 ; SKX-NEXT: kmovd %eax, %k1
1212 ; SKX-NEXT: kshiftlb $7, %k1, %k1
12071213 ; SKX-NEXT: korb %k1, %k0, %k0
12081214 ; SKX-NEXT: vpmovm2w %k0, %xmm0
12091215 ; SKX-NEXT: vzeroupper
12151221 ; AVX512BW-NEXT: kmovd %esi, %k0
12161222 ; AVX512BW-NEXT: kshiftlw $7, %k0, %k2
12171223 ; AVX512BW-NEXT: kshiftrw $15, %k2, %k2
1224 ; AVX512BW-NEXT: kmovd %k2, %eax
12181225 ; AVX512BW-NEXT: kshiftlw $6, %k0, %k0
12191226 ; AVX512BW-NEXT: kshiftrw $15, %k0, %k0
12201227 ; AVX512BW-NEXT: kmovd %k0, %ecx
12271234 ; AVX512BW-NEXT: vptestmq %zmm0, %zmm0, %k0
12281235 ; AVX512BW-NEXT: kshiftlw $1, %k0, %k0
12291236 ; AVX512BW-NEXT: kshiftrw $1, %k0, %k0
1230 ; AVX512BW-NEXT: kshiftlw $7, %k2, %k1
1237 ; AVX512BW-NEXT: kmovd %eax, %k1
1238 ; AVX512BW-NEXT: kshiftlw $7, %k1, %k1
12311239 ; AVX512BW-NEXT: korw %k1, %k0, %k0
12321240 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
12331241 ; AVX512BW-NEXT: ## kill: %XMM0 %XMM0 %ZMM0
12401248 ; AVX512DQ-NEXT: kmovw %esi, %k1
12411249 ; AVX512DQ-NEXT: kshiftlw $7, %k1, %k2
12421250 ; AVX512DQ-NEXT: kshiftrw $15, %k2, %k2
1251 ; AVX512DQ-NEXT: kmovw %k2, %eax
12431252 ; AVX512DQ-NEXT: kshiftlw $6, %k1, %k1
12441253 ; AVX512DQ-NEXT: kshiftrw $15, %k1, %k1
1254 ; AVX512DQ-NEXT: kmovw %k1, %ecx
12451255 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
1246 ; AVX512DQ-NEXT: vpmovm2q %k1, %zmm1
1256 ; AVX512DQ-NEXT: kmovw %ecx, %k0
1257 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm1
12471258 ; AVX512DQ-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,1,2,3,4,5,8,7]
12481259 ; AVX512DQ-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
12491260 ; AVX512DQ-NEXT: vpmovq2m %zmm2, %k0
12501261 ; AVX512DQ-NEXT: kshiftlb $1, %k0, %k0
12511262 ; AVX512DQ-NEXT: kshiftrb $1, %k0, %k0
1252 ; AVX512DQ-NEXT: kshiftlb $7, %k2, %k1
1263 ; AVX512DQ-NEXT: kmovw %eax, %k1
1264 ; AVX512DQ-NEXT: kshiftlb $7, %k1, %k1
12531265 ; AVX512DQ-NEXT: korb %k1, %k0, %k0
12541266 ; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
12551267 ; AVX512DQ-NEXT: vpmovqw %zmm0, %xmm0
20022002 ; AVX512F-32-NEXT: vpblendvb %ymm3, %ymm2, %ymm7, %ymm7
20032003 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
20042004 ; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
2005 ; AVX512F-32-NEXT: movl %ecx, %eax
2005 ; AVX512F-32-NEXT: movl %esi, %eax
20062006 ; AVX512F-32-NEXT: shrl $30, %eax
20072007 ; AVX512F-32-NEXT: kmovd %eax, %k1
20082008 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
20132013 ; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
20142014 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
20152015 ; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
2016 ; AVX512F-32-NEXT: movl %ecx, %eax
2016 ; AVX512F-32-NEXT: movl %esi, %eax
20172017 ; AVX512F-32-NEXT: shrl $31, %eax
20182018 ; AVX512F-32-NEXT: kmovd %eax, %k1
20192019 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
28862886 ; AVX512F-32-NEXT: vpblendvb %ymm3, %ymm2, %ymm7, %ymm7
28872887 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm7[0,1,2,3],zmm2[4,5,6,7]
28882888 ; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
2889 ; AVX512F-32-NEXT: movl %ecx, %eax
2889 ; AVX512F-32-NEXT: movl %esi, %eax
28902890 ; AVX512F-32-NEXT: shrl $30, %eax
28912891 ; AVX512F-32-NEXT: kmovd %eax, %k1
28922892 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
28972897 ; AVX512F-32-NEXT: vpblendvb %ymm7, %ymm3, %ymm2, %ymm2
28982898 ; AVX512F-32-NEXT: vshufi64x2 {{.*#+}} zmm2 = zmm2[0,1,2,3],zmm3[4,5,6,7]
28992899 ; AVX512F-32-NEXT: vpmovb2m %zmm2, %k0
2900 ; AVX512F-32-NEXT: movl %ecx, %eax
2900 ; AVX512F-32-NEXT: movl %esi, %eax
29012901 ; AVX512F-32-NEXT: shrl $31, %eax
29022902 ; AVX512F-32-NEXT: kmovd %eax, %k1
29032903 ; AVX512F-32-NEXT: vpmovm2b %k1, %zmm2
3737 ; SSE2-LABEL: test_negative_zero_1:
3838 ; SSE2: # BB#0: # %entry
3939 ; SSE2-NEXT: movaps %xmm0, %xmm1
40 ; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
40 ; SSE2-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
4141 ; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
4242 ; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
4343 ; SSE2-NEXT: xorps %xmm2, %xmm2
230230 ; SSE-NEXT: cvtss2sd %xmm2, %xmm4
231231 ; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
232232 ; SSE-NEXT: movaps %xmm2, %xmm6
233 ; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm2[1],xmm6[1]
234 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
233 ; SSE-NEXT: movhlps {{.*#+}} xmm6 = xmm6[1,1]
234 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
235235 ; SSE-NEXT: movaps {{.*#+}} xmm7
236236 ; SSE-NEXT: movaps %xmm0, %xmm2
237237 ; SSE-NEXT: andps %xmm7, %xmm2
246246 ; SSE-NEXT: orps %xmm0, %xmm4
247247 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm4[0]
248248 ; SSE-NEXT: movaps %xmm1, %xmm0
249 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm1[1],xmm0[1]
249 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
250250 ; SSE-NEXT: andps %xmm7, %xmm0
251251 ; SSE-NEXT: cvtss2sd %xmm3, %xmm3
252252 ; SSE-NEXT: andps %xmm8, %xmm3
293293 ; SSE-NEXT: orps %xmm6, %xmm1
294294 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
295295 ; SSE-NEXT: movaps %xmm3, %xmm1
296 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm3[1],xmm1[1]
296 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
297297 ; SSE-NEXT: andps %xmm5, %xmm1
298298 ; SSE-NEXT: xorps %xmm6, %xmm6
299299 ; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
1313 ; SSE: # BB#0:
1414 ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
1515 ; SSE-NEXT: movaps %xmm0, %xmm2
16 ; SSE-NEXT: addss %xmm0, %xmm2
16 ; SSE-NEXT: addss %xmm2, %xmm2
1717 ; SSE-NEXT: mulss %xmm1, %xmm2
1818 ; SSE-NEXT: mulss %xmm0, %xmm0
1919 ; SSE-NEXT: mulss %xmm1, %xmm1
5757 ; SSE-LABEL: complex_square_f64:
5858 ; SSE: # BB#0:
5959 ; SSE-NEXT: movaps %xmm0, %xmm1
60 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
60 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
6161 ; SSE-NEXT: movaps %xmm0, %xmm2
62 ; SSE-NEXT: addsd %xmm0, %xmm2
62 ; SSE-NEXT: addsd %xmm2, %xmm2
6363 ; SSE-NEXT: mulsd %xmm1, %xmm2
6464 ; SSE-NEXT: mulsd %xmm0, %xmm0
6565 ; SSE-NEXT: mulsd %xmm1, %xmm1
160160 ; SSE-LABEL: complex_mul_f64:
161161 ; SSE: # BB#0:
162162 ; SSE-NEXT: movaps %xmm0, %xmm2
163 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
163 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
164164 ; SSE-NEXT: movaps %xmm1, %xmm3
165 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
165 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
166166 ; SSE-NEXT: movaps %xmm3, %xmm4
167167 ; SSE-NEXT: mulsd %xmm0, %xmm4
168168 ; SSE-NEXT: mulsd %xmm1, %xmm0
317317 ; X64: # BB#0: # %entry
318318 ; X64-NEXT: movq %rdi, %rcx
319319 ; X64-NEXT: movabsq $6120523590596543007, %rdx # imm = 0x54F077C718E7C21F
320 ; X64-NEXT: movq %rdi, %rax
320 ; X64-NEXT: movq %rcx, %rax
321321 ; X64-NEXT: mulq %rdx
322322 ; X64-NEXT: shrq $12, %rdx
323323 ; X64-NEXT: imulq $12345, %rdx, %rax # imm = 0x3039
1717
1818 ; CHECK-LABEL: @test_fmaxf
1919 ; SSE: movaps %xmm0, %xmm2
20 ; SSE-NEXT: cmpunordss %xmm0, %xmm2
20 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
2121 ; SSE-NEXT: movaps %xmm2, %xmm3
2222 ; SSE-NEXT: andps %xmm1, %xmm3
2323 ; SSE-NEXT: maxss %xmm0, %xmm1
4646
4747 ; CHECK-LABEL: @test_fmax
4848 ; SSE: movapd %xmm0, %xmm2
49 ; SSE-NEXT: cmpunordsd %xmm0, %xmm2
49 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
5050 ; SSE-NEXT: movapd %xmm2, %xmm3
5151 ; SSE-NEXT: andpd %xmm1, %xmm3
5252 ; SSE-NEXT: maxsd %xmm0, %xmm1
7373
7474 ; CHECK-LABEL: @test_intrinsic_fmaxf
7575 ; SSE: movaps %xmm0, %xmm2
76 ; SSE-NEXT: cmpunordss %xmm0, %xmm2
76 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
7777 ; SSE-NEXT: movaps %xmm2, %xmm3
7878 ; SSE-NEXT: andps %xmm1, %xmm3
7979 ; SSE-NEXT: maxss %xmm0, %xmm1
9494
9595 ; CHECK-LABEL: @test_intrinsic_fmax
9696 ; SSE: movapd %xmm0, %xmm2
97 ; SSE-NEXT: cmpunordsd %xmm0, %xmm2
97 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
9898 ; SSE-NEXT: movapd %xmm2, %xmm3
9999 ; SSE-NEXT: andpd %xmm1, %xmm3
100100 ; SSE-NEXT: maxsd %xmm0, %xmm1
1717
1818 ; CHECK-LABEL: @test_fminf
1919 ; SSE: movaps %xmm0, %xmm2
20 ; SSE-NEXT: cmpunordss %xmm0, %xmm2
20 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
2121 ; SSE-NEXT: movaps %xmm2, %xmm3
2222 ; SSE-NEXT: andps %xmm1, %xmm3
2323 ; SSE-NEXT: minss %xmm0, %xmm1
3939
4040 ; CHECK-LABEL: @test_fmin
4141 ; SSE: movapd %xmm0, %xmm2
42 ; SSE-NEXT: cmpunordsd %xmm0, %xmm2
42 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
4343 ; SSE-NEXT: movapd %xmm2, %xmm3
4444 ; SSE-NEXT: andpd %xmm1, %xmm3
4545 ; SSE-NEXT: minsd %xmm0, %xmm1
6666
6767 ; CHECK-LABEL: @test_intrinsic_fminf
6868 ; SSE: movaps %xmm0, %xmm2
69 ; SSE-NEXT: cmpunordss %xmm0, %xmm2
69 ; SSE-NEXT: cmpunordss %xmm2, %xmm2
7070 ; SSE-NEXT: movaps %xmm2, %xmm3
7171 ; SSE-NEXT: andps %xmm1, %xmm3
7272 ; SSE-NEXT: minss %xmm0, %xmm1
8686
8787 ; CHECK-LABEL: @test_intrinsic_fmin
8888 ; SSE: movapd %xmm0, %xmm2
89 ; SSE-NEXT: cmpunordsd %xmm0, %xmm2
89 ; SSE-NEXT: cmpunordsd %xmm2, %xmm2
9090 ; SSE-NEXT: movapd %xmm2, %xmm3
9191 ; SSE-NEXT: andpd %xmm1, %xmm3
9292 ; SSE-NEXT: minsd %xmm0, %xmm1
226226 ; CHECK: # BB#0: # %entry
227227 ; CHECK-NEXT: subq $40, %rsp
228228 ; CHECK-NEXT: movaps %xmm0, %xmm1
229 ; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
229 ; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
230230 ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
231231 ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
232232 ; CHECK-NEXT: movq $0, (%rsp)
274274 ; CHECK: # BB#0: # %entry
275275 ; CHECK-NEXT: subq $40, %rsp
276276 ; CHECK-NEXT: movaps %xmm0, %xmm1
277 ; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp)
277 ; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
278278 ; CHECK-NEXT: movq {{[0-9]+}}(%rsp), %rax
279279 ; CHECK-NEXT: movq %rax, {{[0-9]+}}(%rsp)
280280 ; CHECK-NEXT: movq $0, (%rsp)
907907 ; SSE-LABEL: not_a_hsub_2:
908908 ; SSE: # BB#0:
909909 ; SSE-NEXT: movaps %xmm0, %xmm2
910 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
910 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
911911 ; SSE-NEXT: movaps %xmm0, %xmm3
912 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
912 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
913913 ; SSE-NEXT: subss %xmm3, %xmm2
914914 ; SSE-NEXT: movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
915915 ; SSE-NEXT: subss %xmm3, %xmm0
916916 ; SSE-NEXT: movaps %xmm1, %xmm3
917 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm1[2,3]
917 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
918918 ; SSE-NEXT: movaps %xmm1, %xmm4
919 ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
919 ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
920920 ; SSE-NEXT: subss %xmm4, %xmm3
921921 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
922922 ; SSE-NEXT: subss %xmm4, %xmm1
964964 ; SSE-LABEL: not_a_hsub_3:
965965 ; SSE: # BB#0:
966966 ; SSE-NEXT: movaps %xmm1, %xmm2
967 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm1[1],xmm2[1]
967 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
968968 ; SSE-NEXT: subsd %xmm2, %xmm1
969969 ; SSE-NEXT: movaps %xmm0, %xmm2
970 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
970 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
971971 ; SSE-NEXT: subsd %xmm0, %xmm2
972972 ; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm1[0]
973973 ; SSE-NEXT: movapd %xmm2, %xmm0
102102 ; SSE-LABEL: test5_undef:
103103 ; SSE: # BB#0:
104104 ; SSE-NEXT: movaps %xmm0, %xmm1
105 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
105 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
106106 ; SSE-NEXT: addsd %xmm0, %xmm1
107107 ; SSE-NEXT: movapd %xmm1, %xmm0
108108 ; SSE-NEXT: retq
167167 ; SSE-NEXT: movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
168168 ; SSE-NEXT: addss %xmm0, %xmm1
169169 ; SSE-NEXT: movaps %xmm0, %xmm2
170 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
170 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
171171 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
172172 ; SSE-NEXT: addss %xmm2, %xmm0
173173 ; SSE-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
385385 ; CHECK-LIBCALL-NEXT: pushq %rbx
386386 ; CHECK-LIBCALL-NEXT: subq $48, %rsp
387387 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
388 ; CHECK-LIBCALL-NEXT: movzwl (%rdi), %edi
388 ; CHECK-LIBCALL-NEXT: movzwl (%rbx), %edi
389389 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
390390 ; CHECK-LIBCALL-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
391391 ; CHECK-LIBCALL-NEXT: movzwl 2(%rbx), %edi
471471 ; CHECK-LIBCALL-NEXT: pushq %rbx
472472 ; CHECK-LIBCALL-NEXT: subq $16, %rsp
473473 ; CHECK-LIBCALL-NEXT: movq %rdi, %rbx
474 ; CHECK-LIBCALL-NEXT: movzwl 4(%rdi), %edi
474 ; CHECK-LIBCALL-NEXT: movzwl 4(%rbx), %edi
475475 ; CHECK-LIBCALL-NEXT: callq __gnu_h2f_ieee
476476 ; CHECK-LIBCALL-NEXT: movss %xmm0, {{[0-9]+}}(%rsp) # 4-byte Spill
477477 ; CHECK-LIBCALL-NEXT: movzwl 6(%rbx), %edi
656656 ; CHECK-I686-NEXT: movaps %xmm0, {{[0-9]+}}(%esp) # 16-byte Spill
657657 ; CHECK-I686-NEXT: movl {{[0-9]+}}(%esp), %ebp
658658 ; CHECK-I686-NEXT: movaps %xmm0, %xmm1
659 ; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
659 ; CHECK-I686-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
660660 ; CHECK-I686-NEXT: movss %xmm1, (%esp)
661661 ; CHECK-I686-NEXT: calll __gnu_f2h_ieee
662662 ; CHECK-I686-NEXT: movw %ax, %si
161161 ; CHECK-NEXT: fstpt (%esp)
162162 ; CHECK-NEXT: calll _ceil
163163 ; CHECK-NEXT: fld %st(0)
164 ; CHECK-NEXT: fxch %st(1)
165164 ; CHECK-NEXT: ## InlineAsm Start
166165 ; CHECK-NEXT: fistpl %st(0)
167166 ; CHECK-NEXT: ## InlineAsm End
2323 call void @foo()
2424 ; CHECK-LABEL: bar:
2525 ; CHECK: callq foo
26 ; CHECK-NEXT: movl %edi, %r15d
26 ; CHECK-NEXT: movl %eax, %r15d
2727 call void asm sideeffect "movl $0, %r12d", "{r15},~{r12}"(i32 %X)
2828 ret void
2929 }
2626
2727 ; X64-LABEL: print_framealloc_from_fp:
2828 ; X64: movq %rcx, %[[parent_fp:[a-z]+]]
29 ; X64: movl .Lalloc_func$frame_escape_0(%rcx), %edx
29 ; X64: movl .Lalloc_func$frame_escape_0(%[[parent_fp]]), %edx
3030 ; X64: leaq {{.*}}(%rip), %[[str:[a-z]+]]
3131 ; X64: movq %[[str]], %rcx
3232 ; X64: callq printf
158158 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
159159 ; X32-NEXT: pushl %esi
160160 ; X32-NEXT: movl %esi, %ebx
161 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
161 ; X32-NEXT: movl %ebx, {{[0-9]+}}(%esp) # 4-byte Spill
162162 ; X32-NEXT: pushl %edi
163163 ; X32-NEXT: movl %edi, {{[0-9]+}}(%esp) # 4-byte Spill
164164 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
751751 ; X32-NEXT: pushl $0
752752 ; X32-NEXT: pushl %edi
753753 ; X32-NEXT: movl %ebx, %esi
754 ; X32-NEXT: pushl %ebx
755 ; X32-NEXT: pushl $0
756 ; X32-NEXT: pushl $0
757 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
758 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
759 ; X32-NEXT: pushl %eax
760 ; X32-NEXT: calll __multi3
761 ; X32-NEXT: addl $32, %esp
762 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
763 ; X32-NEXT: pushl $0
764 ; X32-NEXT: pushl $0
765 ; X32-NEXT: pushl $0
766 ; X32-NEXT: pushl $0
767 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
768 ; X32-NEXT: pushl %ebx
769 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
770 ; X32-NEXT: pushl %edi
771 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
772 ; X32-NEXT: pushl %esi
773 ; X32-NEXT: pushl %eax
774 ; X32-NEXT: calll __multi3
775 ; X32-NEXT: addl $32, %esp
776 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
777 ; X32-NEXT: pushl $0
778 ; X32-NEXT: pushl $0
779 ; X32-NEXT: pushl %ebx
780 ; X32-NEXT: pushl %edi
781 ; X32-NEXT: pushl $0
782 ; X32-NEXT: pushl $0
783 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
784 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
785 ; X32-NEXT: pushl %eax
786 ; X32-NEXT: calll __multi3
787 ; X32-NEXT: addl $32, %esp
788 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
789 ; X32-NEXT: pushl $0
790 ; X32-NEXT: pushl $0
791 ; X32-NEXT: pushl %ebx
792 ; X32-NEXT: pushl %edi
793 ; X32-NEXT: pushl $0
794 ; X32-NEXT: pushl $0
795 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
796 ; X32-NEXT: pushl %edi
797 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
798 ; X32-NEXT: pushl %esi
799 ; X32-NEXT: pushl %eax
800 ; X32-NEXT: calll __multi3
801 ; X32-NEXT: addl $32, %esp
802 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
803 ; X32-NEXT: pushl $0
804 ; X32-NEXT: pushl $0
805 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
806 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
807 ; X32-NEXT: pushl %ebx
808 ; X32-NEXT: pushl $0
809 ; X32-NEXT: pushl $0
810 ; X32-NEXT: pushl %edi
811 ; X32-NEXT: pushl %esi
812 ; X32-NEXT: pushl %eax
813 ; X32-NEXT: calll __multi3
814 ; X32-NEXT: addl $32, %esp
815 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
816 ; X32-NEXT: pushl $0
817 ; X32-NEXT: pushl $0
818 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
819 ; X32-NEXT: pushl %ebx
820 ; X32-NEXT: pushl $0
821 ; X32-NEXT: pushl $0
822 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
823 ; X32-NEXT: pushl %edi
824 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
825 ; X32-NEXT: pushl %esi
826 ; X32-NEXT: pushl %eax
827 ; X32-NEXT: calll __multi3
828 ; X32-NEXT: addl $32, %esp
829 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
830 ; X32-NEXT: pushl $0
831 ; X32-NEXT: pushl $0
832 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
833 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
834 ; X32-NEXT: pushl %ebx
835 ; X32-NEXT: pushl $0
836 ; X32-NEXT: pushl $0
837 ; X32-NEXT: pushl %edi
838 ; X32-NEXT: pushl %esi
839 ; X32-NEXT: pushl %eax
840 ; X32-NEXT: calll __multi3
841 ; X32-NEXT: addl $32, %esp
842 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
843 ; X32-NEXT: pushl $0
844 ; X32-NEXT: pushl $0
845 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
846 ; X32-NEXT: pushl %ebx
847 ; X32-NEXT: pushl $0
848 ; X32-NEXT: pushl $0
849 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
850 ; X32-NEXT: pushl %esi
851 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
852 ; X32-NEXT: pushl %ebx
853 ; X32-NEXT: pushl %eax
854 ; X32-NEXT: calll __multi3
855 ; X32-NEXT: addl $32, %esp
856 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
857 ; X32-NEXT: pushl $0
858 ; X32-NEXT: pushl $0
859 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
860 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
861 ; X32-NEXT: pushl %edi
862 ; X32-NEXT: pushl $0
863 ; X32-NEXT: pushl $0
864 ; X32-NEXT: pushl %esi
865 ; X32-NEXT: pushl %ebx
866 ; X32-NEXT: pushl %eax
867 ; X32-NEXT: calll __multi3
868 ; X32-NEXT: addl $32, %esp
869 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
870 ; X32-NEXT: pushl $0
871 ; X32-NEXT: pushl $0
872 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
873 ; X32-NEXT: pushl %ebx
874 ; X32-NEXT: pushl %edi
875 ; X32-NEXT: pushl $0
876 ; X32-NEXT: pushl $0
877 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
878 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
879 ; X32-NEXT: pushl %eax
880 ; X32-NEXT: calll __multi3
881 ; X32-NEXT: addl $32, %esp
882 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
883 ; X32-NEXT: pushl $0
884 ; X32-NEXT: pushl $0
885 ; X32-NEXT: pushl $0
886 ; X32-NEXT: pushl $0
887 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
888 ; X32-NEXT: pushl %edi
889 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
890 ; X32-NEXT: pushl %esi
891 ; X32-NEXT: pushl %ebx
892 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
893 ; X32-NEXT: pushl %eax
894 ; X32-NEXT: calll __multi3
895 ; X32-NEXT: addl $32, %esp
896 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
897 ; X32-NEXT: pushl $0
898 ; X32-NEXT: pushl $0
899 ; X32-NEXT: pushl %edi
900 ; X32-NEXT: pushl %esi
901 ; X32-NEXT: pushl $0
902 ; X32-NEXT: pushl $0
903 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
904 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
905 ; X32-NEXT: pushl %eax
906 ; X32-NEXT: calll __multi3
907 ; X32-NEXT: addl $32, %esp
908 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
909 ; X32-NEXT: pushl $0
910 ; X32-NEXT: pushl $0
911 ; X32-NEXT: pushl %edi
754 ; X32-NEXT: pushl %esi
755 ; X32-NEXT: pushl $0
756 ; X32-NEXT: pushl $0
757 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
758 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
759 ; X32-NEXT: pushl %eax
760 ; X32-NEXT: calll __multi3
761 ; X32-NEXT: addl $32, %esp
762 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
763 ; X32-NEXT: pushl $0
764 ; X32-NEXT: pushl $0
765 ; X32-NEXT: pushl $0
766 ; X32-NEXT: pushl $0
767 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
768 ; X32-NEXT: pushl %ebx
769 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
770 ; X32-NEXT: pushl %edi
771 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
772 ; X32-NEXT: pushl %esi
773 ; X32-NEXT: pushl %eax
774 ; X32-NEXT: calll __multi3
775 ; X32-NEXT: addl $32, %esp
776 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
777 ; X32-NEXT: pushl $0
778 ; X32-NEXT: pushl $0
779 ; X32-NEXT: pushl %ebx
780 ; X32-NEXT: pushl %edi
781 ; X32-NEXT: pushl $0
782 ; X32-NEXT: pushl $0
783 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
784 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
785 ; X32-NEXT: pushl %eax
786 ; X32-NEXT: calll __multi3
787 ; X32-NEXT: addl $32, %esp
788 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
789 ; X32-NEXT: pushl $0
790 ; X32-NEXT: pushl $0
791 ; X32-NEXT: pushl %ebx
792 ; X32-NEXT: pushl %edi
793 ; X32-NEXT: pushl $0
794 ; X32-NEXT: pushl $0
795 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
796 ; X32-NEXT: pushl %edi
797 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
798 ; X32-NEXT: pushl %esi
799 ; X32-NEXT: pushl %eax
800 ; X32-NEXT: calll __multi3
801 ; X32-NEXT: addl $32, %esp
802 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
803 ; X32-NEXT: pushl $0
804 ; X32-NEXT: pushl $0
805 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
806 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
807 ; X32-NEXT: pushl %ebx
808 ; X32-NEXT: pushl $0
809 ; X32-NEXT: pushl $0
810 ; X32-NEXT: pushl %edi
811 ; X32-NEXT: pushl %esi
812 ; X32-NEXT: pushl %eax
813 ; X32-NEXT: calll __multi3
814 ; X32-NEXT: addl $32, %esp
815 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
816 ; X32-NEXT: pushl $0
817 ; X32-NEXT: pushl $0
818 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
819 ; X32-NEXT: pushl %ebx
820 ; X32-NEXT: pushl $0
821 ; X32-NEXT: pushl $0
822 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
823 ; X32-NEXT: pushl %edi
824 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
825 ; X32-NEXT: pushl %esi
826 ; X32-NEXT: pushl %eax
827 ; X32-NEXT: calll __multi3
828 ; X32-NEXT: addl $32, %esp
829 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
830 ; X32-NEXT: pushl $0
831 ; X32-NEXT: pushl $0
832 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
833 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
834 ; X32-NEXT: pushl %ebx
835 ; X32-NEXT: pushl $0
836 ; X32-NEXT: pushl $0
837 ; X32-NEXT: pushl %edi
838 ; X32-NEXT: pushl %esi
839 ; X32-NEXT: pushl %eax
840 ; X32-NEXT: calll __multi3
841 ; X32-NEXT: addl $32, %esp
842 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
843 ; X32-NEXT: pushl $0
844 ; X32-NEXT: pushl $0
845 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
846 ; X32-NEXT: pushl %ebx
847 ; X32-NEXT: pushl $0
848 ; X32-NEXT: pushl $0
849 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
850 ; X32-NEXT: pushl %esi
851 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
852 ; X32-NEXT: pushl %ebx
853 ; X32-NEXT: pushl %eax
854 ; X32-NEXT: calll __multi3
855 ; X32-NEXT: addl $32, %esp
856 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
857 ; X32-NEXT: pushl $0
858 ; X32-NEXT: pushl $0
859 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
860 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
861 ; X32-NEXT: pushl %edi
862 ; X32-NEXT: pushl $0
863 ; X32-NEXT: pushl $0
864 ; X32-NEXT: pushl %esi
865 ; X32-NEXT: pushl %ebx
866 ; X32-NEXT: pushl %eax
867 ; X32-NEXT: calll __multi3
868 ; X32-NEXT: addl $32, %esp
869 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
870 ; X32-NEXT: pushl $0
871 ; X32-NEXT: pushl $0
872 ; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload
873 ; X32-NEXT: pushl %ebx
874 ; X32-NEXT: pushl %edi
875 ; X32-NEXT: pushl $0
876 ; X32-NEXT: pushl $0
877 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
878 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
879 ; X32-NEXT: pushl %eax
880 ; X32-NEXT: calll __multi3
881 ; X32-NEXT: addl $32, %esp
882 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
883 ; X32-NEXT: pushl $0
884 ; X32-NEXT: pushl $0
885 ; X32-NEXT: pushl $0
886 ; X32-NEXT: pushl $0
887 ; X32-NEXT: movl {{[0-9]+}}(%esp), %edi # 4-byte Reload
888 ; X32-NEXT: pushl %edi
889 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
890 ; X32-NEXT: pushl %esi
891 ; X32-NEXT: pushl %ebx
892 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
893 ; X32-NEXT: pushl %eax
894 ; X32-NEXT: calll __multi3
895 ; X32-NEXT: addl $32, %esp
896 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
897 ; X32-NEXT: pushl $0
898 ; X32-NEXT: pushl $0
899 ; X32-NEXT: pushl %edi
900 ; X32-NEXT: movl %edi, %ebx
901 ; X32-NEXT: pushl %esi
902 ; X32-NEXT: pushl $0
903 ; X32-NEXT: pushl $0
904 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
905 ; X32-NEXT: pushl {{[0-9]+}}(%esp) # 4-byte Folded Reload
906 ; X32-NEXT: pushl %eax
907 ; X32-NEXT: calll __multi3
908 ; X32-NEXT: addl $32, %esp
909 ; X32-NEXT: leal {{[0-9]+}}(%esp), %eax
910 ; X32-NEXT: pushl $0
911 ; X32-NEXT: pushl $0
912 ; X32-NEXT: pushl %ebx
912913 ; X32-NEXT: pushl %esi
913914 ; X32-NEXT: pushl $0
914915 ; X32-NEXT: pushl $0
13631364 ; X32-NEXT: pushl $0
13641365 ; X32-NEXT: pushl $0
13651366 ; X32-NEXT: movl %edi, %ebx
1366 ; X32-NEXT: pushl %edi
1367 ; X32-NEXT: pushl %ebx
13671368 ; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload
13681369 ; X32-NEXT: pushl %esi
13691370 ; X32-NEXT: pushl $0
24402441 ; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill
24412442 ; X32-NEXT: adcl %edi, %eax
24422443 ; X32-NEXT: movl %eax, %esi
2443 ; X32-NEXT: movl %eax, {{[0-9]+}}(%esp) # 4-byte Spill
2444 ; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill
24442445 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
24452446 ; X32-NEXT: addl %eax, {{[0-9]+}}(%esp) # 4-byte Folded Spill
24462447 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax # 4-byte Reload
42634264 ; X64-NEXT: adcq $0, %rbp
42644265 ; X64-NEXT: addq %rcx, %rbx
42654266 ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
4267 ; X64-NEXT: movq %rcx, %r11
42664268 ; X64-NEXT: adcq %rdi, %rbp
42674269 ; X64-NEXT: setb %bl
42684270 ; X64-NEXT: movzbl %bl, %ebx
42724274 ; X64-NEXT: mulq %r8
42734275 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
42744276 ; X64-NEXT: movq %rdx, {{[0-9]+}}(%rsp) # 8-byte Spill
4275 ; X64-NEXT: movq %rcx, %r12
4276 ; X64-NEXT: movq %rcx, %r8
4277 ; X64-NEXT: movq %r11, %r12
4278 ; X64-NEXT: movq %r11, %r8
42774279 ; X64-NEXT: addq %rax, %r12
42784280 ; X64-NEXT: movq %rdi, %rax
42794281 ; X64-NEXT: movq %rdi, %r9
4280 ; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
4282 ; X64-NEXT: movq %r9, (%rsp) # 8-byte Spill
42814283 ; X64-NEXT: adcq %rdx, %rax
42824284 ; X64-NEXT: addq %rbp, %r12
42834285 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
43064308 ; X64-NEXT: adcq %rdx, %rbx
43074309 ; X64-NEXT: movq 16(%rsi), %rax
43084310 ; X64-NEXT: movq %rsi, %r13
4309 ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
4311 ; X64-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill
43104312 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
43114313 ; X64-NEXT: mulq %r11
43124314 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
43194321 ; X64-NEXT: adcq %rbx, %r11
43204322 ; X64-NEXT: movq %r8, %rax
43214323 ; X64-NEXT: movq %r8, %rbp
4322 ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
4324 ; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
43234325 ; X64-NEXT: addq %rdi, %rax
43244326 ; X64-NEXT: movq %r9, %rax
43254327 ; X64-NEXT: adcq %rcx, %rax
43314333 ; X64-NEXT: movq %rdx, %rsi
43324334 ; X64-NEXT: movq %rax, %rbx
43334335 ; X64-NEXT: addq %rdi, %rax
4334 ; X64-NEXT: movq %rdx, %rax
4336 ; X64-NEXT: movq %rdi, %r9
4337 ; X64-NEXT: movq %rsi, %rax
43354338 ; X64-NEXT: adcq %rcx, %rax
43364339 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
43374340 ; X64-NEXT: movq 32(%r13), %rax
43474350 ; X64-NEXT: adcq %rdx, %rax
43484351 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
43494352 ; X64-NEXT: movq %rbp, %rax
4350 ; X64-NEXT: addq %rdi, %rax
4353 ; X64-NEXT: addq %r9, %rax
43514354 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
4352 ; X64-NEXT: movq %rdi, %r9
4353 ; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
4355 ; X64-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill
43544356 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload
43554357 ; X64-NEXT: adcq %r15, %rax
43564358 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
43684370 ; X64-NEXT: addq %rsi, %r11
43694371 ; X64-NEXT: movq %rdx, %rbp
43704372 ; X64-NEXT: adcq $0, %rbp
4371 ; X64-NEXT: addq %rbx, %r11
4373 ; X64-NEXT: addq %rcx, %r11
43724374 ; X64-NEXT: adcq %rsi, %rbp
43734375 ; X64-NEXT: movq %rsi, {{[0-9]+}}(%rsp) # 8-byte Spill
43744376 ; X64-NEXT: setb %bl
43894391 ; X64-NEXT: adcq %rbx, %r10
43904392 ; X64-NEXT: movq %rcx, %rdx
43914393 ; X64-NEXT: movq %rcx, %r12
4392 ; X64-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill
4394 ; X64-NEXT: movq %r12, {{[0-9]+}}(%rsp) # 8-byte Spill
43934395 ; X64-NEXT: addq %r9, %rdx
43944396 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
43954397 ; X64-NEXT: movq %r11, %r8
4396 ; X64-NEXT: adcq %r11, %r15
4398 ; X64-NEXT: adcq %r8, %r15
43974399 ; X64-NEXT: movq %r15, {{[0-9]+}}(%rsp) # 8-byte Spill
43984400 ; X64-NEXT: adcq %rax, %r14
43994401 ; X64-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill
44894491 ; X64-NEXT: adcq %rdx, %r12
44904492 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
44914493 ; X64-NEXT: movq %rcx, %rax
4492 ; X64-NEXT: mulq %r10
4494 ; X64-NEXT: movq %r10, %rbp
4495 ; X64-NEXT: mulq %rbp
44934496 ; X64-NEXT: movq %rdx, %rsi
44944497 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
44954498 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rdi # 8-byte Reload
44964499 ; X64-NEXT: movq %rdi, %rax
4497 ; X64-NEXT: mulq %r10
4500 ; X64-NEXT: mulq %rbp
44984501 ; X64-NEXT: movq %rdx, %rbp
44994502 ; X64-NEXT: movq %rax, %rbx
45004503 ; X64-NEXT: addq %rsi, %rbx
45214524 ; X64-NEXT: adcq $0, %r15
45224525 ; X64-NEXT: adcq $0, %r12
45234526 ; X64-NEXT: movq %r10, %rbx
4524 ; X64-NEXT: movq %r10, %rax
4527 ; X64-NEXT: movq %rbx, %rax
45254528 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r11 # 8-byte Reload
45264529 ; X64-NEXT: mulq %r11
45274530 ; X64-NEXT: movq %rdx, %rcx
45384541 ; X64-NEXT: movq %rbx, %rax
45394542 ; X64-NEXT: mulq %rcx
45404543 ; X64-NEXT: movq %rcx, %rbx
4541 ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
4544 ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
45424545 ; X64-NEXT: movq %rdx, %rcx
45434546 ; X64-NEXT: movq %rax, %r8
45444547 ; X64-NEXT: addq %rbp, %r8
45694572 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
45704573 ; X64-NEXT: movq %rcx, %rax
45714574 ; X64-NEXT: movq %r11, %rsi
4572 ; X64-NEXT: mulq %r11
4575 ; X64-NEXT: mulq %rsi
45734576 ; X64-NEXT: movq %rdx, %r11
45744577 ; X64-NEXT: movq %rax, %r13
45754578 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
46494652 ; X64-NEXT: adcq %rdx, %r10
46504653 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
46514654 ; X64-NEXT: movq %rcx, %rax
4652 ; X64-NEXT: mulq %r11
4655 ; X64-NEXT: movq %r11, %rbp
4656 ; X64-NEXT: mulq %rbp
46534657 ; X64-NEXT: movq %rdx, %rdi
46544658 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
46554659 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
46564660 ; X64-NEXT: movq %rsi, %rax
4657 ; X64-NEXT: mulq %r11
4661 ; X64-NEXT: mulq %rbp
46584662 ; X64-NEXT: movq %rdx, %rbp
46594663 ; X64-NEXT: movq %rax, %rbx
46604664 ; X64-NEXT: addq %rdi, %rbx
47844788 ; X64-NEXT: movq %rdx, %rsi
47854789 ; X64-NEXT: movq %rax, %r14
47864790 ; X64-NEXT: movq %r8, %rbp
4787 ; X64-NEXT: movq %r8, %rax
4791 ; X64-NEXT: movq %rbp, %rax
47884792 ; X64-NEXT: mulq %rcx
47894793 ; X64-NEXT: movq %rcx, %r11
47904794 ; X64-NEXT: movq %rdx, %rbx
48444848 ; X64-NEXT: adcq $0, %r9
48454849 ; X64-NEXT: adcq $0, %r10
48464850 ; X64-NEXT: movq %rbp, %rsi
4847 ; X64-NEXT: movq %rbp, %rax
4851 ; X64-NEXT: movq %rsi, %rax
48484852 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
48494853 ; X64-NEXT: mulq %rcx
48504854 ; X64-NEXT: movq %rdx, %r14
49014905 ; X64-NEXT: adcq $0, %r15
49024906 ; X64-NEXT: movq %rbp, %rax
49034907 ; X64-NEXT: movq %r8, %rdi
4904 ; X64-NEXT: movq %r8, {{[0-9]+}}(%rsp) # 8-byte Spill
4905 ; X64-NEXT: mulq %r8
4908 ; X64-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill
4909 ; X64-NEXT: mulq %rdi
49064910 ; X64-NEXT: movq %rdx, %r9
49074911 ; X64-NEXT: movq %rax, %r8
49084912 ; X64-NEXT: addq %rbx, %r8
49854989 ; X64-NEXT: movq %rcx, %r14
49864990 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
49874991 ; X64-NEXT: movq %rcx, %rax
4988 ; X64-NEXT: mulq %r10
4992 ; X64-NEXT: movq %r10, %rdi
4993 ; X64-NEXT: mulq %rdi
49894994 ; X64-NEXT: movq %rdx, %r11
49904995 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
49914996 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rsi # 8-byte Reload
49924997 ; X64-NEXT: movq %rsi, %rax
4993 ; X64-NEXT: mulq %r10
4998 ; X64-NEXT: mulq %rdi
49944999 ; X64-NEXT: movq %rdx, %rdi
49955000 ; X64-NEXT: movq %rax, %rbx
49965001 ; X64-NEXT: addq %r11, %rbx
50185023 ; X64-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill
50195024 ; X64-NEXT: adcq $0, %r14
50205025 ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
5021 ; X64-NEXT: movq %r13, %rax
5026 ; X64-NEXT: movq %r13, %rbx
5027 ; X64-NEXT: movq %rbx, %rax
50225028 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload
50235029 ; X64-NEXT: mulq %rcx
50245030 ; X64-NEXT: movq %rdx, %r8
50315037 ; X64-NEXT: movq %rax, %rcx
50325038 ; X64-NEXT: addq %r8, %rcx
50335039 ; X64-NEXT: adcq $0, %rsi
5034 ; X64-NEXT: movq %r13, %rax
5040 ; X64-NEXT: movq %rbx, %rax
50355041 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r13 # 8-byte Reload
50365042 ; X64-NEXT: mulq %r13
50375043 ; X64-NEXT: movq %rdx, %rbx
50655071 ; X64-NEXT: setb -{{[0-9]+}}(%rsp) # 1-byte Folded Spill
50665072 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbx # 8-byte Reload
50675073 ; X64-NEXT: movq %rbx, %rax
5068 ; X64-NEXT: mulq %r10
5074 ; X64-NEXT: movq %r10, %rsi
5075 ; X64-NEXT: mulq %rsi
50695076 ; X64-NEXT: movq %rdx, %rcx
50705077 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
50715078 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r8 # 8-byte Reload
50725079 ; X64-NEXT: movq %r8, %rax
5073 ; X64-NEXT: mulq %r10
5080 ; X64-NEXT: mulq %rsi
50745081 ; X64-NEXT: movq %rdx, %rsi
50755082 ; X64-NEXT: movq %rax, %rdi
50765083 ; X64-NEXT: addq %rcx, %rdi
51465153 ; X64-NEXT: movq %r9, %rax
51475154 ; X64-NEXT: mulq %rcx
51485155 ; X64-NEXT: movq %rcx, %r10
5149 ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
5156 ; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
51505157 ; X64-NEXT: movq %rdx, %rcx
51515158 ; X64-NEXT: movq %rax, %rdi
51525159 ; X64-NEXT: addq %rsi, %rdi
51585165 ; X64-NEXT: movq %rax, %rbx
51595166 ; X64-NEXT: movq %rdx, %r14
51605167 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload
5161 ; X64-NEXT: addq %rax, %r12
5168 ; X64-NEXT: addq %rbx, %r12
51625169 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %r15 # 8-byte Reload
5163 ; X64-NEXT: adcq %rdx, %r15
5170 ; X64-NEXT: adcq %r14, %r15
51645171 ; X64-NEXT: addq %rdi, %r12
51655172 ; X64-NEXT: adcq %rcx, %r15
51665173 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload
51675174 ; X64-NEXT: movq %rcx, %rax
51685175 ; X64-NEXT: movq %r11, %rsi
5169 ; X64-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill
5170 ; X64-NEXT: mulq %r11
5176 ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
5177 ; X64-NEXT: mulq %rsi
51715178 ; X64-NEXT: movq %rdx, %r11
51725179 ; X64-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill
51735180 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload
52315238 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
52325239 ; X64-NEXT: movq %rax, %r9
52335240 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rbp # 8-byte Reload
5234 ; X64-NEXT: addq %rax, %rbp
5241 ; X64-NEXT: addq %r9, %rbp
52355242 ; X64-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload
52365243 ; X64-NEXT: adcq %rdx, %rax
52375244 ; X64-NEXT: addq %rsi, %rbp
54095416 ; X64-NEXT: movq 88(%rsi), %rax
54105417 ; X64-NEXT: movq %rsi, %r9
54115418 ; X64-NEXT: movq %rax, %rsi
5412 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
5419 ; X64-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill
54135420 ; X64-NEXT: mulq %rcx
54145421 ; X64-NEXT: movq %rcx, %r11
54155422 ; X64-NEXT: movq %rdx, %rbp
54455452 ; X64-NEXT: adcq %r8, %r10
54465453 ; X64-NEXT: addq %rbx, %rsi
54475454 ; X64-NEXT: adcq %rbp, %r10
5448 ; X64-NEXT: movq 64(%r9), %r13
5455 ; X64-NEXT: movq %r9, %rdi
5456 ; X64-NEXT: movq 64(%rdi), %r13
54495457 ; X64-NEXT: movq %r13, %rax
54505458 ; X64-NEXT: mulq %r11
54515459 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
54525460 ; X64-NEXT: movq %rdx, %rcx
5453 ; X64-NEXT: movq 72(%r9), %r9
5461 ; X64-NEXT: movq 72(%rdi), %r9
54545462 ; X64-NEXT: movq %r9, %rax
54555463 ; X64-NEXT: mulq %r11
54565464 ; X64-NEXT: movq %rdx, %rbp
54785486 ; X64-NEXT: movq %rdx, %r11
54795487 ; X64-NEXT: movq %rax, %r15
54805488 ; X64-NEXT: movq %r12, %rcx
5481 ; X64-NEXT: addq %rax, %rcx
5482 ; X64-NEXT: adcq %rdx, %r8
5489 ; X64-NEXT: addq %r15, %rcx
5490 ; X64-NEXT: adcq %r11, %r8
54835491 ; X64-NEXT: addq %rbp, %rcx
54845492 ; X64-NEXT: adcq %rbx, %r8
54855493 ; X64-NEXT: addq -{{[0-9]+}}(%rsp), %rcx # 8-byte Folded Reload
55315539 ; X64-NEXT: setb %r10b
55325540 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload
55335541 ; X64-NEXT: movq %rsi, %rax
5534 ; X64-NEXT: mulq %r8
5542 ; X64-NEXT: movq %r8, %rdi
5543 ; X64-NEXT: mulq %rdi
55355544 ; X64-NEXT: movq %rdx, %rcx
55365545 ; X64-NEXT: movq %rax, %r9
55375546 ; X64-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload
55385547 ; X64-NEXT: movq %rbp, %rax
5539 ; X64-NEXT: mulq %r8
5540 ; X64-NEXT: movq %r8, %r12
5548 ; X64-NEXT: mulq %rdi
5549 ; X64-NEXT: movq %rdi, %r12
55415550 ; X64-NEXT: movq %rdx, %rdi
55425551 ; X64-NEXT: movq %rax, %rbx
55435552 ; X64-NEXT: addq %rcx, %rbx
55765585 ; X64-NEXT: imulq %rcx, %rdi
55775586 ; X64-NEXT: movq %rcx, %rax
55785587 ; X64-NEXT: movq %r12, %rsi
5579 ; X64-NEXT: mulq %r12
5588 ; X64-NEXT: mulq %rsi
55805589 ; X64-NEXT: movq %rax, %r9
55815590 ; X64-NEXT: addq %rdi, %rdx
55825591 ; X64-NEXT: movq 104(%rbp), %r8
908908 ; X64-NEXT: movq 8(%rsi), %rbp
909909 ; X64-NEXT: movq %r15, %rax
910910 ; X64-NEXT: movq %rdx, %rsi
911 ; X64-NEXT: mulq %rdx
911 ; X64-NEXT: mulq %rsi
912912 ; X64-NEXT: movq %rdx, %r9
913913 ; X64-NEXT: movq %rax, %r8
914914 ; X64-NEXT: movq %r11, %rax
931931 ; X64-NEXT: movq %r11, %rax
932932 ; X64-NEXT: mulq %rbp
933933 ; X64-NEXT: movq %rbp, %r14
934 ; X64-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill
934 ; X64-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill
935935 ; X64-NEXT: movq %rdx, %rsi
936936 ; X64-NEXT: movq %rax, %rbp
937937 ; X64-NEXT: addq %rcx, %rbp
938938 ; X64-NEXT: adcq %rbx, %rsi
939939 ; X64-NEXT: xorl %ecx, %ecx
940940 ; X64-NEXT: movq %r10, %rbx
941 ; X64-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill
942 ; X64-NEXT: movq %r10, %rax
941 ; X64-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill
942 ; X64-NEXT: movq %rbx, %rax
943943 ; X64-NEXT: mulq %rcx
944944 ; X64-NEXT: movq %rdx, %r13
945945 ; X64-NEXT: movq %rax, %r10
946946 ; X64-NEXT: movq %r15, %rax
947947 ; X64-NEXT: mulq %rcx
948948 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
949 ; X64-NEXT: # kill: %RAX
950 ; X64-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill
951949 ; X64-NEXT: movq %rax, %r15
950 ; X64-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill
952951 ; X64-NEXT: addq %r10, %r15
953952 ; X64-NEXT: adcq %r13, %rdx
954953 ; X64-NEXT: addq %rbp, %r15
987986 ; X64-NEXT: mulq %rdx
988987 ; X64-NEXT: movq %rdx, %r14
989988 ; X64-NEXT: movq %rax, %r11
990 ; X64-NEXT: addq %rax, %r10
991 ; X64-NEXT: adcq %rdx, %r13
989 ; X64-NEXT: addq %r11, %r10
990 ; X64-NEXT: adcq %r14, %r13
992991 ; X64-NEXT: addq %rbp, %r10
993992 ; X64-NEXT: adcq %rsi, %r13
994993 ; X64-NEXT: addq %r8, %r10
1000999 ; X64-NEXT: movq 16(%rsi), %r8
10011000 ; X64-NEXT: movq %rcx, %rax
10021001 ; X64-NEXT: movq %rcx, %r9
1003 ; X64-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill
1002 ; X64-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill
10041003 ; X64-NEXT: mulq %r8
10051004 ; X64-NEXT: movq %rdx, %rdi
10061005 ; X64-NEXT: movq %rax, %r12
10311030 ; X64-NEXT: mulq %rcx
10321031 ; X64-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill
10331032 ; X64-NEXT: movq %rax, %rbp
1034 ; X64-NEXT: addq %rax, %r11
1033 ; X64-NEXT: addq %rbp, %r11
10351034 ; X64-NEXT: adcq %rdx, %r14
10361035 ; X64-NEXT: addq %r9, %r11
10371036 ; X64-NEXT: adcq %rbx, %r14
66 ; X64-NEXT: movq %rdx, %r8
77 ; X64-NEXT: imulq %rdi, %rcx
88 ; X64-NEXT: movq %rdi, %rax
9 ; X64-NEXT: mulq %rdx
9 ; X64-NEXT: mulq %r8
1010 ; X64-NEXT: addq %rcx, %rdx
1111 ; X64-NEXT: imulq %r8, %rsi
1212 ; X64-NEXT: addq %rsi, %rdx
88 ; SSE2-LABEL: mul_v16i8c:
99 ; SSE2: # BB#0: # %entry
1010 ; SSE2-NEXT: movdqa %xmm0, %xmm1
11 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
11 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
1212 ; SSE2-NEXT: psraw $8, %xmm1
1313 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [117,117,117,117,117,117,117,117]
1414 ; SSE2-NEXT: pmullw %xmm2, %xmm1
142142 ; SSE2-LABEL: mul_v16i8:
143143 ; SSE2: # BB#0: # %entry
144144 ; SSE2-NEXT: movdqa %xmm1, %xmm2
145 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
145 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
146146 ; SSE2-NEXT: psraw $8, %xmm2
147147 ; SSE2-NEXT: movdqa %xmm0, %xmm3
148 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
148 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
149149 ; SSE2-NEXT: psraw $8, %xmm3
150150 ; SSE2-NEXT: pmullw %xmm2, %xmm3
151151 ; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
385385 ; SSE2-LABEL: mul_v32i8c:
386386 ; SSE2: # BB#0: # %entry
387387 ; SSE2-NEXT: movdqa %xmm0, %xmm2
388 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
388 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
389389 ; SSE2-NEXT: psraw $8, %xmm2
390390 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [117,117,117,117,117,117,117,117]
391391 ; SSE2-NEXT: pmullw %xmm3, %xmm2
397397 ; SSE2-NEXT: pand %xmm4, %xmm0
398398 ; SSE2-NEXT: packuswb %xmm2, %xmm0
399399 ; SSE2-NEXT: movdqa %xmm1, %xmm2
400 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
400 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
401401 ; SSE2-NEXT: psraw $8, %xmm2
402402 ; SSE2-NEXT: pmullw %xmm3, %xmm2
403403 ; SSE2-NEXT: pand %xmm4, %xmm2
566566 ; SSE2-LABEL: mul_v32i8:
567567 ; SSE2: # BB#0: # %entry
568568 ; SSE2-NEXT: movdqa %xmm2, %xmm4
569 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm2[8],xmm4[9],xmm2[9],xmm4[10],xmm2[10],xmm4[11],xmm2[11],xmm4[12],xmm2[12],xmm4[13],xmm2[13],xmm4[14],xmm2[14],xmm4[15],xmm2[15]
569 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
570570 ; SSE2-NEXT: psraw $8, %xmm4
571571 ; SSE2-NEXT: movdqa %xmm0, %xmm5
572 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm0[8],xmm5[9],xmm0[9],xmm5[10],xmm0[10],xmm5[11],xmm0[11],xmm5[12],xmm0[12],xmm5[13],xmm0[13],xmm5[14],xmm0[14],xmm5[15],xmm0[15]
572 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
573573 ; SSE2-NEXT: psraw $8, %xmm5
574574 ; SSE2-NEXT: pmullw %xmm4, %xmm5
575575 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
582582 ; SSE2-NEXT: pand %xmm4, %xmm0
583583 ; SSE2-NEXT: packuswb %xmm5, %xmm0
584584 ; SSE2-NEXT: movdqa %xmm3, %xmm2
585 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15]
585 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
586586 ; SSE2-NEXT: psraw $8, %xmm2
587587 ; SSE2-NEXT: movdqa %xmm1, %xmm5
588 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm1[8],xmm5[9],xmm1[9],xmm5[10],xmm1[10],xmm5[11],xmm1[11],xmm5[12],xmm1[12],xmm5[13],xmm1[13],xmm5[14],xmm1[14],xmm5[15],xmm1[15]
588 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
589589 ; SSE2-NEXT: psraw $8, %xmm5
590590 ; SSE2-NEXT: pmullw %xmm2, %xmm5
591591 ; SSE2-NEXT: pand %xmm4, %xmm5
773773 ; SSE2-LABEL: mul_v64i8c:
774774 ; SSE2: # BB#0: # %entry
775775 ; SSE2-NEXT: movdqa %xmm0, %xmm6
776 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm0[8],xmm6[9],xmm0[9],xmm6[10],xmm0[10],xmm6[11],xmm0[11],xmm6[12],xmm0[12],xmm6[13],xmm0[13],xmm6[14],xmm0[14],xmm6[15],xmm0[15]
776 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
777777 ; SSE2-NEXT: psraw $8, %xmm6
778778 ; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [117,117,117,117,117,117,117,117]
779779 ; SSE2-NEXT: pmullw %xmm4, %xmm6
785785 ; SSE2-NEXT: pand %xmm5, %xmm0
786786 ; SSE2-NEXT: packuswb %xmm6, %xmm0
787787 ; SSE2-NEXT: movdqa %xmm1, %xmm6
788 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm1[8],xmm6[9],xmm1[9],xmm6[10],xmm1[10],xmm6[11],xmm1[11],xmm6[12],xmm1[12],xmm6[13],xmm1[13],xmm6[14],xmm1[14],xmm6[15],xmm1[15]
788 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
789789 ; SSE2-NEXT: psraw $8, %xmm6
790790 ; SSE2-NEXT: pmullw %xmm4, %xmm6
791791 ; SSE2-NEXT: pand %xmm5, %xmm6
795795 ; SSE2-NEXT: pand %xmm5, %xmm1
796796 ; SSE2-NEXT: packuswb %xmm6, %xmm1
797797 ; SSE2-NEXT: movdqa %xmm2, %xmm6
798 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm2[8],xmm6[9],xmm2[9],xmm6[10],xmm2[10],xmm6[11],xmm2[11],xmm6[12],xmm2[12],xmm6[13],xmm2[13],xmm6[14],xmm2[14],xmm6[15],xmm2[15]
798 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
799799 ; SSE2-NEXT: psraw $8, %xmm6
800800 ; SSE2-NEXT: pmullw %xmm4, %xmm6
801801 ; SSE2-NEXT: pand %xmm5, %xmm6
805805 ; SSE2-NEXT: pand %xmm5, %xmm2
806806 ; SSE2-NEXT: packuswb %xmm6, %xmm2
807807 ; SSE2-NEXT: movdqa %xmm3, %xmm6
808 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm3[8],xmm6[9],xmm3[9],xmm6[10],xmm3[10],xmm6[11],xmm3[11],xmm6[12],xmm3[12],xmm6[13],xmm3[13],xmm6[14],xmm3[14],xmm6[15],xmm3[15]
808 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
809809 ; SSE2-NEXT: psraw $8, %xmm6
810810 ; SSE2-NEXT: pmullw %xmm4, %xmm6
811811 ; SSE2-NEXT: pand %xmm5, %xmm6
820820 ; SSE41: # BB#0: # %entry
821821 ; SSE41-NEXT: movdqa %xmm1, %xmm4
822822 ; SSE41-NEXT: movdqa %xmm0, %xmm1
823 ; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
823 ; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
824824 ; SSE41-NEXT: movdqa {{.*#+}} xmm6 = [117,117,117,117,117,117,117,117]
825825 ; SSE41-NEXT: pmullw %xmm6, %xmm0
826826 ; SSE41-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,255,255,255,255,255]
938938 ; SSE2-LABEL: mul_v64i8:
939939 ; SSE2: # BB#0: # %entry
940940 ; SSE2-NEXT: movdqa %xmm4, %xmm8
941 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm4[8],xmm8[9],xmm4[9],xmm8[10],xmm4[10],xmm8[11],xmm4[11],xmm8[12],xmm4[12],xmm8[13],xmm4[13],xmm8[14],xmm4[14],xmm8[15],xmm4[15]
941 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
942942 ; SSE2-NEXT: psraw $8, %xmm8
943943 ; SSE2-NEXT: movdqa %xmm0, %xmm9
944 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm0[8],xmm9[9],xmm0[9],xmm9[10],xmm0[10],xmm9[11],xmm0[11],xmm9[12],xmm0[12],xmm9[13],xmm0[13],xmm9[14],xmm0[14],xmm9[15],xmm0[15]
944 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
945945 ; SSE2-NEXT: psraw $8, %xmm9
946946 ; SSE2-NEXT: pmullw %xmm8, %xmm9
947947 ; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,255,255,255,255,255]
954954 ; SSE2-NEXT: pand %xmm8, %xmm0
955955 ; SSE2-NEXT: packuswb %xmm9, %xmm0
956956 ; SSE2-NEXT: movdqa %xmm5, %xmm9
957 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm5[8],xmm9[9],xmm5[9],xmm9[10],xmm5[10],xmm9[11],xmm5[11],xmm9[12],xmm5[12],xmm9[13],xmm5[13],xmm9[14],xmm5[14],xmm9[15],xmm5[15]
957 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
958958 ; SSE2-NEXT: psraw $8, %xmm9
959959 ; SSE2-NEXT: movdqa %xmm1, %xmm4
960 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15]
960 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
961961 ; SSE2-NEXT: psraw $8, %xmm4
962962 ; SSE2-NEXT: pmullw %xmm9, %xmm4
963963 ; SSE2-NEXT: pand %xmm8, %xmm4
969969 ; SSE2-NEXT: pand %xmm8, %xmm1
970970 ; SSE2-NEXT: packuswb %xmm4, %xmm1
971971 ; SSE2-NEXT: movdqa %xmm6, %xmm4
972 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm6[8],xmm4[9],xmm6[9],xmm4[10],xmm6[10],xmm4[11],xmm6[11],xmm4[12],xmm6[12],xmm4[13],xmm6[13],xmm4[14],xmm6[14],xmm4[15],xmm6[15]
972 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
973973 ; SSE2-NEXT: psraw $8, %xmm4
974974 ; SSE2-NEXT: movdqa %xmm2, %xmm5
975 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm2[8],xmm5[9],xmm2[9],xmm5[10],xmm2[10],xmm5[11],xmm2[11],xmm5[12],xmm2[12],xmm5[13],xmm2[13],xmm5[14],xmm2[14],xmm5[15],xmm2[15]
975 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
976976 ; SSE2-NEXT: psraw $8, %xmm5
977977 ; SSE2-NEXT: pmullw %xmm4, %xmm5
978978 ; SSE2-NEXT: pand %xmm8, %xmm5
984984 ; SSE2-NEXT: pand %xmm8, %xmm2
985985 ; SSE2-NEXT: packuswb %xmm5, %xmm2
986986 ; SSE2-NEXT: movdqa %xmm7, %xmm4
987 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm7[8],xmm4[9],xmm7[9],xmm4[10],xmm7[10],xmm4[11],xmm7[11],xmm4[12],xmm7[12],xmm4[13],xmm7[13],xmm4[14],xmm7[14],xmm4[15],xmm7[15]
987 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
988988 ; SSE2-NEXT: psraw $8, %xmm4
989989 ; SSE2-NEXT: movdqa %xmm3, %xmm5
990 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15]
990 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
991991 ; SSE2-NEXT: psraw $8, %xmm5
992992 ; SSE2-NEXT: pmullw %xmm4, %xmm5
993993 ; SSE2-NEXT: pand %xmm8, %xmm5
10051005 ; SSE41-NEXT: movdqa %xmm1, %xmm8
10061006 ; SSE41-NEXT: movdqa %xmm0, %xmm1
10071007 ; SSE41-NEXT: pmovsxbw %xmm4, %xmm9
1008 ; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
1008 ; SSE41-NEXT: pmovsxbw %xmm1, %xmm0
10091009 ; SSE41-NEXT: pmullw %xmm9, %xmm0
10101010 ; SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
10111011 ; SSE41-NEXT: pand %xmm9, %xmm0
44 ; CHECK-LABEL: pow_wrapper:
55 ; CHECK: # BB#0:
66 ; CHECK-NEXT: movapd %xmm0, %xmm1
7 ; CHECK-NEXT: mulsd %xmm0, %xmm1
7 ; CHECK-NEXT: mulsd %xmm1, %xmm1
88 ; CHECK-NEXT: mulsd %xmm1, %xmm0
99 ; CHECK-NEXT: mulsd %xmm1, %xmm1
1010 ; CHECK-NEXT: mulsd %xmm1, %xmm0
2424 ; SSE-NEXT: cvtps2pd %xmm0, %xmm0
2525 ; SSE-NEXT: movlps %xmm0, -{{[0-9]+}}(%rsp)
2626 ; SSE-NEXT: movaps %xmm2, %xmm1
27 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
27 ; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
2828 ; SSE-NEXT: fldl -{{[0-9]+}}(%rsp)
2929 ; SSE-NEXT: movaps %xmm2, %xmm0
3030 ; SSE-NEXT: retq
4848 ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm9[0,1],xmm2[3],xmm9[3]
4949 ; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm12[0]
5050 ; CHECK-NEXT: vaddps %xmm3, %xmm2, %xmm2
51 ; CHECK-NEXT: vmovaps %xmm15, {{[0-9]+}}(%rsp) # 16-byte Spill
52 ; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm9
51 ; CHECK-NEXT: vmovaps %xmm15, %xmm1
52 ; CHECK-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
53 ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm9
5354 ; CHECK-NEXT: vaddps %xmm14, %xmm10, %xmm0
54 ; CHECK-NEXT: vaddps %xmm15, %xmm15, %xmm8
55 ; CHECK-NEXT: vaddps %xmm1, %xmm1, %xmm8
5556 ; CHECK-NEXT: vaddps %xmm11, %xmm3, %xmm3
5657 ; CHECK-NEXT: vaddps %xmm0, %xmm3, %xmm0
57 ; CHECK-NEXT: vaddps %xmm0, %xmm15, %xmm0
58 ; CHECK-NEXT: vaddps %xmm0, %xmm1, %xmm0
5859 ; CHECK-NEXT: vmovaps %xmm8, {{[0-9]+}}(%rsp)
5960 ; CHECK-NEXT: vmovaps %xmm9, (%rsp)
60 ; CHECK-NEXT: vmovaps %xmm15, %xmm1
6161 ; CHECK-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm3 # 16-byte Reload
6262 ; CHECK-NEXT: vzeroupper
6363 ; CHECK-NEXT: callq foo
637637 ; SSE41-LABEL: test14:
638638 ; SSE41: ## BB#0: ## %vector.ph
639639 ; SSE41-NEXT: movdqa %xmm0, %xmm5
640 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
640 ; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,3]
641641 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm8 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
642642 ; SSE41-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm5[0],zero,zero,zero,xmm5[1],zero,zero,zero,xmm5[2],zero,zero,zero,xmm5[3],zero,zero,zero
643643 ; SSE41-NEXT: pshufd {{.*#+}} xmm6 = xmm5[2,3,0,1]
6060
6161 ; CHECK-LABEL: @use_eax_before_prologue@8: # @use_eax_before_prologue
6262 ; CHECK: movl %ecx, %eax
63 ; CHECK: cmpl %edx, %ecx
63 ; CHECK: cmpl %edx, %eax
6464 ; CHECK: jge LBB1_2
6565 ; CHECK: pushl %eax
6666 ; CHECK: movl $4092, %eax
131131 ; SSE: # BB#0:
132132 ; SSE-NEXT: rsqrtss %xmm0, %xmm1
133133 ; SSE-NEXT: movaps %xmm1, %xmm2
134 ; SSE-NEXT: mulss %xmm1, %xmm2
134 ; SSE-NEXT: mulss %xmm2, %xmm2
135135 ; SSE-NEXT: mulss %xmm0, %xmm2
136136 ; SSE-NEXT: addss {{.*}}(%rip), %xmm2
137137 ; SSE-NEXT: mulss {{.*}}(%rip), %xmm1
177177 ; SSE: # BB#0:
178178 ; SSE-NEXT: rsqrtps %xmm0, %xmm1
179179 ; SSE-NEXT: movaps %xmm1, %xmm2
180 ; SSE-NEXT: mulps %xmm1, %xmm2
180 ; SSE-NEXT: mulps %xmm2, %xmm2
181181 ; SSE-NEXT: mulps %xmm0, %xmm2
182182 ; SSE-NEXT: addps {{.*}}(%rip), %xmm2
183183 ; SSE-NEXT: mulps {{.*}}(%rip), %xmm1
227227 ; SSE-NEXT: rsqrtps %xmm0, %xmm3
228228 ; SSE-NEXT: movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
229229 ; SSE-NEXT: movaps %xmm3, %xmm2
230 ; SSE-NEXT: mulps %xmm3, %xmm2
230 ; SSE-NEXT: mulps %xmm2, %xmm2
231231 ; SSE-NEXT: mulps %xmm0, %xmm2
232232 ; SSE-NEXT: movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
233233 ; SSE-NEXT: addps %xmm0, %xmm2
235235 ; SSE-NEXT: mulps %xmm3, %xmm2
236236 ; SSE-NEXT: rsqrtps %xmm1, %xmm5
237237 ; SSE-NEXT: movaps %xmm5, %xmm3
238 ; SSE-NEXT: mulps %xmm5, %xmm3
238 ; SSE-NEXT: mulps %xmm3, %xmm3
239239 ; SSE-NEXT: mulps %xmm1, %xmm3
240240 ; SSE-NEXT: addps %xmm0, %xmm3
241241 ; SSE-NEXT: mulps %xmm4, %xmm3
1515 ; X32-LABEL: test4:
1616 ; X32: # BB#0: # %entry
1717 ; X32-NEXT: movaps %xmm0, %xmm2
18 ; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
18 ; X32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
1919 ; X32-NEXT: addss %xmm1, %xmm0
2020 ; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
2121 ; X32-NEXT: subss %xmm1, %xmm2
2525 ; X64-LABEL: test4:
2626 ; X64: # BB#0: # %entry
2727 ; X64-NEXT: movaps %xmm0, %xmm2
28 ; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm0[2,3]
28 ; X64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,2,3]
2929 ; X64-NEXT: addss %xmm1, %xmm0
3030 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
3131 ; X64-NEXT: subss %xmm1, %xmm2
405405 ; SSE-NEXT: movaps %xmm0, %xmm2
406406 ; SSE-NEXT: subss %xmm0, %xmm2
407407 ; SSE-NEXT: movaps %xmm0, %xmm3
408 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm0[1],xmm3[1]
408 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
409409 ; SSE-NEXT: movaps %xmm1, %xmm4
410 ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm1[1],xmm4[1]
410 ; SSE-NEXT: movhlps {{.*#+}} xmm4 = xmm4[1,1]
411411 ; SSE-NEXT: subss %xmm4, %xmm3
412412 ; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
413413 ; SSE-NEXT: addss %xmm0, %xmm4
125125 ; CHECK-NEXT: Lcfi11:
126126 ; CHECK-NEXT: .cfi_offset %rbx, -16
127127 ; CHECK-NEXT: movl %edi, %ebx
128 ; CHECK-NEXT: movl %edi, {{[0-9]+}}(%rsp)
128 ; CHECK-NEXT: movl %ebx, {{[0-9]+}}(%rsp)
129129 ; CHECK-NEXT: callq _baz
130130 ; CHECK-NEXT: Ltmp6:
131131 ; CHECK-NEXT: callq _bar
152152 ; CHECK: .byte 1
153153 ; CHECK-NEXT: .byte 0
154154 ; CHECK-NEXT: .short 4
155 ; CHECK-NEXT: .short 5
155 ; CHECK-NEXT: .short 6
156156 ; CHECK-NEXT: .short 0
157157 ; CHECK-NEXT: .long 0
158158 ; CHECK: .byte 1
159159 ; CHECK-NEXT: .byte 0
160160 ; CHECK-NEXT: .short 4
161 ; CHECK-NEXT: .short 4
161 ; CHECK-NEXT: .short 3
162162 ; CHECK-NEXT: .short 0
163163 ; CHECK-NEXT: .long 0
164164 ; CHECK: Ltmp2-_test2
6060 gc "statepoint-example" {
6161 ; CHECK-LABEL: back_to_back_deopt
6262 ; The exact stores don't matter, but there need to be three stack slots created
63 ; CHECK-DAG: movl %edi, 12(%rsp)
64 ; CHECK-DAG: movl %esi, 8(%rsp)
65 ; CHECK-DAG: movl %edx, 4(%rsp)
63 ; CHECK-DAG: movl %ebx, 12(%rsp)
64 ; CHECK-DAG: movl %ebp, 8(%rsp)
65 ; CHECK-DAG: movl %r14d, 4(%rsp)
6666 ; CHECK: callq
6767 ; CHECK-DAG: movl %ebx, 12(%rsp)
6868 ; CHECK-DAG: movl %ebp, 8(%rsp)
10171017 ; SSE-NEXT: cvttss2si %xmm0, %rax
10181018 ; SSE-NEXT: movq %rax, %xmm2
10191019 ; SSE-NEXT: movaps %xmm0, %xmm1
1020 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
1020 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
10211021 ; SSE-NEXT: cvttss2si %xmm1, %rax
10221022 ; SSE-NEXT: movq %rax, %xmm1
10231023 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
10241024 ; SSE-NEXT: movaps %xmm0, %xmm1
1025 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
1025 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
10261026 ; SSE-NEXT: cvttss2si %xmm1, %rax
10271027 ; SSE-NEXT: movq %rax, %xmm3
10281028 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
11251125 ; SSE-NEXT: cvttss2si %xmm0, %rax
11261126 ; SSE-NEXT: movq %rax, %xmm2
11271127 ; SSE-NEXT: movaps %xmm0, %xmm1
1128 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1],xmm0[2,3]
1128 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
11291129 ; SSE-NEXT: cvttss2si %xmm1, %rax
11301130 ; SSE-NEXT: movq %rax, %xmm1
11311131 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
11321132 ; SSE-NEXT: movaps %xmm0, %xmm1
1133 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
1133 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
11341134 ; SSE-NEXT: cvttss2si %xmm1, %rax
11351135 ; SSE-NEXT: movq %rax, %xmm3
11361136 ; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
13151315 ; SSE-LABEL: fptoui_4f32_to_4i32:
13161316 ; SSE: # BB#0:
13171317 ; SSE-NEXT: movaps %xmm0, %xmm1
1318 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
1318 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
13191319 ; SSE-NEXT: cvttss2si %xmm1, %rax
13201320 ; SSE-NEXT: movd %eax, %xmm1
13211321 ; SSE-NEXT: movaps %xmm0, %xmm2
1322 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
1322 ; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
13231323 ; SSE-NEXT: cvttss2si %xmm2, %rax
13241324 ; SSE-NEXT: movd %eax, %xmm2
13251325 ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
15591559 ; SSE-NEXT: cvttss2si %xmm0, %rax
15601560 ; SSE-NEXT: movd %eax, %xmm0
15611561 ; SSE-NEXT: movaps %xmm2, %xmm3
1562 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm2[1],xmm3[1]
1562 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
15631563 ; SSE-NEXT: cvttss2si %xmm3, %rax
15641564 ; SSE-NEXT: movd %eax, %xmm3
15651565 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
15711571 ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
15721572 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
15731573 ; SSE-NEXT: movaps %xmm1, %xmm2
1574 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
1574 ; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1,2,3]
15751575 ; SSE-NEXT: cvttss2si %xmm2, %rax
15761576 ; SSE-NEXT: movd %eax, %xmm2
15771577 ; SSE-NEXT: movaps %xmm1, %xmm3
1578 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
1578 ; SSE-NEXT: movhlps {{.*#+}} xmm3 = xmm3[1,1]
15791579 ; SSE-NEXT: cvttss2si %xmm3, %rax
15801580 ; SSE-NEXT: movd %eax, %xmm3
15811581 ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
16861686 ; SSE-NEXT: cmovaeq %rcx, %rdx
16871687 ; SSE-NEXT: movq %rdx, %xmm2
16881688 ; SSE-NEXT: movaps %xmm0, %xmm3
1689 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
1689 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
16901690 ; SSE-NEXT: movaps %xmm3, %xmm4
16911691 ; SSE-NEXT: subss %xmm1, %xmm4
16921692 ; SSE-NEXT: cvttss2si %xmm4, %rcx
16971697 ; SSE-NEXT: movq %rdx, %xmm3
16981698 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
16991699 ; SSE-NEXT: movaps %xmm0, %xmm3
1700 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
1700 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
17011701 ; SSE-NEXT: movaps %xmm3, %xmm4
17021702 ; SSE-NEXT: subss %xmm1, %xmm4
17031703 ; SSE-NEXT: cvttss2si %xmm4, %rcx
18641864 ; SSE-NEXT: cmovaeq %rcx, %rdx
18651865 ; SSE-NEXT: movq %rdx, %xmm2
18661866 ; SSE-NEXT: movaps %xmm0, %xmm3
1867 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1],xmm0[2,3]
1867 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,2,3]
18681868 ; SSE-NEXT: movaps %xmm3, %xmm4
18691869 ; SSE-NEXT: subss %xmm1, %xmm4
18701870 ; SSE-NEXT: cvttss2si %xmm4, %rcx
18751875 ; SSE-NEXT: movq %rdx, %xmm3
18761876 ; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
18771877 ; SSE-NEXT: movaps %xmm0, %xmm3
1878 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
1878 ; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
18791879 ; SSE-NEXT: movaps %xmm3, %xmm4
18801880 ; SSE-NEXT: subss %xmm1, %xmm4
18811881 ; SSE-NEXT: cvttss2si %xmm4, %rcx
16101610 ; SSE-LABEL: uitofp_2i64_to_4f32:
16111611 ; SSE: # BB#0:
16121612 ; SSE-NEXT: movdqa %xmm0, %xmm1
1613 ; SSE-NEXT: movq %xmm0, %rax
1613 ; SSE-NEXT: movq %xmm1, %rax
16141614 ; SSE-NEXT: testq %rax, %rax
16151615 ; SSE-NEXT: js .LBB39_1
16161616 ; SSE-NEXT: # BB#2:
18381838 ; SSE-LABEL: uitofp_4i64_to_4f32_undef:
18391839 ; SSE: # BB#0:
18401840 ; SSE-NEXT: movdqa %xmm0, %xmm1
1841 ; SSE-NEXT: movq %xmm0, %rax
1841 ; SSE-NEXT: movq %xmm1, %rax
18421842 ; SSE-NEXT: testq %rax, %rax
18431843 ; SSE-NEXT: js .LBB41_1
18441844 ; SSE-NEXT: # BB#2:
436436 ; SSE42: # BB#0:
437437 ; SSE42-NEXT: movdqa %xmm0, %xmm2
438438 ; SSE42-NEXT: movdqa %xmm1, %xmm3
439 ; SSE42-NEXT: pcmpgtq %xmm0, %xmm3
439 ; SSE42-NEXT: pcmpgtq %xmm2, %xmm3
440440 ; SSE42-NEXT: pcmpeqd %xmm0, %xmm0
441441 ; SSE42-NEXT: pxor %xmm3, %xmm0
442442 ; SSE42-NEXT: blendvpd %xmm0, %xmm2, %xmm1
3434 ; X32: # BB#0: # %entry
3535 ; X32-NEXT: movdqa %xmm0, %xmm2
3636 ; X32-NEXT: psllw $5, %xmm1
37 ; X32-NEXT: movdqa %xmm0, %xmm3
37 ; X32-NEXT: movdqa %xmm2, %xmm3
3838 ; X32-NEXT: psllw $4, %xmm3
3939 ; X32-NEXT: pand {{\.LCPI.*}}, %xmm3
4040 ; X32-NEXT: movdqa %xmm1, %xmm0
4646 ; X32-NEXT: movdqa %xmm1, %xmm0
4747 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
4848 ; X32-NEXT: movdqa %xmm2, %xmm3
49 ; X32-NEXT: paddb %xmm2, %xmm3
49 ; X32-NEXT: paddb %xmm3, %xmm3
5050 ; X32-NEXT: paddb %xmm1, %xmm1
5151 ; X32-NEXT: movdqa %xmm1, %xmm0
5252 ; X32-NEXT: pblendvb %xmm0, %xmm3, %xmm2
5757 ; X64: # BB#0: # %entry
5858 ; X64-NEXT: movdqa %xmm0, %xmm2
5959 ; X64-NEXT: psllw $5, %xmm1
60 ; X64-NEXT: movdqa %xmm0, %xmm3
60 ; X64-NEXT: movdqa %xmm2, %xmm3
6161 ; X64-NEXT: psllw $4, %xmm3
6262 ; X64-NEXT: pand {{.*}}(%rip), %xmm3
6363 ; X64-NEXT: movdqa %xmm1, %xmm0
6969 ; X64-NEXT: movdqa %xmm1, %xmm0
7070 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
7171 ; X64-NEXT: movdqa %xmm2, %xmm3
72 ; X64-NEXT: paddb %xmm2, %xmm3
72 ; X64-NEXT: paddb %xmm3, %xmm3
7373 ; X64-NEXT: paddb %xmm1, %xmm1
7474 ; X64-NEXT: movdqa %xmm1, %xmm0
7575 ; X64-NEXT: pblendvb %xmm0, %xmm3, %xmm2
991991 ; SSE41-NEXT: movdqa %xmm0, %xmm2
992992 ; SSE41-NEXT: psrad $31, %xmm1
993993 ; SSE41-NEXT: pxor %xmm3, %xmm3
994 ; SSE41-NEXT: psubd %xmm0, %xmm3
994 ; SSE41-NEXT: psubd %xmm2, %xmm3
995995 ; SSE41-NEXT: movdqa %xmm1, %xmm0
996996 ; SSE41-NEXT: blendvps %xmm0, %xmm2, %xmm3
997997 ; SSE41-NEXT: movaps %xmm3, %xmm0
175175 ; SSE2-LABEL: test_div7_16i8:
176176 ; SSE2: # BB#0:
177177 ; SSE2-NEXT: movdqa %xmm0, %xmm2
178 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
178 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
179179 ; SSE2-NEXT: psraw $8, %xmm2
180180 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
181181 ; SSE2-NEXT: pmullw %xmm3, %xmm2
182182 ; SSE2-NEXT: psrlw $8, %xmm2
183183 ; SSE2-NEXT: movdqa %xmm0, %xmm1
184 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
184 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
185185 ; SSE2-NEXT: psraw $8, %xmm1
186186 ; SSE2-NEXT: pmullw %xmm3, %xmm1
187187 ; SSE2-NEXT: psrlw $8, %xmm1
481481 ; SSE2-LABEL: test_rem7_16i8:
482482 ; SSE2: # BB#0:
483483 ; SSE2-NEXT: movdqa %xmm0, %xmm2
484 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
484 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
485485 ; SSE2-NEXT: psraw $8, %xmm2
486486 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65427,65427,65427,65427,65427,65427,65427,65427]
487487 ; SSE2-NEXT: pmullw %xmm3, %xmm2
488488 ; SSE2-NEXT: psrlw $8, %xmm2
489489 ; SSE2-NEXT: movdqa %xmm0, %xmm1
490 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
490 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
491491 ; SSE2-NEXT: psraw $8, %xmm1
492492 ; SSE2-NEXT: pmullw %xmm3, %xmm1
493493 ; SSE2-NEXT: psrlw $8, %xmm1
503503 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
504504 ; SSE2-NEXT: paddb %xmm2, %xmm1
505505 ; SSE2-NEXT: movdqa %xmm1, %xmm2
506 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
506 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
507507 ; SSE2-NEXT: psraw $8, %xmm2
508508 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
509509 ; SSE2-NEXT: pmullw %xmm3, %xmm2
480480 ; SSE2-NEXT: psrlw $2, %xmm1
481481 ; SSE2-NEXT: pand {{.*}}(%rip), %xmm1
482482 ; SSE2-NEXT: movdqa %xmm1, %xmm2
483 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
483 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
484484 ; SSE2-NEXT: psraw $8, %xmm2
485485 ; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [7,7,7,7,7,7,7,7]
486486 ; SSE2-NEXT: pmullw %xmm3, %xmm2
360360 ; SSE41-NEXT: psllw $4, %xmm1
361361 ; SSE41-NEXT: por %xmm0, %xmm1
362362 ; SSE41-NEXT: movdqa %xmm1, %xmm4
363 ; SSE41-NEXT: paddw %xmm1, %xmm4
363 ; SSE41-NEXT: paddw %xmm4, %xmm4
364364 ; SSE41-NEXT: movdqa %xmm3, %xmm6
365365 ; SSE41-NEXT: psllw $8, %xmm6
366366 ; SSE41-NEXT: movdqa %xmm3, %xmm5
385385 ; SSE41-NEXT: psllw $4, %xmm2
386386 ; SSE41-NEXT: por %xmm0, %xmm2
387387 ; SSE41-NEXT: movdqa %xmm2, %xmm1
388 ; SSE41-NEXT: paddw %xmm2, %xmm1
388 ; SSE41-NEXT: paddw %xmm1, %xmm1
389389 ; SSE41-NEXT: movdqa %xmm3, %xmm4
390390 ; SSE41-NEXT: psrlw $8, %xmm4
391391 ; SSE41-NEXT: movdqa %xmm2, %xmm0
630630 ; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8]
631631 ; SSE41-NEXT: psubb %xmm3, %xmm2
632632 ; SSE41-NEXT: psllw $5, %xmm3
633 ; SSE41-NEXT: movdqa %xmm0, %xmm5
633 ; SSE41-NEXT: movdqa %xmm1, %xmm5
634634 ; SSE41-NEXT: psllw $4, %xmm5
635635 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
636 ; SSE41-NEXT: movdqa %xmm0, %xmm4
636 ; SSE41-NEXT: movdqa %xmm1, %xmm4
637637 ; SSE41-NEXT: movdqa %xmm3, %xmm0
638638 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
639639 ; SSE41-NEXT: movdqa %xmm4, %xmm5
643643 ; SSE41-NEXT: movdqa %xmm3, %xmm0
644644 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
645645 ; SSE41-NEXT: movdqa %xmm4, %xmm5
646 ; SSE41-NEXT: paddb %xmm4, %xmm5
646 ; SSE41-NEXT: paddb %xmm5, %xmm5
647647 ; SSE41-NEXT: paddb %xmm3, %xmm3
648648 ; SSE41-NEXT: movdqa %xmm3, %xmm0
649649 ; SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm4
650650 ; SSE41-NEXT: psllw $5, %xmm2
651651 ; SSE41-NEXT: movdqa %xmm2, %xmm3
652 ; SSE41-NEXT: paddb %xmm2, %xmm3
652 ; SSE41-NEXT: paddb %xmm3, %xmm3
653653 ; SSE41-NEXT: movdqa %xmm1, %xmm5
654654 ; SSE41-NEXT: psrlw $4, %xmm5
655655 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm5
11901190 ; SSE41-LABEL: constant_rotate_v16i8:
11911191 ; SSE41: # BB#0:
11921192 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1193 ; SSE41-NEXT: movdqa %xmm0, %xmm3
1193 ; SSE41-NEXT: movdqa %xmm1, %xmm3
11941194 ; SSE41-NEXT: psllw $4, %xmm3
11951195 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
11961196 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,57600,41152,24704,8256]
12021202 ; SSE41-NEXT: paddb %xmm0, %xmm0
12031203 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
12041204 ; SSE41-NEXT: movdqa %xmm2, %xmm3
1205 ; SSE41-NEXT: paddb %xmm2, %xmm3
1205 ; SSE41-NEXT: paddb %xmm3, %xmm3
12061206 ; SSE41-NEXT: paddb %xmm0, %xmm0
12071207 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
12081208 ; SSE41-NEXT: movdqa %xmm1, %xmm3
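In the movdqa pairs above, forwarding had collapsed copy chains: a second copy read the original register rather than the intermediate copy, and the revert reinstates the chain. Sketch with an assumed register assignment:

    movdqa %xmm0, %xmm1         ; xmm1 = COPY xmm0
    ; forwarded (removed): the second copy read xmm0 directly:
    ;   movdqa %xmm0, %xmm3
    ; restored (added): it reads the intermediate copy again:
    movdqa %xmm1, %xmm3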
242242 ; SSSE3-LABEL: sext_16i8_to_8i32:
243243 ; SSSE3: # BB#0: # %entry
244244 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
245 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
245 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
246246 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
247247 ; SSSE3-NEXT: psrad $24, %xmm0
248248 ; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[u,u,u,4,u,u,u,5,u,u,u,6,u,u,u,7]
311311 ; SSSE3-LABEL: sext_16i8_to_16i32:
312312 ; SSSE3: # BB#0: # %entry
313313 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
314 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
314 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
315315 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
316316 ; SSSE3-NEXT: psrad $24, %xmm0
317317 ; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15]
442442 ; SSSE3-LABEL: sext_16i8_to_4i64:
443443 ; SSSE3: # BB#0: # %entry
444444 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
445 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
445 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
446446 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
447447 ; SSSE3-NEXT: movdqa %xmm0, %xmm2
448448 ; SSSE3-NEXT: psrad $31, %xmm2
498498 ; SSE2-LABEL: sext_16i8_to_8i64:
499499 ; SSE2: # BB#0: # %entry
500500 ; SSE2-NEXT: movdqa %xmm0, %xmm1
501 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
501 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
502502 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
503503 ; SSE2-NEXT: movdqa %xmm0, %xmm2
504504 ; SSE2-NEXT: psrad $31, %xmm2
11111111 ; SSE2-NEXT: movdqa %xmm1, %xmm2
11121112 ; SSE2-NEXT: movdqa %xmm0, %xmm3
11131113 ; SSE2-NEXT: psrad $31, %xmm3
1114 ; SSE2-NEXT: movdqa %xmm1, %xmm4
1114 ; SSE2-NEXT: movdqa %xmm2, %xmm4
11151115 ; SSE2-NEXT: psrad $31, %xmm4
11161116 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
11171117 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
11301130 ; SSSE3-NEXT: movdqa %xmm1, %xmm2
11311131 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
11321132 ; SSSE3-NEXT: psrad $31, %xmm3
1133 ; SSSE3-NEXT: movdqa %xmm1, %xmm4
1133 ; SSSE3-NEXT: movdqa %xmm2, %xmm4
11341134 ; SSSE3-NEXT: psrad $31, %xmm4
11351135 ; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
11361136 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
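The sext hunks are the mirror image of the earlier unpack pairs: here the copy parks the input in a scratch register while the unpack writes the original, so the forwarded form was the self-unpack and the restored form names both registers. Illustrative sketch:

    movdqa    %xmm0, %xmm1      ; xmm1 = COPY xmm0 (preserves the input)
    ; forwarded (removed): the use of xmm1 was rewritten to xmm0:
    ;   punpcklbw %xmm0, %xmm0  ; xmm0 = xmm0[0,0,1,1,...]
    ; restored (added):
    punpcklbw %xmm1, %xmm0      ; xmm0 = xmm0[0],xmm1[0],...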
273273 ; SSE41-NEXT: psllw $4, %xmm1
274274 ; SSE41-NEXT: por %xmm0, %xmm1
275275 ; SSE41-NEXT: movdqa %xmm1, %xmm3
276 ; SSE41-NEXT: paddw %xmm1, %xmm3
276 ; SSE41-NEXT: paddw %xmm3, %xmm3
277277 ; SSE41-NEXT: movdqa %xmm2, %xmm4
278278 ; SSE41-NEXT: psraw $8, %xmm4
279279 ; SSE41-NEXT: movdqa %xmm1, %xmm0
244244 ; SSE41-NEXT: psllw $4, %xmm1
245245 ; SSE41-NEXT: por %xmm0, %xmm1
246246 ; SSE41-NEXT: movdqa %xmm1, %xmm3
247 ; SSE41-NEXT: paddw %xmm1, %xmm3
247 ; SSE41-NEXT: paddw %xmm3, %xmm3
248248 ; SSE41-NEXT: movdqa %xmm2, %xmm4
249249 ; SSE41-NEXT: psrlw $8, %xmm4
250250 ; SSE41-NEXT: movdqa %xmm1, %xmm0
406406 ; SSE41: # BB#0:
407407 ; SSE41-NEXT: movdqa %xmm0, %xmm2
408408 ; SSE41-NEXT: psllw $5, %xmm1
409 ; SSE41-NEXT: movdqa %xmm0, %xmm3
409 ; SSE41-NEXT: movdqa %xmm2, %xmm3
410410 ; SSE41-NEXT: psrlw $4, %xmm3
411411 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
412412 ; SSE41-NEXT: movdqa %xmm1, %xmm0
678678 ; SSE41-NEXT: pshufb %xmm0, %xmm1
679679 ; SSE41-NEXT: psllw $5, %xmm1
680680 ; SSE41-NEXT: movdqa %xmm1, %xmm3
681 ; SSE41-NEXT: paddb %xmm1, %xmm3
681 ; SSE41-NEXT: paddb %xmm3, %xmm3
682682 ; SSE41-NEXT: movdqa %xmm2, %xmm4
683683 ; SSE41-NEXT: psrlw $4, %xmm4
684684 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
11001100 ; SSE41-LABEL: constant_shift_v16i8:
11011101 ; SSE41: # BB#0:
11021102 ; SSE41-NEXT: movdqa %xmm0, %xmm1
1103 ; SSE41-NEXT: movdqa %xmm0, %xmm2
1103 ; SSE41-NEXT: movdqa %xmm1, %xmm2
11041104 ; SSE41-NEXT: psrlw $4, %xmm2
11051105 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
11061106 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
201201 ; SSE41-NEXT: psllw $4, %xmm1
202202 ; SSE41-NEXT: por %xmm0, %xmm1
203203 ; SSE41-NEXT: movdqa %xmm1, %xmm3
204 ; SSE41-NEXT: paddw %xmm1, %xmm3
204 ; SSE41-NEXT: paddw %xmm3, %xmm3
205205 ; SSE41-NEXT: movdqa %xmm2, %xmm4
206206 ; SSE41-NEXT: psllw $8, %xmm4
207207 ; SSE41-NEXT: movdqa %xmm1, %xmm0
360360 ; SSE41: # BB#0:
361361 ; SSE41-NEXT: movdqa %xmm0, %xmm2
362362 ; SSE41-NEXT: psllw $5, %xmm1
363 ; SSE41-NEXT: movdqa %xmm0, %xmm3
363 ; SSE41-NEXT: movdqa %xmm2, %xmm3
364364 ; SSE41-NEXT: psllw $4, %xmm3
365365 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm3
366366 ; SSE41-NEXT: movdqa %xmm1, %xmm0
372372 ; SSE41-NEXT: movdqa %xmm1, %xmm0
373373 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
374374 ; SSE41-NEXT: movdqa %xmm2, %xmm3
375 ; SSE41-NEXT: paddb %xmm2, %xmm3
375 ; SSE41-NEXT: paddb %xmm3, %xmm3
376376 ; SSE41-NEXT: paddb %xmm1, %xmm1
377377 ; SSE41-NEXT: movdqa %xmm1, %xmm0
378378 ; SSE41-NEXT: pblendvb %xmm0, %xmm3, %xmm2
626626 ; SSE41-NEXT: pshufb %xmm0, %xmm1
627627 ; SSE41-NEXT: psllw $5, %xmm1
628628 ; SSE41-NEXT: movdqa %xmm1, %xmm3
629 ; SSE41-NEXT: paddb %xmm1, %xmm3
629 ; SSE41-NEXT: paddb %xmm3, %xmm3
630630 ; SSE41-NEXT: movdqa %xmm2, %xmm4
631631 ; SSE41-NEXT: psllw $4, %xmm4
632632 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm4
638638 ; SSE41-NEXT: movdqa %xmm3, %xmm0
639639 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
640640 ; SSE41-NEXT: movdqa %xmm2, %xmm1
641 ; SSE41-NEXT: paddb %xmm2, %xmm1
641 ; SSE41-NEXT: paddb %xmm1, %xmm1
642642 ; SSE41-NEXT: paddb %xmm3, %xmm3
643643 ; SSE41-NEXT: movdqa %xmm3, %xmm0
644644 ; SSE41-NEXT: pblendvb %xmm0, %xmm1, %xmm2
956956 ; SSE41-LABEL: constant_shift_v16i8:
957957 ; SSE41: # BB#0:
958958 ; SSE41-NEXT: movdqa %xmm0, %xmm1
959 ; SSE41-NEXT: movdqa %xmm0, %xmm2
959 ; SSE41-NEXT: movdqa %xmm1, %xmm2
960960 ; SSE41-NEXT: psllw $4, %xmm2
961961 ; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
962962 ; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
967967 ; SSE41-NEXT: paddb %xmm0, %xmm0
968968 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
969969 ; SSE41-NEXT: movdqa %xmm1, %xmm2
970 ; SSE41-NEXT: paddb %xmm1, %xmm2
970 ; SSE41-NEXT: paddb %xmm2, %xmm2
971971 ; SSE41-NEXT: paddb %xmm0, %xmm0
972972 ; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
973973 ; SSE41-NEXT: movdqa %xmm1, %xmm0
27912791 ; SSE-LABEL: PR22377:
27922792 ; SSE: # BB#0: # %entry
27932793 ; SSE-NEXT: movaps %xmm0, %xmm1
2794 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm0[1,3]
2794 ; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3,1,3]
27952795 ; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,0,2]
27962796 ; SSE-NEXT: addps %xmm0, %xmm1
27972797 ; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
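The PR22377 pair is the same substitution in a float shuffle: once the source use is forwarded, shufps takes two identical inputs and the printed shuffle comment collapses to a single-register mask. Sketch (the 0xdd immediate encodes the [1,3],[1,3] lane selection):

    movaps %xmm0, %xmm1             ; xmm1 = COPY xmm0
    ; forwarded (removed):
    ;   shufps $0xdd, %xmm0, %xmm1  ; xmm1 = xmm1[1,3],xmm0[1,3]
    ; restored (added):
    shufps $0xdd, %xmm1, %xmm1      ; xmm1 = xmm1[1,3,1,3]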
49634963 ; SSE-LABEL: mul_add_const_v4i64_v4i32:
49644964 ; SSE: # BB#0:
49654965 ; SSE-NEXT: movdqa %xmm0, %xmm2
4966 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
4966 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
49674967 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,3,3]
49684968 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,1,1,3]
49694969 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
245245 ; SSE2: # BB#0: # %entry
246246 ; SSE2-NEXT: movdqa %xmm0, %xmm3
247247 ; SSE2-NEXT: pxor %xmm4, %xmm4
248 ; SSE2-NEXT: movdqa %xmm0, %xmm1
248 ; SSE2-NEXT: movdqa %xmm3, %xmm1
249249 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
250250 ; SSE2-NEXT: movdqa %xmm1, %xmm0
251251 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
260260 ; SSSE3: # BB#0: # %entry
261261 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
262262 ; SSSE3-NEXT: pxor %xmm4, %xmm4
263 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
263 ; SSSE3-NEXT: movdqa %xmm3, %xmm1
264264 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
265265 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
266266 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
398398 ; SSE2: # BB#0: # %entry
399399 ; SSE2-NEXT: movdqa %xmm0, %xmm1
400400 ; SSE2-NEXT: pxor %xmm4, %xmm4
401 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,3]
401 ; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,2,3]
402402 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
403403 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
404404 ; SSE2-NEXT: movdqa %xmm1, %xmm0
699699 ; SSE2: # BB#0: # %entry
700700 ; SSE2-NEXT: movdqa %xmm0, %xmm3
701701 ; SSE2-NEXT: pxor %xmm4, %xmm4
702 ; SSE2-NEXT: movdqa %xmm0, %xmm1
702 ; SSE2-NEXT: movdqa %xmm3, %xmm1
703703 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
704704 ; SSE2-NEXT: movdqa %xmm1, %xmm0
705705 ; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
714714 ; SSSE3: # BB#0: # %entry
715715 ; SSSE3-NEXT: movdqa %xmm0, %xmm3
716716 ; SSSE3-NEXT: pxor %xmm4, %xmm4
717 ; SSSE3-NEXT: movdqa %xmm0, %xmm1
717 ; SSSE3-NEXT: movdqa %xmm3, %xmm1
718718 ; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
719719 ; SSSE3-NEXT: movdqa %xmm1, %xmm0
720720 ; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
15811581 ; SSE41: # BB#0: # %entry
15821582 ; SSE41-NEXT: movdqa %xmm0, %xmm1
15831583 ; SSE41-NEXT: pxor %xmm2, %xmm2
1584 ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
1584 ; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
15851585 ; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
15861586 ; SSE41-NEXT: retq
15871587 ;
16291629 ; SSE41: # BB#0: # %entry
16301630 ; SSE41-NEXT: movdqa %xmm0, %xmm1
16311631 ; SSE41-NEXT: pxor %xmm2, %xmm2
1632 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
1632 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
16331633 ; SSE41-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
16341634 ; SSE41-NEXT: retq
16351635 ;
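The pmovzx pairs in the zext tests follow suit: the zero-extend's source had been forwarded to the original register, while the copy keeps the full vector live for the later high-half unpack. Sketch:

    movdqa    %xmm0, %xmm1      ; xmm1 = COPY xmm0 (keeps all elements live)
    ; forwarded (removed):
    ;   pmovzxwd %xmm0, %xmm0   ; xmm0 = xmm0[0],zero,xmm0[1],zero,...
    ; restored (added):
    pmovzxwd  %xmm1, %xmm0      ; xmm0 = xmm1[0],zero,xmm1[1],zero,...
    punpckhwd %xmm2, %xmm1      ; high half still read from the copy
                                ; (xmm2 assumed zeroed by an earlier pxor)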
33433343 ; SSE2: # BB#0: # %entry
33443344 ; SSE2-NEXT: movdqa %xmm3, %xmm8
33453345 ; SSE2-NEXT: movdqa %xmm2, %xmm9
3346 ; SSE2-NEXT: movdqa %xmm3, %xmm12
3346 ; SSE2-NEXT: movdqa %xmm8, %xmm12
33473347 ; SSE2-NEXT: pcmpgtb %xmm7, %xmm12
33483348 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
33493349 ; SSE2-NEXT: movdqa %xmm12, %xmm3
33503350 ; SSE2-NEXT: pxor %xmm13, %xmm3
3351 ; SSE2-NEXT: movdqa %xmm2, %xmm14
3351 ; SSE2-NEXT: movdqa %xmm9, %xmm14
33523352 ; SSE2-NEXT: pcmpgtb %xmm6, %xmm14
33533353 ; SSE2-NEXT: movdqa %xmm14, %xmm2
33543354 ; SSE2-NEXT: pxor %xmm13, %xmm2
34863486 ; SSE2-NEXT: movdqa %xmm2, %xmm9
34873487 ; SSE2-NEXT: movdqa %xmm0, %xmm10
34883488 ; SSE2-NEXT: movdqa %xmm7, %xmm12
3489 ; SSE2-NEXT: pcmpgtb %xmm3, %xmm12
3489 ; SSE2-NEXT: pcmpgtb %xmm8, %xmm12
34903490 ; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
34913491 ; SSE2-NEXT: movdqa %xmm12, %xmm3
34923492 ; SSE2-NEXT: pxor %xmm0, %xmm3
34933493 ; SSE2-NEXT: movdqa %xmm6, %xmm13
3494 ; SSE2-NEXT: pcmpgtb %xmm2, %xmm13
3494 ; SSE2-NEXT: pcmpgtb %xmm9, %xmm13
34953495 ; SSE2-NEXT: movdqa %xmm13, %xmm2
34963496 ; SSE2-NEXT: pxor %xmm0, %xmm2
34973497 ; SSE2-NEXT: movdqa %xmm5, %xmm14
42244224 ; SSE2: # BB#0: # %entry
42254225 ; SSE2-NEXT: movdqa %xmm3, %xmm8
42264226 ; SSE2-NEXT: movdqa %xmm2, %xmm9
4227 ; SSE2-NEXT: movdqa %xmm3, %xmm12
4227 ; SSE2-NEXT: movdqa %xmm8, %xmm12
42284228 ; SSE2-NEXT: pcmpgtd %xmm7, %xmm12
42294229 ; SSE2-NEXT: pcmpeqd %xmm13, %xmm13
42304230 ; SSE2-NEXT: movdqa %xmm12, %xmm3
42314231 ; SSE2-NEXT: pxor %xmm13, %xmm3
4232 ; SSE2-NEXT: movdqa %xmm2, %xmm14
4232 ; SSE2-NEXT: movdqa %xmm9, %xmm14
42334233 ; SSE2-NEXT: pcmpgtd %xmm6, %xmm14
42344234 ; SSE2-NEXT: movdqa %xmm14, %xmm2
42354235 ; SSE2-NEXT: pxor %xmm13, %xmm2
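The wide compare/select hunks at the end show the pattern with arguments parked in high registers: scratch copies and pcmpgt sources had been forwarded to the incoming xmm2/xmm3 values, and the revert makes them read the xmm8/xmm9 copies again. Illustrative sketch:

    movdqa  %xmm3, %xmm8        ; xmm8 = COPY xmm3
    ; forwarded (removed):
    ;   movdqa %xmm3, %xmm12    ; scratch copy read the original
    ; restored (added):
    movdqa  %xmm8, %xmm12       ; scratch copy reads xmm8
    pcmpgtd %xmm7, %xmm12       ; the compare itself is unchanged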
43674367 ; SSE2-NEXT: movdqa %xmm2, %xmm9
43684368 ; SSE2-NEXT: movdqa %xmm0, %xmm10
4369