llvm.org GIT mirror llvm / b36d1a8
NFC: make AtomicOrdering an enum class

Summary:
In the context of http://wg21.link/lwg2445 C++ uses the concept of 'stronger' ordering but doesn't define it properly. This should be fixed in C++17 barring a small question that's still open.

The code currently plays fast and loose with the AtomicOrdering enum. Using an enum class is one step towards tightening things. I later also want to tighten related enums, such as clang's AtomicOrderingKind (which should be shared with LLVM as a 'C++ ABI' enum).

This change touches a few lines of code which can be improved later, I'd like to keep it as NFC for now as it's already quite complex. I have related changes for clang.

As a follow-up I'll add:

    bool operator<(AtomicOrdering, AtomicOrdering) = delete;
    bool operator>(AtomicOrdering, AtomicOrdering) = delete;
    bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
    bool operator>=(AtomicOrdering, AtomicOrdering) = delete;

This is separate so that clang and LLVM changes don't need to be in sync.

Reviewers: jyknight, reames

Subscribers: jyknight, llvm-commits

Differential Revision: http://reviews.llvm.org/D18775

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@265602 91177308-0d34-0410-b5e6-96231b3b80d8

JF Bastien, 3 years ago
37 changed file(s) with 389 addition(s) and 315 deletion(s).
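The follow-up the summary describes is what ultimately locks this down: once AtomicOrdering is an enum class and the relational operators are deleted, an accidental "stronger ordering" comparison no longer compiles and callers must go through the lattice predicates added in this patch. A minimal sketch of that end state (not part of this commit; mayNeedFence is a hypothetical caller):

    bool operator<(AtomicOrdering, AtomicOrdering) = delete;
    bool operator>(AtomicOrdering, AtomicOrdering) = delete;

    bool mayNeedFence(AtomicOrdering AO) {
      // return AO > AtomicOrdering::Monotonic;  // ill-formed: deleted operator
      return isStrongerThanMonotonic(AO);        // lattice query, intent explicit
    }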
366366 that they return true for any operation which is volatile or at least
367367 Monotonic.
368368
369 * ``isAtLeastAcquire()``/``isAtLeastRelease()``: These are predicates on
369 * ``isStrongerThan`` / ``isAtLeastOrStrongerThan``: These are predicates on
370370 orderings. They can be useful for passes that are aware of atomics, for
371371 example to do DSE across a single atomic access, but not across a
372372 release-acquire pair (see MemoryDependencyAnalysis for an example of this)
11261126 AtomicOrdering FailureOrdering,
11271127 SynchronizationScope SynchScope) {
11281128 // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
1129 assert((SuccessOrdering & 15) == SuccessOrdering &&
1129 assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) ==
1130 SuccessOrdering &&
11301131 "Ordering may not require more than 4 bits!");
1131 assert((FailureOrdering & 15) == FailureOrdering &&
1132 assert((AtomicOrdering)((unsigned)FailureOrdering & 15) ==
1133 FailureOrdering &&
11321134 "Ordering may not require more than 4 bits!");
11331135 assert((SynchScope & 1) == SynchScope &&
11341136 "SynchScope may not require more than 1 bit!");
1135 SubclassData |= SuccessOrdering << 8;
1137 SubclassData |= (unsigned)SuccessOrdering << 8;
11361138 SubclassData |= SynchScope << 12;
11371139 this->FailureOrdering = FailureOrdering;
11381140 assert(getSuccessOrdering() == SuccessOrdering &&
3535 class DataLayout;
3636 class LLVMContext;
3737
38 enum AtomicOrdering {
38 /// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
39 /// Unordered, which are both below the C++ orders. See docs/Atomics.rst for
40 /// details.
41 ///
42 /// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
43 /// \-->consume-->acquire--/
44 enum class AtomicOrdering {
3945 NotAtomic = 0,
4046 Unordered = 1,
41 Monotonic = 2,
47 Monotonic = 2, // Equivalent to C++'s relaxed.
4248 // Consume = 3, // Not specified yet.
4349 Acquire = 4,
4450 Release = 5,
4652 SequentiallyConsistent = 7
4753 };
4854
55 /// String used by LLVM IR to represent atomic ordering.
56 static inline const char *toIRString(AtomicOrdering ao) {
57 static const char *names[8] = {"not_atomic", "unordered", "monotonic",
58 "consume", "acquire", "release",
59 "acq_rel", "seq_cst"};
60 return names[(size_t)ao];
61 }
62
63 /// Returns true if ao is stronger than other as defined by the AtomicOrdering
64 /// lattice, which is based on C++'s definition.
65 static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
66 static const bool lookup[8][8] = {
67 // NA UN RX CO AC RE AR SC
68 /* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0},
69 /* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0},
70 /* relaxed */ {1, 1, 0, 0, 0, 0, 0, 0},
71 /* consume */ {1, 1, 1, 0, 0, 0, 0, 0},
72 /* acquire */ {1, 1, 1, 1, 0, 0, 0, 0},
73 /* release */ {1, 1, 1, 0, 0, 0, 0, 0},
74 /* acq_rel */ {1, 1, 1, 1, 1, 1, 0, 0},
75 /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 0},
76 };
77 return lookup[(size_t)ao][(size_t)other];
78 }
79
80 static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao,
81 AtomicOrdering other) {
82 static const bool lookup[8][8] = {
83 // NA UN RX CO AC RE AR SC
84 /* NotAtomic */ {1, 0, 0, 0, 0, 0, 0, 0},
85 /* Unordered */ {1, 1, 0, 0, 0, 0, 0, 0},
86 /* relaxed */ {1, 1, 1, 0, 0, 0, 0, 0},
87 /* consume */ {1, 1, 1, 1, 0, 0, 0, 0},
88 /* acquire */ {1, 1, 1, 1, 1, 0, 0, 0},
89 /* release */ {1, 1, 1, 0, 0, 1, 0, 0},
90 /* acq_rel */ {1, 1, 1, 1, 1, 1, 1, 0},
91 /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 1},
92 };
93 return lookup[(size_t)ao][(size_t)other];
94 }
95
96 static inline bool isStrongerThanUnordered(AtomicOrdering Ord) {
97 return isStrongerThan(Ord, AtomicOrdering::Unordered);
98 }
99
100 static inline bool isStrongerThanMonotonic(AtomicOrdering Ord) {
101 return isStrongerThan(Ord, AtomicOrdering::Monotonic);
102 }
103
104 static inline bool isAcquireOrStronger(AtomicOrdering Ord) {
105 return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Acquire);
106 }
107
108 static inline bool isReleaseOrStronger(AtomicOrdering Ord) {
109 return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Release);
110 }
111
49112 enum SynchronizationScope {
50113 SingleThread = 0,
51114 CrossThread = 1
52115 };
53116
54 /// Returns true if the ordering is at least as strong as acquire
55 /// (i.e. acquire, acq_rel or seq_cst)
56 inline bool isAtLeastAcquire(AtomicOrdering Ord) {
57 return (Ord == Acquire ||
58 Ord == AcquireRelease ||
59 Ord == SequentiallyConsistent);
60 }
61
62 /// Returns true if the ordering is at least as strong as release
63 /// (i.e. release, acq_rel or seq_cst)
64 inline bool isAtLeastRelease(AtomicOrdering Ord) {
65 return (Ord == Release ||
66 Ord == AcquireRelease ||
67 Ord == SequentiallyConsistent);
68 }
69117
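These lookup tables deliberately replace ordinal comparison: Acquire and Release sit on separate branches of the lattice, so neither is stronger than the other, something '<' on the old plain enum could never express. A short illustration (not from the patch) of what the two predicates answer:

    assert(!isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Release));
    assert(!isStrongerThan(AtomicOrdering::Release, AtomicOrdering::Acquire));
    assert(isStrongerThan(AtomicOrdering::AcquireRelease, AtomicOrdering::Acquire));
    // The reflexive variant differs from isStrongerThan only on the diagonal:
    assert(isAtLeastOrStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Acquire));
    assert(!isStrongerThan(AtomicOrdering::Acquire, AtomicOrdering::Acquire));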
70118 //===----------------------------------------------------------------------===//
71119 // AllocaInst Class
268316 /// AcquireRelease.
269317 void setOrdering(AtomicOrdering Ordering) {
270318 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
271 (Ordering << 7));
319 ((unsigned)Ordering << 7));
272320 }
273321
274322 SynchronizationScope getSynchScope() const {
291339
292340 bool isSimple() const { return !isAtomic() && !isVolatile(); }
293341 bool isUnordered() const {
294 return getOrdering() <= Unordered && !isVolatile();
342 return (getOrdering() == AtomicOrdering::NotAtomic ||
343 getOrdering() == AtomicOrdering::Unordered) &&
344 !isVolatile();
295345 }
296346
297347 Value *getPointerOperand() { return getOperand(0); }
389439 /// AcquireRelease.
390440 void setOrdering(AtomicOrdering Ordering) {
391441 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
392 (Ordering << 7));
442 ((unsigned)Ordering << 7));
393443 }
394444
395445 SynchronizationScope getSynchScope() const {
412462
413463 bool isSimple() const { return !isAtomic() && !isVolatile(); }
414464 bool isUnordered() const {
415 return getOrdering() <= Unordered && !isVolatile();
465 return (getOrdering() == AtomicOrdering::NotAtomic ||
466 getOrdering() == AtomicOrdering::Unordered) &&
467 !isVolatile();
416468 }
417469
418470 Value *getValueOperand() { return getOperand(0); }
488540 /// AcquireRelease, or SequentiallyConsistent.
489541 void setOrdering(AtomicOrdering Ordering) {
490542 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
491 (Ordering << 1));
543 ((unsigned)Ordering << 1));
492544 }
493545
494546 SynchronizationScope getSynchScope() const {
583635
584636 /// Set the ordering constraint on this cmpxchg.
585637 void setSuccessOrdering(AtomicOrdering Ordering) {
586 assert(Ordering != NotAtomic &&
638 assert(Ordering != AtomicOrdering::NotAtomic &&
587639 "CmpXchg instructions can only be atomic.");
588640 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
589 (Ordering << 2));
641 ((unsigned)Ordering << 2));
590642 }
591643
592644 void setFailureOrdering(AtomicOrdering Ordering) {
593 assert(Ordering != NotAtomic &&
645 assert(Ordering != AtomicOrdering::NotAtomic &&
594646 "CmpXchg instructions can only be atomic.");
595647 setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
596 (Ordering << 5));
648 ((unsigned)Ordering << 5));
597649 }
598650
599651 /// Specify whether this cmpxchg is atomic and orders other operations with
645697 static AtomicOrdering
646698 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
647699 switch (SuccessOrdering) {
648 default: llvm_unreachable("invalid cmpxchg success ordering");
649 case Release:
650 case Monotonic:
651 return Monotonic;
652 case AcquireRelease:
653 case Acquire:
654 return Acquire;
655 case SequentiallyConsistent:
656 return SequentiallyConsistent;
700 default:
701 llvm_unreachable("invalid cmpxchg success ordering");
702 case AtomicOrdering::Release:
703 case AtomicOrdering::Monotonic:
704 return AtomicOrdering::Monotonic;
705 case AtomicOrdering::AcquireRelease:
706 case AtomicOrdering::Acquire:
707 return AtomicOrdering::Acquire;
708 case AtomicOrdering::SequentiallyConsistent:
709 return AtomicOrdering::SequentiallyConsistent;
657710 }
658711 }
659712
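Illustrative use of the helper above (getStrongestFailureOrdering is a public static member of AtomicCmpXchgInst): the strongest legal failure ordering simply strips any release semantics from the success ordering.

    AtomicOrdering Fail = AtomicCmpXchgInst::getStrongestFailureOrdering(
        AtomicOrdering::AcquireRelease); // yields AtomicOrdering::Acquire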
769822
770823 /// Set the ordering constraint on this RMW.
771824 void setOrdering(AtomicOrdering Ordering) {
772 assert(Ordering != NotAtomic &&
825 assert(Ordering != AtomicOrdering::NotAtomic &&
773826 "atomicrmw instructions can only be atomic.");
774827 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
775 (Ordering << 2));
828 ((unsigned)Ordering << 2));
776829 }
777830
778831 /// Specify whether this RMW orders other operations with respect to all
11071107 virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
11081108 AtomicOrdering Ord, bool IsStore,
11091109 bool IsLoad) const {
1110 if (isAtLeastRelease(Ord) && IsStore)
1110 if (isReleaseOrStronger(Ord) && IsStore)
11111111 return Builder.CreateFence(Ord);
11121112 else
11131113 return nullptr;
11161116 virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
11171117 AtomicOrdering Ord, bool IsStore,
11181118 bool IsLoad) const {
1119 if (isAtLeastAcquire(Ord))
1119 if (isAcquireOrStronger(Ord))
11201120 return Builder.CreateFence(Ord);
11211121 else
11221122 return nullptr;
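For targets that keep these default hooks, the renamed predicates preserve the existing fence placement; roughly (a sketch, not any specific target's output):

    // store seq_cst        -> leading fence (release-or-stronger store) and
    //                         trailing fence (seq_cst is acquire-or-stronger)
    // load acquire         -> no leading fence, trailing fence only
    // load/store monotonic -> no fences from either hook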
388388 ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
389389 const MemoryLocation &Loc) {
390390 // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
391 if (CX->getSuccessOrdering() > Monotonic)
391 if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
392392 return MRI_ModRef;
393393
394394 // If the cmpxchg address does not alias the location, it does not access it.
401401 ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
402402 const MemoryLocation &Loc) {
403403 // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
404 if (RMW->getOrdering() > Monotonic)
404 if (isStrongerThanMonotonic(RMW->getOrdering()))
405405 return MRI_ModRef;
406406
407407 // If the atomicrmw address does not alias the location, it does not access it.
299299
300300
301301 bool AliasSetTracker::add(LoadInst *LI) {
302 if (LI->getOrdering() > Monotonic) return addUnknown(LI);
302 if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI);
303303
304304 AAMDNodes AAInfo;
305305 LI->getAAMetadata(AAInfo);
315315 }
316316
317317 bool AliasSetTracker::add(StoreInst *SI) {
318 if (SI->getOrdering() > Monotonic) return addUnknown(SI);
318 if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI);
319319
320320 AAMDNodes AAInfo;
321321 SI->getAAMetadata(AAInfo);
9292 Loc = MemoryLocation::get(LI);
9393 return MRI_Ref;
9494 }
95 if (LI->getOrdering() == Monotonic) {
95 if (LI->getOrdering() == AtomicOrdering::Monotonic) {
9696 Loc = MemoryLocation::get(LI);
9797 return MRI_ModRef;
9898 }
105105 Loc = MemoryLocation::get(SI);
106106 return MRI_Mod;
107107 }
108 if (SI->getOrdering() == Monotonic) {
108 if (SI->getOrdering() == AtomicOrdering::Monotonic) {
109109 Loc = MemoryLocation::get(SI);
110110 return MRI_ModRef;
111111 }
517517 // A Monotonic (or higher) load is OK if the query inst is itself not
518518 // atomic.
519519 // FIXME: This is overly conservative.
520 if (LI->isAtomic() && LI->getOrdering() > Unordered) {
520 if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
521521 if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
522522 isOtherMemAccess(QueryInst))
523523 return MemDepResult::getClobber(LI);
524 if (LI->getOrdering() != Monotonic)
524 if (LI->getOrdering() != AtomicOrdering::Monotonic)
525525 return MemDepResult::getClobber(LI);
526526 }
527527
587587 if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
588588 isOtherMemAccess(QueryInst))
589589 return MemDepResult::getClobber(SI);
590 if (SI->getOrdering() != Monotonic)
590 if (SI->getOrdering() != AtomicOrdering::Monotonic)
591591 return MemDepResult::getClobber(SI);
592592 }
593593
643643 // loads. DSE uses this to find preceding stores to delete and thus we
644644 // can't bypass the fence if the query instruction is a store.
645645 if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
646 if (isLoad && FI->getOrdering() == Release)
646 if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
647647 continue;
648
648
649649 // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
650650 ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
651651 // If necessary, perform additional analysis.
17071707 MemDep.emplace(AA, AC, TLI, DT);
17081708 return false;
17091709 }
1710
18091809 bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
18101810 switch (Lex.getKind()) {
18111811 default: return TokError("Expected ordering on atomic instruction");
1812 case lltok::kw_unordered: Ordering = Unordered; break;
1813 case lltok::kw_monotonic: Ordering = Monotonic; break;
1814 case lltok::kw_acquire: Ordering = Acquire; break;
1815 case lltok::kw_release: Ordering = Release; break;
1816 case lltok::kw_acq_rel: Ordering = AcquireRelease; break;
1817 case lltok::kw_seq_cst: Ordering = SequentiallyConsistent; break;
1812 case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break;
1813 case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break;
1814 // Not specified yet:
1815 // case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break;
1816 case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break;
1817 case lltok::kw_release: Ordering = AtomicOrdering::Release; break;
1818 case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break;
1819 case lltok::kw_seq_cst:
1820 Ordering = AtomicOrdering::SequentiallyConsistent;
1821 break;
18181822 }
18191823 Lex.Lex();
18201824 return false;
58835887 unsigned Alignment = 0;
58845888 bool AteExtraComma = false;
58855889 bool isAtomic = false;
5886 AtomicOrdering Ordering = NotAtomic;
5890 AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
58875891 SynchronizationScope Scope = CrossThread;
58885892
58895893 if (Lex.getKind() == lltok::kw_atomic) {
59105914 return Error(Loc, "load operand must be a pointer to a first class type");
59115915 if (isAtomic && !Alignment)
59125916 return Error(Loc, "atomic load must have explicit non-zero alignment");
5913 if (Ordering == Release || Ordering == AcquireRelease)
5917 if (Ordering == AtomicOrdering::Release ||
5918 Ordering == AtomicOrdering::AcquireRelease)
59145919 return Error(Loc, "atomic load cannot use Release ordering");
59155920
59165921 if (Ty != cast<PointerType>(Val->getType())->getElementType())
59315936 unsigned Alignment = 0;
59325937 bool AteExtraComma = false;
59335938 bool isAtomic = false;
5934 AtomicOrdering Ordering = NotAtomic;
5939 AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
59355940 SynchronizationScope Scope = CrossThread;
59365941
59375942 if (Lex.getKind() == lltok::kw_atomic) {
59605965 return Error(Loc, "stored value and pointer type do not match");
59615966 if (isAtomic && !Alignment)
59625967 return Error(Loc, "atomic store must have explicit non-zero alignment");
5963 if (Ordering == Acquire || Ordering == AcquireRelease)
5968 if (Ordering == AtomicOrdering::Acquire ||
5969 Ordering == AtomicOrdering::AcquireRelease)
59645970 return Error(Loc, "atomic store cannot use Acquire ordering");
59655971
59665972 Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
59735979 int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
59745980 Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
59755981 bool AteExtraComma = false;
5976 AtomicOrdering SuccessOrdering = NotAtomic;
5977 AtomicOrdering FailureOrdering = NotAtomic;
5982 AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
5983 AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
59785984 SynchronizationScope Scope = CrossThread;
59795985 bool isVolatile = false;
59805986 bool isWeak = false;
59946000 ParseOrdering(FailureOrdering))
59956001 return true;
59966002
5997 if (SuccessOrdering == Unordered || FailureOrdering == Unordered)
6003 if (SuccessOrdering == AtomicOrdering::Unordered ||
6004 FailureOrdering == AtomicOrdering::Unordered)
59986005 return TokError("cmpxchg cannot be unordered");
5999 if (SuccessOrdering < FailureOrdering)
6000 return TokError("cmpxchg must be at least as ordered on success as failure");
6001 if (FailureOrdering == Release || FailureOrdering == AcquireRelease)
6002 return TokError("cmpxchg failure ordering cannot include release semantics");
6006 if (isStrongerThan(FailureOrdering, SuccessOrdering))
6007 return TokError("cmpxchg failure argument shall be no stronger than the "
6008 "success argument");
6009 if (FailureOrdering == AtomicOrdering::Release ||
6010 FailureOrdering == AtomicOrdering::AcquireRelease)
6011 return TokError(
6012 "cmpxchg failure ordering cannot include release semantics");
60036013 if (!Ptr->getType()->isPointerTy())
60046014 return Error(PtrLoc, "cmpxchg operand must be a pointer");
60056015 if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
60226032 int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
60236033 Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
60246034 bool AteExtraComma = false;
6025 AtomicOrdering Ordering = NotAtomic;
6035 AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
60266036 SynchronizationScope Scope = CrossThread;
60276037 bool isVolatile = false;
60286038 AtomicRMWInst::BinOp Operation;
60526062 ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
60536063 return true;
60546064
6055 if (Ordering == Unordered)
6065 if (Ordering == AtomicOrdering::Unordered)
60566066 return TokError("atomicrmw cannot be unordered");
60576067 if (!Ptr->getType()->isPointerTy())
60586068 return Error(PtrLoc, "atomicrmw operand must be a pointer");
60756085 /// ParseFence
60766086 /// ::= 'fence' 'singlethread'? AtomicOrdering
60776087 int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
6078 AtomicOrdering Ordering = NotAtomic;
6088 AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
60796089 SynchronizationScope Scope = CrossThread;
60806090 if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
60816091 return true;
60826092
6083 if (Ordering == Unordered)
6093 if (Ordering == AtomicOrdering::Unordered)
60846094 return TokError("fence cannot be unordered");
6085 if (Ordering == Monotonic)
6095 if (Ordering == AtomicOrdering::Monotonic)
60866096 return TokError("fence cannot be monotonic");
60876097
60886098 Inst = new FenceInst(Context, Ordering, Scope);
807807
808808 static AtomicOrdering getDecodedOrdering(unsigned Val) {
809809 switch (Val) {
810 case bitc::ORDERING_NOTATOMIC: return NotAtomic;
811 case bitc::ORDERING_UNORDERED: return Unordered;
812 case bitc::ORDERING_MONOTONIC: return Monotonic;
813 case bitc::ORDERING_ACQUIRE: return Acquire;
814 case bitc::ORDERING_RELEASE: return Release;
815 case bitc::ORDERING_ACQREL: return AcquireRelease;
810 case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
811 case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
812 case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
813 case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
814 case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
815 case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
816816 default: // Map unknown orderings to sequentially-consistent.
817 case bitc::ORDERING_SEQCST: return SequentiallyConsistent;
817 case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
818818 }
819819 }
820820
49354935 Ty = cast<PointerType>(Op->getType())->getElementType();
49364936
49374937 AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
4938 if (Ordering == NotAtomic || Ordering == Release ||
4939 Ordering == AcquireRelease)
4940 return error("Invalid record");
4941 if (Ordering != NotAtomic && Record[OpNum] == 0)
4938 if (Ordering == AtomicOrdering::NotAtomic ||
4939 Ordering == AtomicOrdering::Release ||
4940 Ordering == AtomicOrdering::AcquireRelease)
4941 return error("Invalid record");
4942 if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
49424943 return error("Invalid record");
49434944 SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
49444945
49914992 typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
49924993 return EC;
49934994 AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
4994 if (Ordering == NotAtomic || Ordering == Acquire ||
4995 Ordering == AcquireRelease)
4995 if (Ordering == AtomicOrdering::NotAtomic ||
4996 Ordering == AtomicOrdering::Acquire ||
4997 Ordering == AtomicOrdering::AcquireRelease)
49964998 return error("Invalid record");
49974999 SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
4998 if (Ordering != NotAtomic && Record[OpNum] == 0)
5000 if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
49995001 return error("Invalid record");
50005002
50015003 unsigned Align;
50215023 Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
50225024 return error("Invalid record");
50235025 AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
5024 if (SuccessOrdering == NotAtomic || SuccessOrdering == Unordered)
5026 if (SuccessOrdering == AtomicOrdering::NotAtomic ||
5027 SuccessOrdering == AtomicOrdering::Unordered)
50255028 return error("Invalid record");
50265029 SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
50275030
50665069 Operation > AtomicRMWInst::LAST_BINOP)
50675070 return error("Invalid record");
50685071 AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
5069 if (Ordering == NotAtomic || Ordering == Unordered)
5072 if (Ordering == AtomicOrdering::NotAtomic ||
5073 Ordering == AtomicOrdering::Unordered)
50705074 return error("Invalid record");
50715075 SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
50725076 I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
50785082 if (2 != Record.size())
50795083 return error("Invalid record");
50805084 AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
5081 if (Ordering == NotAtomic || Ordering == Unordered ||
5082 Ordering == Monotonic)
5085 if (Ordering == AtomicOrdering::NotAtomic ||
5086 Ordering == AtomicOrdering::Unordered ||
5087 Ordering == AtomicOrdering::Monotonic)
50835088 return error("Invalid record");
50845089 SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
50855090 I = new FenceInst(Context, Ordering, SynchScope);
132132
133133 static unsigned GetEncodedOrdering(AtomicOrdering Ordering) {
134134 switch (Ordering) {
135 case NotAtomic: return bitc::ORDERING_NOTATOMIC;
136 case Unordered: return bitc::ORDERING_UNORDERED;
137 case Monotonic: return bitc::ORDERING_MONOTONIC;
138 case Acquire: return bitc::ORDERING_ACQUIRE;
139 case Release: return bitc::ORDERING_RELEASE;
140 case AcquireRelease: return bitc::ORDERING_ACQREL;
141 case SequentiallyConsistent: return bitc::ORDERING_SEQCST;
135 case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC;
136 case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED;
137 case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC;
138 case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE;
139 case AtomicOrdering::Release: return bitc::ORDERING_RELEASE;
140 case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL;
141 case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST;
142142 }
143143 llvm_unreachable("Invalid ordering");
144144 }
100100 assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction");
101101
102102 if (TLI->shouldInsertFencesForAtomic(I)) {
103 auto FenceOrdering = Monotonic;
103 auto FenceOrdering = AtomicOrdering::Monotonic;
104104 bool IsStore, IsLoad;
105 if (LI && isAtLeastAcquire(LI->getOrdering())) {
105 if (LI && isAcquireOrStronger(LI->getOrdering())) {
106106 FenceOrdering = LI->getOrdering();
107 LI->setOrdering(Monotonic);
107 LI->setOrdering(AtomicOrdering::Monotonic);
108108 IsStore = false;
109109 IsLoad = true;
110 } else if (SI && isAtLeastRelease(SI->getOrdering())) {
110 } else if (SI && isReleaseOrStronger(SI->getOrdering())) {
111111 FenceOrdering = SI->getOrdering();
112 SI->setOrdering(Monotonic);
112 SI->setOrdering(AtomicOrdering::Monotonic);
113113 IsStore = true;
114114 IsLoad = false;
115 } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) ||
116 isAtLeastAcquire(RMWI->getOrdering()))) {
115 } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) ||
116 isAcquireOrStronger(RMWI->getOrdering()))) {
117117 FenceOrdering = RMWI->getOrdering();
118 RMWI->setOrdering(Monotonic);
118 RMWI->setOrdering(AtomicOrdering::Monotonic);
119119 IsStore = IsLoad = true;
120120 } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) &&
121 (isAtLeastRelease(CASI->getSuccessOrdering()) ||
122 isAtLeastAcquire(CASI->getSuccessOrdering()))) {
121 (isReleaseOrStronger(CASI->getSuccessOrdering()) ||
122 isAcquireOrStronger(CASI->getSuccessOrdering()))) {
123123 // If a compare and swap is lowered to LL/SC, we can do smarter fence
124124 // insertion, with a stronger one on the success path than on the
125125 // failure path. As a result, fence insertion is directly done by
126126 // expandAtomicCmpXchg in that case.
127127 FenceOrdering = CASI->getSuccessOrdering();
128 CASI->setSuccessOrdering(Monotonic);
129 CASI->setFailureOrdering(Monotonic);
128 CASI->setSuccessOrdering(AtomicOrdering::Monotonic);
129 CASI->setFailureOrdering(AtomicOrdering::Monotonic);
130130 IsStore = IsLoad = true;
131131 }
132132
133 if (FenceOrdering != Monotonic) {
133 if (FenceOrdering != AtomicOrdering::Monotonic) {
134134 MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad);
135135 }
136136 }
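The effect of this block is unchanged by the rename; conceptually (a sketch, the exact IR depends on the target's shouldInsertFencesForAtomic answer):

    // before bracketing:  %v = load atomic i32, i32* %p acquire, align 4
    // after bracketing:   %v = load atomic i32, i32* %p monotonic, align 4
    //                     fence acquire
    // The access is downgraded to Monotonic and the original ordering is
    // reestablished by the fences bracketInstWithFences inserts.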
519519 // should preserve the ordering.
520520 bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI);
521521 AtomicOrdering MemOpOrder =
522 ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder;
522 ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder;
523523
524524 // In implementations which use a barrier to achieve release semantics, we can
525525 // delay emitting this barrier until we know a store is actually going to be
531531 // minimal loop. Unfortunately, this puts too much stress on later
532532 // optimisations so we avoid emitting the extra logic in those cases too.
533533 bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic &&
534 SuccessOrder != Monotonic &&
535 SuccessOrder != Acquire && !F->optForMinSize();
534 SuccessOrder != AtomicOrdering::Monotonic &&
535 SuccessOrder != AtomicOrdering::Acquire &&
536 !F->optForMinSize();
536537
537538 // There's no overhead for sinking the release barrier in a weak cmpxchg, so
538539 // do it even on minsize.
766767 CreateCmpXchgInstFun CreateCmpXchg) {
767768 assert(AI);
768769
769 AtomicOrdering MemOpOrder =
770 AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering();
770 AtomicOrdering MemOpOrder = AI->getOrdering() == AtomicOrdering::Unordered
771 ? AtomicOrdering::Monotonic
772 : AI->getOrdering();
771773 Value *Addr = AI->getPointerOperand();
772774 BasicBlock *BB = AI->getParent();
773775 Function *F = BB->getParent();
39023902 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39033903 SDValue Ops[3];
39043904 Ops[0] = getRoot();
3905 Ops[1] = DAG.getConstant(I.getOrdering(), dl,
3905 Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
39063906 TLI.getPointerTy(DAG.getDataLayout()));
39073907 Ops[2] = DAG.getConstant(I.getSynchScope(), dl,
39083908 TLI.getPointerTy(DAG.getDataLayout()));
21092109
21102110 void AssemblyWriter::writeAtomic(AtomicOrdering Ordering,
21112111 SynchronizationScope SynchScope) {
2112 if (Ordering == NotAtomic)
2112 if (Ordering == AtomicOrdering::NotAtomic)
21132113 return;
21142114
21152115 switch (SynchScope) {
21172117 case CrossThread: break;
21182118 }
21192119
2120 switch (Ordering) {
2121 default: Out << " "; break;
2122 case Unordered: Out << " unordered"; break;
2123 case Monotonic: Out << " monotonic"; break;
2124 case Acquire: Out << " acquire"; break;
2125 case Release: Out << " release"; break;
2126 case AcquireRelease: Out << " acq_rel"; break;
2127 case SequentiallyConsistent: Out << " seq_cst"; break;
2128 }
2120 Out << " " << toIRString(Ordering);
21292121 }
21302122
21312123 void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering,
21322124 AtomicOrdering FailureOrdering,
21332125 SynchronizationScope SynchScope) {
2134 assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic);
2126 assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
2127 FailureOrdering != AtomicOrdering::NotAtomic);
21352128
21362129 switch (SynchScope) {
21372130 case SingleThread: Out << " singlethread"; break;
21382131 case CrossThread: break;
21392132 }
21402133
2141 switch (SuccessOrdering) {
2142 default: Out << " "; break;
2143 case Unordered: Out << " unordered"; break;
2144 case Monotonic: Out << " monotonic"; break;
2145 case Acquire: Out << " acquire"; break;
2146 case Release: Out << " release"; break;
2147 case AcquireRelease: Out << " acq_rel"; break;
2148 case SequentiallyConsistent: Out << " seq_cst"; break;
2149 }
2150
2151 switch (FailureOrdering) {
2152 default: Out << " "; break;
2153 case Unordered: Out << " unordered"; break;
2154 case Monotonic: Out << " monotonic"; break;
2155 case Acquire: Out << " acquire"; break;
2156 case Release: Out << " release"; break;
2157 case AcquireRelease: Out << " acq_rel"; break;
2158 case SequentiallyConsistent: Out << " seq_cst"; break;
2159 }
2134 Out << " " << toIRString(SuccessOrdering);
2135 Out << " " << toIRString(FailureOrdering);
21602136 }
21612137
21622138 void AssemblyWriter::writeParamOperand(const Value *Operand,
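toIRString now centralizes the keyword table that this printer and the LLParser keywords mirror; for example (illustrative):

    assert(strcmp(toIRString(AtomicOrdering::AcquireRelease), "acq_rel") == 0);
    assert(strcmp(toIRString(AtomicOrdering::SequentiallyConsistent),
                  "seq_cst") == 0);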
26012601
26022602 static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) {
26032603 switch (Ordering) {
2604 case LLVMAtomicOrderingNotAtomic: return NotAtomic;
2605 case LLVMAtomicOrderingUnordered: return Unordered;
2606 case LLVMAtomicOrderingMonotonic: return Monotonic;
2607 case LLVMAtomicOrderingAcquire: return Acquire;
2608 case LLVMAtomicOrderingRelease: return Release;
2609 case LLVMAtomicOrderingAcquireRelease: return AcquireRelease;
2604 case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic;
2605 case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered;
2606 case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic;
2607 case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire;
2608 case LLVMAtomicOrderingRelease: return AtomicOrdering::Release;
2609 case LLVMAtomicOrderingAcquireRelease:
2610 return AtomicOrdering::AcquireRelease;
26102611 case LLVMAtomicOrderingSequentiallyConsistent:
2611 return SequentiallyConsistent;
2612 return AtomicOrdering::SequentiallyConsistent;
26122613 }
26132614
26142615 llvm_unreachable("Invalid LLVMAtomicOrdering value!");
26162617
26172618 static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) {
26182619 switch (Ordering) {
2619 case NotAtomic: return LLVMAtomicOrderingNotAtomic;
2620 case Unordered: return LLVMAtomicOrderingUnordered;
2621 case Monotonic: return LLVMAtomicOrderingMonotonic;
2622 case Acquire: return LLVMAtomicOrderingAcquire;
2623 case Release: return LLVMAtomicOrderingRelease;
2624 case AcquireRelease: return LLVMAtomicOrderingAcquireRelease;
2625 case SequentiallyConsistent:
2620 case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic;
2621 case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered;
2622 case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic;
2623 case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire;
2624 case AtomicOrdering::Release: return LLVMAtomicOrderingRelease;
2625 case AtomicOrdering::AcquireRelease:
2626 return LLVMAtomicOrderingAcquireRelease;
2627 case AtomicOrdering::SequentiallyConsistent:
26262628 return LLVMAtomicOrderingSequentiallyConsistent;
26272629 }
26282630
460460 case Instruction::Fence:
461461 return true;
462462 case Instruction::Load:
463 return cast<LoadInst>(this)->getOrdering() != NotAtomic;
463 return cast<LoadInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
464464 case Instruction::Store:
465 return cast<StoreInst>(this)->getOrdering() != NotAtomic;
465 return cast<StoreInst>(this)->getOrdering() != AtomicOrdering::NotAtomic;
466466 }
467467 }
468468
12081208
12091209 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
12101210 unsigned Align, Instruction *InsertBef)
1211 : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread,
1212 InsertBef) {}
1211 : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1212 CrossThread, InsertBef) {}
12131213
12141214 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
12151215 unsigned Align, BasicBlock *InsertAE)
1216 : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) {
1217 }
1216 : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic,
1217 CrossThread, InsertAE) {}
12181218
12191219 LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile,
12201220 unsigned Align, AtomicOrdering Order,
12461246 Load, Ptr, InsertBef) {
12471247 setVolatile(false);
12481248 setAlignment(0);
1249 setAtomic(NotAtomic);
1249 setAtomic(AtomicOrdering::NotAtomic);
12501250 AssertOK();
12511251 if (Name && Name[0]) setName(Name);
12521252 }
12561256 Load, Ptr, InsertAE) {
12571257 setVolatile(false);
12581258 setAlignment(0);
1259 setAtomic(NotAtomic);
1259 setAtomic(AtomicOrdering::NotAtomic);
12601260 AssertOK();
12611261 if (Name && Name[0]) setName(Name);
12621262 }
12671267 assert(Ty == cast(Ptr->getType())->getElementType());
12681268 setVolatile(isVolatile);
12691269 setAlignment(0);
1270 setAtomic(NotAtomic);
1270 setAtomic(AtomicOrdering::NotAtomic);
12711271 AssertOK();
12721272 if (Name && Name[0]) setName(Name);
12731273 }
12781278 Load, Ptr, InsertAE) {
12791279 setVolatile(isVolatile);
12801280 setAlignment(0);
1281 setAtomic(NotAtomic);
1281 setAtomic(AtomicOrdering::NotAtomic);
12821282 AssertOK();
12831283 if (Name && Name[0]) setName(Name);
12841284 }
13231323
13241324 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
13251325 Instruction *InsertBefore)
1326 : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
1327 InsertBefore) {}
1326 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1327 CrossThread, InsertBefore) {}
13281328
13291329 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align,
13301330 BasicBlock *InsertAtEnd)
1331 : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread,
1332 InsertAtEnd) {}
1331 : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic,
1332 CrossThread, InsertAtEnd) {}
13331333
13341334 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
13351335 unsigned Align, AtomicOrdering Order,
13971397 assert(getOperand(2)->getType() ==
13981398 cast(getOperand(0)->getType())->getElementType()
13991399 && "Ptr must be a pointer to NewVal type!");
1400 assert(SuccessOrdering != NotAtomic &&
1400 assert(SuccessOrdering != AtomicOrdering::NotAtomic &&
14011401 "AtomicCmpXchg instructions must be atomic!");
1402 assert(FailureOrdering != NotAtomic &&
1402 assert(FailureOrdering != AtomicOrdering::NotAtomic &&
14031403 "AtomicCmpXchg instructions must be atomic!");
1404 assert(SuccessOrdering >= FailureOrdering &&
1405 "AtomicCmpXchg success ordering must be at least as strong as fail");
1406 assert(FailureOrdering != Release && FailureOrdering != AcquireRelease &&
1404 assert(!isStrongerThan(FailureOrdering, SuccessOrdering) &&
1405 "AtomicCmpXchg failure argument shall be no stronger than the success "
1406 "argument");
1407 assert(FailureOrdering != AtomicOrdering::Release &&
1408 FailureOrdering != AtomicOrdering::AcquireRelease &&
14071409 "AtomicCmpXchg failure ordering cannot include release semantics");
14081410 }
14091411
14531455 assert(getOperand(1)->getType() ==
14541456 cast(getOperand(0)->getType())->getElementType()
14551457 && "Ptr must be a pointer to Val type!");
1456 assert(Ordering != NotAtomic &&
1458 assert(Ordering != AtomicOrdering::NotAtomic &&
14571459 "AtomicRMW instructions must be atomic!");
14581460 }
14591461
29182918 Assert(LI.getAlignment() <= Value::MaximumAlignment,
29192919 "huge alignment values are unsupported", &LI);
29202920 if (LI.isAtomic()) {
2921 Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
2921 Assert(LI.getOrdering() != AtomicOrdering::Release &&
2922 LI.getOrdering() != AtomicOrdering::AcquireRelease,
29222923 "Load cannot have Release ordering", &LI);
29232924 Assert(LI.getAlignment() != 0,
29242925 "Atomic load must specify explicit alignment", &LI);
29452946 Assert(SI.getAlignment() <= Value::MaximumAlignment,
29462947 "huge alignment values are unsupported", &SI);
29472948 if (SI.isAtomic()) {
2948 Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
2949 Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
2950 SI.getOrdering() != AtomicOrdering::AcquireRelease,
29492951 "Store cannot have Acquire ordering", &SI);
29502952 Assert(SI.getAlignment() != 0,
29512953 "Atomic store must specify explicit alignment", &SI);
30213023 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
30223024
30233025 // FIXME: more conditions???
3024 Assert(CXI.getSuccessOrdering() != NotAtomic,
3026 Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
30253027 "cmpxchg instructions must be atomic.", &CXI);
3026 Assert(CXI.getFailureOrdering() != NotAtomic,
3028 Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
30273029 "cmpxchg instructions must be atomic.", &CXI);
3028 Assert(CXI.getSuccessOrdering() != Unordered,
3030 Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
30293031 "cmpxchg instructions cannot be unordered.", &CXI);
3030 Assert(CXI.getFailureOrdering() != Unordered,
3032 Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
30313033 "cmpxchg instructions cannot be unordered.", &CXI);
3032 Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(),
3033 "cmpxchg instructions be at least as constrained on success as fail",
3034 Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3035 "cmpxchg instructions failure argument shall be no stronger than the "
3036 "success argument",
30343037 &CXI);
3035 Assert(CXI.getFailureOrdering() != Release &&
3036 CXI.getFailureOrdering() != AcquireRelease,
3038 Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3039 CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
30373040 "cmpxchg failure ordering cannot include release semantics", &CXI);
30383041
30393042 PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
30523055 }
30533056
30543057 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3055 Assert(RMWI.getOrdering() != NotAtomic,
3058 Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
30563059 "atomicrmw instructions must be atomic.", &RMWI);
3057 Assert(RMWI.getOrdering() != Unordered,
3060 Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
30583061 "atomicrmw instructions cannot be unordered.", &RMWI);
30593062 PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
30603063 Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
30733076
30743077 void Verifier::visitFenceInst(FenceInst &FI) {
30753078 const AtomicOrdering Ordering = FI.getOrdering();
3076 Assert(Ordering == Acquire || Ordering == Release ||
3077 Ordering == AcquireRelease || Ordering == SequentiallyConsistent,
3078 "fence instructions may only have "
3079 "acquire, release, acq_rel, or seq_cst ordering.",
3079 Assert(Ordering == AtomicOrdering::Acquire ||
3080 Ordering == AtomicOrdering::Release ||
3081 Ordering == AtomicOrdering::AcquireRelease ||
3082 Ordering == AtomicOrdering::SequentiallyConsistent,
3083 "fence instructions may only have acquire, release, acq_rel, or "
3084 "seq_cst ordering.",
30803085 &FI);
30813086 visitInstruction(FI);
30823087 }
607607
608608 // ldar and stlr have much more restrictive addressing modes (just a
609609 // register).
610 if (cast<MemSDNode>(Use)->getOrdering() > Monotonic)
610 if (isStrongerThanMonotonic(cast<MemSDNode>(Use)->getOrdering()))
611611 return false;
612612 }
613613
1013110131 AtomicOrdering Ord) const {
1013210132 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1013310133 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
10134 bool IsAcquire = isAtLeastAcquire(Ord);
10134 bool IsAcquire = isAcquireOrStronger(Ord);
1013510135
1013610136 // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd
1013710137 // intrinsic must return {i64, i64} and we have to recombine them into a
1017310173 Value *Val, Value *Addr,
1017410174 AtomicOrdering Ord) const {
1017510175 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10176 bool IsRelease = isAtLeastRelease(Ord);
10176 bool IsRelease = isReleaseOrStronger(Ord);
1017710177
1017810178 // Since the intrinsics must have legal type, the i128 intrinsics take two
1017910179 // parameters: "i64, i64". We must marshal Val into the appropriate form
2828 class acquiring_load<PatFrag base>
2929 : PatFrag<(ops node:$ptr), (base node:$ptr), [{
3030 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
31 return isAtLeastAcquire(Ordering);
31 return isAcquireOrStronger(Ordering);
3232 }]>;
3333
3434 // An atomic load operation that does not need either acquire or release
3636 class relaxed_load
3737 : PatFrag<(ops node:$ptr), (base node:$ptr), [{
3838 AtomicOrdering Ordering = cast(N)->getOrdering();
39 return !isAtLeastAcquire(Ordering);
39 return !isAcquireOrStronger(Ordering);
4040 }]>;
4141
4242 // 8-bit loads
111111 class releasing_store<PatFrag base>
112112 : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
113113 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
114 assert(Ordering != AcquireRelease && "unexpected store ordering");
115 return isAtLeastRelease(Ordering);
114 assert(Ordering != AtomicOrdering::AcquireRelease &&
115 "unexpected store ordering");
116 return isReleaseOrStronger(Ordering);
116117 }]>;
117118
118119 // An atomic store operation that doesn't actually need to be atomic on AArch64.
119120 class relaxed_store<PatFrag base>
120121 : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
121122 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
122 return !isAtLeastRelease(Ordering);
123 return !isReleaseOrStronger(Ordering);
123124 }]>;
124125
125126 // 8-bit stores
30103010 if (Subtarget->isMClass()) {
30113011 // Only a full system barrier exists in the M-class architectures.
30123012 Domain = ARM_MB::SY;
3013 } else if (Subtarget->isSwift() && Ord == Release) {
3013 } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) {
30143014 // Swift happens to implement ISHST barriers in a way that's compatible with
30153015 // Release semantics but weaker than ISH so we'd be fools not to use
30163016 // it. Beware: other processors probably don't!
69316931 }
69326932
69336933 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
6934 // Monotonic load/store is legal for all targets
6935 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
6936 return Op;
6937
6938 // Acquire/Release load/store is not legal for targets without a
6939 // dmb or equivalent available.
6940 return SDValue();
6934 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
6935 // Acquire/Release load/store is not legal for targets without a dmb or
6936 // equivalent available.
6937 return SDValue();
6938
6939 // Monotonic load/store is legal for all targets.
6940 return Op;
69416941 }
69426942
69436943 static void ReplaceREADCYCLECOUNTER(SDNode *N,
1207512075 AtomicOrdering Ord, bool IsStore,
1207612076 bool IsLoad) const {
1207712077 switch (Ord) {
12078 case NotAtomic:
12079 case Unordered:
12078 case AtomicOrdering::NotAtomic:
12079 case AtomicOrdering::Unordered:
1208012080 llvm_unreachable("Invalid fence: unordered/non-atomic");
12081 case Monotonic:
12082 case Acquire:
12081 case AtomicOrdering::Monotonic:
12082 case AtomicOrdering::Acquire:
1208312083 return nullptr; // Nothing to do
12084 case SequentiallyConsistent:
12084 case AtomicOrdering::SequentiallyConsistent:
1208512085 if (!IsStore)
1208612086 return nullptr; // Nothing to do
1208712087 /*FALLTHROUGH*/
12088 case Release:
12089 case AcquireRelease:
12088 case AtomicOrdering::Release:
12089 case AtomicOrdering::AcquireRelease:
1209012090 if (Subtarget->isSwift())
1209112091 return makeDMB(Builder, ARM_MB::ISHST);
1209212092 // FIXME: add a comment with a link to documentation justifying this.
1210012100 AtomicOrdering Ord, bool IsStore,
1210112101 bool IsLoad) const {
1210212102 switch (Ord) {
12103 case NotAtomic:
12104 case Unordered:
12103 case AtomicOrdering::NotAtomic:
12104 case AtomicOrdering::Unordered:
1210512105 llvm_unreachable("Invalid fence: unordered/not-atomic");
12106 case Monotonic:
12107 case Release:
12106 case AtomicOrdering::Monotonic:
12107 case AtomicOrdering::Release:
1210812108 return nullptr; // Nothing to do
12109 case Acquire:
12110 case AcquireRelease:
12111 case SequentiallyConsistent:
12109 case AtomicOrdering::Acquire:
12110 case AtomicOrdering::AcquireRelease:
12111 case AtomicOrdering::SequentiallyConsistent:
1211212112 return makeDMB(Builder, ARM_MB::ISH);
1211312113 }
1211412114 llvm_unreachable("Unknown fence ordering in emitTrailingFence");
1220312203 AtomicOrdering Ord) const {
1220412204 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
1220512205 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
12206 bool IsAcquire = isAtLeastAcquire(Ord);
12206 bool IsAcquire = isAcquireOrStronger(Ord);
1220712207
1220812208 // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
1220912209 // intrinsic must return {i32, i32} and we have to recombine them into a
1224712247 Value *Addr,
1224812248 AtomicOrdering Ord) const {
1224912249 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
12250 bool IsRelease = isAtLeastRelease(Ord);
12250 bool IsRelease = isReleaseOrStronger(Ord);
1225112251
1225212252 // Since the intrinsics must have legal type, the i64 intrinsics take two
1225312253 // parameters: "i32, i32". We must marshal Val into the appropriate form
47604760 class acquiring_load<PatFrag base>
47614761 : PatFrag<(ops node:$ptr), (base node:$ptr), [{
47624762 AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getOrdering();
4763 return isAtLeastAcquire(Ordering);
4763 return isAcquireOrStronger(Ordering);
47644764 }]>;
47654765
47664766 def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
47704770 class releasing_store
47714771 : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{
47724772 AtomicOrdering Ordering = cast(N)->getOrdering();
4773 return isAtLeastRelease(Ordering);
4773 return isReleaseOrStronger(Ordering);
47744774 }]>;
47754775
47764776 def atomic_store_release_8 : releasing_store<atomic_store_8>;
10901090
10911091 static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) {
10921092 switch (Ordering) {
1093 case NotAtomic: return "NotAtomic";
1094 case Unordered: return "Unordered";
1095 case Monotonic: return "Monotonic";
1096 case Acquire: return "Acquire";
1097 case Release: return "Release";
1098 case AcquireRelease: return "AcquireRelease";
1099 case SequentiallyConsistent: return "SequentiallyConsistent";
1093 case AtomicOrdering::NotAtomic: return "NotAtomic";
1094 case AtomicOrdering::Unordered: return "Unordered";
1095 case AtomicOrdering::Monotonic: return "Monotonic";
1096 case AtomicOrdering::Acquire: return "Acquire";
1097 case AtomicOrdering::Release: return "Release";
1098 case AtomicOrdering::AcquireRelease: return "AcquireRelease";
1099 case AtomicOrdering::SequentiallyConsistent:
1100 return "SequentiallyConsistent";
11001101 }
11011102 llvm_unreachable("Unknown ordering");
11021103 }
83228322 Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
83238323 AtomicOrdering Ord, bool IsStore,
83248324 bool IsLoad) const {
8325 if (Ord == SequentiallyConsistent)
8325 if (Ord == AtomicOrdering::SequentiallyConsistent)
83268326 return callIntrinsic(Builder, Intrinsic::ppc_sync);
8327 if (isAtLeastRelease(Ord))
8327 if (isReleaseOrStronger(Ord))
83288328 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
83298329 return nullptr;
83308330 }
83328332 Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
83338333 AtomicOrdering Ord, bool IsStore,
83348334 bool IsLoad) const {
8335 if (IsLoad && isAtLeastAcquire(Ord))
8335 if (IsLoad && isAcquireOrStronger(Ord))
83368336 return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
83378337 // FIXME: this is too conservative, a dependent branch + isync is enough.
83388338 // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
29282928 }
29292929
29302930 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
2931 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
2932 // Expand with a fence.
2933 return SDValue();
2934
29312935 // Monotonic load/stores are legal.
2932 if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
2933 return Op;
2934
2935 // Otherwise, expand with a fence.
2936 return SDValue();
2936 return Op;
29372937 }
29382938
29392939 SDValue SparcTargetLowering::
31293129
31303130 // The only fence that needs an instruction is a sequentially-consistent
31313131 // cross-thread fence.
3132 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
3132 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
3133 FenceScope == CrossThread) {
31333134 return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
3134 Op.getOperand(0)), 0);
3135 Op.getOperand(0)),
3136 0);
31353137 }
31363138
31373139 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
2046320463 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
2046420464 // lowered to just a load without a fence. A mfence flushes the store buffer,
2046520465 // making the optimization clearly correct.
20466 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
20466 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
2046720467 // otherwise, we might be able to be more aggressive on relaxed idempotent
2046820468 // rmw. In practice, they do not look useful, so we don't try to be
2046920469 // especially clever.
2050220502
2050320503 // The only fence that needs an instruction is a sequentially-consistent
2050420504 // cross-thread fence.
20505 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
20505 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
20506 FenceScope == CrossThread) {
2050620507 if (Subtarget.hasMFence())
2050720508 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
2050820509
2098520986 // FIXME: On 32-bit, store -> fist or movq would be more efficient
2098620987 // (The only way to get a 16-byte store is cmpxchg16b)
2098720988 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
20988 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
20989 if (cast<AtomicSDNode>(Node)->getOrdering() ==
20990 AtomicOrdering::SequentiallyConsistent ||
2098920991 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
2099020992 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
2099120993 cast<AtomicSDNode>(Node)->getMemoryVT(),
969969 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
970970 AtomicSDNode *N = cast<AtomicSDNode>(Op);
971971 assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
972 assert(N->getOrdering() <= Monotonic &&
973 "setInsertFencesForAtomic(true) and yet greater than Monotonic");
972 assert((N->getOrdering() == AtomicOrdering::Unordered ||
973 N->getOrdering() == AtomicOrdering::Monotonic) &&
974 "setInsertFencesForAtomic(true) expects unordered / monotonic");
974975 if (N->getMemoryVT() == MVT::i32) {
975976 if (N->getAlignment() < 4)
976977 report_fatal_error("atomic load must be aligned");
9991000 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
10001001 AtomicSDNode *N = cast(Op);
10011002 assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
1002 assert(N->getOrdering() <= Monotonic &&
1003 "setInsertFencesForAtomic(true) and yet greater than Monotonic");
1003 assert((N->getOrdering() == AtomicOrdering::Unordered ||
1004 N->getOrdering() == AtomicOrdering::Monotonic) &&
1005 "setInsertFencesForAtomic(true) expects unordered / monotonic");
10041006 if (N->getMemoryVT() == MVT::i32) {
10051007 if (N->getAlignment() < 4)
10061008 report_fatal_error("atomic store must be aligned");
15021502 // into multiple malloc'd arrays, one for each field. This is basically
15031503 // SRoA for malloc'd memory.
15041504
1505 if (Ordering != NotAtomic)
1505 if (Ordering != AtomicOrdering::NotAtomic)
15061506 return false;
15071507
15081508 // If this is an allocation of a fixed size array of structs, analyze as a
19811981 // Otherwise, if the global was not a boolean, we can shrink it to be a
19821982 // boolean.
19831983 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
1984 if (GS.Ordering == NotAtomic) {
1984 if (GS.Ordering == AtomicOrdering::NotAtomic) {
19851985 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
19861986 ++NumShrunkToBool;
19871987 return true;
25802580
25812581 return Changed;
25822582 }
2583
400400 int cmpTypes(Type *TyL, Type *TyR) const;
401401
402402 int cmpNumbers(uint64_t L, uint64_t R) const;
403 int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
403404 int cmpAPInts(const APInt &L, const APInt &R) const;
404405 int cmpAPFloats(const APFloat &L, const APFloat &R) const;
405406 int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
476477 return 0;
477478 }
478479
480 int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
481 if ((int)L < (int)R) return -1;
482 if ((int)L > (int)R) return 1;
483 return 0;
484 }
485
479486 int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
480487 if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
481488 return Res;
938945 cmpNumbers(LI->getAlignment(), cast<LoadInst>(R)->getAlignment()))
939946 return Res;
940947 if (int Res =
941 cmpNumbers(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
948 cmpOrderings(LI->getOrdering(), cast<LoadInst>(R)->getOrdering()))
942949 return Res;
943950 if (int Res =
944951 cmpNumbers(LI->getSynchScope(), cast<LoadInst>(R)->getSynchScope()))
954961 cmpNumbers(SI->getAlignment(), cast<StoreInst>(R)->getAlignment()))
955962 return Res;
956963 if (int Res =
957 cmpNumbers(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
964 cmpOrderings(SI->getOrdering(), cast<StoreInst>(R)->getOrdering()))
958965 return Res;
959966 return cmpNumbers(SI->getSynchScope(), cast<StoreInst>(R)->getSynchScope());
960967 }
10081015 }
10091016 if (const FenceInst *FI = dyn_cast<FenceInst>(L)) {
10101017 if (int Res =
1011 cmpNumbers(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
1018 cmpOrderings(FI->getOrdering(), cast<FenceInst>(R)->getOrdering()))
10121019 return Res;
10131020 return cmpNumbers(FI->getSynchScope(), cast<FenceInst>(R)->getSynchScope());
10141021 }
10201027 if (int Res = cmpNumbers(CXI->isWeak(),
10211028 cast<AtomicCmpXchgInst>(R)->isWeak()))
10221029 return Res;
1023 if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
1024 cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
1025 return Res;
1026 if (int Res = cmpNumbers(CXI->getFailureOrdering(),
1027 cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
1030 if (int Res =
1031 cmpOrderings(CXI->getSuccessOrdering(),
1032 cast<AtomicCmpXchgInst>(R)->getSuccessOrdering()))
1033 return Res;
1034 if (int Res =
1035 cmpOrderings(CXI->getFailureOrdering(),
1036 cast<AtomicCmpXchgInst>(R)->getFailureOrdering()))
10281037 return Res;
10291038 return cmpNumbers(CXI->getSynchScope(),
10301039 cast<AtomicCmpXchgInst>(R)->getSynchScope());
10361045 if (int Res = cmpNumbers(RMWI->isVolatile(),
10371046 cast<AtomicRMWInst>(R)->isVolatile()))
10381047 return Res;
1039 if (int Res = cmpNumbers(RMWI->getOrdering(),
1048 if (int Res = cmpOrderings(RMWI->getOrdering(),
10401049 cast<AtomicRMWInst>(R)->getOrdering()))
10411050 return Res;
10421051 return cmpNumbers(RMWI->getSynchScope(),
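cmpOrderings is needed because the old calls leaned on the implicit enum-to-integer conversion that cmpNumbers(uint64_t, uint64_t) required; an enum class provides no such conversion. A minimal illustration (not from the patch):

    enum Old { OldX };
    enum class New { X };
    uint64_t A = OldX;                          // OK: plain enums convert
    uint64_t B = static_cast<uint64_t>(New::X); // enum class needs an explicit
                                                // cast, done once in cmpOrderings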
12211221
12221222 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
12231223 switch (a) {
1224 case NotAtomic:
1225 return NotAtomic;
1226 case Unordered:
1227 case Monotonic:
1228 case Release:
1229 return Release;
1230 case Acquire:
1231 case AcquireRelease:
1232 return AcquireRelease;
1233 case SequentiallyConsistent:
1234 return SequentiallyConsistent;
1224 case AtomicOrdering::NotAtomic:
1225 return AtomicOrdering::NotAtomic;
1226 case AtomicOrdering::Unordered:
1227 case AtomicOrdering::Monotonic:
1228 case AtomicOrdering::Release:
1229 return AtomicOrdering::Release;
1230 case AtomicOrdering::Acquire:
1231 case AtomicOrdering::AcquireRelease:
1232 return AtomicOrdering::AcquireRelease;
1233 case AtomicOrdering::SequentiallyConsistent:
1234 return AtomicOrdering::SequentiallyConsistent;
12351235 }
12361236 llvm_unreachable("Unknown ordering");
12371237 }
12381238
12391239 AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
12401240 switch (a) {
1241 case NotAtomic:
1242 return NotAtomic;
1243 case Unordered:
1244 case Monotonic:
1245 case Acquire:
1246 return Acquire;
1247 case Release:
1248 case AcquireRelease:
1249 return AcquireRelease;
1250 case SequentiallyConsistent:
1251 return SequentiallyConsistent;
1241 case AtomicOrdering::NotAtomic:
1242 return AtomicOrdering::NotAtomic;
1243 case AtomicOrdering::Unordered:
1244 case AtomicOrdering::Monotonic:
1245 case AtomicOrdering::Acquire:
1246 return AtomicOrdering::Acquire;
1247 case AtomicOrdering::Release:
1248 case AtomicOrdering::AcquireRelease:
1249 return AtomicOrdering::AcquireRelease;
1250 case AtomicOrdering::SequentiallyConsistent:
1251 return AtomicOrdering::SequentiallyConsistent;
12521252 }
12531253 llvm_unreachable("Unknown ordering");
12541254 }
550550 IRB.CreateCall(SanCovWithCheckFunction, GuardP);
551551 } else {
552552 LoadInst *Load = IRB.CreateLoad(GuardP);
553 Load->setAtomic(Monotonic);
553 Load->setAtomic(AtomicOrdering::Monotonic);
554554 Load->setAlignment(4);
555555 SetNoSanitizeMetadata(Load);
556556 Value *Cmp =
479479 static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
480480 uint32_t v = 0;
481481 switch (ord) {
482 case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
483 case Unordered: // Fall-through.
484 case Monotonic: v = 0; break;
485 // case Consume: v = 1; break; // Not specified yet.
486 case Acquire: v = 2; break;
487 case Release: v = 3; break;
488 case AcquireRelease: v = 4; break;
489 case SequentiallyConsistent: v = 5; break;
482 case AtomicOrdering::NotAtomic:
483 llvm_unreachable("unexpected atomic ordering!");
484 case AtomicOrdering::Unordered: // Fall-through.
485 case AtomicOrdering::Monotonic: v = 0; break;
486 // Not specified yet:
487 // case AtomicOrdering::Consume: v = 1; break;
488 case AtomicOrdering::Acquire: v = 2; break;
489 case AtomicOrdering::Release: v = 3; break;
490 case AtomicOrdering::AcquireRelease: v = 4; break;
491 case AtomicOrdering::SequentiallyConsistent: v = 5; break;
490492 }
491493 return IRB->getInt32(v);
492494 }
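The numeric values handed to the tsan runtime mirror C++11's std::memory_order enumerators, which is why slot 1 stays reserved for consume even though LLVM doesn't model it yet (illustrative table, assuming the runtime keeps that correspondence):

    // Unordered/Monotonic    -> 0 (memory_order_relaxed)
    // (Consume, reserved)    -> 1 (memory_order_consume)
    // Acquire                -> 2 (memory_order_acquire)
    // Release                -> 3 (memory_order_release)
    // AcquireRelease         -> 4 (memory_order_acq_rel)
    // SequentiallyConsistent -> 5 (memory_order_seq_cst)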
672672 // to advance the generation. We do need to prevent DSE across the fence,
673673 // but that's handled above.
674674 if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
675 if (FI->getOrdering() == Release) {
675 if (FI->getOrdering() == AtomicOrdering::Release) {
676676 assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
677677 continue;
678678 }
9999 }
100100
101101 static bool LowerLoadInst(LoadInst *LI) {
102 LI->setAtomic(NotAtomic);
102 LI->setAtomic(AtomicOrdering::NotAtomic);
103103 return true;
104104 }
105105
106106 static bool LowerStoreInst(StoreInst *SI) {
107 SI->setAtomic(NotAtomic);
107 SI->setAtomic(AtomicOrdering::NotAtomic);
108108 return true;
109109 }
110110
1919 /// and release, then return AcquireRelease.
2020 ///
2121 static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
22 if (X == Acquire && Y == Release)
23 return AcquireRelease;
24 if (Y == Acquire && X == Release)
25 return AcquireRelease;
26 return (AtomicOrdering)std::max(X, Y);
22 if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
23 return AtomicOrdering::AcquireRelease;
24 if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
25 return AtomicOrdering::AcquireRelease;
26 return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
2727 }
2828
2929 /// It is safe to destroy a constant iff it is only used by constants itself.
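strongerOrdering computes a join over the orderings loads and stores can carry: Acquire and Release merge to AcquireRelease, and otherwise the larger underlying value already names the stronger ordering. Illustrative results (not from the patch):

    assert(strongerOrdering(AtomicOrdering::Acquire, AtomicOrdering::Release) ==
           AtomicOrdering::AcquireRelease);
    assert(strongerOrdering(AtomicOrdering::Monotonic, AtomicOrdering::Acquire) ==
           AtomicOrdering::Acquire);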
184184 : IsCompared(false), IsLoaded(false), StoredType(NotStored),
185185 StoredOnceValue(nullptr), AccessingFunction(nullptr),
186186 HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
187 Ordering(NotAtomic) {}
187 Ordering(AtomicOrdering::NotAtomic) {}
178178 auto *Load1 = new LoadInst(Addr, "load", BB);
179179 auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
180180 auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
181 auto *CmpXChg1 = new AtomicCmpXchgInst(Addr, ConstantInt::get(IntType, 0),
182 ConstantInt::get(IntType, 1),
183 Monotonic, Monotonic, CrossThread, BB);
181 auto *CmpXChg1 = new AtomicCmpXchgInst(
182 Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
183 AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
184184 auto *AtomicRMW =
185185 new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(IntType, 1),
186 Monotonic, CrossThread, BB);
186 AtomicOrdering::Monotonic, CrossThread, BB);
187187
188188 ReturnInst::Create(C, nullptr, BB);
189189