llvm.org GIT mirror llvm / 21006d4
Representation of 'atomic load' and 'atomic store' in IR. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@137170 91177308-0d34-0410-b5e6-96231b3b80d8 Eli Friedman 9 years ago
12 changed file(s) with 391 addition(s) and 79 deletion(s).
LLVM Atomic Instructions and Concurrency Guide
Atomic instructions (cmpxchg,

1585 atomicrmw, and
1586 fence) take an ordering parameter
1585 atomicrmw,
1586 fence,
1587 atomic load, and
1588 atomic store) take an ordering parameter
15871589 that determines which other atomic instructions on the same address they
15881590 synchronize with. These semantics are borrowed from Java and C++0x,
15891591 but are somewhat more colloquial. If these descriptions aren't precise enough,
check those specs (see spec references in the atomics guide). fence instructions
15911593 treat these orderings somewhat differently since they don't take an address.
15921594 See that instruction's documentation for details.
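For reference, the ordering keywords here map onto the AtomicOrdering enum used by the C++ API that the rest of this patch extends. The sketch below is illustrative only: the enumerator names appear throughout the patch, but the exact numeric values and the reserved slot are assumptions, not quoted from it.

    enum AtomicOrdering {            // sketch, not copied from the patch
      NotAtomic = 0,                 // ordinary non-atomic memory operation
      Unordered = 1,                 // weakest atomic ordering (described below)
      Monotonic = 2,                 // corresponds to C++0x memory_order_relaxed
      // value 3 left unused (LLVM IR has no 'consume' ordering)
      Acquire = 4,                   // not valid on atomic stores
      Release = 5,                   // not valid on atomic loads
      AcquireRelease = 6,            // only meaningful for read-modify-write ops
      SequentiallyConsistent = 7     // strongest ordering
    };

That the ordering fits in three bits is consistent with the `& 7` masks this patch adds to the LoadInst and StoreInst subclass data.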

unordered
The set of values that can be read is governed by the happens-before
16011599 partial order. A value cannot be read unless some operation wrote it.
Syntax:
4574 <result> = load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
4575 <result> = volatile load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
4572 <result> = [volatile] load <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>]
4573 <result> = atomic [volatile] load <ty>* <pointer> [singlethread] <ordering>, align <alignment>
45764574 !<index> = !{ i32 1 }
45774575
45784576
45864584 marked as volatile, then the optimizer is not allowed to modify the
45874585 number or order of execution of this load with other
45884586 volatile operations.


If the load is marked as atomic, it takes an extra

4589 ordering and optional singlethread
4590 argument. The release and acq_rel orderings are
4591 not valid on load instructions. Atomic loads produce
4592 defined results when they may see multiple atomic
4593 stores. The type of the pointee must be an integer type whose bit width
4594 is a power of two greater than or equal to eight and less than or equal
4595 to a target-specific size limit. align must be explicitly
4596 specified on atomic loads, and the load has undefined behavior if the
4597 alignment is not set to a value which is at least the size in bytes of
4598 the pointee. !nontemporal does not have any defined semantics
4599 for atomic loads.
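As a concrete illustration, here is a minimal C++ sketch of creating such a load with the LoadInst constructor this patch adds. The helper name and the choice of i32/acquire/align 4 are hypothetical; the constructor signature, the mandatory alignment, and the banned orderings come from the patch itself.

    // Emits the textual form "%val = atomic load i32* %ptr acquire, align 4".
    LoadInst *EmitAcquireLoad(Value *Ptr, Instruction *InsertPt) {
      return new LoadInst(Ptr, "val", /*isVolatile=*/false, /*Align=*/4,
                          Acquire, CrossThread, InsertPt);
    }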


The optional constant align argument specifies the alignment of the

45914602 operation (that is, the alignment of the memory address). A value of 0 or an
Syntax:
4633 store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] ; yields {void}
4634 volatile store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] ; yields {void}
4644 [volatile] store <ty> <value>, <ty>* <pointer>[, align <alignment>][, !nontemporal !<index>] ; yields {void}
4645 atomic [volatile] store <ty> <value>, <ty>* <pointer> [singlethread] <ordering>, align <alignment> ; yields {void}
46354646
46364647
46374648
Overview:
46464657 volatile, then the optimizer is not allowed to modify the number or
46474658 order of execution of this store with other
46484659 volatile operations.


If the store is marked as atomic, it takes an extra

4662 ordering and optional singlethread
4663 argument. The acquire and acq_rel orderings aren't
4664 valid on store instructions. Atomic loads produce
4665 defined results when they may see multiple atomic
4666 stores. The type of the pointee must be an integer type whose bit width
4667 is a power of two greater than or equal to eight and less than or equal
4668 to a target-specific size limit. align must be explicitly
4669 specified on atomic stores, and the store has undefined behavior if the
4670 alignment is not set to a value which is at least the size in bytes of
4671 the pointee. !nontemporal does not have any defined semantics
4672 for atomic stores.
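For symmetry with the load example above, a hedged sketch using the new StoreInst constructor; the helper name and operand types are hypothetical, and release is a typical choice since acquire and acq_rel are rejected for stores.

    // Emits the textual form "atomic store i32 %v, i32* %ptr release, align 4".
    StoreInst *EmitReleaseStore(Value *Val, Value *Ptr, Instruction *InsertPt) {
      return new StoreInst(Val, Ptr, /*isVolatile=*/false, /*Align=*/4,
                           Release, CrossThread, InsertPt);
    }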


The optional constant "align" argument specifies the alignment of the

46514675 operation (that is, the alignment of the memory address). A value of 0 or an
47284752

The optional "singlethread" argument

47294753 specifies that the fence only synchronizes with other fences in the same
47304754 thread. (This is useful for interacting with signal handlers.)
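For illustration, a small C++ sketch of emitting such a fence. The FenceInst constructor shape is assumed from the same header this patch touches and is not part of this diff, so treat it as a sketch rather than the authoritative API.

    // A sequentially consistent fence that only orders against signal
    // handlers running in the same thread (the 'singlethread' form).
    FenceInst *EmitSignalFence(LLVMContext &Ctx, Instruction *InsertPt) {
      return new FenceInst(Ctx, SequentiallyConsistent, SingleThread, InsertPt);
    }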

4731
4732 FIXME: This instruction is a work in progress; until it is finished, use
4733 llvm.memory.barrier.
47344755
47354756
Example:
47364757

                  
                
306306 FUNC_CODE_INST_ATOMICRMW = 38, // ATOMICRMW: [ptrty,ptr,val, operation,
307307 // align, vol,
308308 // ordering, synchscope]
309 FUNC_CODE_INST_RESUME = 39 // RESUME: [opval]
309 FUNC_CODE_INST_RESUME = 39, // RESUME: [opval]
310 FUNC_CODE_INST_LOADATOMIC = 40, // LOAD: [opty, op, align, vol,
311 // ordering, synchscope]
312 FUNC_CODE_INST_STOREATOMIC = 41 // STORE: [ptrty,ptr,val, align, vol
313 // ordering, synchscope]
310314 };
311315 } // End bitc namespace
312316 } // End llvm namespace
142142 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile = false,
143143 Instruction *InsertBefore = 0);
144144 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
145 BasicBlock *InsertAtEnd);
146 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
145147 unsigned Align, Instruction *InsertBefore = 0);
146148 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
149 unsigned Align, BasicBlock *InsertAtEnd);
150 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
151 unsigned Align, AtomicOrdering Order,
152 SynchronizationScope SynchScope = CrossThread,
153 Instruction *InsertBefore = 0);
154 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
155 unsigned Align, AtomicOrdering Order,
156 SynchronizationScope SynchScope,
147157 BasicBlock *InsertAtEnd);
148 LoadInst(Value *Ptr, const Twine &NameStr, bool isVolatile,
149 unsigned Align, BasicBlock *InsertAtEnd);
150158
151159 LoadInst(Value *Ptr, const char *NameStr, Instruction *InsertBefore);
152160 LoadInst(Value *Ptr, const char *NameStr, BasicBlock *InsertAtEnd);
170178 /// getAlignment - Return the alignment of the access that is being performed
171179 ///
172180 unsigned getAlignment() const {
173 return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
181 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
174182 }
175183
176184 void setAlignment(unsigned Align);
185
186 /// Returns the ordering effect of this load.
187 AtomicOrdering getOrdering() const {
188 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
189 }
190
191 /// Set the ordering constraint on this load. May not be Release or
192 /// AcquireRelease.
193 void setOrdering(AtomicOrdering Ordering) {
194 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
195 (Ordering << 7));
196 }
197
198 SynchronizationScope getSynchScope() const {
199 return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
200 }
201
202 /// Specify whether this load is ordered with respect to all
203 /// concurrently executing threads, or only with respect to signal handlers
204 /// executing in the same thread.
205 void setSynchScope(SynchronizationScope xthread) {
206 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
207 (xthread << 6));
208 }
209
210 bool isAtomic() const { return getOrdering() != NotAtomic; }
211 void setAtomic(AtomicOrdering Ordering,
212 SynchronizationScope SynchScope = CrossThread) {
213 setOrdering(Ordering);
214 setSynchScope(SynchScope);
215 }
216
217 bool isSimple() const { return !isAtomic() && !isVolatile(); }
218 bool isUnordered() const {
219 return getOrdering() <= Unordered && !isVolatile();
220 }
177221
178222 Value *getPointerOperand() { return getOperand(0); }
179223 const Value *getPointerOperand() const { return getOperand(0); }
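Taken together, the accessors above imply the following packing of LoadInst's subclass data. The two helper functions are purely illustrative restatements of the shifts used by setAlignment/getAlignment and are not part of the patch.

    //   bit 0     : volatile flag
    //   bits 1-5  : alignment, stored as Log2_32(Align)+1 (0 means unspecified)
    //   bit 6     : synchronization scope (SingleThread vs. CrossThread)
    //   bits 7-9  : atomic ordering (three bits, masked with & 7)
    // Example: align 4 stores Log2_32(4)+1 == 3, and (1 << 3) >> 1 recovers 4;
    // a stored 0 decodes back to (1 << 0) >> 1 == 0, i.e. "no alignment".
    unsigned EncodeAlign(unsigned Align) { return Align ? Log2_32(Align) + 1 : 0; }
    unsigned DecodeAlign(unsigned Field) { return (1u << Field) >> 1; }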
221265 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
222266 StoreInst(Value *Val, Value *Ptr, bool isVolatile = false,
223267 Instruction *InsertBefore = 0);
268 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
224269 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
225270 unsigned Align, Instruction *InsertBefore = 0);
226 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
227271 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
228272 unsigned Align, BasicBlock *InsertAtEnd);
229
230
231 /// isVolatile - Return true if this is a load from a volatile memory
273 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
274 unsigned Align, AtomicOrdering Order,
275 SynchronizationScope SynchScope = CrossThread,
276 Instruction *InsertBefore = 0);
277 StoreInst(Value *Val, Value *Ptr, bool isVolatile,
278 unsigned Align, AtomicOrdering Order,
279 SynchronizationScope SynchScope,
280 BasicBlock *InsertAtEnd);
281
282
283 /// isVolatile - Return true if this is a store to a volatile memory
232284 /// location.
233285 ///
234286 bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
235287
236 /// setVolatile - Specify whether this is a volatile load or not.
288 /// setVolatile - Specify whether this is a volatile store or not.
237289 ///
238290 void setVolatile(bool V) {
239291 setInstructionSubclassData((getSubclassDataFromInstruction() & ~1) |
246298 /// getAlignment - Return the alignment of the access that is being performed
247299 ///
248300 unsigned getAlignment() const {
249 return (1 << (getSubclassDataFromInstruction() >> 1)) >> 1;
301 return (1 << ((getSubclassDataFromInstruction() >> 1) & 31)) >> 1;
250302 }
251303
252304 void setAlignment(unsigned Align);
305
306 /// Returns the ordering effect of this store.
307 AtomicOrdering getOrdering() const {
308 return AtomicOrdering((getSubclassDataFromInstruction() >> 7) & 7);
309 }
310
311 /// Set the ordering constraint on this store. May not be Acquire or
312 /// AcquireRelease.
313 void setOrdering(AtomicOrdering Ordering) {
314 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
315 (Ordering << 7));
316 }
317
318 SynchronizationScope getSynchScope() const {
319 return SynchronizationScope((getSubclassDataFromInstruction() >> 6) & 1);
320 }
321
322 /// Specify whether this store instruction is ordered with respect to all
323 /// concurrently executing threads, or only with respect to signal handlers
324 /// executing in the same thread.
325 void setSynchScope(SynchronizationScope xthread) {
326 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(1 << 6)) |
327 (xthread << 6));
328 }
329
330 bool isAtomic() const { return getOrdering() != NotAtomic; }
331 void setAtomic(AtomicOrdering Ordering,
332 SynchronizationScope SynchScope = CrossThread) {
333 setOrdering(Ordering);
334 setSynchScope(SynchScope);
335 }
336
337 bool isSimple() const { return !isAtomic() && !isVolatile(); }
338 bool isUnordered() const {
339 return getOrdering() <= Unordered && !isVolatile();
340 }
253341
254342 Value *getValueOperand() { return getOperand(0); }
255343 const Value *getValueOperand() const { return getOperand(0); }
318406 /// Set the ordering constraint on this fence. May only be Acquire, Release,
319407 /// AcquireRelease, or SequentiallyConsistent.
320408 void setOrdering(AtomicOrdering Ordering) {
321 switch (Ordering) {
322 case Acquire:
323 case Release:
324 case AcquireRelease:
325 case SequentiallyConsistent:
326 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
327 (Ordering << 1));
328 return;
329 default:
330 llvm_unreachable("FenceInst ordering must be Acquire, Release,"
331 " AcquireRelease, or SequentiallyConsistent");
332 }
409 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
410 (Ordering << 1));
333411 }
334412
335413 SynchronizationScope getSynchScope() const {
554632 void setOrdering(AtomicOrdering Ordering) {
555633 assert(Ordering != NotAtomic &&
556634 "atomicrmw instructions can only be atomic.");
557 setInstructionSubclassData((getSubclassDataFromInstruction() & ~28) |
635 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
558636 (Ordering << 2));
559637 }
560638
568646
569647 /// Returns the ordering constraint on this RMW.
570648 AtomicOrdering getOrdering() const {
571 return AtomicOrdering((getSubclassDataFromInstruction() & 28) >> 2);
649 return AtomicOrdering((getSubclassDataFromInstruction() >> 2) & 7);
572650 }
573651
574652 /// Returns whether this RMW is atomic between threads or only within a
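The atomicrmw accessor changes above are behavior-preserving rewrites of the same bit arithmetic; a throwaway check (not from the patch) makes the equivalence explicit:

    // 28 == 0b11100 == 7 << 2, so extracting bits 2-4 either way is identical.
    bool SameOrderingField(unsigned Data) {
      return ((Data & 28) >> 2) == ((Data >> 2) & 7);   // always true
    }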
29482948 case lltok::kw_tail: return ParseCall(Inst, PFS, true);
29492949 // Memory.
29502950 case lltok::kw_alloca: return ParseAlloc(Inst, PFS);
2951 case lltok::kw_load: return ParseLoad(Inst, PFS, false);
2952 case lltok::kw_store: return ParseStore(Inst, PFS, false);
2951 case lltok::kw_load: return ParseLoad(Inst, PFS, false, false);
2952 case lltok::kw_store: return ParseStore(Inst, PFS, false, false);
29532953 case lltok::kw_cmpxchg: return ParseCmpXchg(Inst, PFS, false);
29542954 case lltok::kw_atomicrmw: return ParseAtomicRMW(Inst, PFS, false);
29552955 case lltok::kw_fence: return ParseFence(Inst, PFS);
2956 case lltok::kw_atomic: {
2957 bool isVolatile = EatIfPresent(lltok::kw_volatile);
2958 if (EatIfPresent(lltok::kw_load))
2959 return ParseLoad(Inst, PFS, true, isVolatile);
2960 else if (EatIfPresent(lltok::kw_store))
2961 return ParseStore(Inst, PFS, true, isVolatile);
2962 }
29562963 case lltok::kw_volatile:
29572964 if (EatIfPresent(lltok::kw_load))
2958 return ParseLoad(Inst, PFS, true);
2965 return ParseLoad(Inst, PFS, false, true);
29592966 else if (EatIfPresent(lltok::kw_store))
2960 return ParseStore(Inst, PFS, true);
2967 return ParseStore(Inst, PFS, false, true);
29612968 else if (EatIfPresent(lltok::kw_cmpxchg))
29622969 return ParseCmpXchg(Inst, PFS, true);
29632970 else if (EatIfPresent(lltok::kw_atomicrmw))
36343641 }
36353642
36363643 /// ParseLoad
3637 /// ::= 'volatile'? 'load' TypeAndValue (',' OptionalInfo)?
3644 /// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
3645 // ::= 'atomic' 'volatile'? 'load' TypeAndValue
3646 // 'singlethread'? AtomicOrdering (',' 'align' i32)?
36383647 int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
3639 bool isVolatile) {
3648 bool isAtomic, bool isVolatile) {
36403649 Value *Val; LocTy Loc;
36413650 unsigned Alignment = 0;
36423651 bool AteExtraComma = false;
3652 AtomicOrdering Ordering = NotAtomic;
3653 SynchronizationScope Scope = CrossThread;
36433654 if (ParseTypeAndValue(Val, Loc, PFS) ||
3655 ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
36443656 ParseOptionalCommaAlign(Alignment, AteExtraComma))
36453657 return true;
36463658
36473659 if (!Val->getType()->isPointerTy() ||
36483660 !cast<PointerType>(Val->getType())->getElementType()->isFirstClassType())
36493661 return Error(Loc, "load operand must be a pointer to a first class type");
3650
3651 Inst = new LoadInst(Val, "", isVolatile, Alignment);
3662 if (isAtomic && !Alignment)
3663 return Error(Loc, "atomic load must have explicit non-zero alignment");
3664 if (Ordering == Release || Ordering == AcquireRelease)
3665 return Error(Loc, "atomic load cannot use Release ordering");
3666
3667 Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
36523668 return AteExtraComma ? InstExtraComma : InstNormal;
36533669 }
36543670
36553671 /// ParseStore
36563672 /// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
3673 /// ::= 'atomic' 'volatile'? 'store' TypeAndValue ',' TypeAndValue
3674 /// 'singlethread'? AtomicOrdering (',' 'align' i32)?
36573675 int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
3658 bool isVolatile) {
3676 bool isAtomic, bool isVolatile) {
36593677 Value *Val, *Ptr; LocTy Loc, PtrLoc;
36603678 unsigned Alignment = 0;
36613679 bool AteExtraComma = false;
3680 AtomicOrdering Ordering = NotAtomic;
3681 SynchronizationScope Scope = CrossThread;
36623682 if (ParseTypeAndValue(Val, Loc, PFS) ||
36633683 ParseToken(lltok::comma, "expected ',' after store operand") ||
36643684 ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
3685 ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
36653686 ParseOptionalCommaAlign(Alignment, AteExtraComma))
36663687 return true;
36673688
36713692 return Error(Loc, "store operand must be a first class value");
36723693 if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
36733694 return Error(Loc, "stored value and pointer type do not match");
3674
3675 Inst = new StoreInst(Val, Ptr, isVolatile, Alignment);
3695 if (isAtomic && !Alignment)
3696 return Error(Loc, "atomic store must have explicit non-zero alignment");
3697 if (Ordering == Acquire || Ordering == AcquireRelease)
3698 return Error(Loc, "atomic store cannot use Acquire ordering");
3699
3700 Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
36763701 return AteExtraComma ? InstExtraComma : InstNormal;
36773702 }
36783703
361361 int ParsePHI(Instruction *&I, PerFunctionState &PFS);
362362 bool ParseCall(Instruction *&I, PerFunctionState &PFS, bool isTail);
363363 int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
364 int ParseLoad(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
365 int ParseStore(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
364 int ParseLoad(Instruction *&I, PerFunctionState &PFS,
365 bool isAtomic, bool isVolatile);
366 int ParseStore(Instruction *&I, PerFunctionState &PFS,
367 bool isAtomic, bool isVolatile);
366368 int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
367369 int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS, bool isVolatile);
368370 int ParseFence(Instruction *&I, PerFunctionState &PFS);
25662566 InstructionList.push_back(I);
25672567 break;
25682568 }
2569 case bitc::FUNC_CODE_INST_LOADATOMIC: {
2570 // LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
2571 unsigned OpNum = 0;
2572 Value *Op;
2573 if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
2574 OpNum+4 != Record.size())
2575 return Error("Invalid LOADATOMIC record");
2576
2577
2578 AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
2579 if (Ordering == NotAtomic || Ordering == Release ||
2580 Ordering == AcquireRelease)
2581 return Error("Invalid LOADATOMIC record");
2582 if (Ordering != NotAtomic && Record[OpNum] == 0)
2583 return Error("Invalid LOADATOMIC record");
2584 SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
2585
2586 I = new LoadInst(Op, "", Record[OpNum+1], (1 << Record[OpNum]) >> 1,
2587 Ordering, SynchScope);
2588 InstructionList.push_back(I);
2589 break;
2590 }
25692591 case bitc::FUNC_CODE_INST_STORE: { // STORE2:[ptrty, ptr, val, align, vol]
25702592 unsigned OpNum = 0;
25712593 Value *Val, *Ptr;
25762598 return Error("Invalid STORE record");
25772599
25782600 I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1);
2601 InstructionList.push_back(I);
2602 break;
2603 }
2604 case bitc::FUNC_CODE_INST_STOREATOMIC: {
2605 // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
2606 unsigned OpNum = 0;
2607 Value *Val, *Ptr;
2608 if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
2609 getValue(Record, OpNum,
2610 cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
2611 OpNum+4 != Record.size())
2612 return Error("Invalid STOREATOMIC record");
2613
2614 AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
2615 if (Ordering == NotAtomic || Ordering == Acquire ||
2616 Ordering == AcquireRelease)
2617 return Error("Invalid STOREATOMIC record");
2618 SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
2619 if (Ordering != NotAtomic && Record[OpNum] == 0)
2620 return Error("Invalid STOREATOMIC record");
2621
2622 I = new StoreInst(Val, Ptr, Record[OpNum+1], (1 << Record[OpNum]) >> 1,
2623 Ordering, SynchScope);
25792624 InstructionList.push_back(I);
25802625 break;
25812626 }
25912636 OpNum+3 != Record.size())
25922637 return Error("Invalid CMPXCHG record");
25932638 AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+1]);
2594 if (Ordering == NotAtomic)
2639 if (Ordering == NotAtomic || Ordering == Unordered)
25952640 return Error("Invalid CMPXCHG record");
25962641 SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+2]);
25972642 I = new AtomicCmpXchgInst(Ptr, Cmp, New, Ordering, SynchScope);
26132658 Operation > AtomicRMWInst::LAST_BINOP)
26142659 return Error("Invalid ATOMICRMW record");
26152660 AtomicOrdering Ordering = GetDecodedOrdering(Record[OpNum+2]);
2616 if (Ordering == NotAtomic)
2661 if (Ordering == NotAtomic || Ordering == Unordered)
26172662 return Error("Invalid ATOMICRMW record");
26182663 SynchronizationScope SynchScope = GetDecodedSynchScope(Record[OpNum+3]);
26192664 I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
11741174 break;
11751175
11761176 case Instruction::Load:
1177 Code = bitc::FUNC_CODE_INST_LOAD;
1178 if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
1179 AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
1180
1177 if (cast<LoadInst>(I).isAtomic()) {
1178 Code = bitc::FUNC_CODE_INST_LOADATOMIC;
1179 PushValueAndType(I.getOperand(0), InstID, Vals, VE);
1180 } else {
1181 Code = bitc::FUNC_CODE_INST_LOAD;
1182 if (!PushValueAndType(I.getOperand(0), InstID, Vals, VE)) // ptr
1183 AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
1184 }
11811185 Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
11821186 Vals.push_back(cast<LoadInst>(I).isVolatile());
1187 if (cast<LoadInst>(I).isAtomic()) {
1188 Vals.push_back(GetEncodedOrdering(cast<LoadInst>(I).getOrdering()));
1189 Vals.push_back(GetEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
1190 }
11831191 break;
11841192 case Instruction::Store:
1185 Code = bitc::FUNC_CODE_INST_STORE;
1193 if (cast<StoreInst>(I).isAtomic())
1194 Code = bitc::FUNC_CODE_INST_STOREATOMIC;
1195 else
1196 Code = bitc::FUNC_CODE_INST_STORE;
11861197 PushValueAndType(I.getOperand(1), InstID, Vals, VE); // ptrty + ptr
11871198 Vals.push_back(VE.getValueID(I.getOperand(0))); // val.
11881199 Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
11891200 Vals.push_back(cast<StoreInst>(I).isVolatile());
1201 if (cast<StoreInst>(I).isAtomic()) {
1202 Vals.push_back(GetEncodedOrdering(cast<StoreInst>(I).getOrdering()));
1203 Vals.push_back(GetEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
1204 }
11901205 break;
11911206 case Instruction::AtomicCmpXchg:
11921207 Code = bitc::FUNC_CODE_INST_CMPXCHG;
186186
187187 static bool LowerFenceInst(FenceInst *FI) {
188188 FI->eraseFromParent();
189 return true;
190 }
191
192 static bool LowerLoadInst(LoadInst *LI) {
193 LI->setAtomic(NotAtomic);
194 return true;
195 }
196
197 static bool LowerStoreInst(StoreInst *SI) {
198 SI->setAtomic(NotAtomic);
189199 return true;
190200 }
191201
207217 Changed |= LowerAtomicCmpXchgInst(CXI);
208218 else if (AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(Inst))
209219 Changed |= LowerAtomicRMWInst(RMWI);
220 else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
221 if (LI->isAtomic())
222 LowerLoadInst(LI);
223 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
224 if (SI->isAtomic())
225 LowerStoreInst(SI);
226 }
210227 }
211228 return Changed;
212229 }
16581658 Out << '%' << SlotNum << " = ";
16591659 }
16601660
1661 // If this is an atomic load or store, print out the atomic marker.
1662 if ((isa<LoadInst>(I) && cast<LoadInst>(I).isAtomic()) ||
1663 (isa<StoreInst>(I) && cast<StoreInst>(I).isAtomic()))
1664 Out << "atomic ";
1665
16611666 // If this is a volatile load or store, print out the volatile marker.
16621667 if ((isa<LoadInst>(I) && cast<LoadInst>(I).isVolatile()) ||
1663 (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile())) {
1664 Out << "volatile ";
1665 } else if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall()) {
1666 // If this is a call, check if it's a tail call.
1668 (isa<StoreInst>(I) && cast<StoreInst>(I).isVolatile()))
1669 Out << "volatile ";
1670
1671 if (isa<CallInst>(I) && cast<CallInst>(I).isTailCall())
16671672 Out << "tail ";
1668 }
16691673
16701674 // Print out the opcode...
16711675 Out << I.getOpcodeName();
19121916 }
19131917 }
19141918
1915 // Print post operand alignment for load/store.
1916 if (isa<LoadInst>(I) && cast<LoadInst>(I).getAlignment()) {
1917 Out << ", align " << cast<LoadInst>(I).getAlignment();
1918 } else if (isa<StoreInst>(I) && cast<StoreInst>(I).getAlignment()) {
1919 Out << ", align " << cast<StoreInst>(I).getAlignment();
1919 // Print atomic ordering/alignment for memory operations
1920 if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1921 if (LI->isAtomic())
1922 writeAtomic(LI->getOrdering(), LI->getSynchScope());
1923 if (LI->getAlignment())
1924 Out << ", align " << LI->getAlignment();
1925 } else if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
1926 if (SI->isAtomic())
1927 writeAtomic(SI->getOrdering(), SI->getSynchScope());
1928 if (SI->getAlignment())
1929 Out << ", align " << SI->getAlignment();
19201930 } else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(&I)) {
19211931 writeAtomic(CXI->getOrdering(), CXI->getSynchScope());
19221932 } else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(&I)) {
821821 void LoadInst::AssertOK() {
822822 assert(getOperand(0)->getType()->isPointerTy() &&
823823 "Ptr must have pointer type.");
824 assert(!(isAtomic() && getAlignment() == 0) &&
825 "Alignment required for atomic load");
824826 }
825827
826828 LoadInst::LoadInst(Value *Ptr, const Twine &Name, Instruction *InsertBef)
828830 Load, Ptr, InsertBef) {
829831 setVolatile(false);
830832 setAlignment(0);
833 setAtomic(NotAtomic);
831834 AssertOK();
832835 setName(Name);
833836 }
837840 Load, Ptr, InsertAE) {
838841 setVolatile(false);
839842 setAlignment(0);
843 setAtomic(NotAtomic);
840844 AssertOK();
841845 setName(Name);
842846 }
847851 Load, Ptr, InsertBef) {
848852 setVolatile(isVolatile);
849853 setAlignment(0);
854 setAtomic(NotAtomic);
855 AssertOK();
856 setName(Name);
857 }
858
859 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
860 BasicBlock *InsertAE)
861 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
862 Load, Ptr, InsertAE) {
863 setVolatile(isVolatile);
864 setAlignment(0);
865 setAtomic(NotAtomic);
850866 AssertOK();
851867 setName(Name);
852868 }
857873 Load, Ptr, InsertBef) {
858874 setVolatile(isVolatile);
859875 setAlignment(Align);
876 setAtomic(NotAtomic);
860877 AssertOK();
861878 setName(Name);
862879 }
867884 Load, Ptr, InsertAE) {
868885 setVolatile(isVolatile);
869886 setAlignment(Align);
887 setAtomic(NotAtomic);
870888 AssertOK();
871889 setName(Name);
872890 }
873891
874 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
892 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
893 unsigned Align, AtomicOrdering Order,
894 SynchronizationScope SynchScope,
895 Instruction *InsertBef)
896 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
897 Load, Ptr, InsertBef) {
898 setVolatile(isVolatile);
899 setAlignment(Align);
900 setAtomic(Order, SynchScope);
901 AssertOK();
902 setName(Name);
903 }
904
905 LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
906 unsigned Align, AtomicOrdering Order,
907 SynchronizationScope SynchScope,
875908 BasicBlock *InsertAE)
876909 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
877910 Load, Ptr, InsertAE) {
878911 setVolatile(isVolatile);
879 setAlignment(0);
912 setAlignment(Align);
913 setAtomic(Order, SynchScope);
880914 AssertOK();
881915 setName(Name);
882916 }
883
884
885917
886918 LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef)
887919 : UnaryInstruction(cast<PointerType>(Ptr->getType())->getElementType(),
888920 Load, Ptr, InsertBef) {
889921 setVolatile(false);
890922 setAlignment(0);
923 setAtomic(NotAtomic);
891924 AssertOK();
892925 if (Name && Name[0]) setName(Name);
893926 }
897930 Load, Ptr, InsertAE) {
898931 setVolatile(false);
899932 setAlignment(0);
933 setAtomic(NotAtomic);
900934 AssertOK();
901935 if (Name && Name[0]) setName(Name);
902936 }
907941 Load, Ptr, InsertBef) {
908942 setVolatile(isVolatile);
909943 setAlignment(0);
944 setAtomic(NotAtomic);
910945 AssertOK();
911946 if (Name && Name[0]) setName(Name);
912947 }
917952 Load, Ptr, InsertAE) {
918953 setVolatile(isVolatile);
919954 setAlignment(0);
955 setAtomic(NotAtomic);
920956 AssertOK();
921957 if (Name && Name[0]) setName(Name);
922958 }
925961 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
926962 assert(Align <= MaximumAlignment &&
927963 "Alignment is greater than MaximumAlignment!");
928 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
964 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
929965 ((Log2_32(Align)+1)<<1));
930966 assert(getAlignment() == Align && "Alignment representation error!");
931967 }
941977 assert(getOperand(0)->getType() ==
942978 cast<PointerType>(getOperand(1)->getType())->getElementType()
943979 && "Ptr must be a pointer to Val type!");
980 assert(!(isAtomic() && getAlignment() == 0) &&
981 "Alignment required for atomic load");
944982 }
945983
946984
953991 Op<1>() = addr;
954992 setVolatile(false);
955993 setAlignment(0);
994 setAtomic(NotAtomic);
956995 AssertOK();
957996 }
958997
9651004 Op<1>() = addr;
9661005 setVolatile(false);
9671006 setAlignment(0);
1007 setAtomic(NotAtomic);
9681008 AssertOK();
9691009 }
9701010
9781018 Op<1>() = addr;
9791019 setVolatile(isVolatile);
9801020 setAlignment(0);
1021 setAtomic(NotAtomic);
9811022 AssertOK();
9821023 }
9831024
9911032 Op<1>() = addr;
9921033 setVolatile(isVolatile);
9931034 setAlignment(Align);
1035 setAtomic(NotAtomic);
1036 AssertOK();
1037 }
1038
1039 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1040 unsigned Align, AtomicOrdering Order,
1041 SynchronizationScope SynchScope,
1042 Instruction *InsertBefore)
1043 : Instruction(Type::getVoidTy(val->getContext()), Store,
1044 OperandTraits<StoreInst>::op_begin(this),
1045 OperandTraits<StoreInst>::operands(this),
1046 InsertBefore) {
1047 Op<0>() = val;
1048 Op<1>() = addr;
1049 setVolatile(isVolatile);
1050 setAlignment(Align);
1051 setAtomic(Order, SynchScope);
1052 AssertOK();
1053 }
1054
1055 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1056 BasicBlock *InsertAtEnd)
1057 : Instruction(Type::getVoidTy(val->getContext()), Store,
1058 OperandTraits<StoreInst>::op_begin(this),
1059 OperandTraits<StoreInst>::operands(this),
1060 InsertAtEnd) {
1061 Op<0>() = val;
1062 Op<1>() = addr;
1063 setVolatile(isVolatile);
1064 setAlignment(0);
1065 setAtomic(NotAtomic);
9941066 AssertOK();
9951067 }
9961068
10041076 Op<1>() = addr;
10051077 setVolatile(isVolatile);
10061078 setAlignment(Align);
1079 setAtomic(NotAtomic);
10071080 AssertOK();
10081081 }
10091082
10101083 StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile,
1084 unsigned Align, AtomicOrdering Order,
1085 SynchronizationScope SynchScope,
10111086 BasicBlock *InsertAtEnd)
10121087 : Instruction(Type::getVoidTy(val->getContext()), Store,
10131088 OperandTraits<StoreInst>::op_begin(this),
10161091 Op<0>() = val;
10171092 Op<1>() = addr;
10181093 setVolatile(isVolatile);
1019 setAlignment(0);
1094 setAlignment(Align);
1095 setAtomic(Order, SynchScope);
10201096 AssertOK();
10211097 }
10221098
10241100 assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
10251101 assert(Align <= MaximumAlignment &&
10261102 "Alignment is greater than MaximumAlignment!");
1027 setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
1103 setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
10281104 ((Log2_32(Align)+1) << 1));
10291105 assert(getAlignment() == Align && "Alignment representation error!");
10301106 }
31573233 }
31583234
31593235 LoadInst *LoadInst::clone_impl() const {
3160 return new LoadInst(getOperand(0),
3161 Twine(), isVolatile(),
3162 getAlignment());
3236 return new LoadInst(getOperand(0), Twine(), isVolatile(),
3237 getAlignment(), getOrdering(), getSynchScope());
31633238 }
31643239
31653240 StoreInst *StoreInst::clone_impl() const {
3166 return new StoreInst(getOperand(0), getOperand(1),
3167 isVolatile(), getAlignment());
3241 return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
3242 getAlignment(), getOrdering(), getSynchScope());
3243
31683244 }
31693245
31703246 AtomicCmpXchgInst *AtomicCmpXchgInst::clone_impl() const {
12961296 Type *ElTy = PTy->getElementType();
12971297 Assert2(ElTy == LI.getType(),
12981298 "Load result type does not match pointer operand type!", &LI, ElTy);
1299 if (LI.isAtomic()) {
1300 Assert1(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease,
1301 "Load cannot have Release ordering", &LI);
1302 Assert1(LI.getAlignment() != 0,
1303 "Atomic load must specify explicit alignment", &LI);
1304 } else {
1305 Assert1(LI.getSynchScope() == CrossThread,
1306 "Non-atomic load cannot have SynchronizationScope specified", &LI);
1307 }
12991308 visitInstruction(LI);
13001309 }
13011310
13061315 Assert2(ElTy == SI.getOperand(0)->getType(),
13071316 "Stored value type does not match pointer operand type!",
13081317 &SI, ElTy);
1318 if (SI.isAtomic()) {
1319 Assert1(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease,
1320 "Store cannot have Acquire ordering", &SI);
1321 Assert1(SI.getAlignment() != 0,
1322 "Atomic store must specify explicit alignment", &SI);
1323 } else {
1324 Assert1(SI.getSynchScope() == CrossThread,
1325 "Non-atomic store cannot have SynchronizationScope specified", &SI);
1326 }
13091327 visitInstruction(SI);
13101328 }
13111329