llvm.org GIT mirror llvm / 3bababf
Add an atomic lowering pass git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@110113 91177308-0d34-0410-b5e6-96231b3b80d8 Peter Collingbourne 9 years ago
9 changed file(s) with 266 addition(s) and 0 deletion(s). Raw diff Collapse all Expand all
166166
-loop-unrollUnroll loops
167167
-loop-unswitchUnswitch loops
168168
-loopsimplifyCanonicalize natural loops
169
-loweratomicLower atomic intrinsics
169170
-lowerinvokeLower invoke and unwind, for unwindless code generators
170171
-lowersetjmpLower Set Jump
171172
-lowerswitchLower SwitchInst's to branches
15431544

15441545 This is a target-dependent transformation because it depends on the size of
15451546 data types and alignment constraints.
1547

1548
1549
1550
1551
1552 -loweratomic: Lower atomic intrinsics
1553
1554
1555

1556 This pass lowers atomic intrinsics to non-atomic form for use in a known
1557 non-preemptible environment.
1558

1559
1560

1561 The pass does not verify that the environment is non-preemptible (in
1562 general this would require knowledge of the entire call graph of the
1563 program including any libraries which may not be available in bitcode form);
1564 it simply lowers every atomic intrinsic.
15461565

15471566
15481567
147147 (void) llvm::createABCDPass();
148148 (void) llvm::createLintPass();
149149 (void) llvm::createSinkingPass();
150 (void) llvm::createLowerAtomicPass();
150151
151152 (void)new llvm::IntervalPartition();
152153 (void)new llvm::FindUsedTypes();
337337 //
338338 FunctionPass *createSinkingPass();
339339
340 //===----------------------------------------------------------------------===//
341 //
342 // LowerAtomic - Lower atomic intrinsics to non-atomic form
343 //
344 Pass *createLowerAtomicPass();
345
340346 } // End llvm namespace
341347
342348 #endif
1616 LoopStrengthReduce.cpp
1717 LoopUnrollPass.cpp
1818 LoopUnswitch.cpp
19 LowerAtomic.cpp
1920 MemCpyOptimizer.cpp
2021 Reassociate.cpp
2122 Reg2Mem.cpp
0 //===- LowerAtomic.cpp - Lower atomic intrinsics --------------------------===//
1 //
2 // The LLVM Compiler Infrastructure
3 //
4 // This file is distributed under the University of Illinois Open Source
5 // License. See LICENSE.TXT for details.
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass lowers atomic intrinsics to non-atomic form for use in a known
10 // non-preemptible environment.
11 //
12 //===----------------------------------------------------------------------===//
13
#define DEBUG_TYPE "loweratomic"
#include "llvm/Transforms/Scalar.h"
#include "llvm/BasicBlock.h"
#include "llvm/Function.h"
#include "llvm/Instruction.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Pass.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/IRBuilder.h"
23
24 using namespace llvm;
25
26 namespace {
27
28 bool LowerAtomicIntrinsic(CallInst *CI) {
29 IRBuilder<> Builder(CI->getParent(), CI);
30
31 Function *Callee = CI->getCalledFunction();
32 if (!Callee)
33 return false;
34
35 unsigned IID = Callee->getIntrinsicID();
36 switch (IID) {
37 case Intrinsic::memory_barrier:
38 break;
39
40 case Intrinsic::atomic_load_add:
41 case Intrinsic::atomic_load_sub:
42 case Intrinsic::atomic_load_and:
43 case Intrinsic::atomic_load_nand:
44 case Intrinsic::atomic_load_or:
45 case Intrinsic::atomic_load_xor:
46 case Intrinsic::atomic_load_max:
47 case Intrinsic::atomic_load_min:
48 case Intrinsic::atomic_load_umax:
49 case Intrinsic::atomic_load_umin: {
50 Value *Ptr = CI->getArgOperand(0);
51 Value *Delta = CI->getArgOperand(1);
52
53 LoadInst *Orig = Builder.CreateLoad(Ptr);
54 Value *Res;
55 switch (IID) {
56 case Intrinsic::atomic_load_add:
57 Res = Builder.CreateAdd(Orig, Delta);
58 break;
59 case Intrinsic::atomic_load_sub:
60 Res = Builder.CreateSub(Orig, Delta);
61 break;
62 case Intrinsic::atomic_load_and:
63 Res = Builder.CreateAnd(Orig, Delta);
64 break;
65 case Intrinsic::atomic_load_nand:
66 Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
67 break;
68 case Intrinsic::atomic_load_or:
69 Res = Builder.CreateOr(Orig, Delta);
70 break;
71 case Intrinsic::atomic_load_xor:
72 Res = Builder.CreateXor(Orig, Delta);
73 break;
74 case Intrinsic::atomic_load_max:
75 Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
76 Delta,
77 Orig);
78 break;
79 case Intrinsic::atomic_load_min:
80 Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
81 Orig,
82 Delta);
83 break;
84 case Intrinsic::atomic_load_umax:
85 Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
86 Delta,
87 Orig);
88 break;
89 case Intrinsic::atomic_load_umin:
90 Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
91 Orig,
92 Delta);
93 break;
94 default: assert(0 && "Unrecognized atomic modify operation");
95 }
96 Builder.CreateStore(Res, Ptr);
97
98 CI->replaceAllUsesWith(Orig);
99 break;
100 }
101
102 case Intrinsic::atomic_swap: {
103 Value *Ptr = CI->getArgOperand(0);
104 Value *Val = CI->getArgOperand(1);
105
106 LoadInst *Orig = Builder.CreateLoad(Ptr);
107 Builder.CreateStore(Val, Ptr);
108
109 CI->replaceAllUsesWith(Orig);
110 break;
111 }
112
113 case Intrinsic::atomic_cmp_swap: {
114 Value *Ptr = CI->getArgOperand(0);
115 Value *Cmp = CI->getArgOperand(1);
116 Value *Val = CI->getArgOperand(2);
117
118 LoadInst *Orig = Builder.CreateLoad(Ptr);
119 Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
120 Value *Res = Builder.CreateSelect(Equal, Val, Orig);
121 Builder.CreateStore(Res, Ptr);
122
123 CI->replaceAllUsesWith(Orig);
124 break;
125 }
126
127 default:
128 return false;
129 }
130
131 assert(CI->use_empty() &&
132 "Lowering should have eliminated any uses of the intrinsic call!");
133 CI->eraseFromParent();
134
135 return true;
136 }
137
138 struct LowerAtomic : public BasicBlockPass {
139 static char ID;
140 LowerAtomic() : BasicBlockPass(&ID) {}
141 bool runOnBasicBlock(BasicBlock &BB) {
142 bool Changed = false;
143 for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
144 Instruction *Inst = DI++;
145 if (CallInst *CI = dyn_cast(Inst))
146 Changed |= LowerAtomicIntrinsic(CI);
147 }
148 return Changed;
149 }
150
151 };
152
153 }
154
155 char LowerAtomic::ID = 0;
156 static RegisterPass
157 X("loweratomic", "Lower atomic intrinsics to non-atomic form");
158
159 Pass *llvm::createLowerAtomicPass() { return new LowerAtomic(); }
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic rewrites atomic read-modify-write intrinsics
; into a plain load / scalar-op / store sequence, and that users of the
; call see the value loaded before the modification.

declare i8 @llvm.atomic.load.add.i8.p0i8(i8* %ptr, i8 %delta)
declare i8 @llvm.atomic.load.nand.i8.p0i8(i8* %ptr, i8 %delta)
declare i8 @llvm.atomic.load.min.i8.p0i8(i8* %ptr, i8 %delta)

define i8 @add() {
; CHECK: @add
  %i = alloca i8
  %j = call i8 @llvm.atomic.load.add.i8.p0i8(i8* %i, i8 42)
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: add
; CHECK-NEXT: store
  ret i8 %j
; CHECK: ret i8 [[INST]]
}

define i8 @nand() {
; CHECK: @nand
  %i = alloca i8
  %j = call i8 @llvm.atomic.load.nand.i8.p0i8(i8* %i, i8 42)
; nand lowers to and followed by a bitwise-not (xor with -1).
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: and
; CHECK-NEXT: xor
; CHECK-NEXT: store
  ret i8 %j
; CHECK: ret i8 [[INST]]
}

define i8 @min() {
; CHECK: @min
  %i = alloca i8
  %j = call i8 @llvm.atomic.load.min.i8.p0i8(i8* %i, i8 42)
; min lowers to a signed compare plus select.
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: icmp
; CHECK-NEXT: select
; CHECK-NEXT: store
  ret i8 %j
; CHECK: ret i8 [[INST]]
}
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic lowers swap and compare-and-swap intrinsics.
; In both cases the call's result must be the previously stored value.

declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %ptr, i8 %cmp, i8 %val)
declare i8 @llvm.atomic.swap.i8.p0i8(i8* %ptr, i8 %val)

define i8 @cmpswap() {
; CHECK: @cmpswap
  %i = alloca i8
  %j = call i8 @llvm.atomic.cmp.swap.i8.p0i8(i8* %i, i8 0, i8 42)
; cmp.swap lowers to load, icmp eq, select, store.
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: icmp
; CHECK-NEXT: select
; CHECK-NEXT: store
  ret i8 %j
; CHECK: ret i8 [[INST]]
}

define i8 @swap() {
; CHECK: @swap
  %i = alloca i8
  %j = call i8 @llvm.atomic.swap.i8.p0i8(i8* %i, i8 42)
; swap lowers to a bare load followed by a store of the new value.
; CHECK: [[INST:%[a-z0-9]+]] = load
; CHECK-NEXT: store
  ret i8 %j
; CHECK: ret i8 [[INST]]
}
; RUN: opt < %s -loweratomic -S | FileCheck %s
; Verify that -loweratomic deletes memory barriers outright: in the
; non-preemptible environment the pass assumes, a barrier is a no-op.

declare void @llvm.memory.barrier(i1 %ll, i1 %ls, i1 %sl, i1 %ss, i1 %device)

define void @barrier() {
; CHECK: @barrier
  call void @llvm.memory.barrier(i1 0, i1 0, i1 0, i1 0, i1 0)
; CHECK-NEXT: ret
  ret void
}
# DejaGNU driver: run every .ll/.c/.cpp test in this directory through
# the standard LLVM test harness, in sorted (deterministic) order.
load_lib llvm.exp

RunLLVMTests [lsort [glob -nocomplain $srcdir/$subdir/*.{ll,c,cpp}]]