llvm.org GIT mirror llvm / de412c3
Convert more tests over to the new atomic instructions. I did not convert Atomics-32.ll and Atomics-64.ll by hand; the diff is autoupgrade output. The wmb test is gone because there isn't any way to express wmb with the new atomic instructions; if someone really needs a non-asm way to write a wmb on Alpha, a platform-specific intrinsic could be added.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@140566 91177308-0d34-0410-b5e6-96231b3b80d8

Eli Friedman 8 years ago
6 changed files with 1367 additions and 1504 deletions.
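As orientation for the diff below: the old barrier and atomic intrinsics map onto the new native instructions roughly as follows (an illustrative sketch of the autoupgrade rewrites visible in the hunks, not part of the commit; @x, %cmp and %new are placeholders):

  call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 true )
    ; becomes a full barrier:
  fence seq_cst

  %old = call i8 @llvm.atomic.load.add.i8.p0i8( i8* @x, i8 1 )
    ; becomes:
  %old = atomicrmw add i8* @x, i8 1 monotonic

  %old = call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @x, i8 %cmp, i8 %new )
    ; becomes:
  %old = cmpxchg i8* @x, i8 %cmp, i8 %new monotonic

In each case the result is the value the location held before the operation, which is why the fetch-and-op tests below can store the result unchanged.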
test/CodeGen/Alpha/mb.ll
 ; RUN: llc < %s -march=alpha | grep mb
 
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
 define void @test() {
-  call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true , i1 true)
+  fence seq_cst
   ret void
 }
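Note: the five i1 operands of the old @llvm.memory.barrier were, if memory serves, the load-load, load-store, store-load and store-store ordering constraints plus a final device flag; mb.ll sets them all, i.e. a full barrier, so it converts exactly to the strongest new ordering, fence seq_cst.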
test/CodeGen/Alpha/wmb.ll (deleted, +0 -8)
-; RUN: llc < %s -march=alpha | grep wmb
-
-declare void @llvm.memory.barrier( i1 , i1 , i1 , i1 , i1)
-
-define void @test() {
-  call void @llvm.memory.barrier( i1 false, i1 false, i1 false, i1 true , i1 true)
-  ret void
-}
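Note: wmb.ll asked for only the store-store constraint (the single true among the four ordering flags). None of the new fence orderings (acquire, release, acq_rel, seq_cst) expresses a bare store-store barrier, so the closest generic replacement would have been the deliberately over-strong

  fence seq_cst  ; conservative stand-in, strictly stronger than wmb

which is why the test is deleted outright and a platform-specific intrinsic is suggested instead for anyone who needs Alpha's wmb without inline asm.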
test/CodeGen/PowerPC/Atomics-32.ll
 ; RUN: llc < %s -march=ppc32
-; ModuleID = 'Atomics.c'
 target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
 target triple = "powerpc-apple-darwin9"
-@sc = common global i8 0  ; <i8*> [#uses=52]
-@uc = common global i8 0  ; <i8*> [#uses=100]
-@ss = common global i16 0  ; <i16*> [#uses=15]
-@us = common global i16 0  ; <i16*> [#uses=15]
-@si = common global i32 0  ; <i32*> [#uses=15]
-@ui = common global i32 0  ; <i32*> [#uses=23]
-@sl = common global i32 0  ; <i32*> [#uses=15]
-@ul = common global i32 0  ; <i32*> [#uses=15]
-@sll = common global i64 0, align 8  ; <i64*> [#uses=1]
-@ull = common global i64 0, align 8  ; <i64*> [#uses=1]
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i32 0
+@ul = common global i32 0
+@sll = common global i64 0, align 8
+@ull = common global i64 0, align 8
 
 define void @test_op_ignore() nounwind {
 entry:
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:0 [#uses=0]
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:1 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:2 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 )  ; <i16>:3 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:4 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 )  ; <i16>:5 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:6 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 )  ; <i32>:7 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:8 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 )  ; <i32>:9 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:10 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 1 )  ; <i32>:11 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:12 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 1 )  ; <i32>:13 [#uses=0]
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:14 [#uses=0]
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:15 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:16 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 )  ; <i16>:17 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:18 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 )  ; <i16>:19 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:20 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 )  ; <i32>:21 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:22 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 )  ; <i32>:23 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:24 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 1 )  ; <i32>:25 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:26 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 1 )  ; <i32>:27 [#uses=0]
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:28 [#uses=0]
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:29 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:30 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 )  ; <i16>:31 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:32 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 )  ; <i16>:33 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:34 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 )  ; <i32>:35 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:36 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 )  ; <i32>:37 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:38 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 1 )  ; <i32>:39 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:40 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 1 )  ; <i32>:41 [#uses=0]
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:42 [#uses=0]
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:43 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:44 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 )  ; <i16>:45 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:46 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 )  ; <i16>:47 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:48 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 )  ; <i32>:49 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:50 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 )  ; <i32>:51 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:52 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 1 )  ; <i32>:53 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:54 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 1 )  ; <i32>:55 [#uses=0]
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:56 [#uses=0]
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:57 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:58 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 )  ; <i16>:59 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:60 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 )  ; <i16>:61 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:62 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 )  ; <i32>:63 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:64 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 )  ; <i32>:65 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:66 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 1 )  ; <i32>:67 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:68 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 1 )  ; <i32>:69 [#uses=0]
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 )  ; <i8>:70 [#uses=0]
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 )  ; <i8>:71 [#uses=0]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:72 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 )  ; <i16>:73 [#uses=0]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:74 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 )  ; <i16>:75 [#uses=0]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:76 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 )  ; <i32>:77 [#uses=0]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:78 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 )  ; <i32>:79 [#uses=0]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:80 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 1 )  ; <i32>:81 [#uses=0]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:82 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 1 )  ; <i32>:83 [#uses=0]
-  br label %return
-
-return:  ; preds = %entry
-  ret void
+  %0 = atomicrmw add i8* @sc, i8 1 monotonic
+  %1 = atomicrmw add i8* @uc, i8 1 monotonic
+  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %3 = atomicrmw add i16* %2, i16 1 monotonic
+  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %5 = atomicrmw add i16* %4, i16 1 monotonic
+  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %7 = atomicrmw add i32* %6, i32 1 monotonic
+  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %9 = atomicrmw add i32* %8, i32 1 monotonic
+  %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %11 = atomicrmw add i32* %10, i32 1 monotonic
+  %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %13 = atomicrmw add i32* %12, i32 1 monotonic
+  %14 = atomicrmw sub i8* @sc, i8 1 monotonic
+  %15 = atomicrmw sub i8* @uc, i8 1 monotonic
+  %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %17 = atomicrmw sub i16* %16, i16 1 monotonic
+  %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %19 = atomicrmw sub i16* %18, i16 1 monotonic
+  %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %21 = atomicrmw sub i32* %20, i32 1 monotonic
+  %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %23 = atomicrmw sub i32* %22, i32 1 monotonic
+  %24 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %25 = atomicrmw sub i32* %24, i32 1 monotonic
+  %26 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %27 = atomicrmw sub i32* %26, i32 1 monotonic
+  %28 = atomicrmw or i8* @sc, i8 1 monotonic
+  %29 = atomicrmw or i8* @uc, i8 1 monotonic
+  %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %31 = atomicrmw or i16* %30, i16 1 monotonic
+  %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %33 = atomicrmw or i16* %32, i16 1 monotonic
+  %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %35 = atomicrmw or i32* %34, i32 1 monotonic
+  %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %37 = atomicrmw or i32* %36, i32 1 monotonic
+  %38 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %39 = atomicrmw or i32* %38, i32 1 monotonic
+  %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %41 = atomicrmw or i32* %40, i32 1 monotonic
+  %42 = atomicrmw xor i8* @sc, i8 1 monotonic
+  %43 = atomicrmw xor i8* @uc, i8 1 monotonic
+  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %45 = atomicrmw xor i16* %44, i16 1 monotonic
+  %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %47 = atomicrmw xor i16* %46, i16 1 monotonic
+  %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %49 = atomicrmw xor i32* %48, i32 1 monotonic
+  %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %51 = atomicrmw xor i32* %50, i32 1 monotonic
+  %52 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %53 = atomicrmw xor i32* %52, i32 1 monotonic
+  %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %55 = atomicrmw xor i32* %54, i32 1 monotonic
+  %56 = atomicrmw and i8* @sc, i8 1 monotonic
+  %57 = atomicrmw and i8* @uc, i8 1 monotonic
+  %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %59 = atomicrmw and i16* %58, i16 1 monotonic
+  %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %61 = atomicrmw and i16* %60, i16 1 monotonic
+  %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %63 = atomicrmw and i32* %62, i32 1 monotonic
+  %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %65 = atomicrmw and i32* %64, i32 1 monotonic
+  %66 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %67 = atomicrmw and i32* %66, i32 1 monotonic
+  %68 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %69 = atomicrmw and i32* %68, i32 1 monotonic
+  %70 = atomicrmw nand i8* @sc, i8 1 monotonic
+  %71 = atomicrmw nand i8* @uc, i8 1 monotonic
+  %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %73 = atomicrmw nand i16* %72, i16 1 monotonic
+  %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %75 = atomicrmw nand i16* %74, i16 1 monotonic
+  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %77 = atomicrmw nand i32* %76, i32 1 monotonic
+  %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %79 = atomicrmw nand i32* %78, i32 1 monotonic
+  %80 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %81 = atomicrmw nand i32* %80, i32 1 monotonic
+  %82 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %83 = atomicrmw nand i32* %82, i32 1 monotonic
+  br label %return
+
+return:  ; preds = %entry
+  ret void
 }
-
-declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
-
-declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
-
-declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
-
-declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
 
 define void @test_fetch_and_op() nounwind {
 entry:
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:0 [#uses=1]
-  store i8 %0, i8* @sc, align 1
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:1 [#uses=1]
-  store i8 %1, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:2 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 )  ; <i16>:3 [#uses=1]
-  store i16 %3, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:4 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 )  ; <i16>:5 [#uses=1]
-  store i16 %5, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:6 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 )  ; <i32>:7 [#uses=1]
-  store i32 %7, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:8 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 )  ; <i32>:9 [#uses=1]
-  store i32 %9, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:10 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %10, i32 11 )  ; <i32>:11 [#uses=1]
-  store i32 %11, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:12 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %12, i32 11 )  ; <i32>:13 [#uses=1]
-  store i32 %13, i32* @ul, align 4
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:14 [#uses=1]
-  store i8 %14, i8* @sc, align 1
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:15 [#uses=1]
-  store i8 %15, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:16 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 )  ; <i16>:17 [#uses=1]
-  store i16 %17, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:18 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 )  ; <i16>:19 [#uses=1]
-  store i16 %19, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:20 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 )  ; <i32>:21 [#uses=1]
-  store i32 %21, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:22 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 )  ; <i32>:23 [#uses=1]
-  store i32 %23, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:24 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %24, i32 11 )  ; <i32>:25 [#uses=1]
-  store i32 %25, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:26 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %26, i32 11 )  ; <i32>:27 [#uses=1]
-  store i32 %27, i32* @ul, align 4
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:28 [#uses=1]
-  store i8 %28, i8* @sc, align 1
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:29 [#uses=1]
-  store i8 %29, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:30 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 )  ; <i16>:31 [#uses=1]
-  store i16 %31, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:32 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 )  ; <i16>:33 [#uses=1]
-  store i16 %33, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:34 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 )  ; <i32>:35 [#uses=1]
-  store i32 %35, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:36 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 )  ; <i32>:37 [#uses=1]
-  store i32 %37, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:38 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %38, i32 11 )  ; <i32>:39 [#uses=1]
-  store i32 %39, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:40 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %40, i32 11 )  ; <i32>:41 [#uses=1]
-  store i32 %41, i32* @ul, align 4
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:42 [#uses=1]
-  store i8 %42, i8* @sc, align 1
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:43 [#uses=1]
-  store i8 %43, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:44 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 )  ; <i16>:45 [#uses=1]
-  store i16 %45, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:46 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 )  ; <i16>:47 [#uses=1]
-  store i16 %47, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:48 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 )  ; <i32>:49 [#uses=1]
-  store i32 %49, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:50 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 )  ; <i32>:51 [#uses=1]
-  store i32 %51, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:52 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %52, i32 11 )  ; <i32>:53 [#uses=1]
-  store i32 %53, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:54 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %54, i32 11 )  ; <i32>:55 [#uses=1]
-  store i32 %55, i32* @ul, align 4
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:56 [#uses=1]
-  store i8 %56, i8* @sc, align 1
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:57 [#uses=1]
-  store i8 %57, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:58 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 )  ; <i16>:59 [#uses=1]
-  store i16 %59, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:60 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 )  ; <i16>:61 [#uses=1]
-  store i16 %61, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:62 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 )  ; <i32>:63 [#uses=1]
-  store i32 %63, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:64 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 )  ; <i32>:65 [#uses=1]
-  store i32 %65, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:66 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %66, i32 11 )  ; <i32>:67 [#uses=1]
-  store i32 %67, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:68 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %68, i32 11 )  ; <i32>:69 [#uses=1]
-  store i32 %69, i32* @ul, align 4
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 )  ; <i8>:70 [#uses=1]
-  store i8 %70, i8* @sc, align 1
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 )  ; <i8>:71 [#uses=1]
-  store i8 %71, i8* @uc, align 1
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:72 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 )  ; <i16>:73 [#uses=1]
-  store i16 %73, i16* @ss, align 2
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:74 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 )  ; <i16>:75 [#uses=1]
-  store i16 %75, i16* @us, align 2
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:76 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 )  ; <i32>:77 [#uses=1]
-  store i32 %77, i32* @si, align 4
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:78 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 )  ; <i32>:79 [#uses=1]
-  store i32 %79, i32* @ui, align 4
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:80 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %80, i32 11 )  ; <i32>:81 [#uses=1]
-  store i32 %81, i32* @sl, align 4
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:82 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %82, i32 11 )  ; <i32>:83 [#uses=1]
-  store i32 %83, i32* @ul, align 4
-  br label %return
-
-return:  ; preds = %entry
-  ret void
+  %0 = atomicrmw add i8* @sc, i8 11 monotonic
+  store i8 %0, i8* @sc, align 1
+  %1 = atomicrmw add i8* @uc, i8 11 monotonic
+  store i8 %1, i8* @uc, align 1
+  %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %3 = atomicrmw add i16* %2, i16 11 monotonic
+  store i16 %3, i16* @ss, align 2
+  %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %5 = atomicrmw add i16* %4, i16 11 monotonic
+  store i16 %5, i16* @us, align 2
+  %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %7 = atomicrmw add i32* %6, i32 11 monotonic
+  store i32 %7, i32* @si, align 4
+  %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %9 = atomicrmw add i32* %8, i32 11 monotonic
+  store i32 %9, i32* @ui, align 4
+  %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %11 = atomicrmw add i32* %10, i32 11 monotonic
+  store i32 %11, i32* @sl, align 4
+  %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %13 = atomicrmw add i32* %12, i32 11 monotonic
+  store i32 %13, i32* @ul, align 4
+  %14 = atomicrmw sub i8* @sc, i8 11 monotonic
+  store i8 %14, i8* @sc, align 1
+  %15 = atomicrmw sub i8* @uc, i8 11 monotonic
+  store i8 %15, i8* @uc, align 1
+  %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %17 = atomicrmw sub i16* %16, i16 11 monotonic
+  store i16 %17, i16* @ss, align 2
+  %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %19 = atomicrmw sub i16* %18, i16 11 monotonic
+  store i16 %19, i16* @us, align 2
+  %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %21 = atomicrmw sub i32* %20, i32 11 monotonic
+  store i32 %21, i32* @si, align 4
+  %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %23 = atomicrmw sub i32* %22, i32 11 monotonic
+  store i32 %23, i32* @ui, align 4
+  %24 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %25 = atomicrmw sub i32* %24, i32 11 monotonic
+  store i32 %25, i32* @sl, align 4
+  %26 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %27 = atomicrmw sub i32* %26, i32 11 monotonic
+  store i32 %27, i32* @ul, align 4
+  %28 = atomicrmw or i8* @sc, i8 11 monotonic
+  store i8 %28, i8* @sc, align 1
+  %29 = atomicrmw or i8* @uc, i8 11 monotonic
+  store i8 %29, i8* @uc, align 1
+  %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %31 = atomicrmw or i16* %30, i16 11 monotonic
+  store i16 %31, i16* @ss, align 2
+  %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %33 = atomicrmw or i16* %32, i16 11 monotonic
+  store i16 %33, i16* @us, align 2
+  %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %35 = atomicrmw or i32* %34, i32 11 monotonic
+  store i32 %35, i32* @si, align 4
+  %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %37 = atomicrmw or i32* %36, i32 11 monotonic
+  store i32 %37, i32* @ui, align 4
+  %38 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %39 = atomicrmw or i32* %38, i32 11 monotonic
+  store i32 %39, i32* @sl, align 4
+  %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %41 = atomicrmw or i32* %40, i32 11 monotonic
+  store i32 %41, i32* @ul, align 4
+  %42 = atomicrmw xor i8* @sc, i8 11 monotonic
+  store i8 %42, i8* @sc, align 1
+  %43 = atomicrmw xor i8* @uc, i8 11 monotonic
+  store i8 %43, i8* @uc, align 1
+  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %45 = atomicrmw xor i16* %44, i16 11 monotonic
+  store i16 %45, i16* @ss, align 2
+  %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %47 = atomicrmw xor i16* %46, i16 11 monotonic
+  store i16 %47, i16* @us, align 2
+  %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %49 = atomicrmw xor i32* %48, i32 11 monotonic
+  store i32 %49, i32* @si, align 4
+  %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %51 = atomicrmw xor i32* %50, i32 11 monotonic
+  store i32 %51, i32* @ui, align 4
+  %52 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %53 = atomicrmw xor i32* %52, i32 11 monotonic
+  store i32 %53, i32* @sl, align 4
+  %54 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %55 = atomicrmw xor i32* %54, i32 11 monotonic
+  store i32 %55, i32* @ul, align 4
+  %56 = atomicrmw and i8* @sc, i8 11 monotonic
+  store i8 %56, i8* @sc, align 1
+  %57 = atomicrmw and i8* @uc, i8 11 monotonic
+  store i8 %57, i8* @uc, align 1
+  %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %59 = atomicrmw and i16* %58, i16 11 monotonic
+  store i16 %59, i16* @ss, align 2
+  %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %61 = atomicrmw and i16* %60, i16 11 monotonic
+  store i16 %61, i16* @us, align 2
+  %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %63 = atomicrmw and i32* %62, i32 11 monotonic
+  store i32 %63, i32* @si, align 4
+  %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %65 = atomicrmw and i32* %64, i32 11 monotonic
+  store i32 %65, i32* @ui, align 4
+  %66 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %67 = atomicrmw and i32* %66, i32 11 monotonic
+  store i32 %67, i32* @sl, align 4
+  %68 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %69 = atomicrmw and i32* %68, i32 11 monotonic
+  store i32 %69, i32* @ul, align 4
+  %70 = atomicrmw nand i8* @sc, i8 11 monotonic
+  store i8 %70, i8* @sc, align 1
+  %71 = atomicrmw nand i8* @uc, i8 11 monotonic
+  store i8 %71, i8* @uc, align 1
+  %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %73 = atomicrmw nand i16* %72, i16 11 monotonic
+  store i16 %73, i16* @ss, align 2
+  %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %75 = atomicrmw nand i16* %74, i16 11 monotonic
+  store i16 %75, i16* @us, align 2
+  %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %77 = atomicrmw nand i32* %76, i32 11 monotonic
+  store i32 %77, i32* @si, align 4
+  %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %79 = atomicrmw nand i32* %78, i32 11 monotonic
+  store i32 %79, i32* @ui, align 4
+  %80 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %81 = atomicrmw nand i32* %80, i32 11 monotonic
+  store i32 %81, i32* @sl, align 4
+  %82 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %83 = atomicrmw nand i32* %82, i32 11 monotonic
+  store i32 %83, i32* @ul, align 4
+  br label %return
+
+return:  ; preds = %entry
+  ret void
 }
 
 define void @test_op_and_fetch() nounwind {
 entry:
-  load i8* @uc, align 1  ; <i8>:0 [#uses=2]
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 )  ; <i8>:1 [#uses=1]
-  add i8 %1, %0  ; <i8>:2 [#uses=1]
-  store i8 %2, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:3 [#uses=2]
-  call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 )  ; <i8>:4 [#uses=1]
-  add i8 %4, %3  ; <i8>:5 [#uses=1]
-  store i8 %5, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:6 [#uses=1]
-  zext i8 %6 to i16  ; <i16>:7 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:8 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 )  ; <i16>:9 [#uses=1]
-  add i16 %9, %7  ; <i16>:10 [#uses=1]
-  store i16 %10, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:11 [#uses=1]
-  zext i8 %11 to i16  ; <i16>:12 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:13 [#uses=1]
-  call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 )  ; <i16>:14 [#uses=1]
-  add i16 %14, %12  ; <i16>:15 [#uses=1]
-  store i16 %15, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:16 [#uses=1]
-  zext i8 %16 to i32  ; <i32>:17 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:18 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 )  ; <i32>:19 [#uses=1]
-  add i32 %19, %17  ; <i32>:20 [#uses=1]
-  store i32 %20, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:21 [#uses=1]
-  zext i8 %21 to i32  ; <i32>:22 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:23 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 )  ; <i32>:24 [#uses=1]
-  add i32 %24, %22  ; <i32>:25 [#uses=1]
-  store i32 %25, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:26 [#uses=1]
-  zext i8 %26 to i32  ; <i32>:27 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:28 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %28, i32 %27 )  ; <i32>:29 [#uses=1]
-  add i32 %29, %27  ; <i32>:30 [#uses=1]
-  store i32 %30, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:31 [#uses=1]
-  zext i8 %31 to i32  ; <i32>:32 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:33 [#uses=1]
-  call i32 @llvm.atomic.load.add.i32.p0i32( i32* %33, i32 %32 )  ; <i32>:34 [#uses=1]
-  add i32 %34, %32  ; <i32>:35 [#uses=1]
-  store i32 %35, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:36 [#uses=2]
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 )  ; <i8>:37 [#uses=1]
-  sub i8 %37, %36  ; <i8>:38 [#uses=1]
-  store i8 %38, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:39 [#uses=2]
-  call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 )  ; <i8>:40 [#uses=1]
-  sub i8 %40, %39  ; <i8>:41 [#uses=1]
-  store i8 %41, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:42 [#uses=1]
-  zext i8 %42 to i16  ; <i16>:43 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:44 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 )  ; <i16>:45 [#uses=1]
-  sub i16 %45, %43  ; <i16>:46 [#uses=1]
-  store i16 %46, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:47 [#uses=1]
-  zext i8 %47 to i16  ; <i16>:48 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:49 [#uses=1]
-  call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 )  ; <i16>:50 [#uses=1]
-  sub i16 %50, %48  ; <i16>:51 [#uses=1]
-  store i16 %51, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:52 [#uses=1]
-  zext i8 %52 to i32  ; <i32>:53 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:54 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 )  ; <i32>:55 [#uses=1]
-  sub i32 %55, %53  ; <i32>:56 [#uses=1]
-  store i32 %56, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:57 [#uses=1]
-  zext i8 %57 to i32  ; <i32>:58 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:59 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 )  ; <i32>:60 [#uses=1]
-  sub i32 %60, %58  ; <i32>:61 [#uses=1]
-  store i32 %61, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:62 [#uses=1]
-  zext i8 %62 to i32  ; <i32>:63 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:64 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %64, i32 %63 )  ; <i32>:65 [#uses=1]
-  sub i32 %65, %63  ; <i32>:66 [#uses=1]
-  store i32 %66, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:67 [#uses=1]
-  zext i8 %67 to i32  ; <i32>:68 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:69 [#uses=1]
-  call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %69, i32 %68 )  ; <i32>:70 [#uses=1]
-  sub i32 %70, %68  ; <i32>:71 [#uses=1]
-  store i32 %71, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:72 [#uses=2]
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 )  ; <i8>:73 [#uses=1]
-  or i8 %73, %72  ; <i8>:74 [#uses=1]
-  store i8 %74, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:75 [#uses=2]
-  call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 )  ; <i8>:76 [#uses=1]
-  or i8 %76, %75  ; <i8>:77 [#uses=1]
-  store i8 %77, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:78 [#uses=1]
-  zext i8 %78 to i16  ; <i16>:79 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:80 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 )  ; <i16>:81 [#uses=1]
-  or i16 %81, %79  ; <i16>:82 [#uses=1]
-  store i16 %82, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:83 [#uses=1]
-  zext i8 %83 to i16  ; <i16>:84 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:85 [#uses=1]
-  call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 )  ; <i16>:86 [#uses=1]
-  or i16 %86, %84  ; <i16>:87 [#uses=1]
-  store i16 %87, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:88 [#uses=1]
-  zext i8 %88 to i32  ; <i32>:89 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:90 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 )  ; <i32>:91 [#uses=1]
-  or i32 %91, %89  ; <i32>:92 [#uses=1]
-  store i32 %92, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:93 [#uses=1]
-  zext i8 %93 to i32  ; <i32>:94 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:95 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 )  ; <i32>:96 [#uses=1]
-  or i32 %96, %94  ; <i32>:97 [#uses=1]
-  store i32 %97, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:98 [#uses=1]
-  zext i8 %98 to i32  ; <i32>:99 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:100 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %100, i32 %99 )  ; <i32>:101 [#uses=1]
-  or i32 %101, %99  ; <i32>:102 [#uses=1]
-  store i32 %102, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:103 [#uses=1]
-  zext i8 %103 to i32  ; <i32>:104 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:105 [#uses=1]
-  call i32 @llvm.atomic.load.or.i32.p0i32( i32* %105, i32 %104 )  ; <i32>:106 [#uses=1]
-  or i32 %106, %104  ; <i32>:107 [#uses=1]
-  store i32 %107, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:108 [#uses=2]
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 )  ; <i8>:109 [#uses=1]
-  xor i8 %109, %108  ; <i8>:110 [#uses=1]
-  store i8 %110, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:111 [#uses=2]
-  call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 )  ; <i8>:112 [#uses=1]
-  xor i8 %112, %111  ; <i8>:113 [#uses=1]
-  store i8 %113, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:114 [#uses=1]
-  zext i8 %114 to i16  ; <i16>:115 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:116 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 )  ; <i16>:117 [#uses=1]
-  xor i16 %117, %115  ; <i16>:118 [#uses=1]
-  store i16 %118, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:119 [#uses=1]
-  zext i8 %119 to i16  ; <i16>:120 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:121 [#uses=1]
-  call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 )  ; <i16>:122 [#uses=1]
-  xor i16 %122, %120  ; <i16>:123 [#uses=1]
-  store i16 %123, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:124 [#uses=1]
-  zext i8 %124 to i32  ; <i32>:125 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:126 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 )  ; <i32>:127 [#uses=1]
-  xor i32 %127, %125  ; <i32>:128 [#uses=1]
-  store i32 %128, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:129 [#uses=1]
-  zext i8 %129 to i32  ; <i32>:130 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:131 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 )  ; <i32>:132 [#uses=1]
-  xor i32 %132, %130  ; <i32>:133 [#uses=1]
-  store i32 %133, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:134 [#uses=1]
-  zext i8 %134 to i32  ; <i32>:135 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:136 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %136, i32 %135 )  ; <i32>:137 [#uses=1]
-  xor i32 %137, %135  ; <i32>:138 [#uses=1]
-  store i32 %138, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:139 [#uses=1]
-  zext i8 %139 to i32  ; <i32>:140 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:141 [#uses=1]
-  call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %141, i32 %140 )  ; <i32>:142 [#uses=1]
-  xor i32 %142, %140  ; <i32>:143 [#uses=1]
-  store i32 %143, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:144 [#uses=2]
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 )  ; <i8>:145 [#uses=1]
-  and i8 %145, %144  ; <i8>:146 [#uses=1]
-  store i8 %146, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:147 [#uses=2]
-  call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 )  ; <i8>:148 [#uses=1]
-  and i8 %148, %147  ; <i8>:149 [#uses=1]
-  store i8 %149, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:150 [#uses=1]
-  zext i8 %150 to i16  ; <i16>:151 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:152 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 )  ; <i16>:153 [#uses=1]
-  and i16 %153, %151  ; <i16>:154 [#uses=1]
-  store i16 %154, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:155 [#uses=1]
-  zext i8 %155 to i16  ; <i16>:156 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:157 [#uses=1]
-  call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 )  ; <i16>:158 [#uses=1]
-  and i16 %158, %156  ; <i16>:159 [#uses=1]
-  store i16 %159, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:160 [#uses=1]
-  zext i8 %160 to i32  ; <i32>:161 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:162 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 )  ; <i32>:163 [#uses=1]
-  and i32 %163, %161  ; <i32>:164 [#uses=1]
-  store i32 %164, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:165 [#uses=1]
-  zext i8 %165 to i32  ; <i32>:166 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:167 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 )  ; <i32>:168 [#uses=1]
-  and i32 %168, %166  ; <i32>:169 [#uses=1]
-  store i32 %169, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:170 [#uses=1]
-  zext i8 %170 to i32  ; <i32>:171 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:172 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %172, i32 %171 )  ; <i32>:173 [#uses=1]
-  and i32 %173, %171  ; <i32>:174 [#uses=1]
-  store i32 %174, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:175 [#uses=1]
-  zext i8 %175 to i32  ; <i32>:176 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:177 [#uses=1]
-  call i32 @llvm.atomic.load.and.i32.p0i32( i32* %177, i32 %176 )  ; <i32>:178 [#uses=1]
-  and i32 %178, %176  ; <i32>:179 [#uses=1]
-  store i32 %179, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:180 [#uses=2]
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 )  ; <i8>:181 [#uses=1]
-  xor i8 %181, -1  ; <i8>:182 [#uses=1]
-  and i8 %182, %180  ; <i8>:183 [#uses=1]
-  store i8 %183, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:184 [#uses=2]
-  call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 )  ; <i8>:185 [#uses=1]
-  xor i8 %185, -1  ; <i8>:186 [#uses=1]
-  and i8 %186, %184  ; <i8>:187 [#uses=1]
-  store i8 %187, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:188 [#uses=1]
-  zext i8 %188 to i16  ; <i16>:189 [#uses=2]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:190 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 )  ; <i16>:191 [#uses=1]
-  xor i16 %191, -1  ; <i16>:192 [#uses=1]
-  and i16 %192, %189  ; <i16>:193 [#uses=1]
-  store i16 %193, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:194 [#uses=1]
-  zext i8 %194 to i16  ; <i16>:195 [#uses=2]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:196 [#uses=1]
-  call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 )  ; <i16>:197 [#uses=1]
-  xor i16 %197, -1  ; <i16>:198 [#uses=1]
-  and i16 %198, %195  ; <i16>:199 [#uses=1]
-  store i16 %199, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:200 [#uses=1]
-  zext i8 %200 to i32  ; <i32>:201 [#uses=2]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:202 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 )  ; <i32>:203 [#uses=1]
-  xor i32 %203, -1  ; <i32>:204 [#uses=1]
-  and i32 %204, %201  ; <i32>:205 [#uses=1]
-  store i32 %205, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:206 [#uses=1]
-  zext i8 %206 to i32  ; <i32>:207 [#uses=2]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:208 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 )  ; <i32>:209 [#uses=1]
-  xor i32 %209, -1  ; <i32>:210 [#uses=1]
-  and i32 %210, %207  ; <i32>:211 [#uses=1]
-  store i32 %211, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:212 [#uses=1]
-  zext i8 %212 to i32  ; <i32>:213 [#uses=2]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:214 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %214, i32 %213 )  ; <i32>:215 [#uses=1]
-  xor i32 %215, -1  ; <i32>:216 [#uses=1]
-  and i32 %216, %213  ; <i32>:217 [#uses=1]
-  store i32 %217, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:218 [#uses=1]
-  zext i8 %218 to i32  ; <i32>:219 [#uses=2]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:220 [#uses=1]
-  call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %220, i32 %219 )  ; <i32>:221 [#uses=1]
-  xor i32 %221, -1  ; <i32>:222 [#uses=1]
-  and i32 %222, %219  ; <i32>:223 [#uses=1]
-  store i32 %223, i32* @ul, align 4
-  br label %return
-
-return:  ; preds = %entry
-  ret void
+  %0 = load i8* @uc, align 1
+  %1 = atomicrmw add i8* @sc, i8 %0 monotonic
+  %2 = add i8 %1, %0
+  store i8 %2, i8* @sc, align 1
+  %3 = load i8* @uc, align 1
+  %4 = atomicrmw add i8* @uc, i8 %3 monotonic
+  %5 = add i8 %4, %3
+  store i8 %5, i8* @uc, align 1
+  %6 = load i8* @uc, align 1
+  %7 = zext i8 %6 to i16
+  %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %9 = atomicrmw add i16* %8, i16 %7 monotonic
+  %10 = add i16 %9, %7
+  store i16 %10, i16* @ss, align 2
+  %11 = load i8* @uc, align 1
+  %12 = zext i8 %11 to i16
+  %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %14 = atomicrmw add i16* %13, i16 %12 monotonic
+  %15 = add i16 %14, %12
+  store i16 %15, i16* @us, align 2
+  %16 = load i8* @uc, align 1
+  %17 = zext i8 %16 to i32
+  %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %19 = atomicrmw add i32* %18, i32 %17 monotonic
+  %20 = add i32 %19, %17
+  store i32 %20, i32* @si, align 4
+  %21 = load i8* @uc, align 1
+  %22 = zext i8 %21 to i32
+  %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %24 = atomicrmw add i32* %23, i32 %22 monotonic
+  %25 = add i32 %24, %22
+  store i32 %25, i32* @ui, align 4
+  %26 = load i8* @uc, align 1
+  %27 = zext i8 %26 to i32
+  %28 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %29 = atomicrmw add i32* %28, i32 %27 monotonic
+  %30 = add i32 %29, %27
+  store i32 %30, i32* @sl, align 4
+  %31 = load i8* @uc, align 1
+  %32 = zext i8 %31 to i32
+  %33 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %34 = atomicrmw add i32* %33, i32 %32 monotonic
+  %35 = add i32 %34, %32
+  store i32 %35, i32* @ul, align 4
+  %36 = load i8* @uc, align 1
+  %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
+  %38 = sub i8 %37, %36
+  store i8 %38, i8* @sc, align 1
+  %39 = load i8* @uc, align 1
+  %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
+  %41 = sub i8 %40, %39
+  store i8 %41, i8* @uc, align 1
+  %42 = load i8* @uc, align 1
+  %43 = zext i8 %42 to i16
+  %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %45 = atomicrmw sub i16* %44, i16 %43 monotonic
+  %46 = sub i16 %45, %43
+  store i16 %46, i16* @ss, align 2
+  %47 = load i8* @uc, align 1
+  %48 = zext i8 %47 to i16
+  %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %50 = atomicrmw sub i16* %49, i16 %48 monotonic
+  %51 = sub i16 %50, %48
+  store i16 %51, i16* @us, align 2
+  %52 = load i8* @uc, align 1
+  %53 = zext i8 %52 to i32
+  %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %55 = atomicrmw sub i32* %54, i32 %53 monotonic
+  %56 = sub i32 %55, %53
+  store i32 %56, i32* @si, align 4
+  %57 = load i8* @uc, align 1
+  %58 = zext i8 %57 to i32
+  %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %60 = atomicrmw sub i32* %59, i32 %58 monotonic
+  %61 = sub i32 %60, %58
+  store i32 %61, i32* @ui, align 4
+  %62 = load i8* @uc, align 1
+  %63 = zext i8 %62 to i32
+  %64 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %65 = atomicrmw sub i32* %64, i32 %63 monotonic
+  %66 = sub i32 %65, %63
+  store i32 %66, i32* @sl, align 4
+  %67 = load i8* @uc, align 1
+  %68 = zext i8 %67 to i32
+  %69 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %70 = atomicrmw sub i32* %69, i32 %68 monotonic
+  %71 = sub i32 %70, %68
+  store i32 %71, i32* @ul, align 4
+  %72 = load i8* @uc, align 1
+  %73 = atomicrmw or i8* @sc, i8 %72 monotonic
+  %74 = or i8 %73, %72
+  store i8 %74, i8* @sc, align 1
+  %75 = load i8* @uc, align 1
+  %76 = atomicrmw or i8* @uc, i8 %75 monotonic
+  %77 = or i8 %76, %75
+  store i8 %77, i8* @uc, align 1
+  %78 = load i8* @uc, align 1
+  %79 = zext i8 %78 to i16
+  %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %81 = atomicrmw or i16* %80, i16 %79 monotonic
+  %82 = or i16 %81, %79
+  store i16 %82, i16* @ss, align 2
+  %83 = load i8* @uc, align 1
+  %84 = zext i8 %83 to i16
+  %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %86 = atomicrmw or i16* %85, i16 %84 monotonic
+  %87 = or i16 %86, %84
+  store i16 %87, i16* @us, align 2
+  %88 = load i8* @uc, align 1
+  %89 = zext i8 %88 to i32
+  %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %91 = atomicrmw or i32* %90, i32 %89 monotonic
+  %92 = or i32 %91, %89
+  store i32 %92, i32* @si, align 4
+  %93 = load i8* @uc, align 1
+  %94 = zext i8 %93 to i32
+  %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %96 = atomicrmw or i32* %95, i32 %94 monotonic
+  %97 = or i32 %96, %94
+  store i32 %97, i32* @ui, align 4
+  %98 = load i8* @uc, align 1
+  %99 = zext i8 %98 to i32
+  %100 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %101 = atomicrmw or i32* %100, i32 %99 monotonic
+  %102 = or i32 %101, %99
+  store i32 %102, i32* @sl, align 4
+  %103 = load i8* @uc, align 1
+  %104 = zext i8 %103 to i32
+  %105 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %106 = atomicrmw or i32* %105, i32 %104 monotonic
+  %107 = or i32 %106, %104
+  store i32 %107, i32* @ul, align 4
+  %108 = load i8* @uc, align 1
+  %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
+  %110 = xor i8 %109, %108
+  store i8 %110, i8* @sc, align 1
+  %111 = load i8* @uc, align 1
+  %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
+  %113 = xor i8 %112, %111
+  store i8 %113, i8* @uc, align 1
+  %114 = load i8* @uc, align 1
+  %115 = zext i8 %114 to i16
+  %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %117 = atomicrmw xor i16* %116, i16 %115 monotonic
+  %118 = xor i16 %117, %115
+  store i16 %118, i16* @ss, align 2
+  %119 = load i8* @uc, align 1
+  %120 = zext i8 %119 to i16
+  %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %122 = atomicrmw xor i16* %121, i16 %120 monotonic
+  %123 = xor i16 %122, %120
+  store i16 %123, i16* @us, align 2
+  %124 = load i8* @uc, align 1
+  %125 = zext i8 %124 to i32
+  %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %127 = atomicrmw xor i32* %126, i32 %125 monotonic
+  %128 = xor i32 %127, %125
+  store i32 %128, i32* @si, align 4
+  %129 = load i8* @uc, align 1
+  %130 = zext i8 %129 to i32
+  %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %132 = atomicrmw xor i32* %131, i32 %130 monotonic
+  %133 = xor i32 %132, %130
+  store i32 %133, i32* @ui, align 4
+  %134 = load i8* @uc, align 1
+  %135 = zext i8 %134 to i32
+  %136 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %137 = atomicrmw xor i32* %136, i32 %135 monotonic
+  %138 = xor i32 %137, %135
+  store i32 %138, i32* @sl, align 4
+  %139 = load i8* @uc, align 1
+  %140 = zext i8 %139 to i32
+  %141 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %142 = atomicrmw xor i32* %141, i32 %140 monotonic
+  %143 = xor i32 %142, %140
+  store i32 %143, i32* @ul, align 4
+  %144 = load i8* @uc, align 1
+  %145 = atomicrmw and i8* @sc, i8 %144 monotonic
+  %146 = and i8 %145, %144
+  store i8 %146, i8* @sc, align 1
+  %147 = load i8* @uc, align 1
+  %148 = atomicrmw and i8* @uc, i8 %147 monotonic
+  %149 = and i8 %148, %147
+  store i8 %149, i8* @uc, align 1
+  %150 = load i8* @uc, align 1
+  %151 = zext i8 %150 to i16
+  %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %153 = atomicrmw and i16* %152, i16 %151 monotonic
+  %154 = and i16 %153, %151
+  store i16 %154, i16* @ss, align 2
+  %155 = load i8* @uc, align 1
+  %156 = zext i8 %155 to i16
+  %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %158 = atomicrmw and i16* %157, i16 %156 monotonic
+  %159 = and i16 %158, %156
+  store i16 %159, i16* @us, align 2
+  %160 = load i8* @uc, align 1
+  %161 = zext i8 %160 to i32
+  %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %163 = atomicrmw and i32* %162, i32 %161 monotonic
+  %164 = and i32 %163, %161
+  store i32 %164, i32* @si, align 4
+  %165 = load i8* @uc, align 1
+  %166 = zext i8 %165 to i32
+  %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %168 = atomicrmw and i32* %167, i32 %166 monotonic
+  %169 = and i32 %168, %166
+  store i32 %169, i32* @ui, align 4
+  %170 = load i8* @uc, align 1
+  %171 = zext i8 %170 to i32
+  %172 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %173 = atomicrmw and i32* %172, i32 %171 monotonic
+  %174 = and i32 %173, %171
+  store i32 %174, i32* @sl, align 4
+  %175 = load i8* @uc, align 1
+  %176 = zext i8 %175 to i32
+  %177 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %178 = atomicrmw and i32* %177, i32 %176 monotonic
+  %179 = and i32 %178, %176
+  store i32 %179, i32* @ul, align 4
+  %180 = load i8* @uc, align 1
+  %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
+  %182 = xor i8 %181, -1
+  %183 = and i8 %182, %180
+  store i8 %183, i8* @sc, align 1
+  %184 = load i8* @uc, align 1
+  %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
+  %186 = xor i8 %185, -1
+  %187 = and i8 %186, %184
+  store i8 %187, i8* @uc, align 1
+  %188 = load i8* @uc, align 1
+  %189 = zext i8 %188 to i16
+  %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+  %191 = atomicrmw nand i16* %190, i16 %189 monotonic
+  %192 = xor i16 %191, -1
+  %193 = and i16 %192, %189
+  store i16 %193, i16* @ss, align 2
+  %194 = load i8* @uc, align 1
+  %195 = zext i8 %194 to i16
+  %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+  %197 = atomicrmw nand i16* %196, i16 %195 monotonic
+  %198 = xor i16 %197, -1
+  %199 = and i16 %198, %195
+  store i16 %199, i16* @us, align 2
+  %200 = load i8* @uc, align 1
+  %201 = zext i8 %200 to i32
+  %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+  %203 = atomicrmw nand i32* %202, i32 %201 monotonic
+  %204 = xor i32 %203, -1
+  %205 = and i32 %204, %201
+  store i32 %205, i32* @si, align 4
+  %206 = load i8* @uc, align 1
+  %207 = zext i8 %206 to i32
+  %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+  %209 = atomicrmw nand i32* %208, i32 %207 monotonic
+  %210 = xor i32 %209, -1
+  %211 = and i32 %210, %207
+  store i32 %211, i32* @ui, align 4
+  %212 = load i8* @uc, align 1
+  %213 = zext i8 %212 to i32
+  %214 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
+  %215 = atomicrmw nand i32* %214, i32 %213 monotonic
+  %216 = xor i32 %215, -1
+  %217 = and i32 %216, %213
+  store i32 %217, i32* @sl, align 4
+  %218 = load i8* @uc, align 1
+  %219 = zext i8 %218 to i32
+  %220 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
+  %221 = atomicrmw nand i32* %220, i32 %219 monotonic
+  %222 = xor i32 %221, -1
+  %223 = and i32 %222, %219
+  store i32 %223, i32* @ul, align 4
+  br label %return
+
+return:  ; preds = %entry
+  ret void
 }
 
 define void @test_compare_and_swap() nounwind {
 entry:
-  load i8* @uc, align 1  ; <i8>:0 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:1 [#uses=1]
-  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 )  ; <i8>:2 [#uses=1]
-  store i8 %2, i8* @sc, align 1
-  load i8* @uc, align 1  ; <i8>:3 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:4 [#uses=1]
-  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 )  ; <i8>:5 [#uses=1]
-  store i8 %5, i8* @uc, align 1
-  load i8* @uc, align 1  ; <i8>:6 [#uses=1]
-  zext i8 %6 to i16  ; <i16>:7 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:8 [#uses=1]
-  sext i8 %8 to i16  ; <i16>:9 [#uses=1]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:10 [#uses=1]
-  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 )  ; <i16>:11 [#uses=1]
-  store i16 %11, i16* @ss, align 2
-  load i8* @uc, align 1  ; <i8>:12 [#uses=1]
-  zext i8 %12 to i16  ; <i16>:13 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:14 [#uses=1]
-  sext i8 %14 to i16  ; <i16>:15 [#uses=1]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:16 [#uses=1]
-  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 )  ; <i16>:17 [#uses=1]
-  store i16 %17, i16* @us, align 2
-  load i8* @uc, align 1  ; <i8>:18 [#uses=1]
-  zext i8 %18 to i32  ; <i32>:19 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:20 [#uses=1]
-  sext i8 %20 to i32  ; <i32>:21 [#uses=1]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:22 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 )  ; <i32>:23 [#uses=1]
-  store i32 %23, i32* @si, align 4
-  load i8* @uc, align 1  ; <i8>:24 [#uses=1]
-  zext i8 %24 to i32  ; <i32>:25 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:26 [#uses=1]
-  sext i8 %26 to i32  ; <i32>:27 [#uses=1]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:28 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 )  ; <i32>:29 [#uses=1]
-  store i32 %29, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:30 [#uses=1]
-  zext i8 %30 to i32  ; <i32>:31 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:32 [#uses=1]
-  sext i8 %32 to i32  ; <i32>:33 [#uses=1]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:34 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %34, i32 %31, i32 %33 )  ; <i32>:35 [#uses=1]
-  store i32 %35, i32* @sl, align 4
-  load i8* @uc, align 1  ; <i8>:36 [#uses=1]
-  zext i8 %36 to i32  ; <i32>:37 [#uses=1]
-  load i8* @sc, align 1  ; <i8>:38 [#uses=1]
-  sext i8 %38 to i32  ; <i32>:39 [#uses=1]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:40 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %40, i32 %37, i32 %39 )  ; <i32>:41 [#uses=1]
-  store i32 %41, i32* @ul, align 4
-  load i8* @uc, align 1  ; <i8>:42 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:43 [#uses=1]
-  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 )  ; <i8>:44 [#uses=1]
-  icmp eq i8 %44, %42  ; <i1>:45 [#uses=1]
-  zext i1 %45 to i32  ; <i32>:46 [#uses=1]
-  store i32 %46, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:47 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:48 [#uses=1]
-  call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %47, i8 %48 )  ; <i8>:49 [#uses=1]
-  icmp eq i8 %49, %47  ; <i1>:50 [#uses=1]
-  zext i1 %50 to i32  ; <i32>:51 [#uses=1]
-  store i32 %51, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:52 [#uses=1]
-  zext i8 %52 to i16  ; <i16>:53 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:54 [#uses=1]
-  sext i8 %54 to i16  ; <i16>:55 [#uses=1]
-  bitcast i8* bitcast (i16* @ss to i8*) to i16*  ; <i16*>:56 [#uses=1]
-  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %56, i16 %53, i16 %55 )  ; <i16>:57 [#uses=1]
-  icmp eq i16 %57, %53  ; <i1>:58 [#uses=1]
-  zext i1 %58 to i32  ; <i32>:59 [#uses=1]
-  store i32 %59, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:60 [#uses=1]
-  zext i8 %60 to i16  ; <i16>:61 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:62 [#uses=1]
-  sext i8 %62 to i16  ; <i16>:63 [#uses=1]
-  bitcast i8* bitcast (i16* @us to i8*) to i16*  ; <i16*>:64 [#uses=1]
-  call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %64, i16 %61, i16 %63 )  ; <i16>:65 [#uses=1]
-  icmp eq i16 %65, %61  ; <i1>:66 [#uses=1]
-  zext i1 %66 to i32  ; <i32>:67 [#uses=1]
-  store i32 %67, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:68 [#uses=1]
-  zext i8 %68 to i32  ; <i32>:69 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:70 [#uses=1]
-  sext i8 %70 to i32  ; <i32>:71 [#uses=1]
-  bitcast i8* bitcast (i32* @si to i8*) to i32*  ; <i32*>:72 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %72, i32 %69, i32 %71 )  ; <i32>:73 [#uses=1]
-  icmp eq i32 %73, %69  ; <i1>:74 [#uses=1]
-  zext i1 %74 to i32  ; <i32>:75 [#uses=1]
-  store i32 %75, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:76 [#uses=1]
-  zext i8 %76 to i32  ; <i32>:77 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:78 [#uses=1]
-  sext i8 %78 to i32  ; <i32>:79 [#uses=1]
-  bitcast i8* bitcast (i32* @ui to i8*) to i32*  ; <i32*>:80 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %80, i32 %77, i32 %79 )  ; <i32>:81 [#uses=1]
-  icmp eq i32 %81, %77  ; <i1>:82 [#uses=1]
-  zext i1 %82 to i32  ; <i32>:83 [#uses=1]
-  store i32 %83, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:84 [#uses=1]
-  zext i8 %84 to i32  ; <i32>:85 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:86 [#uses=1]
-  sext i8 %86 to i32  ; <i32>:87 [#uses=1]
-  bitcast i8* bitcast (i32* @sl to i8*) to i32*  ; <i32*>:88 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %88, i32 %85, i32 %87 )  ; <i32>:89 [#uses=1]
-  icmp eq i32 %89, %85  ; <i1>:90 [#uses=1]
-  zext i1 %90 to i32  ; <i32>:91 [#uses=1]
-  store i32 %91, i32* @ui, align 4
-  load i8* @uc, align 1  ; <i8>:92 [#uses=1]
-  zext i8 %92 to i32  ; <i32>:93 [#uses=2]
-  load i8* @sc, align 1  ; <i8>:94 [#uses=1]
-  sext i8 %94 to i32  ; <i32>:95 [#uses=1]
-  bitcast i8* bitcast (i32* @ul to i8*) to i32*  ; <i32*>:96 [#uses=1]
-  call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %96, i32 %93, i32 %95 )  ; <i32>:97 [#uses=1]
-  icmp eq i32 %97, %93  ; <i1>:98 [#uses=1]
-  zext i1 %98 to i32  ; <i32>:99 [#uses=1]
-  store i32 %99, i32* @ui, align 4
-  br label %return
-
-return:  ; preds = %entry
-  ret void
529 %0 = load i8* @uc, align 1
530 %1 = load i8* @sc, align 1
531 %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
532 store i8 %2, i8* @sc, align 1
533 %3 = load i8* @uc, align 1
534 %4 = load i8* @sc, align 1
535 %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
536 store i8 %5, i8* @uc, align 1
537 %6 = load i8* @uc, align 1
538 %7 = zext i8 %6 to i16
539 %8 = load i8* @sc, align 1
540 %9 = sext i8 %8 to i16
541 %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
542 %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
543 store i16 %11, i16* @ss, align 2
544 %12 = load i8* @uc, align 1
545 %13 = zext i8 %12 to i16
546 %14 = load i8* @sc, align 1
547 %15 = sext i8 %14 to i16
548 %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
549 %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
550 store i16 %17, i16* @us, align 2
551 %18 = load i8* @uc, align 1
552 %19 = zext i8 %18 to i32
553 %20 = load i8* @sc, align 1
554 %21 = sext i8 %20 to i32
555 %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
556 %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
557 store i32 %23, i32* @si, align 4
558 %24 = load i8* @uc, align 1
559 %25 = zext i8 %24 to i32
560 %26 = load i8* @sc, align 1
561 %27 = sext i8 %26 to i32
562 %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
563 %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
564 store i32 %29, i32* @ui, align 4
565 %30 = load i8* @uc, align 1
566 %31 = zext i8 %30 to i32
567 %32 = load i8* @sc, align 1
568 %33 = sext i8 %32 to i32
569 %34 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
570 %35 = cmpxchg i32* %34, i32 %31, i32 %33 monotonic
571 store i32 %35, i32* @sl, align 4
572 %36 = load i8* @uc, align 1
573 %37 = zext i8 %36 to i32
574 %38 = load i8* @sc, align 1
575 %39 = sext i8 %38 to i32
576 %40 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
577 %41 = cmpxchg i32* %40, i32 %37, i32 %39 monotonic
578 store i32 %41, i32* @ul, align 4
579 %42 = load i8* @uc, align 1
580 %43 = load i8* @sc, align 1
581 %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
582 %45 = icmp eq i8 %44, %42
583 %46 = zext i1 %45 to i32
584 store i32 %46, i32* @ui, align 4
585 %47 = load i8* @uc, align 1
586 %48 = load i8* @sc, align 1
587 %49 = cmpxchg i8* @uc, i8 %47, i8 %48 monotonic
588 %50 = icmp eq i8 %49, %47
589 %51 = zext i1 %50 to i32
590 store i32 %51, i32* @ui, align 4
591 %52 = load i8* @uc, align 1
592 %53 = zext i8 %52 to i16
593 %54 = load i8* @sc, align 1
594 %55 = sext i8 %54 to i16
595 %56 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
596 %57 = cmpxchg i16* %56, i16 %53, i16 %55 monotonic
597 %58 = icmp eq i16 %57, %53
598 %59 = zext i1 %58 to i32
599 store i32 %59, i32* @ui, align 4
600 %60 = load i8* @uc, align 1
601 %61 = zext i8 %60 to i16
602 %62 = load i8* @sc, align 1
603 %63 = sext i8 %62 to i16
604 %64 = bitcast i8* bitcast (i16* @us to i8*) to i16*
605 %65 = cmpxchg i16* %64, i16 %61, i16 %63 monotonic
606 %66 = icmp eq i16 %65, %61
607 %67 = zext i1 %66 to i32
608 store i32 %67, i32* @ui, align 4
609 %68 = load i8* @uc, align 1
610 %69 = zext i8 %68 to i32
611 %70 = load i8* @sc, align 1
612 %71 = sext i8 %70 to i32
613 %72 = bitcast i8* bitcast (i32* @si to i8*) to i32*
614 %73 = cmpxchg i32* %72, i32 %69, i32 %71 monotonic
615 %74 = icmp eq i32 %73, %69
616 %75 = zext i1 %74 to i32
617 store i32 %75, i32* @ui, align 4
618 %76 = load i8* @uc, align 1
619 %77 = zext i8 %76 to i32
620 %78 = load i8* @sc, align 1
621 %79 = sext i8 %78 to i32
622 %80 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
623 %81 = cmpxchg i32* %80, i32 %77, i32 %79 monotonic
624 %82 = icmp eq i32 %81, %77
625 %83 = zext i1 %82 to i32
626 store i32 %83, i32* @ui, align 4
627 %84 = load i8* @uc, align 1
628 %85 = zext i8 %84 to i32
629 %86 = load i8* @sc, align 1
630 %87 = sext i8 %86 to i32
631 %88 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
632 %89 = cmpxchg i32* %88, i32 %85, i32 %87 monotonic
633 %90 = icmp eq i32 %89, %85
634 %91 = zext i1 %90 to i32
635 store i32 %91, i32* @ui, align 4
636 %92 = load i8* @uc, align 1
637 %93 = zext i8 %92 to i32
638 %94 = load i8* @sc, align 1
639 %95 = sext i8 %94 to i32
640 %96 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
641 %97 = cmpxchg i32* %96, i32 %93, i32 %95 monotonic
642 %98 = icmp eq i32 %97, %93
643 %99 = zext i1 %98 to i32
644 store i32 %99, i32* @ui, align 4
645 br label %return
646
647 return: ; preds = %entry
648 ret void
685649 }
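The hunk above shows the mechanical shape of the conversion: each @llvm.atomic.cmp.swap.* call becomes a cmpxchg instruction with monotonic ordering, and since cmpxchg returns the loaded value in this version of the IR, the surrounding store/icmp code is untouched. A minimal sketch of the rewrite, using hypothetical %ptr, %cmp and %new names rather than values from the test:

; before autoupgrade (intrinsic call, returns the prior contents):
;   %old = call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* %ptr, i8 %cmp, i8 %new )
; after autoupgrade (first-class instruction, same result value):
%old = cmpxchg i8* %ptr, i8 %cmp, i8 %new monotonic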
686
687 declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
688
689 declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
690
691 declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
692650
693651 define void @test_lock() nounwind {
694652 entry:
695 call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=1]
696 store i8 %0, i8* @sc, align 1
697 call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=1]
698 store i8 %1, i8* @uc, align 1
699 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
700 call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=1]
701 store i16 %3, i16* @ss, align 2
702 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
703 call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=1]
704 store i16 %5, i16* @us, align 2
705 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
706 call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=1]
707 store i32 %7, i32* @si, align 4
708 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
709 call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=1]
710 store i32 %9, i32* @ui, align 4
711 bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :10 [#uses=1]
712 call i32 @llvm.atomic.swap.i32.p0i32( i32* %10, i32 1 ) ; :11 [#uses=1]
713 store i32 %11, i32* @sl, align 4
714 bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :12 [#uses=1]
715 call i32 @llvm.atomic.swap.i32.p0i32( i32* %12, i32 1 ) ; :13 [#uses=1]
716 store i32 %13, i32* @ul, align 4
717 call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
718 volatile store i8 0, i8* @sc, align 1
719 volatile store i8 0, i8* @uc, align 1
720 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :14 [#uses=1]
721 volatile store i16 0, i16* %14, align 2
722 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :15 [#uses=1]
723 volatile store i16 0, i16* %15, align 2
724 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :16 [#uses=1]
725 volatile store i32 0, i32* %16, align 4
726 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :17 [#uses=1]
727 volatile store i32 0, i32* %17, align 4
728 bitcast i8* bitcast (i32* @sl to i8*) to i32* ; :18 [#uses=1]
729 volatile store i32 0, i32* %18, align 4
730 bitcast i8* bitcast (i32* @ul to i8*) to i32* ; :19 [#uses=1]
731 volatile store i32 0, i32* %19, align 4
732 bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :20 [#uses=1]
733 volatile store i64 0, i64* %20, align 8
734 bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :21 [#uses=1]
735 volatile store i64 0, i64* %21, align 8
736 br label %return
737
738 return: ; preds = %entry
739 ret void
653 %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
654 store i8 %0, i8* @sc, align 1
655 %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
656 store i8 %1, i8* @uc, align 1
657 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
658 %3 = atomicrmw xchg i16* %2, i16 1 monotonic
659 store i16 %3, i16* @ss, align 2
660 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
661 %5 = atomicrmw xchg i16* %4, i16 1 monotonic
662 store i16 %5, i16* @us, align 2
663 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
664 %7 = atomicrmw xchg i32* %6, i32 1 monotonic
665 store i32 %7, i32* @si, align 4
666 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
667 %9 = atomicrmw xchg i32* %8, i32 1 monotonic
668 store i32 %9, i32* @ui, align 4
669 %10 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
670 %11 = atomicrmw xchg i32* %10, i32 1 monotonic
671 store i32 %11, i32* @sl, align 4
672 %12 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
673 %13 = atomicrmw xchg i32* %12, i32 1 monotonic
674 store i32 %13, i32* @ul, align 4
675 fence seq_cst
676 store volatile i8 0, i8* @sc, align 1
677 store volatile i8 0, i8* @uc, align 1
678 %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
679 store volatile i16 0, i16* %14, align 2
680 %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
681 store volatile i16 0, i16* %15, align 2
682 %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
683 store volatile i32 0, i32* %16, align 4
684 %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
685 store volatile i32 0, i32* %17, align 4
686 %18 = bitcast i8* bitcast (i32* @sl to i8*) to i32*
687 store volatile i32 0, i32* %18, align 4
688 %19 = bitcast i8* bitcast (i32* @ul to i8*) to i32*
689 store volatile i32 0, i32* %19, align 4
690 %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
691 store volatile i64 0, i64* %20, align 8
692 %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
693 store volatile i64 0, i64* %21, align 8
694 br label %return
695
696 return: ; preds = %entry
697 ret void
740698 }
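Three separate rewrites are visible in test_lock: the lock-test-and-set intrinsic becomes atomicrmw xchg, the five-argument memory barrier collapses to fence seq_cst (the same fence every barrier variant in this diff upgrades to), and "volatile store" is respelled "store volatile", a pure syntax change. Side by side, taken from the hunk above:

; before:
;   %0 = call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 )
;   call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
;   volatile store i8 0, i8* @sc, align 1
; after:
%0 = atomicrmw xchg i8* @sc, i8 1 monotonic
fence seq_cst
store volatile i8 0, i8* @sc, align 1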
741
742 declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
743
744 declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
745
746 declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
747
748 declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
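From here on the diff repeats the same conversion for the 64-bit variant of the test: the datalayout and triple below switch to powerpc64-apple-darwin9, @sl and @ul are i64 rather than i32, and each group of operations therefore also gets i64 coverage, upgraded the same way, e.g.:

%11 = atomicrmw add i64* %10, i64 1 monotonic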
77
88 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
99 target triple = "powerpc64-apple-darwin9"
10 @sc = common global i8 0 ; [#uses=52]
11 @uc = common global i8 0 ; [#uses=100]
12 @ss = common global i16 0 ; [#uses=15]
13 @us = common global i16 0 ; [#uses=15]
14 @si = common global i32 0 ; [#uses=15]
15 @ui = common global i32 0 ; [#uses=23]
16 @sl = common global i64 0, align 8 ; [#uses=15]
17 @ul = common global i64 0, align 8 ; [#uses=15]
18 @sll = common global i64 0, align 8 ; [#uses=1]
19 @ull = common global i64 0, align 8 ; [#uses=1]
10
11 @sc = common global i8 0
12 @uc = common global i8 0
13 @ss = common global i16 0
14 @us = common global i16 0
15 @si = common global i32 0
16 @ui = common global i32 0
17 @sl = common global i64 0, align 8
18 @ul = common global i64 0, align 8
19 @sll = common global i64 0, align 8
20 @ull = common global i64 0, align 8
2021
2122 define void @test_op_ignore() nounwind {
2223 entry:
23 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=0]
24 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=0]
25 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
26 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=0]
27 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
28 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=0]
29 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
30 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=0]
31 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
32 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=0]
33 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1]
34 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 1 ) ; :11 [#uses=0]
35 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :12 [#uses=1]
36 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 1 ) ; :13 [#uses=0]
37 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 1 ) ; :14 [#uses=0]
38 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 1 ) ; :15 [#uses=0]
39 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :16 [#uses=1]
40 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 1 ) ; :17 [#uses=0]
41 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1]
42 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 1 ) ; :19 [#uses=0]
43 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :20 [#uses=1]
44 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 1 ) ; :21 [#uses=0]
45 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :22 [#uses=1]
46 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 1 ) ; :23 [#uses=0]
47 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :24 [#uses=1]
48 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 1 ) ; :25 [#uses=0]
49 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :26 [#uses=1]
50 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 1 ) ; :27 [#uses=0]
51 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 1 ) ; :28 [#uses=0]
52 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 1 ) ; :29 [#uses=0]
53 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :30 [#uses=1]
54 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 1 ) ; :31 [#uses=0]
55 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :32 [#uses=1]
56 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 1 ) ; :33 [#uses=0]
57 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :34 [#uses=1]
58 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 1 ) ; :35 [#uses=0]
59 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :36 [#uses=1]
60 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 1 ) ; :37 [#uses=0]
61 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :38 [#uses=1]
62 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 1 ) ; :39 [#uses=0]
63 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :40 [#uses=1]
64 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 1 ) ; :41 [#uses=0]
65 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 1 ) ; :42 [#uses=0]
66 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 1 ) ; :43 [#uses=0]
67 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :44 [#uses=1]
68 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 1 ) ; :45 [#uses=0]
69 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :46 [#uses=1]
70 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 1 ) ; :47 [#uses=0]
71 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :48 [#uses=1]
72 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 1 ) ; :49 [#uses=0]
73 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :50 [#uses=1]
74 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 1 ) ; :51 [#uses=0]
75 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :52 [#uses=1]
76 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 1 ) ; :53 [#uses=0]
77 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :54 [#uses=1]
78 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 1 ) ; :55 [#uses=0]
79 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 1 ) ; :56 [#uses=0]
80 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 1 ) ; :57 [#uses=0]
81 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :58 [#uses=1]
82 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 1 ) ; :59 [#uses=0]
83 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :60 [#uses=1]
84 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 1 ) ; :61 [#uses=0]
85 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :62 [#uses=1]
86 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 1 ) ; :63 [#uses=0]
87 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :64 [#uses=1]
88 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 1 ) ; :65 [#uses=0]
89 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :66 [#uses=1]
90 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 1 ) ; :67 [#uses=0]
91 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :68 [#uses=1]
92 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 1 ) ; :69 [#uses=0]
93 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 ) ; :70 [#uses=0]
94 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 1 ) ; :71 [#uses=0]
95 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :72 [#uses=1]
96 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 1 ) ; :73 [#uses=0]
97 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :74 [#uses=1]
98 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 1 ) ; :75 [#uses=0]
99 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1]
100 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 1 ) ; :77 [#uses=0]
101 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :78 [#uses=1]
102 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 1 ) ; :79 [#uses=0]
103 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :80 [#uses=1]
104 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 1 ) ; :81 [#uses=0]
105 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :82 [#uses=1]
106 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 1 ) ; :83 [#uses=0]
107 br label %return
108
109 return: ; preds = %entry
110 ret void
24 %0 = atomicrmw add i8* @sc, i8 1 monotonic
25 %1 = atomicrmw add i8* @uc, i8 1 monotonic
26 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
27 %3 = atomicrmw add i16* %2, i16 1 monotonic
28 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
29 %5 = atomicrmw add i16* %4, i16 1 monotonic
30 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
31 %7 = atomicrmw add i32* %6, i32 1 monotonic
32 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
33 %9 = atomicrmw add i32* %8, i32 1 monotonic
34 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
35 %11 = atomicrmw add i64* %10, i64 1 monotonic
36 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
37 %13 = atomicrmw add i64* %12, i64 1 monotonic
38 %14 = atomicrmw sub i8* @sc, i8 1 monotonic
39 %15 = atomicrmw sub i8* @uc, i8 1 monotonic
40 %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
41 %17 = atomicrmw sub i16* %16, i16 1 monotonic
42 %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
43 %19 = atomicrmw sub i16* %18, i16 1 monotonic
44 %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
45 %21 = atomicrmw sub i32* %20, i32 1 monotonic
46 %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
47 %23 = atomicrmw sub i32* %22, i32 1 monotonic
48 %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
49 %25 = atomicrmw sub i64* %24, i64 1 monotonic
50 %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
51 %27 = atomicrmw sub i64* %26, i64 1 monotonic
52 %28 = atomicrmw or i8* @sc, i8 1 monotonic
53 %29 = atomicrmw or i8* @uc, i8 1 monotonic
54 %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
55 %31 = atomicrmw or i16* %30, i16 1 monotonic
56 %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
57 %33 = atomicrmw or i16* %32, i16 1 monotonic
58 %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
59 %35 = atomicrmw or i32* %34, i32 1 monotonic
60 %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
61 %37 = atomicrmw or i32* %36, i32 1 monotonic
62 %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
63 %39 = atomicrmw or i64* %38, i64 1 monotonic
64 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
65 %41 = atomicrmw or i64* %40, i64 1 monotonic
66 %42 = atomicrmw xor i8* @sc, i8 1 monotonic
67 %43 = atomicrmw xor i8* @uc, i8 1 monotonic
68 %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
69 %45 = atomicrmw xor i16* %44, i16 1 monotonic
70 %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
71 %47 = atomicrmw xor i16* %46, i16 1 monotonic
72 %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
73 %49 = atomicrmw xor i32* %48, i32 1 monotonic
74 %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
75 %51 = atomicrmw xor i32* %50, i32 1 monotonic
76 %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
77 %53 = atomicrmw xor i64* %52, i64 1 monotonic
78 %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
79 %55 = atomicrmw xor i64* %54, i64 1 monotonic
80 %56 = atomicrmw and i8* @sc, i8 1 monotonic
81 %57 = atomicrmw and i8* @uc, i8 1 monotonic
82 %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
83 %59 = atomicrmw and i16* %58, i16 1 monotonic
84 %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
85 %61 = atomicrmw and i16* %60, i16 1 monotonic
86 %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
87 %63 = atomicrmw and i32* %62, i32 1 monotonic
88 %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
89 %65 = atomicrmw and i32* %64, i32 1 monotonic
90 %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
91 %67 = atomicrmw and i64* %66, i64 1 monotonic
92 %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
93 %69 = atomicrmw and i64* %68, i64 1 monotonic
94 %70 = atomicrmw nand i8* @sc, i8 1 monotonic
95 %71 = atomicrmw nand i8* @uc, i8 1 monotonic
96 %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
97 %73 = atomicrmw nand i16* %72, i16 1 monotonic
98 %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
99 %75 = atomicrmw nand i16* %74, i16 1 monotonic
100 %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
101 %77 = atomicrmw nand i32* %76, i32 1 monotonic
102 %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
103 %79 = atomicrmw nand i32* %78, i32 1 monotonic
104 %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
105 %81 = atomicrmw nand i64* %80, i64 1 monotonic
106 %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
107 %83 = atomicrmw nand i64* %82, i64 1 monotonic
108 br label %return
109
110 return: ; preds = %entry
111 ret void
111112 }
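test_op_ignore exercises every operation the old load.* intrinsics covered — add, sub, or, xor, and, nand — at i8, i16, i32 and i64 widths, discarding the result. The mapping is one-to-one: @llvm.atomic.load.OP.iN becomes an "atomicrmw OP ... monotonic" instruction that still returns the value memory held before the operation. For example, from the hunk above:

; before:
;   %70 = call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 1 )
; after:
%70 = atomicrmw nand i8* @sc, i8 1 monotonic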
112
113 declare i8 @llvm.atomic.load.add.i8.p0i8(i8*, i8) nounwind
114
115 declare i16 @llvm.atomic.load.add.i16.p0i16(i16*, i16) nounwind
116
117 declare i32 @llvm.atomic.load.add.i32.p0i32(i32*, i32) nounwind
118
119 declare i64 @llvm.atomic.load.add.i64.p0i64(i64*, i64) nounwind
120
121 declare i8 @llvm.atomic.load.sub.i8.p0i8(i8*, i8) nounwind
122
123 declare i16 @llvm.atomic.load.sub.i16.p0i16(i16*, i16) nounwind
124
125 declare i32 @llvm.atomic.load.sub.i32.p0i32(i32*, i32) nounwind
126
127 declare i64 @llvm.atomic.load.sub.i64.p0i64(i64*, i64) nounwind
128
129 declare i8 @llvm.atomic.load.or.i8.p0i8(i8*, i8) nounwind
130
131 declare i16 @llvm.atomic.load.or.i16.p0i16(i16*, i16) nounwind
132
133 declare i32 @llvm.atomic.load.or.i32.p0i32(i32*, i32) nounwind
134
135 declare i64 @llvm.atomic.load.or.i64.p0i64(i64*, i64) nounwind
136
137 declare i8 @llvm.atomic.load.xor.i8.p0i8(i8*, i8) nounwind
138
139 declare i16 @llvm.atomic.load.xor.i16.p0i16(i16*, i16) nounwind
140
141 declare i32 @llvm.atomic.load.xor.i32.p0i32(i32*, i32) nounwind
142
143 declare i64 @llvm.atomic.load.xor.i64.p0i64(i64*, i64) nounwind
144
145 declare i8 @llvm.atomic.load.and.i8.p0i8(i8*, i8) nounwind
146
147 declare i16 @llvm.atomic.load.and.i16.p0i16(i16*, i16) nounwind
148
149 declare i32 @llvm.atomic.load.and.i32.p0i32(i32*, i32) nounwind
150
151 declare i64 @llvm.atomic.load.and.i64.p0i64(i64*, i64) nounwind
152
153 declare i8 @llvm.atomic.load.nand.i8.p0i8(i8*, i8) nounwind
154
155 declare i16 @llvm.atomic.load.nand.i16.p0i16(i16*, i16) nounwind
156
157 declare i32 @llvm.atomic.load.nand.i32.p0i32(i32*, i32) nounwind
158
159 declare i64 @llvm.atomic.load.nand.i64.p0i64(i64*, i64) nounwind
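With the calls gone, the block of declarations above has nothing left to reference: atomicrmw, cmpxchg and fence are instructions rather than intrinsics, so the upgraded file needs no declares at all.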
160113
161114 define void @test_fetch_and_op() nounwind {
162115 entry:
163 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 11 ) ; :0 [#uses=1]
164 store i8 %0, i8* @sc, align 1
165 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 11 ) ; :1 [#uses=1]
166 store i8 %1, i8* @uc, align 1
167 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
168 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %2, i16 11 ) ; :3 [#uses=1]
169 store i16 %3, i16* @ss, align 2
170 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
171 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %4, i16 11 ) ; :5 [#uses=1]
172 store i16 %5, i16* @us, align 2
173 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
174 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %6, i32 11 ) ; :7 [#uses=1]
175 store i32 %7, i32* @si, align 4
176 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
177 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %8, i32 11 ) ; :9 [#uses=1]
178 store i32 %9, i32* @ui, align 4
179 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1]
180 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %10, i64 11 ) ; :11 [#uses=1]
181 store i64 %11, i64* @sl, align 8
182 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :12 [#uses=1]
183 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %12, i64 11 ) ; :13 [#uses=1]
184 store i64 %13, i64* @ul, align 8
185 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 11 ) ; :14 [#uses=1]
186 store i8 %14, i8* @sc, align 1
187 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 11 ) ; :15 [#uses=1]
188 store i8 %15, i8* @uc, align 1
189 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :16 [#uses=1]
190 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %16, i16 11 ) ; :17 [#uses=1]
191 store i16 %17, i16* @ss, align 2
192 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :18 [#uses=1]
193 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %18, i16 11 ) ; :19 [#uses=1]
194 store i16 %19, i16* @us, align 2
195 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :20 [#uses=1]
196 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %20, i32 11 ) ; :21 [#uses=1]
197 store i32 %21, i32* @si, align 4
198 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :22 [#uses=1]
199 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %22, i32 11 ) ; :23 [#uses=1]
200 store i32 %23, i32* @ui, align 4
201 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :24 [#uses=1]
202 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %24, i64 11 ) ; :25 [#uses=1]
203 store i64 %25, i64* @sl, align 8
204 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :26 [#uses=1]
205 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %26, i64 11 ) ; :27 [#uses=1]
206 store i64 %27, i64* @ul, align 8
207 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 11 ) ; :28 [#uses=1]
208 store i8 %28, i8* @sc, align 1
209 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 11 ) ; :29 [#uses=1]
210 store i8 %29, i8* @uc, align 1
211 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :30 [#uses=1]
212 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %30, i16 11 ) ; :31 [#uses=1]
213 store i16 %31, i16* @ss, align 2
214 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :32 [#uses=1]
215 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %32, i16 11 ) ; :33 [#uses=1]
216 store i16 %33, i16* @us, align 2
217 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :34 [#uses=1]
218 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %34, i32 11 ) ; :35 [#uses=1]
219 store i32 %35, i32* @si, align 4
220 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :36 [#uses=1]
221 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %36, i32 11 ) ; :37 [#uses=1]
222 store i32 %37, i32* @ui, align 4
223 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :38 [#uses=1]
224 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %38, i64 11 ) ; :39 [#uses=1]
225 store i64 %39, i64* @sl, align 8
226 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :40 [#uses=1]
227 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %40, i64 11 ) ; :41 [#uses=1]
228 store i64 %41, i64* @ul, align 8
229 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 11 ) ; :42 [#uses=1]
230 store i8 %42, i8* @sc, align 1
231 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 11 ) ; :43 [#uses=1]
232 store i8 %43, i8* @uc, align 1
233 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :44 [#uses=1]
234 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %44, i16 11 ) ; :45 [#uses=1]
235 store i16 %45, i16* @ss, align 2
236 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :46 [#uses=1]
237 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %46, i16 11 ) ; :47 [#uses=1]
238 store i16 %47, i16* @us, align 2
239 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :48 [#uses=1]
240 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %48, i32 11 ) ; :49 [#uses=1]
241 store i32 %49, i32* @si, align 4
242 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :50 [#uses=1]
243 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %50, i32 11 ) ; :51 [#uses=1]
244 store i32 %51, i32* @ui, align 4
245 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :52 [#uses=1]
246 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %52, i64 11 ) ; :53 [#uses=1]
247 store i64 %53, i64* @sl, align 8
248 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :54 [#uses=1]
249 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %54, i64 11 ) ; :55 [#uses=1]
250 store i64 %55, i64* @ul, align 8
251 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 11 ) ; :56 [#uses=1]
252 store i8 %56, i8* @sc, align 1
253 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 11 ) ; :57 [#uses=1]
254 store i8 %57, i8* @uc, align 1
255 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :58 [#uses=1]
256 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %58, i16 11 ) ; :59 [#uses=1]
257 store i16 %59, i16* @ss, align 2
258 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :60 [#uses=1]
259 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %60, i16 11 ) ; :61 [#uses=1]
260 store i16 %61, i16* @us, align 2
261 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :62 [#uses=1]
262 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %62, i32 11 ) ; :63 [#uses=1]
263 store i32 %63, i32* @si, align 4
264 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :64 [#uses=1]
265 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %64, i32 11 ) ; :65 [#uses=1]
266 store i32 %65, i32* @ui, align 4
267 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :66 [#uses=1]
268 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %66, i64 11 ) ; :67 [#uses=1]
269 store i64 %67, i64* @sl, align 8
270 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :68 [#uses=1]
271 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %68, i64 11 ) ; :69 [#uses=1]
272 store i64 %69, i64* @ul, align 8
273 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 11 ) ; :70 [#uses=1]
274 store i8 %70, i8* @sc, align 1
275 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 11 ) ; :71 [#uses=1]
276 store i8 %71, i8* @uc, align 1
277 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :72 [#uses=1]
278 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %72, i16 11 ) ; :73 [#uses=1]
279 store i16 %73, i16* @ss, align 2
280 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :74 [#uses=1]
281 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %74, i16 11 ) ; :75 [#uses=1]
282 store i16 %75, i16* @us, align 2
283 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1]
284 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %76, i32 11 ) ; :77 [#uses=1]
285 store i32 %77, i32* @si, align 4
286 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :78 [#uses=1]
287 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %78, i32 11 ) ; :79 [#uses=1]
288 store i32 %79, i32* @ui, align 4
289 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :80 [#uses=1]
290 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %80, i64 11 ) ; :81 [#uses=1]
291 store i64 %81, i64* @sl, align 8
292 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :82 [#uses=1]
293 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %82, i64 11 ) ; :83 [#uses=1]
294 store i64 %83, i64* @ul, align 8
295 br label %return
296
297 return: ; preds = %entry
298 ret void
116 %0 = atomicrmw add i8* @sc, i8 11 monotonic
117 store i8 %0, i8* @sc, align 1
118 %1 = atomicrmw add i8* @uc, i8 11 monotonic
119 store i8 %1, i8* @uc, align 1
120 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
121 %3 = atomicrmw add i16* %2, i16 11 monotonic
122 store i16 %3, i16* @ss, align 2
123 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
124 %5 = atomicrmw add i16* %4, i16 11 monotonic
125 store i16 %5, i16* @us, align 2
126 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
127 %7 = atomicrmw add i32* %6, i32 11 monotonic
128 store i32 %7, i32* @si, align 4
129 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
130 %9 = atomicrmw add i32* %8, i32 11 monotonic
131 store i32 %9, i32* @ui, align 4
132 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
133 %11 = atomicrmw add i64* %10, i64 11 monotonic
134 store i64 %11, i64* @sl, align 8
135 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
136 %13 = atomicrmw add i64* %12, i64 11 monotonic
137 store i64 %13, i64* @ul, align 8
138 %14 = atomicrmw sub i8* @sc, i8 11 monotonic
139 store i8 %14, i8* @sc, align 1
140 %15 = atomicrmw sub i8* @uc, i8 11 monotonic
141 store i8 %15, i8* @uc, align 1
142 %16 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
143 %17 = atomicrmw sub i16* %16, i16 11 monotonic
144 store i16 %17, i16* @ss, align 2
145 %18 = bitcast i8* bitcast (i16* @us to i8*) to i16*
146 %19 = atomicrmw sub i16* %18, i16 11 monotonic
147 store i16 %19, i16* @us, align 2
148 %20 = bitcast i8* bitcast (i32* @si to i8*) to i32*
149 %21 = atomicrmw sub i32* %20, i32 11 monotonic
150 store i32 %21, i32* @si, align 4
151 %22 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
152 %23 = atomicrmw sub i32* %22, i32 11 monotonic
153 store i32 %23, i32* @ui, align 4
154 %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
155 %25 = atomicrmw sub i64* %24, i64 11 monotonic
156 store i64 %25, i64* @sl, align 8
157 %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
158 %27 = atomicrmw sub i64* %26, i64 11 monotonic
159 store i64 %27, i64* @ul, align 8
160 %28 = atomicrmw or i8* @sc, i8 11 monotonic
161 store i8 %28, i8* @sc, align 1
162 %29 = atomicrmw or i8* @uc, i8 11 monotonic
163 store i8 %29, i8* @uc, align 1
164 %30 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
165 %31 = atomicrmw or i16* %30, i16 11 monotonic
166 store i16 %31, i16* @ss, align 2
167 %32 = bitcast i8* bitcast (i16* @us to i8*) to i16*
168 %33 = atomicrmw or i16* %32, i16 11 monotonic
169 store i16 %33, i16* @us, align 2
170 %34 = bitcast i8* bitcast (i32* @si to i8*) to i32*
171 %35 = atomicrmw or i32* %34, i32 11 monotonic
172 store i32 %35, i32* @si, align 4
173 %36 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
174 %37 = atomicrmw or i32* %36, i32 11 monotonic
175 store i32 %37, i32* @ui, align 4
176 %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
177 %39 = atomicrmw or i64* %38, i64 11 monotonic
178 store i64 %39, i64* @sl, align 8
179 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
180 %41 = atomicrmw or i64* %40, i64 11 monotonic
181 store i64 %41, i64* @ul, align 8
182 %42 = atomicrmw xor i8* @sc, i8 11 monotonic
183 store i8 %42, i8* @sc, align 1
184 %43 = atomicrmw xor i8* @uc, i8 11 monotonic
185 store i8 %43, i8* @uc, align 1
186 %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
187 %45 = atomicrmw xor i16* %44, i16 11 monotonic
188 store i16 %45, i16* @ss, align 2
189 %46 = bitcast i8* bitcast (i16* @us to i8*) to i16*
190 %47 = atomicrmw xor i16* %46, i16 11 monotonic
191 store i16 %47, i16* @us, align 2
192 %48 = bitcast i8* bitcast (i32* @si to i8*) to i32*
193 %49 = atomicrmw xor i32* %48, i32 11 monotonic
194 store i32 %49, i32* @si, align 4
195 %50 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
196 %51 = atomicrmw xor i32* %50, i32 11 monotonic
197 store i32 %51, i32* @ui, align 4
198 %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
199 %53 = atomicrmw xor i64* %52, i64 11 monotonic
200 store i64 %53, i64* @sl, align 8
201 %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
202 %55 = atomicrmw xor i64* %54, i64 11 monotonic
203 store i64 %55, i64* @ul, align 8
204 %56 = atomicrmw and i8* @sc, i8 11 monotonic
205 store i8 %56, i8* @sc, align 1
206 %57 = atomicrmw and i8* @uc, i8 11 monotonic
207 store i8 %57, i8* @uc, align 1
208 %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
209 %59 = atomicrmw and i16* %58, i16 11 monotonic
210 store i16 %59, i16* @ss, align 2
211 %60 = bitcast i8* bitcast (i16* @us to i8*) to i16*
212 %61 = atomicrmw and i16* %60, i16 11 monotonic
213 store i16 %61, i16* @us, align 2
214 %62 = bitcast i8* bitcast (i32* @si to i8*) to i32*
215 %63 = atomicrmw and i32* %62, i32 11 monotonic
216 store i32 %63, i32* @si, align 4
217 %64 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
218 %65 = atomicrmw and i32* %64, i32 11 monotonic
219 store i32 %65, i32* @ui, align 4
220 %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
221 %67 = atomicrmw and i64* %66, i64 11 monotonic
222 store i64 %67, i64* @sl, align 8
223 %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
224 %69 = atomicrmw and i64* %68, i64 11 monotonic
225 store i64 %69, i64* @ul, align 8
226 %70 = atomicrmw nand i8* @sc, i8 11 monotonic
227 store i8 %70, i8* @sc, align 1
228 %71 = atomicrmw nand i8* @uc, i8 11 monotonic
229 store i8 %71, i8* @uc, align 1
230 %72 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
231 %73 = atomicrmw nand i16* %72, i16 11 monotonic
232 store i16 %73, i16* @ss, align 2
233 %74 = bitcast i8* bitcast (i16* @us to i8*) to i16*
234 %75 = atomicrmw nand i16* %74, i16 11 monotonic
235 store i16 %75, i16* @us, align 2
236 %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
237 %77 = atomicrmw nand i32* %76, i32 11 monotonic
238 store i32 %77, i32* @si, align 4
239 %78 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
240 %79 = atomicrmw nand i32* %78, i32 11 monotonic
241 store i32 %79, i32* @ui, align 4
242 %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
243 %81 = atomicrmw nand i64* %80, i64 11 monotonic
244 store i64 %81, i64* @sl, align 8
245 %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
246 %83 = atomicrmw nand i64* %82, i64 11 monotonic
247 store i64 %83, i64* @ul, align 8
248 br label %return
249
250 return: ; preds = %entry
251 ret void
299252 }
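test_fetch_and_op and test_op_and_fetch differ only in what they store back: atomicrmw always returns the prior contents, so the fetch-and-op form above stores that result directly, while the op-and-fetch form below replays the operation on the returned value to recover the new contents. A sketch of the two patterns, with %v standing in for a loaded operand (hypothetical name, not from the test):

; fetch_and_op: old value stored as-is
%0 = atomicrmw add i8* @sc, i8 11 monotonic
store i8 %0, i8* @sc, align 1
; op_and_fetch: operation replayed to rebuild the new value
%1 = atomicrmw add i8* @sc, i8 %v monotonic
%2 = add i8 %1, %v
store i8 %2, i8* @sc, align 1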
300253
301254 define void @test_op_and_fetch() nounwind {
302255 entry:
303 load i8* @uc, align 1 ; :0 [#uses=2]
304 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @sc, i8 %0 ) ; :1 [#uses=1]
305 add i8 %1, %0 ; :2 [#uses=1]
306 store i8 %2, i8* @sc, align 1
307 load i8* @uc, align 1 ; :3 [#uses=2]
308 call i8 @llvm.atomic.load.add.i8.p0i8( i8* @uc, i8 %3 ) ; :4 [#uses=1]
309 add i8 %4, %3 ; :5 [#uses=1]
310 store i8 %5, i8* @uc, align 1
311 load i8* @uc, align 1 ; :6 [#uses=1]
312 zext i8 %6 to i16 ; :7 [#uses=2]
313 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :8 [#uses=1]
314 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %8, i16 %7 ) ; :9 [#uses=1]
315 add i16 %9, %7 ; :10 [#uses=1]
316 store i16 %10, i16* @ss, align 2
317 load i8* @uc, align 1 ; :11 [#uses=1]
318 zext i8 %11 to i16 ; :12 [#uses=2]
319 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :13 [#uses=1]
320 call i16 @llvm.atomic.load.add.i16.p0i16( i16* %13, i16 %12 ) ; :14 [#uses=1]
321 add i16 %14, %12 ; :15 [#uses=1]
322 store i16 %15, i16* @us, align 2
323 load i8* @uc, align 1 ; :16 [#uses=1]
324 zext i8 %16 to i32 ; :17 [#uses=2]
325 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :18 [#uses=1]
326 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %18, i32 %17 ) ; :19 [#uses=1]
327 add i32 %19, %17 ; :20 [#uses=1]
328 store i32 %20, i32* @si, align 4
329 load i8* @uc, align 1 ; :21 [#uses=1]
330 zext i8 %21 to i32 ; :22 [#uses=2]
331 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :23 [#uses=1]
332 call i32 @llvm.atomic.load.add.i32.p0i32( i32* %23, i32 %22 ) ; :24 [#uses=1]
333 add i32 %24, %22 ; :25 [#uses=1]
334 store i32 %25, i32* @ui, align 4
335 load i8* @uc, align 1 ; :26 [#uses=1]
336 zext i8 %26 to i64 ; :27 [#uses=2]
337 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :28 [#uses=1]
338 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %28, i64 %27 ) ; :29 [#uses=1]
339 add i64 %29, %27 ; :30 [#uses=1]
340 store i64 %30, i64* @sl, align 8
341 load i8* @uc, align 1 ; :31 [#uses=1]
342 zext i8 %31 to i64 ; :32 [#uses=2]
343 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :33 [#uses=1]
344 call i64 @llvm.atomic.load.add.i64.p0i64( i64* %33, i64 %32 ) ; :34 [#uses=1]
345 add i64 %34, %32 ; :35 [#uses=1]
346 store i64 %35, i64* @ul, align 8
347 load i8* @uc, align 1 ; :36 [#uses=2]
348 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @sc, i8 %36 ) ; :37 [#uses=1]
349 sub i8 %37, %36 ; :38 [#uses=1]
350 store i8 %38, i8* @sc, align 1
351 load i8* @uc, align 1 ; :39 [#uses=2]
352 call i8 @llvm.atomic.load.sub.i8.p0i8( i8* @uc, i8 %39 ) ; :40 [#uses=1]
353 sub i8 %40, %39 ; :41 [#uses=1]
354 store i8 %41, i8* @uc, align 1
355 load i8* @uc, align 1 ; :42 [#uses=1]
356 zext i8 %42 to i16 ; :43 [#uses=2]
357 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :44 [#uses=1]
358 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %44, i16 %43 ) ; :45 [#uses=1]
359 sub i16 %45, %43 ; :46 [#uses=1]
360 store i16 %46, i16* @ss, align 2
361 load i8* @uc, align 1 ; :47 [#uses=1]
362 zext i8 %47 to i16 ; :48 [#uses=2]
363 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :49 [#uses=1]
364 call i16 @llvm.atomic.load.sub.i16.p0i16( i16* %49, i16 %48 ) ; :50 [#uses=1]
365 sub i16 %50, %48 ; :51 [#uses=1]
366 store i16 %51, i16* @us, align 2
367 load i8* @uc, align 1 ; :52 [#uses=1]
368 zext i8 %52 to i32 ; :53 [#uses=2]
369 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :54 [#uses=1]
370 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %54, i32 %53 ) ; :55 [#uses=1]
371 sub i32 %55, %53 ; :56 [#uses=1]
372 store i32 %56, i32* @si, align 4
373 load i8* @uc, align 1 ; :57 [#uses=1]
374 zext i8 %57 to i32 ; :58 [#uses=2]
375 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :59 [#uses=1]
376 call i32 @llvm.atomic.load.sub.i32.p0i32( i32* %59, i32 %58 ) ; :60 [#uses=1]
377 sub i32 %60, %58 ; :61 [#uses=1]
378 store i32 %61, i32* @ui, align 4
379 load i8* @uc, align 1 ; :62 [#uses=1]
380 zext i8 %62 to i64 ; :63 [#uses=2]
381 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :64 [#uses=1]
382 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %64, i64 %63 ) ; :65 [#uses=1]
383 sub i64 %65, %63 ; :66 [#uses=1]
384 store i64 %66, i64* @sl, align 8
385 load i8* @uc, align 1 ; :67 [#uses=1]
386 zext i8 %67 to i64 ; :68 [#uses=2]
387 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :69 [#uses=1]
388 call i64 @llvm.atomic.load.sub.i64.p0i64( i64* %69, i64 %68 ) ; :70 [#uses=1]
389 sub i64 %70, %68 ; :71 [#uses=1]
390 store i64 %71, i64* @ul, align 8
391 load i8* @uc, align 1 ; :72 [#uses=2]
392 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @sc, i8 %72 ) ; :73 [#uses=1]
393 or i8 %73, %72 ; :74 [#uses=1]
394 store i8 %74, i8* @sc, align 1
395 load i8* @uc, align 1 ; :75 [#uses=2]
396 call i8 @llvm.atomic.load.or.i8.p0i8( i8* @uc, i8 %75 ) ; :76 [#uses=1]
397 or i8 %76, %75 ; :77 [#uses=1]
398 store i8 %77, i8* @uc, align 1
399 load i8* @uc, align 1 ; :78 [#uses=1]
400 zext i8 %78 to i16 ; :79 [#uses=2]
401 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :80 [#uses=1]
402 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %80, i16 %79 ) ; :81 [#uses=1]
403 or i16 %81, %79 ; :82 [#uses=1]
404 store i16 %82, i16* @ss, align 2
405 load i8* @uc, align 1 ; :83 [#uses=1]
406 zext i8 %83 to i16 ; :84 [#uses=2]
407 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :85 [#uses=1]
408 call i16 @llvm.atomic.load.or.i16.p0i16( i16* %85, i16 %84 ) ; :86 [#uses=1]
409 or i16 %86, %84 ; :87 [#uses=1]
410 store i16 %87, i16* @us, align 2
411 load i8* @uc, align 1 ; :88 [#uses=1]
412 zext i8 %88 to i32 ; :89 [#uses=2]
413 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :90 [#uses=1]
414 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %90, i32 %89 ) ; :91 [#uses=1]
415 or i32 %91, %89 ; :92 [#uses=1]
416 store i32 %92, i32* @si, align 4
417 load i8* @uc, align 1 ; :93 [#uses=1]
418 zext i8 %93 to i32 ; :94 [#uses=2]
419 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :95 [#uses=1]
420 call i32 @llvm.atomic.load.or.i32.p0i32( i32* %95, i32 %94 ) ; :96 [#uses=1]
421 or i32 %96, %94 ; :97 [#uses=1]
422 store i32 %97, i32* @ui, align 4
423 load i8* @uc, align 1 ; :98 [#uses=1]
424 zext i8 %98 to i64 ; :99 [#uses=2]
425 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :100 [#uses=1]
426 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %100, i64 %99 ) ; :101 [#uses=1]
427 or i64 %101, %99 ; :102 [#uses=1]
428 store i64 %102, i64* @sl, align 8
429 load i8* @uc, align 1 ; :103 [#uses=1]
430 zext i8 %103 to i64 ; :104 [#uses=2]
431 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :105 [#uses=1]
432 call i64 @llvm.atomic.load.or.i64.p0i64( i64* %105, i64 %104 ) ; :106 [#uses=1]
433 or i64 %106, %104 ; :107 [#uses=1]
434 store i64 %107, i64* @ul, align 8
435 load i8* @uc, align 1 ; :108 [#uses=2]
436 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @sc, i8 %108 ) ; :109 [#uses=1]
437 xor i8 %109, %108 ; :110 [#uses=1]
438 store i8 %110, i8* @sc, align 1
439 load i8* @uc, align 1 ; :111 [#uses=2]
440 call i8 @llvm.atomic.load.xor.i8.p0i8( i8* @uc, i8 %111 ) ; :112 [#uses=1]
441 xor i8 %112, %111 ; :113 [#uses=1]
442 store i8 %113, i8* @uc, align 1
443 load i8* @uc, align 1 ; :114 [#uses=1]
444 zext i8 %114 to i16 ; :115 [#uses=2]
445 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :116 [#uses=1]
446 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %116, i16 %115 ) ; :117 [#uses=1]
447 xor i16 %117, %115 ; :118 [#uses=1]
448 store i16 %118, i16* @ss, align 2
449 load i8* @uc, align 1 ; :119 [#uses=1]
450 zext i8 %119 to i16 ; :120 [#uses=2]
451 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :121 [#uses=1]
452 call i16 @llvm.atomic.load.xor.i16.p0i16( i16* %121, i16 %120 ) ; :122 [#uses=1]
453 xor i16 %122, %120 ; :123 [#uses=1]
454 store i16 %123, i16* @us, align 2
455 load i8* @uc, align 1 ; :124 [#uses=1]
456 zext i8 %124 to i32 ; :125 [#uses=2]
457 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :126 [#uses=1]
458 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %126, i32 %125 ) ; :127 [#uses=1]
459 xor i32 %127, %125 ; :128 [#uses=1]
460 store i32 %128, i32* @si, align 4
461 load i8* @uc, align 1 ; :129 [#uses=1]
462 zext i8 %129 to i32 ; :130 [#uses=2]
463 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :131 [#uses=1]
464 call i32 @llvm.atomic.load.xor.i32.p0i32( i32* %131, i32 %130 ) ; :132 [#uses=1]
465 xor i32 %132, %130 ; :133 [#uses=1]
466 store i32 %133, i32* @ui, align 4
467 load i8* @uc, align 1 ; :134 [#uses=1]
468 zext i8 %134 to i64 ; :135 [#uses=2]
469 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :136 [#uses=1]
470 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %136, i64 %135 ) ; :137 [#uses=1]
471 xor i64 %137, %135 ; :138 [#uses=1]
472 store i64 %138, i64* @sl, align 8
473 load i8* @uc, align 1 ; :139 [#uses=1]
474 zext i8 %139 to i64 ; :140 [#uses=2]
475 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :141 [#uses=1]
476 call i64 @llvm.atomic.load.xor.i64.p0i64( i64* %141, i64 %140 ) ; :142 [#uses=1]
477 xor i64 %142, %140 ; :143 [#uses=1]
478 store i64 %143, i64* @ul, align 8
479 load i8* @uc, align 1 ; :144 [#uses=2]
480 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @sc, i8 %144 ) ; :145 [#uses=1]
481 and i8 %145, %144 ; :146 [#uses=1]
482 store i8 %146, i8* @sc, align 1
483 load i8* @uc, align 1 ; :147 [#uses=2]
484 call i8 @llvm.atomic.load.and.i8.p0i8( i8* @uc, i8 %147 ) ; :148 [#uses=1]
485 and i8 %148, %147 ; :149 [#uses=1]
486 store i8 %149, i8* @uc, align 1
487 load i8* @uc, align 1 ; :150 [#uses=1]
488 zext i8 %150 to i16 ; :151 [#uses=2]
489 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :152 [#uses=1]
490 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %152, i16 %151 ) ; :153 [#uses=1]
491 and i16 %153, %151 ; :154 [#uses=1]
492 store i16 %154, i16* @ss, align 2
493 load i8* @uc, align 1 ; :155 [#uses=1]
494 zext i8 %155 to i16 ; :156 [#uses=2]
495 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :157 [#uses=1]
496 call i16 @llvm.atomic.load.and.i16.p0i16( i16* %157, i16 %156 ) ; :158 [#uses=1]
497 and i16 %158, %156 ; :159 [#uses=1]
498 store i16 %159, i16* @us, align 2
499 load i8* @uc, align 1 ; :160 [#uses=1]
500 zext i8 %160 to i32 ; :161 [#uses=2]
501 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :162 [#uses=1]
502 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %162, i32 %161 ) ; :163 [#uses=1]
503 and i32 %163, %161 ; :164 [#uses=1]
504 store i32 %164, i32* @si, align 4
505 load i8* @uc, align 1 ; :165 [#uses=1]
506 zext i8 %165 to i32 ; :166 [#uses=2]
507 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :167 [#uses=1]
508 call i32 @llvm.atomic.load.and.i32.p0i32( i32* %167, i32 %166 ) ; :168 [#uses=1]
509 and i32 %168, %166 ; :169 [#uses=1]
510 store i32 %169, i32* @ui, align 4
511 load i8* @uc, align 1 ; :170 [#uses=1]
512 zext i8 %170 to i64 ; :171 [#uses=2]
513 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :172 [#uses=1]
514 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %172, i64 %171 ) ; :173 [#uses=1]
515 and i64 %173, %171 ; :174 [#uses=1]
516 store i64 %174, i64* @sl, align 8
517 load i8* @uc, align 1 ; :175 [#uses=1]
518 zext i8 %175 to i64 ; :176 [#uses=2]
519 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :177 [#uses=1]
520 call i64 @llvm.atomic.load.and.i64.p0i64( i64* %177, i64 %176 ) ; :178 [#uses=1]
521 and i64 %178, %176 ; :179 [#uses=1]
522 store i64 %179, i64* @ul, align 8
523 load i8* @uc, align 1 ; :180 [#uses=2]
524 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @sc, i8 %180 ) ; :181 [#uses=1]
525 xor i8 %181, -1 ; :182 [#uses=1]
526 and i8 %182, %180 ; :183 [#uses=1]
527 store i8 %183, i8* @sc, align 1
528 load i8* @uc, align 1 ; :184 [#uses=2]
529 call i8 @llvm.atomic.load.nand.i8.p0i8( i8* @uc, i8 %184 ) ; :185 [#uses=1]
530 xor i8 %185, -1 ; :186 [#uses=1]
531 and i8 %186, %184 ; :187 [#uses=1]
532 store i8 %187, i8* @uc, align 1
533 load i8* @uc, align 1 ; :188 [#uses=1]
534 zext i8 %188 to i16 ; :189 [#uses=2]
535 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :190 [#uses=1]
536 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %190, i16 %189 ) ; :191 [#uses=1]
537 xor i16 %191, -1 ; :192 [#uses=1]
538 and i16 %192, %189 ; :193 [#uses=1]
539 store i16 %193, i16* @ss, align 2
540 load i8* @uc, align 1 ; :194 [#uses=1]
541 zext i8 %194 to i16 ; :195 [#uses=2]
542 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :196 [#uses=1]
543 call i16 @llvm.atomic.load.nand.i16.p0i16( i16* %196, i16 %195 ) ; :197 [#uses=1]
544 xor i16 %197, -1 ; :198 [#uses=1]
545 and i16 %198, %195 ; :199 [#uses=1]
546 store i16 %199, i16* @us, align 2
547 load i8* @uc, align 1 ; :200 [#uses=1]
548 zext i8 %200 to i32 ; :201 [#uses=2]
549 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :202 [#uses=1]
550 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %202, i32 %201 ) ; :203 [#uses=1]
551 xor i32 %203, -1 ; :204 [#uses=1]
552 and i32 %204, %201 ; :205 [#uses=1]
553 store i32 %205, i32* @si, align 4
554 load i8* @uc, align 1 ; :206 [#uses=1]
555 zext i8 %206 to i32 ; :207 [#uses=2]
556 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :208 [#uses=1]
557 call i32 @llvm.atomic.load.nand.i32.p0i32( i32* %208, i32 %207 ) ; :209 [#uses=1]
558 xor i32 %209, -1 ; :210 [#uses=1]
559 and i32 %210, %207 ; :211 [#uses=1]
560 store i32 %211, i32* @ui, align 4
561 load i8* @uc, align 1 ; :212 [#uses=1]
562 zext i8 %212 to i64 ; :213 [#uses=2]
563 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :214 [#uses=1]
564 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %214, i64 %213 ) ; :215 [#uses=1]
565 xor i64 %215, -1 ; :216 [#uses=1]
566 and i64 %216, %213 ; :217 [#uses=1]
567 store i64 %217, i64* @sl, align 8
568 load i8* @uc, align 1 ; :218 [#uses=1]
569 zext i8 %218 to i64 ; :219 [#uses=2]
570 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :220 [#uses=1]
571 call i64 @llvm.atomic.load.nand.i64.p0i64( i64* %220, i64 %219 ) ; :221 [#uses=1]
572 xor i64 %221, -1 ; :222 [#uses=1]
573 and i64 %222, %219 ; :223 [#uses=1]
574 store i64 %223, i64* @ul, align 8
575 br label %return
576
577 return: ; preds = %entry
578 ret void
256 %0 = load i8* @uc, align 1
257 %1 = atomicrmw add i8* @sc, i8 %0 monotonic
258 %2 = add i8 %1, %0
259 store i8 %2, i8* @sc, align 1
260 %3 = load i8* @uc, align 1
261 %4 = atomicrmw add i8* @uc, i8 %3 monotonic
262 %5 = add i8 %4, %3
263 store i8 %5, i8* @uc, align 1
264 %6 = load i8* @uc, align 1
265 %7 = zext i8 %6 to i16
266 %8 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
267 %9 = atomicrmw add i16* %8, i16 %7 monotonic
268 %10 = add i16 %9, %7
269 store i16 %10, i16* @ss, align 2
270 %11 = load i8* @uc, align 1
271 %12 = zext i8 %11 to i16
272 %13 = bitcast i8* bitcast (i16* @us to i8*) to i16*
273 %14 = atomicrmw add i16* %13, i16 %12 monotonic
274 %15 = add i16 %14, %12
275 store i16 %15, i16* @us, align 2
276 %16 = load i8* @uc, align 1
277 %17 = zext i8 %16 to i32
278 %18 = bitcast i8* bitcast (i32* @si to i8*) to i32*
279 %19 = atomicrmw add i32* %18, i32 %17 monotonic
280 %20 = add i32 %19, %17
281 store i32 %20, i32* @si, align 4
282 %21 = load i8* @uc, align 1
283 %22 = zext i8 %21 to i32
284 %23 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
285 %24 = atomicrmw add i32* %23, i32 %22 monotonic
286 %25 = add i32 %24, %22
287 store i32 %25, i32* @ui, align 4
288 %26 = load i8* @uc, align 1
289 %27 = zext i8 %26 to i64
290 %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
291 %29 = atomicrmw add i64* %28, i64 %27 monotonic
292 %30 = add i64 %29, %27
293 store i64 %30, i64* @sl, align 8
294 %31 = load i8* @uc, align 1
295 %32 = zext i8 %31 to i64
296 %33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
297 %34 = atomicrmw add i64* %33, i64 %32 monotonic
298 %35 = add i64 %34, %32
299 store i64 %35, i64* @ul, align 8
300 %36 = load i8* @uc, align 1
301 %37 = atomicrmw sub i8* @sc, i8 %36 monotonic
302 %38 = sub i8 %37, %36
303 store i8 %38, i8* @sc, align 1
304 %39 = load i8* @uc, align 1
305 %40 = atomicrmw sub i8* @uc, i8 %39 monotonic
306 %41 = sub i8 %40, %39
307 store i8 %41, i8* @uc, align 1
308 %42 = load i8* @uc, align 1
309 %43 = zext i8 %42 to i16
310 %44 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
311 %45 = atomicrmw sub i16* %44, i16 %43 monotonic
312 %46 = sub i16 %45, %43
313 store i16 %46, i16* @ss, align 2
314 %47 = load i8* @uc, align 1
315 %48 = zext i8 %47 to i16
316 %49 = bitcast i8* bitcast (i16* @us to i8*) to i16*
317 %50 = atomicrmw sub i16* %49, i16 %48 monotonic
318 %51 = sub i16 %50, %48
319 store i16 %51, i16* @us, align 2
320 %52 = load i8* @uc, align 1
321 %53 = zext i8 %52 to i32
322 %54 = bitcast i8* bitcast (i32* @si to i8*) to i32*
323 %55 = atomicrmw sub i32* %54, i32 %53 monotonic
324 %56 = sub i32 %55, %53
325 store i32 %56, i32* @si, align 4
326 %57 = load i8* @uc, align 1
327 %58 = zext i8 %57 to i32
328 %59 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
329 %60 = atomicrmw sub i32* %59, i32 %58 monotonic
330 %61 = sub i32 %60, %58
331 store i32 %61, i32* @ui, align 4
332 %62 = load i8* @uc, align 1
333 %63 = zext i8 %62 to i64
334 %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
335 %65 = atomicrmw sub i64* %64, i64 %63 monotonic
336 %66 = sub i64 %65, %63
337 store i64 %66, i64* @sl, align 8
338 %67 = load i8* @uc, align 1
339 %68 = zext i8 %67 to i64
340 %69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
341 %70 = atomicrmw sub i64* %69, i64 %68 monotonic
342 %71 = sub i64 %70, %68
343 store i64 %71, i64* @ul, align 8
344 %72 = load i8* @uc, align 1
345 %73 = atomicrmw or i8* @sc, i8 %72 monotonic
346 %74 = or i8 %73, %72
347 store i8 %74, i8* @sc, align 1
348 %75 = load i8* @uc, align 1
349 %76 = atomicrmw or i8* @uc, i8 %75 monotonic
350 %77 = or i8 %76, %75
351 store i8 %77, i8* @uc, align 1
352 %78 = load i8* @uc, align 1
353 %79 = zext i8 %78 to i16
354 %80 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
355 %81 = atomicrmw or i16* %80, i16 %79 monotonic
356 %82 = or i16 %81, %79
357 store i16 %82, i16* @ss, align 2
358 %83 = load i8* @uc, align 1
359 %84 = zext i8 %83 to i16
360 %85 = bitcast i8* bitcast (i16* @us to i8*) to i16*
361 %86 = atomicrmw or i16* %85, i16 %84 monotonic
362 %87 = or i16 %86, %84
363 store i16 %87, i16* @us, align 2
364 %88 = load i8* @uc, align 1
365 %89 = zext i8 %88 to i32
366 %90 = bitcast i8* bitcast (i32* @si to i8*) to i32*
367 %91 = atomicrmw or i32* %90, i32 %89 monotonic
368 %92 = or i32 %91, %89
369 store i32 %92, i32* @si, align 4
370 %93 = load i8* @uc, align 1
371 %94 = zext i8 %93 to i32
372 %95 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
373 %96 = atomicrmw or i32* %95, i32 %94 monotonic
374 %97 = or i32 %96, %94
375 store i32 %97, i32* @ui, align 4
376 %98 = load i8* @uc, align 1
377 %99 = zext i8 %98 to i64
378 %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
379 %101 = atomicrmw or i64* %100, i64 %99 monotonic
380 %102 = or i64 %101, %99
381 store i64 %102, i64* @sl, align 8
382 %103 = load i8* @uc, align 1
383 %104 = zext i8 %103 to i64
384 %105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
385 %106 = atomicrmw or i64* %105, i64 %104 monotonic
386 %107 = or i64 %106, %104
387 store i64 %107, i64* @ul, align 8
388 %108 = load i8* @uc, align 1
389 %109 = atomicrmw xor i8* @sc, i8 %108 monotonic
390 %110 = xor i8 %109, %108
391 store i8 %110, i8* @sc, align 1
392 %111 = load i8* @uc, align 1
393 %112 = atomicrmw xor i8* @uc, i8 %111 monotonic
394 %113 = xor i8 %112, %111
395 store i8 %113, i8* @uc, align 1
396 %114 = load i8* @uc, align 1
397 %115 = zext i8 %114 to i16
398 %116 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
399 %117 = atomicrmw xor i16* %116, i16 %115 monotonic
400 %118 = xor i16 %117, %115
401 store i16 %118, i16* @ss, align 2
402 %119 = load i8* @uc, align 1
403 %120 = zext i8 %119 to i16
404 %121 = bitcast i8* bitcast (i16* @us to i8*) to i16*
405 %122 = atomicrmw xor i16* %121, i16 %120 monotonic
406 %123 = xor i16 %122, %120
407 store i16 %123, i16* @us, align 2
408 %124 = load i8* @uc, align 1
409 %125 = zext i8 %124 to i32
410 %126 = bitcast i8* bitcast (i32* @si to i8*) to i32*
411 %127 = atomicrmw xor i32* %126, i32 %125 monotonic
412 %128 = xor i32 %127, %125
413 store i32 %128, i32* @si, align 4
414 %129 = load i8* @uc, align 1
415 %130 = zext i8 %129 to i32
416 %131 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
417 %132 = atomicrmw xor i32* %131, i32 %130 monotonic
418 %133 = xor i32 %132, %130
419 store i32 %133, i32* @ui, align 4
420 %134 = load i8* @uc, align 1
421 %135 = zext i8 %134 to i64
422 %136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
423 %137 = atomicrmw xor i64* %136, i64 %135 monotonic
424 %138 = xor i64 %137, %135
425 store i64 %138, i64* @sl, align 8
426 %139 = load i8* @uc, align 1
427 %140 = zext i8 %139 to i64
428 %141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
429 %142 = atomicrmw xor i64* %141, i64 %140 monotonic
430 %143 = xor i64 %142, %140
431 store i64 %143, i64* @ul, align 8
432 %144 = load i8* @uc, align 1
433 %145 = atomicrmw and i8* @sc, i8 %144 monotonic
434 %146 = and i8 %145, %144
435 store i8 %146, i8* @sc, align 1
436 %147 = load i8* @uc, align 1
437 %148 = atomicrmw and i8* @uc, i8 %147 monotonic
438 %149 = and i8 %148, %147
439 store i8 %149, i8* @uc, align 1
440 %150 = load i8* @uc, align 1
441 %151 = zext i8 %150 to i16
442 %152 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
443 %153 = atomicrmw and i16* %152, i16 %151 monotonic
444 %154 = and i16 %153, %151
445 store i16 %154, i16* @ss, align 2
446 %155 = load i8* @uc, align 1
447 %156 = zext i8 %155 to i16
448 %157 = bitcast i8* bitcast (i16* @us to i8*) to i16*
449 %158 = atomicrmw and i16* %157, i16 %156 monotonic
450 %159 = and i16 %158, %156
451 store i16 %159, i16* @us, align 2
452 %160 = load i8* @uc, align 1
453 %161 = zext i8 %160 to i32
454 %162 = bitcast i8* bitcast (i32* @si to i8*) to i32*
455 %163 = atomicrmw and i32* %162, i32 %161 monotonic
456 %164 = and i32 %163, %161
457 store i32 %164, i32* @si, align 4
458 %165 = load i8* @uc, align 1
459 %166 = zext i8 %165 to i32
460 %167 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
461 %168 = atomicrmw and i32* %167, i32 %166 monotonic
462 %169 = and i32 %168, %166
463 store i32 %169, i32* @ui, align 4
464 %170 = load i8* @uc, align 1
465 %171 = zext i8 %170 to i64
466 %172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
467 %173 = atomicrmw and i64* %172, i64 %171 monotonic
468 %174 = and i64 %173, %171
469 store i64 %174, i64* @sl, align 8
470 %175 = load i8* @uc, align 1
471 %176 = zext i8 %175 to i64
472 %177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
473 %178 = atomicrmw and i64* %177, i64 %176 monotonic
474 %179 = and i64 %178, %176
475 store i64 %179, i64* @ul, align 8
476 %180 = load i8* @uc, align 1
477 %181 = atomicrmw nand i8* @sc, i8 %180 monotonic
478 %182 = xor i8 %181, -1
479 %183 = and i8 %182, %180
480 store i8 %183, i8* @sc, align 1
481 %184 = load i8* @uc, align 1
482 %185 = atomicrmw nand i8* @uc, i8 %184 monotonic
483 %186 = xor i8 %185, -1
484 %187 = and i8 %186, %184
485 store i8 %187, i8* @uc, align 1
486 %188 = load i8* @uc, align 1
487 %189 = zext i8 %188 to i16
488 %190 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
489 %191 = atomicrmw nand i16* %190, i16 %189 monotonic
490 %192 = xor i16 %191, -1
491 %193 = and i16 %192, %189
492 store i16 %193, i16* @ss, align 2
493 %194 = load i8* @uc, align 1
494 %195 = zext i8 %194 to i16
495 %196 = bitcast i8* bitcast (i16* @us to i8*) to i16*
496 %197 = atomicrmw nand i16* %196, i16 %195 monotonic
497 %198 = xor i16 %197, -1
498 %199 = and i16 %198, %195
499 store i16 %199, i16* @us, align 2
500 %200 = load i8* @uc, align 1
501 %201 = zext i8 %200 to i32
502 %202 = bitcast i8* bitcast (i32* @si to i8*) to i32*
503 %203 = atomicrmw nand i32* %202, i32 %201 monotonic
504 %204 = xor i32 %203, -1
505 %205 = and i32 %204, %201
506 store i32 %205, i32* @si, align 4
507 %206 = load i8* @uc, align 1
508 %207 = zext i8 %206 to i32
509 %208 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
510 %209 = atomicrmw nand i32* %208, i32 %207 monotonic
511 %210 = xor i32 %209, -1
512 %211 = and i32 %210, %207
513 store i32 %211, i32* @ui, align 4
514 %212 = load i8* @uc, align 1
515 %213 = zext i8 %212 to i64
516 %214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
517 %215 = atomicrmw nand i64* %214, i64 %213 monotonic
518 %216 = xor i64 %215, -1
519 %217 = and i64 %216, %213
520 store i64 %217, i64* @sl, align 8
521 %218 = load i8* @uc, align 1
522 %219 = zext i8 %218 to i64
523 %220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
524 %221 = atomicrmw nand i64* %220, i64 %219 monotonic
525 %222 = xor i64 %221, -1
526 %223 = and i64 %222, %219
527 store i64 %223, i64* @ul, align 8
528 br label %return
529
530 return: ; preds = %entry
531 ret void
579532 }
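The hunk above is the shape of the autoupgrade for the old fetch-and-op intrinsics: each llvm.atomic.load.<op> call becomes an atomicrmw with monotonic ordering, and because atomicrmw yields the value the location held before the operation, the post-op value is recomputed with an explicit add/sub/or/xor/and (nand needs an xor plus an and). A minimal sketch of the mapping, using placeholder names %p and %v rather than anything from the test:

; old intrinsic form
%old = call i32 @llvm.atomic.load.add.i32.p0i32( i32* %p, i32 %v )
; instruction form produced by the autoupgrader
%old = atomicrmw add i32* %p, i32 %v monotonic
; op-and-fetch result, recomputed from the returned old value
%new = add i32 %old, %v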
580533
581534 define void @test_compare_and_swap() nounwind {
582535 entry:
583 load i8* @uc, align 1 ; :0 [#uses=1]
584 load i8* @sc, align 1 ; :1 [#uses=1]
585 call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %0, i8 %1 ) ; :2 [#uses=1]
586 store i8 %2, i8* @sc, align 1
587 load i8* @uc, align 1 ; :3 [#uses=1]
588 load i8* @sc, align 1 ; :4 [#uses=1]
589 call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %3, i8 %4 ) ; :5 [#uses=1]
590 store i8 %5, i8* @uc, align 1
591 load i8* @uc, align 1 ; :6 [#uses=1]
592 zext i8 %6 to i16 ; :7 [#uses=1]
593 load i8* @sc, align 1 ; :8 [#uses=1]
594 sext i8 %8 to i16 ; :9 [#uses=1]
595 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :10 [#uses=1]
596 call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %10, i16 %7, i16 %9 ) ; :11 [#uses=1]
597 store i16 %11, i16* @ss, align 2
598 load i8* @uc, align 1 ; :12 [#uses=1]
599 zext i8 %12 to i16 ; :13 [#uses=1]
600 load i8* @sc, align 1 ; :14 [#uses=1]
601 sext i8 %14 to i16 ; :15 [#uses=1]
602 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :16 [#uses=1]
603 call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %16, i16 %13, i16 %15 ) ; :17 [#uses=1]
604 store i16 %17, i16* @us, align 2
605 load i8* @uc, align 1 ; :18 [#uses=1]
606 zext i8 %18 to i32 ; :19 [#uses=1]
607 load i8* @sc, align 1 ; :20 [#uses=1]
608 sext i8 %20 to i32 ; :21 [#uses=1]
609 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :22 [#uses=1]
610 call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %22, i32 %19, i32 %21 ) ; :23 [#uses=1]
611 store i32 %23, i32* @si, align 4
612 load i8* @uc, align 1 ; :24 [#uses=1]
613 zext i8 %24 to i32 ; :25 [#uses=1]
614 load i8* @sc, align 1 ; :26 [#uses=1]
615 sext i8 %26 to i32 ; :27 [#uses=1]
616 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :28 [#uses=1]
617 call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %28, i32 %25, i32 %27 ) ; :29 [#uses=1]
618 store i32 %29, i32* @ui, align 4
619 load i8* @uc, align 1 ; :30 [#uses=1]
620 zext i8 %30 to i64 ; :31 [#uses=1]
621 load i8* @sc, align 1 ; :32 [#uses=1]
622 sext i8 %32 to i64 ; :33 [#uses=1]
623 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :34 [#uses=1]
624 call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %34, i64 %31, i64 %33 ) ; :35 [#uses=1]
625 store i64 %35, i64* @sl, align 8
626 load i8* @uc, align 1 ; :36 [#uses=1]
627 zext i8 %36 to i64 ; :37 [#uses=1]
628 load i8* @sc, align 1 ; :38 [#uses=1]
629 sext i8 %38 to i64 ; :39 [#uses=1]
630 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :40 [#uses=1]
631 call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %40, i64 %37, i64 %39 ) ; :41 [#uses=1]
632 store i64 %41, i64* @ul, align 8
633 load i8* @uc, align 1 ; :42 [#uses=2]
634 load i8* @sc, align 1 ; :43 [#uses=1]
635 call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @sc, i8 %42, i8 %43 ) ; :44 [#uses=1]
636 icmp eq i8 %44, %42 ; :45 [#uses=1]
637 zext i1 %45 to i8 ; :46 [#uses=1]
638 zext i8 %46 to i32 ; :47 [#uses=1]
639 store i32 %47, i32* @ui, align 4
640 load i8* @uc, align 1 ; :48 [#uses=2]
641 load i8* @sc, align 1 ; :49 [#uses=1]
642 call i8 @llvm.atomic.cmp.swap.i8.p0i8( i8* @uc, i8 %48, i8 %49 ) ; :50 [#uses=1]
643 icmp eq i8 %50, %48 ; :51 [#uses=1]
644 zext i1 %51 to i8 ; :52 [#uses=1]
645 zext i8 %52 to i32 ; :53 [#uses=1]
646 store i32 %53, i32* @ui, align 4
647 load i8* @uc, align 1 ; :54 [#uses=1]
648 zext i8 %54 to i16 ; :55 [#uses=2]
649 load i8* @sc, align 1 ; :56 [#uses=1]
650 sext i8 %56 to i16 ; :57 [#uses=1]
651 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :58 [#uses=1]
652 call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %58, i16 %55, i16 %57 ) ; :59 [#uses=1]
653 icmp eq i16 %59, %55 ; :60 [#uses=1]
654 zext i1 %60 to i8 ; :61 [#uses=1]
655 zext i8 %61 to i32 ; :62 [#uses=1]
656 store i32 %62, i32* @ui, align 4
657 load i8* @uc, align 1 ; :63 [#uses=1]
658 zext i8 %63 to i16 ; :64 [#uses=2]
659 load i8* @sc, align 1 ; :65 [#uses=1]
660 sext i8 %65 to i16 ; :66 [#uses=1]
661 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :67 [#uses=1]
662 call i16 @llvm.atomic.cmp.swap.i16.p0i16( i16* %67, i16 %64, i16 %66 ) ; :68 [#uses=1]
663 icmp eq i16 %68, %64 ; :69 [#uses=1]
664 zext i1 %69 to i8 ; :70 [#uses=1]
665 zext i8 %70 to i32 ; :71 [#uses=1]
666 store i32 %71, i32* @ui, align 4
667 load i8* @uc, align 1 ; :72 [#uses=1]
668 zext i8 %72 to i32 ; :73 [#uses=2]
669 load i8* @sc, align 1 ; :74 [#uses=1]
670 sext i8 %74 to i32 ; :75 [#uses=1]
671 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :76 [#uses=1]
672 call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %76, i32 %73, i32 %75 ) ; :77 [#uses=1]
673 icmp eq i32 %77, %73 ; :78 [#uses=1]
674 zext i1 %78 to i8 ; :79 [#uses=1]
675 zext i8 %79 to i32 ; :80 [#uses=1]
676 store i32 %80, i32* @ui, align 4
677 load i8* @uc, align 1 ; :81 [#uses=1]
678 zext i8 %81 to i32 ; :82 [#uses=2]
679 load i8* @sc, align 1 ; :83 [#uses=1]
680 sext i8 %83 to i32 ; :84 [#uses=1]
681 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :85 [#uses=1]
682 call i32 @llvm.atomic.cmp.swap.i32.p0i32( i32* %85, i32 %82, i32 %84 ) ; :86 [#uses=1]
683 icmp eq i32 %86, %82 ; :87 [#uses=1]
684 zext i1 %87 to i8 ; :88 [#uses=1]
685 zext i8 %88 to i32 ; :89 [#uses=1]
686 store i32 %89, i32* @ui, align 4
687 load i8* @uc, align 1 ; :90 [#uses=1]
688 zext i8 %90 to i64 ; :91 [#uses=2]
689 load i8* @sc, align 1 ; :92 [#uses=1]
690 sext i8 %92 to i64 ; :93 [#uses=1]
691 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :94 [#uses=1]
692 call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %94, i64 %91, i64 %93 ) ; :95 [#uses=1]
693 icmp eq i64 %95, %91 ; :96 [#uses=1]
694 zext i1 %96 to i8 ; :97 [#uses=1]
695 zext i8 %97 to i32 ; :98 [#uses=1]
696 store i32 %98, i32* @ui, align 4
697 load i8* @uc, align 1 ; :99 [#uses=1]
698 zext i8 %99 to i64 ; :100 [#uses=2]
699 load i8* @sc, align 1 ; :101 [#uses=1]
700 sext i8 %101 to i64 ; :102 [#uses=1]
701 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :103 [#uses=1]
702 call i64 @llvm.atomic.cmp.swap.i64.p0i64( i64* %103, i64 %100, i64 %102 ) ; :104 [#uses=1]
703 icmp eq i64 %104, %100 ; :105 [#uses=1]
704 zext i1 %105 to i8 ; :106 [#uses=1]
705 zext i8 %106 to i32 ; :107 [#uses=1]
706 store i32 %107, i32* @ui, align 4
707 br label %return
708
709 return: ; preds = %entry
710 ret void
536 %0 = load i8* @uc, align 1
537 %1 = load i8* @sc, align 1
538 %2 = cmpxchg i8* @sc, i8 %0, i8 %1 monotonic
539 store i8 %2, i8* @sc, align 1
540 %3 = load i8* @uc, align 1
541 %4 = load i8* @sc, align 1
542 %5 = cmpxchg i8* @uc, i8 %3, i8 %4 monotonic
543 store i8 %5, i8* @uc, align 1
544 %6 = load i8* @uc, align 1
545 %7 = zext i8 %6 to i16
546 %8 = load i8* @sc, align 1
547 %9 = sext i8 %8 to i16
548 %10 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
549 %11 = cmpxchg i16* %10, i16 %7, i16 %9 monotonic
550 store i16 %11, i16* @ss, align 2
551 %12 = load i8* @uc, align 1
552 %13 = zext i8 %12 to i16
553 %14 = load i8* @sc, align 1
554 %15 = sext i8 %14 to i16
555 %16 = bitcast i8* bitcast (i16* @us to i8*) to i16*
556 %17 = cmpxchg i16* %16, i16 %13, i16 %15 monotonic
557 store i16 %17, i16* @us, align 2
558 %18 = load i8* @uc, align 1
559 %19 = zext i8 %18 to i32
560 %20 = load i8* @sc, align 1
561 %21 = sext i8 %20 to i32
562 %22 = bitcast i8* bitcast (i32* @si to i8*) to i32*
563 %23 = cmpxchg i32* %22, i32 %19, i32 %21 monotonic
564 store i32 %23, i32* @si, align 4
565 %24 = load i8* @uc, align 1
566 %25 = zext i8 %24 to i32
567 %26 = load i8* @sc, align 1
568 %27 = sext i8 %26 to i32
569 %28 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
570 %29 = cmpxchg i32* %28, i32 %25, i32 %27 monotonic
571 store i32 %29, i32* @ui, align 4
572 %30 = load i8* @uc, align 1
573 %31 = zext i8 %30 to i64
574 %32 = load i8* @sc, align 1
575 %33 = sext i8 %32 to i64
576 %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
577 %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic
578 store i64 %35, i64* @sl, align 8
579 %36 = load i8* @uc, align 1
580 %37 = zext i8 %36 to i64
581 %38 = load i8* @sc, align 1
582 %39 = sext i8 %38 to i64
583 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
584 %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic
585 store i64 %41, i64* @ul, align 8
586 %42 = load i8* @uc, align 1
587 %43 = load i8* @sc, align 1
588 %44 = cmpxchg i8* @sc, i8 %42, i8 %43 monotonic
589 %45 = icmp eq i8 %44, %42
590 %46 = zext i1 %45 to i8
591 %47 = zext i8 %46 to i32
592 store i32 %47, i32* @ui, align 4
593 %48 = load i8* @uc, align 1
594 %49 = load i8* @sc, align 1
595 %50 = cmpxchg i8* @uc, i8 %48, i8 %49 monotonic
596 %51 = icmp eq i8 %50, %48
597 %52 = zext i1 %51 to i8
598 %53 = zext i8 %52 to i32
599 store i32 %53, i32* @ui, align 4
600 %54 = load i8* @uc, align 1
601 %55 = zext i8 %54 to i16
602 %56 = load i8* @sc, align 1
603 %57 = sext i8 %56 to i16
604 %58 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
605 %59 = cmpxchg i16* %58, i16 %55, i16 %57 monotonic
606 %60 = icmp eq i16 %59, %55
607 %61 = zext i1 %60 to i8
608 %62 = zext i8 %61 to i32
609 store i32 %62, i32* @ui, align 4
610 %63 = load i8* @uc, align 1
611 %64 = zext i8 %63 to i16
612 %65 = load i8* @sc, align 1
613 %66 = sext i8 %65 to i16
614 %67 = bitcast i8* bitcast (i16* @us to i8*) to i16*
615 %68 = cmpxchg i16* %67, i16 %64, i16 %66 monotonic
616 %69 = icmp eq i16 %68, %64
617 %70 = zext i1 %69 to i8
618 %71 = zext i8 %70 to i32
619 store i32 %71, i32* @ui, align 4
620 %72 = load i8* @uc, align 1
621 %73 = zext i8 %72 to i32
622 %74 = load i8* @sc, align 1
623 %75 = sext i8 %74 to i32
624 %76 = bitcast i8* bitcast (i32* @si to i8*) to i32*
625 %77 = cmpxchg i32* %76, i32 %73, i32 %75 monotonic
626 %78 = icmp eq i32 %77, %73
627 %79 = zext i1 %78 to i8
628 %80 = zext i8 %79 to i32
629 store i32 %80, i32* @ui, align 4
630 %81 = load i8* @uc, align 1
631 %82 = zext i8 %81 to i32
632 %83 = load i8* @sc, align 1
633 %84 = sext i8 %83 to i32
634 %85 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
635 %86 = cmpxchg i32* %85, i32 %82, i32 %84 monotonic
636 %87 = icmp eq i32 %86, %82
637 %88 = zext i1 %87 to i8
638 %89 = zext i8 %88 to i32
639 store i32 %89, i32* @ui, align 4
640 %90 = load i8* @uc, align 1
641 %91 = zext i8 %90 to i64
642 %92 = load i8* @sc, align 1
643 %93 = sext i8 %92 to i64
644 %94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
645 %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic
646 %96 = icmp eq i64 %95, %91
647 %97 = zext i1 %96 to i8
648 %98 = zext i8 %97 to i32
649 store i32 %98, i32* @ui, align 4
650 %99 = load i8* @uc, align 1
651 %100 = zext i8 %99 to i64
652 %101 = load i8* @sc, align 1
653 %102 = sext i8 %101 to i64
654 %103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
655 %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic
656 %105 = icmp eq i64 %104, %100
657 %106 = zext i1 %105 to i8
658 %107 = zext i8 %106 to i32
659 store i32 %107, i32* @ui, align 4
660 br label %return
661
662 return: ; preds = %entry
663 ret void
711664 }
712
713 declare i8 @llvm.atomic.cmp.swap.i8.p0i8(i8*, i8, i8) nounwind
714
715 declare i16 @llvm.atomic.cmp.swap.i16.p0i16(i16*, i16, i16) nounwind
716
717 declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32*, i32, i32) nounwind
718
719 declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64*, i64, i64) nounwind
720665
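test_compare_and_swap follows the same pattern: each llvm.atomic.cmp.swap.* call becomes a cmpxchg with monotonic ordering, and since cmpxchg (like the old intrinsic) returns only the loaded value, the success flag is still derived by hand with an icmp against the expected operand. The per-width intrinsic declarations above are dropped because the instruction form needs none. A sketch, with placeholders %p, %exp, and %new:

%old = cmpxchg i32* %p, i32 %exp, i32 %new monotonic
%ok = icmp eq i32 %old, %exp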
721666 define void @test_lock() nounwind {
722667 entry:
723 call i8 @llvm.atomic.swap.i8.p0i8( i8* @sc, i8 1 ) ; :0 [#uses=1]
724 store i8 %0, i8* @sc, align 1
725 call i8 @llvm.atomic.swap.i8.p0i8( i8* @uc, i8 1 ) ; :1 [#uses=1]
726 store i8 %1, i8* @uc, align 1
727 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :2 [#uses=1]
728 call i16 @llvm.atomic.swap.i16.p0i16( i16* %2, i16 1 ) ; :3 [#uses=1]
729 store i16 %3, i16* @ss, align 2
730 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :4 [#uses=1]
731 call i16 @llvm.atomic.swap.i16.p0i16( i16* %4, i16 1 ) ; :5 [#uses=1]
732 store i16 %5, i16* @us, align 2
733 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :6 [#uses=1]
734 call i32 @llvm.atomic.swap.i32.p0i32( i32* %6, i32 1 ) ; :7 [#uses=1]
735 store i32 %7, i32* @si, align 4
736 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :8 [#uses=1]
737 call i32 @llvm.atomic.swap.i32.p0i32( i32* %8, i32 1 ) ; :9 [#uses=1]
738 store i32 %9, i32* @ui, align 4
739 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :10 [#uses=1]
740 call i64 @llvm.atomic.swap.i64.p0i64( i64* %10, i64 1 ) ; :11 [#uses=1]
741 store i64 %11, i64* @sl, align 8
742 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :12 [#uses=1]
743 call i64 @llvm.atomic.swap.i64.p0i64( i64* %12, i64 1 ) ; :13 [#uses=1]
744 store i64 %13, i64* @ul, align 8
745 call void @llvm.memory.barrier( i1 true, i1 true, i1 true, i1 true, i1 false )
746 volatile store i8 0, i8* @sc, align 1
747 volatile store i8 0, i8* @uc, align 1
748 bitcast i8* bitcast (i16* @ss to i8*) to i16* ; :14 [#uses=1]
749 volatile store i16 0, i16* %14, align 2
750 bitcast i8* bitcast (i16* @us to i8*) to i16* ; :15 [#uses=1]
751 volatile store i16 0, i16* %15, align 2
752 bitcast i8* bitcast (i32* @si to i8*) to i32* ; :16 [#uses=1]
753 volatile store i32 0, i32* %16, align 4
754 bitcast i8* bitcast (i32* @ui to i8*) to i32* ; :17 [#uses=1]
755 volatile store i32 0, i32* %17, align 4
756 bitcast i8* bitcast (i64* @sl to i8*) to i64* ; :18 [#uses=1]
757 volatile store i64 0, i64* %18, align 8
758 bitcast i8* bitcast (i64* @ul to i8*) to i64* ; :19 [#uses=1]
759 volatile store i64 0, i64* %19, align 8
760 bitcast i8* bitcast (i64* @sll to i8*) to i64* ; :20 [#uses=1]
761 volatile store i64 0, i64* %20, align 8
762 bitcast i8* bitcast (i64* @ull to i8*) to i64* ; :21 [#uses=1]
763 volatile store i64 0, i64* %21, align 8
764 br label %return
765
766 return: ; preds = %entry
767 ret void
668 %0 = atomicrmw xchg i8* @sc, i8 1 monotonic
669 store i8 %0, i8* @sc, align 1
670 %1 = atomicrmw xchg i8* @uc, i8 1 monotonic
671 store i8 %1, i8* @uc, align 1
672 %2 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
673 %3 = atomicrmw xchg i16* %2, i16 1 monotonic
674 store i16 %3, i16* @ss, align 2
675 %4 = bitcast i8* bitcast (i16* @us to i8*) to i16*
676 %5 = atomicrmw xchg i16* %4, i16 1 monotonic
677 store i16 %5, i16* @us, align 2
678 %6 = bitcast i8* bitcast (i32* @si to i8*) to i32*
679 %7 = atomicrmw xchg i32* %6, i32 1 monotonic
680 store i32 %7, i32* @si, align 4
681 %8 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
682 %9 = atomicrmw xchg i32* %8, i32 1 monotonic
683 store i32 %9, i32* @ui, align 4
684 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
685 %11 = atomicrmw xchg i64* %10, i64 1 monotonic
686 store i64 %11, i64* @sl, align 8
687 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
688 %13 = atomicrmw xchg i64* %12, i64 1 monotonic
689 store i64 %13, i64* @ul, align 8
690 fence seq_cst
691 store volatile i8 0, i8* @sc, align 1
692 store volatile i8 0, i8* @uc, align 1
693 %14 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
694 store volatile i16 0, i16* %14, align 2
695 %15 = bitcast i8* bitcast (i16* @us to i8*) to i16*
696 store volatile i16 0, i16* %15, align 2
697 %16 = bitcast i8* bitcast (i32* @si to i8*) to i32*
698 store volatile i32 0, i32* %16, align 4
699 %17 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
700 store volatile i32 0, i32* %17, align 4
701 %18 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
702 store volatile i64 0, i64* %18, align 8
703 %19 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
704 store volatile i64 0, i64* %19, align 8
705 %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
706 store volatile i64 0, i64* %20, align 8
707 %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
708 store volatile i64 0, i64* %21, align 8
709 br label %return
710
711 return: ; preds = %entry
712 ret void
768713 }
769
770 declare i8 @llvm.atomic.swap.i8.p0i8(i8*, i8) nounwind
771
772 declare i16 @llvm.atomic.swap.i16.p0i16(i16*, i16) nounwind
773
774 declare i32 @llvm.atomic.swap.i32.p0i32(i32*, i32) nounwind
775
776 declare i64 @llvm.atomic.swap.i64.p0i64(i64*, i64) nounwind
777
778 declare void @llvm.memory.barrier(i1, i1, i1, i1, i1) nounwind
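test_lock covers the remaining pieces of the upgrade: llvm.atomic.swap becomes atomicrmw xchg, the five-flag llvm.memory.barrier call collapses to a bare fence seq_cst, and the deprecated "volatile store" spelling is rewritten as "store volatile"; the swap and barrier declarations above go away with the calls. Side by side, with a placeholder %p:

%old = atomicrmw xchg i8* %p, i8 1 monotonic   ; was llvm.atomic.swap.i8.p0i8
fence seq_cst                                  ; was llvm.memory.barrier(...)
store volatile i8 0, i8* %p, align 1           ; was 'volatile store'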
22 define i32 @exchange_and_add(i32* %mem, i32 %val) nounwind {
33 ; CHECK: exchange_and_add:
44 ; CHECK: lwarx
5 %tmp = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %mem, i32 %val)
5 %tmp = atomicrmw add i32* %mem, i32 %val monotonic
66 ; CHECK: stwcx.
77 ret i32 %tmp
88 }
1010 define i32 @exchange_and_cmp(i32* %mem) nounwind {
1111 ; CHECK: exchange_and_cmp:
1212 ; CHECK: lwarx
13 %tmp = call i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* %mem, i32 0, i32 1)
13 %tmp = cmpxchg i32* %mem, i32 0, i32 1 monotonic
1414 ; CHECK: stwcx.
1515 ; CHECK: stwcx.
1616 ret i32 %tmp
1919 define i32 @exchange(i32* %mem, i32 %val) nounwind {
2020 ; CHECK: exchange:
2121 ; CHECK: lwarx
22 %tmp = call i32 @llvm.atomic.swap.i32.p0i32(i32* %mem, i32 1)
22 %tmp = atomicrmw xchg i32* %mem, i32 1 monotonic
2323 ; CHECK: stwcx.
2424 ret i32 %tmp
2525 }
26
27 declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
28
29 declare i32 @llvm.atomic.cmp.swap.i32.p0i32(i32* nocapture, i32, i32) nounwind
30
31 declare i32 @llvm.atomic.swap.i32.p0i32(i32* nocapture, i32) nounwind
22 define i64 @exchange_and_add(i64* %mem, i64 %val) nounwind {
33 ; CHECK: exchange_and_add:
44 ; CHECK: ldarx
5 %tmp = call i64 @llvm.atomic.load.add.i64.p0i64(i64* %mem, i64 %val)
5 %tmp = atomicrmw add i64* %mem, i64 %val monotonic
66 ; CHECK: stdcx.
77 ret i64 %tmp
88 }
1010 define i64 @exchange_and_cmp(i64* %mem) nounwind {
1111 ; CHECK: exchange_and_cmp:
1212 ; CHECK: ldarx
13 %tmp = call i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* %mem, i64 0, i64 1)
13 %tmp = cmpxchg i64* %mem, i64 0, i64 1 monotonic
1414 ; CHECK: stdcx.
1515 ; CHECK: stdcx.
1616 ret i64 %tmp
1919 define i64 @exchange(i64* %mem, i64 %val) nounwind {
2020 ; CHECK: exchange:
2121 ; CHECK: ldarx
22 %tmp = call i64 @llvm.atomic.swap.i64.p0i64(i64* %mem, i64 1)
22 %tmp = atomicrmw xchg i64* %mem, i64 1 monotonic
2323 ; CHECK: stdcx.
2424 ret i64 %tmp
2525 }
26
27 declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind
28
29 declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* nocapture, i64, i64) nounwind
30
31 declare i64 @llvm.atomic.swap.i64.p0i64(i64* nocapture, i64) nounwind
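The last two hunks are the 32-bit and 64-bit PowerPC atomic tests; their CHECK lines verify that atomicrmw and cmpxchg still expand to the usual load-reserved/store-conditional loops (lwarx/stwcx. for i32, ldarx/stdcx. for i64) after the rewrite. A hypothetical extension, not part of this commit: a stronger ordering is written the same way, only with a different ordering keyword, which on PowerPC would typically add barrier instructions around the loop:

define i32 @exchange_acquire(i32* %mem, i32 %val) nounwind {
  %tmp = atomicrmw xchg i32* %mem, i32 %val acquire
  ret i32 %tmp
}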