commit 01724b2 (llvm/trunk)
Author: Qin Zhao

[esan|cfrag] Disable load/store instrumentation for cfrag

Summary:
Adds ClInstrumentFastpath option to control fastpath instrumentation.
Avoids the load/store instrumentation for the cache fragmentation tool.
Renames cache_frag_basic.ll to working_set_slow.ll for slowpath instrumentation test.
Adds the __esan_init check in struct_field_count_basic.ll.

Reviewers: aizatsky

Subscribers: llvm-commits, bruening, eugenis, kcc, zhaoqin, vitalybuka

Differential Revision: http://reviews.llvm.org/D21079

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@272355 91177308-0d34-0410-b5e6-96231b3b80d8
4 changed files with 275 additions and 263 deletions.
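For readers skimming the patch, here is a minimal, standalone C++ sketch of the control flow this change establishes (it is not the pass source; the real functions take the Instruction, DataLayout, address and alignment as parameters, and the bodies below are simplified stand-ins):

// Sketch only: ClInstrumentFastpath, NumFastpaths, instrumentLoadOrStore and
// instrumentFastpathCacheFrag mirror names from the diff below, but everything
// else here is a simplified stand-in, not the real EfficiencySanitizer code.
#include <cstdio>

static bool ClInstrumentFastpath = true; // stand-in for the new cl::opt<bool> flag
static int NumFastpaths = 0;

// Cache-fragmentation "fastpath": emits nothing, but reports success so the
// caller never falls back to slowpath load/store instrumentation.
static bool instrumentFastpathCacheFrag() {
  return true;
}

// Stand-in for the load/store instrumentation entry point: the slowpath runs
// only if the fastpath is disabled on the command line or declines the access.
static bool instrumentLoadOrStore() {
  if (ClInstrumentFastpath && instrumentFastpathCacheFrag()) {
    ++NumFastpaths;
    return true; // access handled; no __esan_* call emitted for cfrag
  }
  // ... slowpath instrumentation (e.g. an __esan_aligned_load* call) goes here ...
  return true;
}

int main() {
  instrumentLoadOrStore();
  std::printf("fastpaths taken: %d\n", NumFastpaths);
  return 0;
}

Passing -esan-instrument-fastpath=false to opt (as the renamed working_set_slow.ll test does) flips the flag and forces the slowpath. The actual hunks follow.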
lib/Transforms/Instrumentation/EfficiencySanitizer.cpp (7 additions, 3 deletions)

@@ -55,6 +55,9 @@
 static cl::opt<bool> ClInstrumentMemIntrinsics(
     "esan-instrument-memintrinsics", cl::init(true),
     cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
+static cl::opt<bool> ClInstrumentFastpath(
+    "esan-instrument-fastpath", cl::init(true),
+    cl::desc("Instrument fastpath"), cl::Hidden);
 
 // Experiments show that the performance difference can be 2x or more,
 // and accuracy loss is typically negligible, so we turn this on by default.
@@ -590,7 +593,8 @@
       {IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
        ConstantInt::get(IntptrTy, TypeSizeBytes)});
   } else {
-    if (instrumentFastpath(I, DL, IsStore, Addr, Alignment)) {
+    if (ClInstrumentFastpath &&
+        instrumentFastpath(I, DL, IsStore, Addr, Alignment)) {
       NumFastpaths++;
       return true;
     }
@@ -707,8 +711,8 @@
                                  const DataLayout &DL,
                                  Value *Addr,
                                  unsigned Alignment) {
-  // TODO(bruening): implement a fastpath for aligned accesses
-  return false;
+  // Do nothing.
+  return true;  // Return true to avoid slowpath instrumentation.
 }
 
 bool EfficiencySanitizer::instrumentFastpathWorkingSet(
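Read together, the hunks above mean that under -esan-cache-frag the pass no longer emits __esan_aligned_* / __esan_unaligned_* calls for loads and stores: the fastpath hook whose body now simply returns true claims every access (by its position just before instrumentFastpathWorkingSet and by the commit title, this is the cache-fragmentation fastpath), and the new ClInstrumentFastpath flag lets tests switch that fastpath off. That is why cache_frag_basic.ll is deleted below, why its contents reappear as working_set_slow.ll driven by -esan-working-set -esan-instrument-fastpath=false so the slowpath calls remain covered, and why struct_field_count_basic.ll gets a simpler RUN line plus __esan_init/__esan_exit CHECK lines.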
test/Instrumentation/EfficiencySanitizer/cache_frag_basic.ll (deleted: 0 additions, 259 deletions)

0 ; Test basic EfficiencySanitizer cache frag instrumentation.
1 ;
2 ; RUN: opt < %s -esan -esan-cache-frag -S | FileCheck %s
3
4 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
5 ; Aligned loads:
6
7 define i8 @loadAligned1(i8* %a) {
8 entry:
9 %tmp1 = load i8, i8* %a, align 1
10 ret i8 %tmp1
11 ; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
12 ; CHECK: call void @__esan_aligned_load1(i8* %a)
13 ; CHECK-NEXT: %tmp1 = load i8, i8* %a, align 1
14 ; CHECK-NEXT: ret i8 %tmp1
15 }
16
17 define i16 @loadAligned2(i16* %a) {
18 entry:
19 %tmp1 = load i16, i16* %a, align 2
20 ret i16 %tmp1
21 ; CHECK: %0 = bitcast i16* %a to i8*
22 ; CHECK-NEXT: call void @__esan_aligned_load2(i8* %0)
23 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 2
24 ; CHECK-NEXT: ret i16 %tmp1
25 }
26
27 define i32 @loadAligned4(i32* %a) {
28 entry:
29 %tmp1 = load i32, i32* %a, align 4
30 ret i32 %tmp1
31 ; CHECK: %0 = bitcast i32* %a to i8*
32 ; CHECK-NEXT: call void @__esan_aligned_load4(i8* %0)
33 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
34 ; CHECK-NEXT: ret i32 %tmp1
35 }
36
37 define i64 @loadAligned8(i64* %a) {
38 entry:
39 %tmp1 = load i64, i64* %a, align 8
40 ret i64 %tmp1
41 ; CHECK: %0 = bitcast i64* %a to i8*
42 ; CHECK-NEXT: call void @__esan_aligned_load8(i8* %0)
43 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 8
44 ; CHECK-NEXT: ret i64 %tmp1
45 }
46
47 define i128 @loadAligned16(i128* %a) {
48 entry:
49 %tmp1 = load i128, i128* %a, align 16
50 ret i128 %tmp1
51 ; CHECK: %0 = bitcast i128* %a to i8*
52 ; CHECK-NEXT: call void @__esan_aligned_load16(i8* %0)
53 ; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 16
54 ; CHECK-NEXT: ret i128 %tmp1
55 }
56
57 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
58 ; Aligned stores:
59
60 define void @storeAligned1(i8* %a) {
61 entry:
62 store i8 1, i8* %a, align 1
63 ret void
64 ; CHECK: call void @__esan_aligned_store1(i8* %a)
65 ; CHECK-NEXT: store i8 1, i8* %a, align 1
66 ; CHECK-NEXT: ret void
67 }
68
69 define void @storeAligned2(i16* %a) {
70 entry:
71 store i16 1, i16* %a, align 2
72 ret void
73 ; CHECK: %0 = bitcast i16* %a to i8*
74 ; CHECK-NEXT: call void @__esan_aligned_store2(i8* %0)
75 ; CHECK-NEXT: store i16 1, i16* %a, align 2
76 ; CHECK-NEXT: ret void
77 }
78
79 define void @storeAligned4(i32* %a) {
80 entry:
81 store i32 1, i32* %a, align 4
82 ret void
83 ; CHECK: %0 = bitcast i32* %a to i8*
84 ; CHECK-NEXT: call void @__esan_aligned_store4(i8* %0)
85 ; CHECK-NEXT: store i32 1, i32* %a, align 4
86 ; CHECK-NEXT: ret void
87 }
88
89 define void @storeAligned8(i64* %a) {
90 entry:
91 store i64 1, i64* %a, align 8
92 ret void
93 ; CHECK: %0 = bitcast i64* %a to i8*
94 ; CHECK-NEXT: call void @__esan_aligned_store8(i8* %0)
95 ; CHECK-NEXT: store i64 1, i64* %a, align 8
96 ; CHECK-NEXT: ret void
97 }
98
99 define void @storeAligned16(i128* %a) {
100 entry:
101 store i128 1, i128* %a, align 16
102 ret void
103 ; CHECK: %0 = bitcast i128* %a to i8*
104 ; CHECK-NEXT: call void @__esan_aligned_store16(i8* %0)
105 ; CHECK-NEXT: store i128 1, i128* %a, align 16
106 ; CHECK-NEXT: ret void
107 }
108
109 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
110 ; Unaligned loads:
111
112 define i16 @loadUnaligned2(i16* %a) {
113 entry:
114 %tmp1 = load i16, i16* %a, align 1
115 ret i16 %tmp1
116 ; CHECK: %0 = bitcast i16* %a to i8*
117 ; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
118 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
119 ; CHECK-NEXT: ret i16 %tmp1
120 }
121
122 define i32 @loadUnaligned4(i32* %a) {
123 entry:
124 %tmp1 = load i32, i32* %a, align 1
125 ret i32 %tmp1
126 ; CHECK: %0 = bitcast i32* %a to i8*
127 ; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
128 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 1
129 ; CHECK-NEXT: ret i32 %tmp1
130 }
131
132 define i64 @loadUnaligned8(i64* %a) {
133 entry:
134 %tmp1 = load i64, i64* %a, align 1
135 ret i64 %tmp1
136 ; CHECK: %0 = bitcast i64* %a to i8*
137 ; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
138 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 1
139 ; CHECK-NEXT: ret i64 %tmp1
140 }
141
142 define i128 @loadUnaligned16(i128* %a) {
143 entry:
144 %tmp1 = load i128, i128* %a, align 1
145 ret i128 %tmp1
146 ; CHECK: %0 = bitcast i128* %a to i8*
147 ; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
148 ; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 1
149 ; CHECK-NEXT: ret i128 %tmp1
150 }
151
152 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
153 ; Unaligned stores:
154
155 define void @storeUnaligned2(i16* %a) {
156 entry:
157 store i16 1, i16* %a, align 1
158 ret void
159 ; CHECK: %0 = bitcast i16* %a to i8*
160 ; CHECK-NEXT: call void @__esan_unaligned_store2(i8* %0)
161 ; CHECK-NEXT: store i16 1, i16* %a, align 1
162 ; CHECK-NEXT: ret void
163 }
164
165 define void @storeUnaligned4(i32* %a) {
166 entry:
167 store i32 1, i32* %a, align 1
168 ret void
169 ; CHECK: %0 = bitcast i32* %a to i8*
170 ; CHECK-NEXT: call void @__esan_unaligned_store4(i8* %0)
171 ; CHECK-NEXT: store i32 1, i32* %a, align 1
172 ; CHECK-NEXT: ret void
173 }
174
175 define void @storeUnaligned8(i64* %a) {
176 entry:
177 store i64 1, i64* %a, align 1
178 ret void
179 ; CHECK: %0 = bitcast i64* %a to i8*
180 ; CHECK-NEXT: call void @__esan_unaligned_store8(i8* %0)
181 ; CHECK-NEXT: store i64 1, i64* %a, align 1
182 ; CHECK-NEXT: ret void
183 }
184
185 define void @storeUnaligned16(i128* %a) {
186 entry:
187 store i128 1, i128* %a, align 1
188 ret void
189 ; CHECK: %0 = bitcast i128* %a to i8*
190 ; CHECK-NEXT: call void @__esan_unaligned_store16(i8* %0)
191 ; CHECK-NEXT: store i128 1, i128* %a, align 1
192 ; CHECK-NEXT: ret void
193 }
194
195 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
196 ; Unusual loads and stores:
197
198 define x86_fp80 @loadUnalignedFP(x86_fp80* %a) {
199 entry:
200 %tmp1 = load x86_fp80, x86_fp80* %a, align 1
201 ret x86_fp80 %tmp1
202 ; CHECK: %0 = bitcast x86_fp80* %a to i8*
203 ; CHECK-NEXT: call void @__esan_unaligned_loadN(i8* %0, i64 10)
204 ; CHECK-NEXT: %tmp1 = load x86_fp80, x86_fp80* %a, align 1
205 ; CHECK-NEXT: ret x86_fp80 %tmp1
206 }
207
208 define void @storeUnalignedFP(x86_fp80* %a) {
209 entry:
210 store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
211 ret void
212 ; CHECK: %0 = bitcast x86_fp80* %a to i8*
213 ; CHECK-NEXT: call void @__esan_unaligned_storeN(i8* %0, i64 10)
214 ; CHECK-NEXT: store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
215 ; CHECK-NEXT: ret void
216 }
217
218 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
219 ; Ensure that esan converts memcpy intrinsics to calls:
220
221 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
222 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
223 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
224
225 define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
226 entry:
227 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
228 ret void
229 ; CHECK: define void @memCpyTest
230 ; CHECK: call i8* @memcpy
231 ; CHECK: ret void
232 }
233
234 define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
235 entry:
236 tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
237 ret void
238 ; CHECK: define void @memMoveTest
239 ; CHECK: call i8* @memmove
240 ; CHECK: ret void
241 }
242
243 define void @memSetTest(i8* nocapture %x) {
244 entry:
245 tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
246 ret void
247 ; CHECK: define void @memSetTest
248 ; CHECK: call i8* @memset
249 ; CHECK: ret void
250 }
251
252 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
253 ; Top-level:
254
255 ; CHECK: define internal void @esan.module_ctor()
256 ; CHECK: call void @__esan_init(i32 1, i8* bitcast ({ i8*, i32, { i8*, i32, i64*, i8** }* }* @1 to i8*))
257 ; CHECK: define internal void @esan.module_dtor()
258 ; CHECK: call void @__esan_exit(i8* bitcast ({ i8*, i32, { i8*, i32, i64*, i8** }* }* @1 to i8*))
test/Instrumentation/EfficiencySanitizer/struct_field_count_basic.ll (9 additions, 1 deletion)

 ; Test basic EfficiencySanitizer struct field count instrumentation.
 ;
-; RUN: opt < %s -esan -esan-cache-frag -esan-instrument-loads-and-stores=false -esan-instrument-memintrinsics=false -S | FileCheck %s
+; RUN: opt < %s -esan -esan-cache-frag -S | FileCheck %s
 
 %struct.A = type { i32, i32 }
 %union.U = type { double }
...
 ; CHECK-NEXT: %k1 = load %struct.A*, %struct.A** %k, align 8
 ; CHECK-NEXT: %arrayidx13 = getelementptr inbounds %struct.A, %struct.A* %k1, i64 0
 ; CHECK-NEXT: ret i32 0
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; Top-level:
+
+; CHECK: define internal void @esan.module_ctor()
+; CHECK: call void @__esan_init(i32 1, i8* bitcast ({ i8*, i32, { i8*, i32, i64*, i8** }* }* @21 to i8*))
+; CHECK: define internal void @esan.module_dtor()
+; CHECK: call void @__esan_exit(i8* bitcast ({ i8*, i32, { i8*, i32, i64*, i8** }* }* @21 to i8*))
test/Instrumentation/EfficiencySanitizer/working_set_slow.ll (new file: 259 additions)

0 ; Test basic EfficiencySanitizer slowpath instrumentation.
1 ;
2 ; RUN: opt < %s -esan -esan-working-set -esan-instrument-fastpath=false -S | FileCheck %s
3
4 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
5 ; Aligned loads:
6
7 define i8 @loadAligned1(i8* %a) {
8 entry:
9 %tmp1 = load i8, i8* %a, align 1
10 ret i8 %tmp1
11 ; CHECK: @llvm.global_ctors = {{.*}}@esan.module_ctor
12 ; CHECK: call void @__esan_aligned_load1(i8* %a)
13 ; CHECK-NEXT: %tmp1 = load i8, i8* %a, align 1
14 ; CHECK-NEXT: ret i8 %tmp1
15 }
16
17 define i16 @loadAligned2(i16* %a) {
18 entry:
19 %tmp1 = load i16, i16* %a, align 2
20 ret i16 %tmp1
21 ; CHECK: %0 = bitcast i16* %a to i8*
22 ; CHECK-NEXT: call void @__esan_aligned_load2(i8* %0)
23 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 2
24 ; CHECK-NEXT: ret i16 %tmp1
25 }
26
27 define i32 @loadAligned4(i32* %a) {
28 entry:
29 %tmp1 = load i32, i32* %a, align 4
30 ret i32 %tmp1
31 ; CHECK: %0 = bitcast i32* %a to i8*
32 ; CHECK-NEXT: call void @__esan_aligned_load4(i8* %0)
33 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 4
34 ; CHECK-NEXT: ret i32 %tmp1
35 }
36
37 define i64 @loadAligned8(i64* %a) {
38 entry:
39 %tmp1 = load i64, i64* %a, align 8
40 ret i64 %tmp1
41 ; CHECK: %0 = bitcast i64* %a to i8*
42 ; CHECK-NEXT: call void @__esan_aligned_load8(i8* %0)
43 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 8
44 ; CHECK-NEXT: ret i64 %tmp1
45 }
46
47 define i128 @loadAligned16(i128* %a) {
48 entry:
49 %tmp1 = load i128, i128* %a, align 16
50 ret i128 %tmp1
51 ; CHECK: %0 = bitcast i128* %a to i8*
52 ; CHECK-NEXT: call void @__esan_aligned_load16(i8* %0)
53 ; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 16
54 ; CHECK-NEXT: ret i128 %tmp1
55 }
56
57 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
58 ; Aligned stores:
59
60 define void @storeAligned1(i8* %a) {
61 entry:
62 store i8 1, i8* %a, align 1
63 ret void
64 ; CHECK: call void @__esan_aligned_store1(i8* %a)
65 ; CHECK-NEXT: store i8 1, i8* %a, align 1
66 ; CHECK-NEXT: ret void
67 }
68
69 define void @storeAligned2(i16* %a) {
70 entry:
71 store i16 1, i16* %a, align 2
72 ret void
73 ; CHECK: %0 = bitcast i16* %a to i8*
74 ; CHECK-NEXT: call void @__esan_aligned_store2(i8* %0)
75 ; CHECK-NEXT: store i16 1, i16* %a, align 2
76 ; CHECK-NEXT: ret void
77 }
78
79 define void @storeAligned4(i32* %a) {
80 entry:
81 store i32 1, i32* %a, align 4
82 ret void
83 ; CHECK: %0 = bitcast i32* %a to i8*
84 ; CHECK-NEXT: call void @__esan_aligned_store4(i8* %0)
85 ; CHECK-NEXT: store i32 1, i32* %a, align 4
86 ; CHECK-NEXT: ret void
87 }
88
89 define void @storeAligned8(i64* %a) {
90 entry:
91 store i64 1, i64* %a, align 8
92 ret void
93 ; CHECK: %0 = bitcast i64* %a to i8*
94 ; CHECK-NEXT: call void @__esan_aligned_store8(i8* %0)
95 ; CHECK-NEXT: store i64 1, i64* %a, align 8
96 ; CHECK-NEXT: ret void
97 }
98
99 define void @storeAligned16(i128* %a) {
100 entry:
101 store i128 1, i128* %a, align 16
102 ret void
103 ; CHECK: %0 = bitcast i128* %a to i8*
104 ; CHECK-NEXT: call void @__esan_aligned_store16(i8* %0)
105 ; CHECK-NEXT: store i128 1, i128* %a, align 16
106 ; CHECK-NEXT: ret void
107 }
108
109 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
110 ; Unaligned loads:
111
112 define i16 @loadUnaligned2(i16* %a) {
113 entry:
114 %tmp1 = load i16, i16* %a, align 1
115 ret i16 %tmp1
116 ; CHECK: %0 = bitcast i16* %a to i8*
117 ; CHECK-NEXT: call void @__esan_unaligned_load2(i8* %0)
118 ; CHECK-NEXT: %tmp1 = load i16, i16* %a, align 1
119 ; CHECK-NEXT: ret i16 %tmp1
120 }
121
122 define i32 @loadUnaligned4(i32* %a) {
123 entry:
124 %tmp1 = load i32, i32* %a, align 1
125 ret i32 %tmp1
126 ; CHECK: %0 = bitcast i32* %a to i8*
127 ; CHECK-NEXT: call void @__esan_unaligned_load4(i8* %0)
128 ; CHECK-NEXT: %tmp1 = load i32, i32* %a, align 1
129 ; CHECK-NEXT: ret i32 %tmp1
130 }
131
132 define i64 @loadUnaligned8(i64* %a) {
133 entry:
134 %tmp1 = load i64, i64* %a, align 1
135 ret i64 %tmp1
136 ; CHECK: %0 = bitcast i64* %a to i8*
137 ; CHECK-NEXT: call void @__esan_unaligned_load8(i8* %0)
138 ; CHECK-NEXT: %tmp1 = load i64, i64* %a, align 1
139 ; CHECK-NEXT: ret i64 %tmp1
140 }
141
142 define i128 @loadUnaligned16(i128* %a) {
143 entry:
144 %tmp1 = load i128, i128* %a, align 1
145 ret i128 %tmp1
146 ; CHECK: %0 = bitcast i128* %a to i8*
147 ; CHECK-NEXT: call void @__esan_unaligned_load16(i8* %0)
148 ; CHECK-NEXT: %tmp1 = load i128, i128* %a, align 1
149 ; CHECK-NEXT: ret i128 %tmp1
150 }
151
152 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
153 ; Unaligned stores:
154
155 define void @storeUnaligned2(i16* %a) {
156 entry:
157 store i16 1, i16* %a, align 1
158 ret void
159 ; CHECK: %0 = bitcast i16* %a to i8*
160 ; CHECK-NEXT: call void @__esan_unaligned_store2(i8* %0)
161 ; CHECK-NEXT: store i16 1, i16* %a, align 1
162 ; CHECK-NEXT: ret void
163 }
164
165 define void @storeUnaligned4(i32* %a) {
166 entry:
167 store i32 1, i32* %a, align 1
168 ret void
169 ; CHECK: %0 = bitcast i32* %a to i8*
170 ; CHECK-NEXT: call void @__esan_unaligned_store4(i8* %0)
171 ; CHECK-NEXT: store i32 1, i32* %a, align 1
172 ; CHECK-NEXT: ret void
173 }
174
175 define void @storeUnaligned8(i64* %a) {
176 entry:
177 store i64 1, i64* %a, align 1
178 ret void
179 ; CHECK: %0 = bitcast i64* %a to i8*
180 ; CHECK-NEXT: call void @__esan_unaligned_store8(i8* %0)
181 ; CHECK-NEXT: store i64 1, i64* %a, align 1
182 ; CHECK-NEXT: ret void
183 }
184
185 define void @storeUnaligned16(i128* %a) {
186 entry:
187 store i128 1, i128* %a, align 1
188 ret void
189 ; CHECK: %0 = bitcast i128* %a to i8*
190 ; CHECK-NEXT: call void @__esan_unaligned_store16(i8* %0)
191 ; CHECK-NEXT: store i128 1, i128* %a, align 1
192 ; CHECK-NEXT: ret void
193 }
194
195 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
196 ; Unusual loads and stores:
197
198 define x86_fp80 @loadUnalignedFP(x86_fp80* %a) {
199 entry:
200 %tmp1 = load x86_fp80, x86_fp80* %a, align 1
201 ret x86_fp80 %tmp1
202 ; CHECK: %0 = bitcast x86_fp80* %a to i8*
203 ; CHECK-NEXT: call void @__esan_unaligned_loadN(i8* %0, i64 10)
204 ; CHECK-NEXT: %tmp1 = load x86_fp80, x86_fp80* %a, align 1
205 ; CHECK-NEXT: ret x86_fp80 %tmp1
206 }
207
208 define void @storeUnalignedFP(x86_fp80* %a) {
209 entry:
210 store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
211 ret void
212 ; CHECK: %0 = bitcast x86_fp80* %a to i8*
213 ; CHECK-NEXT: call void @__esan_unaligned_storeN(i8* %0, i64 10)
214 ; CHECK-NEXT: store x86_fp80 0xK00000000000000000000, x86_fp80* %a, align 1
215 ; CHECK-NEXT: ret void
216 }
217
218 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
219 ; Ensure that esan converts memcpy intrinsics to calls:
220
221 declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
222 declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1)
223 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
224
225 define void @memCpyTest(i8* nocapture %x, i8* nocapture %y) {
226 entry:
227 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
228 ret void
229 ; CHECK: define void @memCpyTest
230 ; CHECK: call i8* @memcpy
231 ; CHECK: ret void
232 }
233
234 define void @memMoveTest(i8* nocapture %x, i8* nocapture %y) {
235 entry:
236 tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %x, i8* %y, i64 16, i32 4, i1 false)
237 ret void
238 ; CHECK: define void @memMoveTest
239 ; CHECK: call i8* @memmove
240 ; CHECK: ret void
241 }
242
243 define void @memSetTest(i8* nocapture %x) {
244 entry:
245 tail call void @llvm.memset.p0i8.i64(i8* %x, i8 77, i64 16, i32 4, i1 false)
246 ret void
247 ; CHECK: define void @memSetTest
248 ; CHECK: call i8* @memset
249 ; CHECK: ret void
250 }
251
252 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
253 ; Top-level:
254
255 ; CHECK: define internal void @esan.module_ctor()
256 ; CHECK: call void @__esan_init(i32 2, i8* null)
257 ; CHECK: define internal void @esan.module_dtor()
258 ; CHECK: call void @__esan_exit(i8* null)