llvm.org GIT mirror llvm / d2bf432
Upgrade syntax of tests using volatile instructions to use 'load volatile' instead of 'volatile load', which is archaic. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@145171 91177308-0d34-0410-b5e6-96231b3b80d8 Chris Lattner 7 years ago
91 changed file(s) with 300 addition(s) and 318 deletion(s). Raw diff Collapse all Expand all
1515
1616 %p.0.i.0 = getelementptr [3 x [3 x double]]* %p, i64 0, i64 %i, i64 0
1717
18 volatile store double 0.0, double* %p3
19 volatile store double 0.1, double* %p.0.i.0
18 store volatile double 0.0, double* %p3
19 store volatile double 0.1, double* %p.0.i.0
2020
2121 %i.next = add i64 %i, 1
2222 %cmp = icmp slt i64 %i.next, 3
1616 exit:
1717 %a = phi double* [ %x, %true ], [ %y, %false ]
1818 %b = phi double* [ %x, %false ], [ %y, %true ]
19 volatile store double 0.0, double* %a
20 volatile store double 1.0, double* %b
19 store volatile double 0.0, double* %a
20 store volatile double 1.0, double* %b
2121 ret void
2222 }
2323
2626 entry:
2727 %a = select i1 %m, double* %x, double* %y
2828 %b = select i1 %m, double* %y, double* %x
29 volatile store double 0.000000e+00, double* %a
30 volatile store double 1.000000e+00, double* %b
29 store volatile double 0.000000e+00, double* %a
30 store volatile double 1.000000e+00, double* %b
3131 ret void
3232 }
3333
5555
5656 nexit:
5757 %b = phi double* [ %v, %ntrue ], [ %w, %nfalse ]
58 volatile store double 0.0, double* %a
59 volatile store double 1.0, double* %b
58 store volatile double 0.0, double* %a
59 store volatile double 1.0, double* %b
6060 ret void
6161 }
6262
6666 entry:
6767 %a = select i1 %m, double* %x, double* %y
6868 %b = select i1 %n, double* %v, double* %w
69 volatile store double 0.000000e+00, double* %a
70 volatile store double 1.000000e+00, double* %b
69 store volatile double 0.000000e+00, double* %a
70 store volatile double 1.000000e+00, double* %b
7171 ret void
7272 }
1010 bb88.i: ; preds = %bb74.i
1111 br i1 false, label %mandel.exit, label %bb74.i
1212 mandel.exit: ; preds = %bb88.i
13 %tmp2 = volatile load double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; [#uses=1]
13 %tmp2 = load volatile double* getelementptr ({ double, double }* @accum, i32 0, i32 0), align 8 ; [#uses=1]
1414 %tmp23 = fptosi double %tmp2 to i32 ; [#uses=1]
1515 %tmp5 = tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @.str, i32 0, i32 0), i32 %tmp23 ) ; [#uses=0]
1616 ret i32 0
2525 ; CHECKV4: bx r{{.*}}
2626 BB0:
2727 %5 = inttoptr i32 %0 to i32* ; [#uses=1]
28 %t35 = volatile load i32* %5 ; [#uses=1]
28 %t35 = load volatile i32* %5 ; [#uses=1]
2929 %6 = inttoptr i32 %t35 to i32** ; [#uses=1]
3030 %7 = getelementptr i32** %6, i32 86 ; [#uses=1]
3131 %8 = load i32** %7 ; [#uses=1]
+0
-8
test/CodeGen/CBackend/2005-02-14-VolatileOperations.ll less more
None ; RUN: llc < %s -march=c | grep volatile
1
2 define void @test(i32* %P) {
3 %X = volatile load i32* %P ; [#uses=1]
4 volatile store i32 %X, i32* %P
5 ret void
6 }
7
+0
-10
test/CodeGen/CBackend/2005-09-27-VolatileFuncPtr.ll less more
None ; RUN: llc < %s -march=c | grep {\\* *volatile *\\*}
1
2 @G = external global void ()* ; [#uses=2]
3
4 define void @test() {
5 volatile store void ()* @test, void ()** @G
6 volatile load void ()** @G ; :1 [#uses=0]
7 ret void
8 }
9
77 }
88
99 define void @test2(i32* %P) {
10 %X = volatile load i32* %P, align 2
11 volatile store i32 %X, i32* %P, align 2
10 %X = load volatile i32* %P, align 2
11 store volatile i32 %X, i32* %P, align 2
1212 ret void
1313 }
1414
66
77 define void @uip_arp_arpin() nounwind {
88 entry:
9 %tmp = volatile load i16* @uip_len ; [#uses=1]
9 %tmp = load volatile i16* @uip_len ; [#uses=1]
1010 %cmp = icmp ult i16 %tmp, 42 ; [#uses=1]
11 volatile store i16 0, i16* @uip_len
11 store volatile i16 0, i16* @uip_len
1212 br i1 %cmp, label %if.then, label %if.end
1313
1414 if.then: ; preds = %entry
55 define i16 @foo() nounwind readnone {
66 entry:
77 %result = alloca i16, align 1 ; [#uses=2]
8 volatile store i16 0, i16* %result
9 %tmp = volatile load i16* %result ; [#uses=1]
8 store volatile i16 0, i16* %result
9 %tmp = load volatile i16* %result ; [#uses=1]
1010 ret i16 %tmp
1111 }
1212
2121
2222 while.end: ; preds = %while.cond
2323 %result.i = alloca i16, align 1 ; [#uses=2]
24 volatile store i16 0, i16* %result.i
25 %tmp.i = volatile load i16* %result.i ; [#uses=0]
24 store volatile i16 0, i16* %result.i
25 %tmp.i = load volatile i16* %result.i ; [#uses=0]
2626 ret i16 0
2727 }
2828
1010 %x.addr = alloca i8 ; [#uses=2]
1111 %tmp = alloca i8, align 1 ; [#uses=2]
1212 store i8 %x, i8* %x.addr
13 %tmp1 = volatile load i8* @"\010x0021" ; [#uses=1]
13 %tmp1 = load volatile i8* @"\010x0021" ; [#uses=1]
1414 store i8 %tmp1, i8* %tmp
1515 %tmp2 = load i8* %x.addr ; [#uses=1]
16 volatile store i8 %tmp2, i8* @"\010x0021"
16 store volatile i8 %tmp2, i8* @"\010x0021"
1717 %tmp3 = load i8* %tmp ; [#uses=1]
1818 store i8 %tmp3, i8* %retval
1919 %0 = load i8* %retval ; [#uses=1]
33 entry:
44 %r = alloca i8 ; [#uses=2]
55 %"alloca point" = bitcast i32 0 to i32 ; [#uses=0]
6 volatile load i8* %r, align 1 ; :0 [#uses=1]
6 load volatile i8* %r, align 1 ; :0 [#uses=1]
77 or i8 %0, 1 ; :1 [#uses=1]
8 volatile store i8 %1, i8* %r, align 1
8 store volatile i8 %1, i8* %r, align 1
99 br label %return
1010
1111 return: ; preds = %entry
3131 ; CHECK: bis.b bar(r14), r15
3232
3333 define i16 @am4(i16 %x) nounwind {
34 %1 = volatile load i16* inttoptr(i16 32 to i16*)
34 %1 = load volatile i16* inttoptr(i16 32 to i16*)
3535 %2 = or i16 %1,%x
3636 ret i16 %2
3737 }
3434 ; CHECK: bis.b r14, bar(r15)
3535
3636 define void @am4(i16 %x) nounwind {
37 %1 = volatile load i16* inttoptr(i16 32 to i16*)
37 %1 = load volatile i16* inttoptr(i16 32 to i16*)
3838 %2 = or i16 %x, %1
39 volatile store i16 %2, i16* inttoptr(i16 32 to i16*)
39 store volatile i16 %2, i16* inttoptr(i16 32 to i16*)
4040 ret void
4141 }
4242 ; CHECK: am4:
2828 ; CHECK: mov.b bar(r15), r15
2929
3030 define i16 @am4() nounwind {
31 %1 = volatile load i16* inttoptr(i16 32 to i16*)
31 %1 = load volatile i16* inttoptr(i16 32 to i16*)
3232 ret i16 %1
3333 }
3434 ; CHECK: am4:
2828 ; CHECK: mov.b r14, bar(r15)
2929
3030 define void @am4(i16 %a) nounwind {
31 volatile store i16 %a, i16* inttoptr(i16 32 to i16*)
31 store volatile i16 %a, i16* inttoptr(i16 32 to i16*)
3232 ret void
3333 }
3434 ; CHECK: am4:
22 define i32 @main() nounwind readnone {
33 entry:
44 %x = alloca i32, align 4 ; [#uses=2]
5 volatile store i32 2, i32* %x, align 4
6 %0 = volatile load i32* %x, align 4 ; [#uses=1]
5 store volatile i32 2, i32* %x, align 4
6 %0 = load volatile i32* %x, align 4 ; [#uses=1]
77 ; CHECK: lui $3, %hi($JTI0_0)
88 ; CHECK: addiu $3, $3, %lo($JTI0_0)
99 ; CHECK: sll $2, $2, 2
55
66 define void @simple_vol_file() nounwind {
77 entry:
8 %tmp = volatile load i32** @stat_vol_ptr_int, align 4
8 %tmp = load volatile i32** @stat_vol_ptr_int, align 4
99 %0 = bitcast i32* %tmp to i8*
1010 call void @llvm.prefetch(i8* %0, i32 0, i32 0, i32 1)
1111 %tmp1 = load i32** @stat_ptr_vol_int, align 4
1111
1212 define i32 @_Z14ProgramByWordsPvyy(i8* %buffer, i64 %Offset, i64 %bufferSize) nounwind {
1313 entry:
14 volatile store i8 -1, i8* null, align 1
14 store volatile i8 -1, i8* null, align 1
1515 %tmp28 = icmp eq i8 0, 0 ; [#uses=1]
1616 br i1 %tmp28, label %bb107, label %bb
1717
4242 %tmp2021.i = trunc i64 %Pos.0.reg2mem.0 to i32 ; [#uses=1]
4343 %tmp202122.i = inttoptr i32 %tmp2021.i to i8* ; [#uses=1]
4444 tail call void @IODelay( i32 500 ) nounwind
45 %tmp53.i = volatile load i16* null, align 2 ; [#uses=2]
45 %tmp53.i = load volatile i16* null, align 2 ; [#uses=2]
4646 %tmp5455.i = zext i16 %tmp53.i to i32 ; [#uses=1]
4747 br i1 false, label %bb.i, label %bb65.i
4848
5858 ret i32 0
5959
6060 _Z24unlock_then_erase_sectory.exit: ; preds = %bb65.i
61 volatile store i8 -1, i8* %tmp202122.i, align 1
61 store volatile i8 -1, i8* %tmp202122.i, align 1
6262 %tmp93 = add i64 0, %Pos.0.reg2mem.0 ; [#uses=2]
6363 %tmp98 = add i64 0, %Offset ; [#uses=1]
6464 %tmp100 = icmp ugt i64 %tmp98, %tmp93 ; [#uses=1]
1212
1313 bb: ; preds = %bb, %entry
1414 %a_addr.0 = phi i32 [ %a, %entry ], [ %tmp5, %bb ] ; [#uses=2]
15 %tmp = volatile load i8** %va ; [#uses=2]
15 %tmp = load volatile i8** %va ; [#uses=2]
1616 %tmp2 = getelementptr i8* %tmp, i32 4 ; [#uses=1]
17 volatile store i8* %tmp2, i8** %va
17 store volatile i8* %tmp2, i8** %va
1818 %tmp5 = add i32 %a_addr.0, -1 ; [#uses=1]
1919 %tmp.upgrd.2 = icmp eq i32 %a_addr.0, 1 ; [#uses=1]
2020 br i1 %tmp.upgrd.2, label %bb7, label %bb
2020 define void @test(%s1* %this, i32 %format, i32 %w, i32 %h, i32 %levels, i32* %s, i8* %data, i32* nocapture %rowbytes, void (i8*, i8*)* %release, i8* %info) nounwind {
2121 entry:
2222 %tmp1 = getelementptr inbounds %s1* %this, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
23 volatile store i32 1, i32* %tmp1, align 4
23 store volatile i32 1, i32* %tmp1, align 4
2424 %tmp12 = getelementptr inbounds %s1* %this, i32 0, i32 1
2525 store i32 %levels, i32* %tmp12, align 4
2626 %tmp13 = getelementptr inbounds %s1* %this, i32 0, i32 3
4545 %tmp24 = shl i32 %flags.0, 16
4646 %asmtmp.i.i.i = tail call %0 asm sideeffect "\0A0:\09ldrex $1, [$2]\0A\09orr $1, $1, $3\0A\09strex $0, $1, [$2]\0A\09cmp $0, #0\0A\09bne 0b", "=&r,=&r,r,r,~{memory},~{cc}"(i32* %tmp1, i32 %tmp24) nounwind
4747 %tmp25 = getelementptr inbounds %s1* %this, i32 0, i32 2, i32 0, i32 0
48 volatile store i32 1, i32* %tmp25, align 4
48 store volatile i32 1, i32* %tmp25, align 4
4949 %tmp26 = icmp eq i32 %levels, 0
5050 br i1 %tmp26, label %return, label %bb4
5151
0 ; RUN: llc < %s -march=x86 | not grep movsd
11 ; RUN: llc < %s -march=x86 | grep movw
22 ; RUN: llc < %s -march=x86 | grep addw
3 ; These transforms are turned off for volatile loads and stores.
3 ; These transforms are turned off for volatile loads and stores.
44 ; Check that they weren't turned off for all loads and stores!
55
66 @atomic = global double 0.000000e+00 ; [#uses=1]
77
88 define i16 @f(i64 %x, double %y) {
99 %b = bitcast i64 %x to double ; [#uses=1]
10 volatile store double %b, double* @atomic ; one processor operation only
11 volatile store double 0.000000e+00, double* @atomic2 ; one processor operation only
10 store volatile double %b, double* @atomic ; one processor operation only
11 store volatile double 0.000000e+00, double* @atomic2 ; one processor operation only
1212 %b2 = bitcast double %y to i64 ; [#uses=1]
13 volatile store i64 %b2, i64* @anything ; may transform to store of double
14 %l = volatile load i32* @ioport ; must not narrow
13 store volatile i64 %b2, i64* @anything ; may transform to store of double
14 %l = load volatile i32* @ioport ; must not narrow
1515 %t = trunc i32 %l to i16 ; [#uses=1]
16 %l2 = volatile load i32* @ioport ; must not narrow
16 %l2 = load volatile i32* @ioport ; must not narrow
1717 %tmp = lshr i32 %l2, 16 ; [#uses=1]
1818 %t2 = trunc i32 %tmp to i16 ; [#uses=1]
1919 %f = add i16 %t, %t2 ; [#uses=1]
55
66 define i32 @main() nounwind {
77 entry:
8 %0 = volatile load i32* @g_407, align 4 ; [#uses=1]
8 %0 = load volatile i32* @g_407, align 4 ; [#uses=1]
99 %1 = trunc i32 %0 to i8 ; [#uses=1]
1010 %2 = tail call i32 @func_45(i8 zeroext %1) nounwind ; [#uses=0]
1111 ret i32 0
55 %B = bitcast <8 x double> %A to i512 ; [#uses=1]
66 %C = lshr i512 %B, 448 ; [#uses=1]
77 %D = trunc i512 %C to i64 ; [#uses=1]
8 volatile store i64 %D, i64* %Q
8 store volatile i64 %D, i64* %Q
99 ret void
1010 }
88 @X = external global i64 ; [#uses=25]
99
1010 define fastcc i64 @foo() nounwind {
11 %tmp = volatile load i64* @X ; [#uses=7]
12 %tmp1 = volatile load i64* @X ; [#uses=5]
13 %tmp2 = volatile load i64* @X ; [#uses=3]
14 %tmp3 = volatile load i64* @X ; [#uses=1]
15 %tmp4 = volatile load i64* @X ; [#uses=5]
16 %tmp5 = volatile load i64* @X ; [#uses=3]
17 %tmp6 = volatile load i64* @X ; [#uses=2]
18 %tmp7 = volatile load i64* @X ; [#uses=1]
19 %tmp8 = volatile load i64* @X ; [#uses=1]
20 %tmp9 = volatile load i64* @X ; [#uses=1]
21 %tmp10 = volatile load i64* @X ; [#uses=1]
22 %tmp11 = volatile load i64* @X ; [#uses=1]
23 %tmp12 = volatile load i64* @X ; [#uses=1]
24 %tmp13 = volatile load i64* @X ; [#uses=1]
25 %tmp14 = volatile load i64* @X ; [#uses=1]
26 %tmp15 = volatile load i64* @X ; [#uses=1]
27 %tmp16 = volatile load i64* @X ; [#uses=1]
28 %tmp17 = volatile load i64* @X ; [#uses=1]
29 %tmp18 = volatile load i64* @X ; [#uses=1]
30 %tmp19 = volatile load i64* @X ; [#uses=1]
31 %tmp20 = volatile load i64* @X ; [#uses=1]
32 %tmp21 = volatile load i64* @X ; [#uses=1]
33 %tmp22 = volatile load i64* @X ; [#uses=1]
34 %tmp23 = volatile load i64* @X ; [#uses=1]
11 %tmp = load volatile i64* @X ; [#uses=7]
12 %tmp1 = load volatile i64* @X ; [#uses=5]
13 %tmp2 = load volatile i64* @X ; [#uses=3]
14 %tmp3 = load volatile i64* @X ; [#uses=1]
15 %tmp4 = load volatile i64* @X ; [#uses=5]
16 %tmp5 = load volatile i64* @X ; [#uses=3]
17 %tmp6 = load volatile i64* @X ; [#uses=2]
18 %tmp7 = load volatile i64* @X ; [#uses=1]
19 %tmp8 = load volatile i64* @X ; [#uses=1]
20 %tmp9 = load volatile i64* @X ; [#uses=1]
21 %tmp10 = load volatile i64* @X ; [#uses=1]
22 %tmp11 = load volatile i64* @X ; [#uses=1]
23 %tmp12 = load volatile i64* @X ; [#uses=1]
24 %tmp13 = load volatile i64* @X ; [#uses=1]
25 %tmp14 = load volatile i64* @X ; [#uses=1]
26 %tmp15 = load volatile i64* @X ; [#uses=1]
27 %tmp16 = load volatile i64* @X ; [#uses=1]
28 %tmp17 = load volatile i64* @X ; [#uses=1]
29 %tmp18 = load volatile i64* @X ; [#uses=1]
30 %tmp19 = load volatile i64* @X ; [#uses=1]
31 %tmp20 = load volatile i64* @X ; [#uses=1]
32 %tmp21 = load volatile i64* @X ; [#uses=1]
33 %tmp22 = load volatile i64* @X ; [#uses=1]
34 %tmp23 = load volatile i64* @X ; [#uses=1]
3535 %tmp24 = call i64 @llvm.bswap.i64(i64 %tmp8) ; [#uses=1]
3636 %tmp25 = add i64 %tmp6, %tmp5 ; [#uses=1]
3737 %tmp26 = add i64 %tmp25, %tmp4 ; [#uses=1]
228228 %tmp217 = add i64 %tmp205, %tmp215 ; [#uses=1]
229229 %tmp218 = add i64 %tmp217, %tmp211 ; [#uses=1]
230230 %tmp219 = call i64 @llvm.bswap.i64(i64 %tmp23) ; [#uses=2]
231 volatile store i64 %tmp219, i64* @X, align 8
231 store volatile i64 %tmp219, i64* @X, align 8
232232 %tmp220 = add i64 %tmp203, %tmp190 ; [#uses=1]
233233 %tmp221 = add i64 %tmp220, %tmp216 ; [#uses=1]
234234 %tmp222 = add i64 %tmp219, %tmp177 ; [#uses=1]
1111 br label %bb
1212
1313 bb: ; preds = %bb.i, %bb, %entry
14 %2 = volatile load i32* @g_9, align 4 ; [#uses=2]
14 %2 = load volatile i32* @g_9, align 4 ; [#uses=2]
1515 %3 = icmp sgt i32 %2, 1 ; [#uses=1]
1616 %4 = and i1 %3, %1 ; [#uses=1]
1717 br i1 %4, label %bb.i, label %bb
4040 br i1 undef, label %bb5, label %bb4
4141
4242 bb4: ; preds = %bb3
43 %17 = volatile load i32* @uint8, align 4 ; [#uses=0]
43 %17 = load volatile i32* @uint8, align 4 ; [#uses=0]
4444 br label %bb5
4545
4646 bb5: ; preds = %bb4, %bb3
47 %18 = volatile load i32* @uint8, align 4 ; [#uses=0]
47 %18 = load volatile i32* @uint8, align 4 ; [#uses=0]
4848 %19 = sext i8 undef to i16 ; [#uses=1]
4949 %20 = tail call i32 @func_24(i16 zeroext %19, i8 signext 1) nounwind; [#uses=0]
5050 br i1 undef, label %return, label %bb6.preheader
5151
5252 bb6.preheader: ; preds = %bb5
5353 %21 = sext i8 %p_52 to i32 ; [#uses=1]
54 %22 = volatile load i32* @uint8, align 4 ; [#uses=0]
54 %22 = load volatile i32* @uint8, align 4 ; [#uses=0]
5555 %23 = tail call i32 (...)* @safefuncts(i32 %21, i32 1) nounwind; [#uses=0]
5656 unreachable
5757
689689 entry:
690690 br label %0
691691
692 %val0 = volatile load float* undef
692 %val0 = load volatile float* undef
693693 %cmp0 = fcmp une float %val0, undef
694694 br i1 %cmp0, label %1, label %0
695 %val1 = volatile load float* undef
695 %val1 = load volatile float* undef
696696 %cmp1 = fcmp une float %val1, undef
697697 br i1 %cmp1, label %2, label %1
698 %val2 = volatile load float* undef
698 %val2 = load volatile float* undef
699699 %cmp2 = fcmp une float %val2, undef
700700 br i1 %cmp2, label %3, label %2
701 %val3 = volatile load float* undef
701 %val3 = load volatile float* undef
702702 %cmp3 = fcmp une float %val3, undef
703703 br i1 %cmp3, label %4, label %3
704 %val4 = volatile load float* undef
704 %val4 = load volatile float* undef
705705 %cmp4 = fcmp une float %val4, undef
706706 br i1 %cmp4, label %5, label %4
707 %val5 = volatile load float* undef
707 %val5 = load volatile float* undef
708708 %cmp5 = fcmp une float %val5, undef
709709 br i1 %cmp5, label %6, label %5
710 %val6 = volatile load float* undef
710 %val6 = load volatile float* undef
711711 %cmp6 = fcmp une float %val6, undef
712712 br i1 %cmp6, label %7, label %6
713 %val7 = volatile load float* undef
713 %val7 = load volatile float* undef
714714 %cmp7 = fcmp une float %val7, undef
715715 br i1 %cmp7, label %8, label %7
716 %val8 = volatile load float* undef
716 %val8 = load volatile float* undef
717717 %cmp8 = fcmp une float %val8, undef
718718 br i1 %cmp8, label %9, label %8
719 %val9 = volatile load float* undef
719 %val9 = load volatile float* undef
720720 %cmp9 = fcmp une float %val9, undef
721721 br i1 %cmp9, label %10, label %9
722 %val10 = volatile load float* undef
722 %val10 = load volatile float* undef
723723 %cmp10 = fcmp une float %val10, undef
724724 br i1 %cmp10, label %11, label %10
725 %val11 = volatile load float* undef
725 %val11 = load volatile float* undef
726726 %cmp11 = fcmp une float %val11, undef
727727 br i1 %cmp11, label %12, label %11
728 %val12 = volatile load float* undef
728 %val12 = load volatile float* undef
729729 %cmp12 = fcmp une float %val12, undef
730730 br i1 %cmp12, label %13, label %12
731 %val13 = volatile load float* undef
731 %val13 = load volatile float* undef
732732 %cmp13 = fcmp une float %val13, undef
733733 br i1 %cmp13, label %14, label %13
734 %val14 = volatile load float* undef
734 %val14 = load volatile float* undef
735735 %cmp14 = fcmp une float %val14, undef
736736 br i1 %cmp14, label %15, label %14
737 %val15 = volatile load float* undef
737 %val15 = load volatile float* undef
738738 %cmp15 = fcmp une float %val15, undef
739739 br i1 %cmp15, label %16, label %15
740 %val16 = volatile load float* undef
740 %val16 = load volatile float* undef
741741 %cmp16 = fcmp une float %val16, undef
742742 br i1 %cmp16, label %17, label %16
743 %val17 = volatile load float* undef
743 %val17 = load volatile float* undef
744744 %cmp17 = fcmp une float %val17, undef
745745 br i1 %cmp17, label %18, label %17
746 %val18 = volatile load float* undef
746 %val18 = load volatile float* undef
747747 %cmp18 = fcmp une float %val18, undef
748748 br i1 %cmp18, label %19, label %18
749 %val19 = volatile load float* undef
749 %val19 = load volatile float* undef
750750 %cmp19 = fcmp une float %val19, undef
751751 br i1 %cmp19, label %20, label %19
752 %val20 = volatile load float* undef
752 %val20 = load volatile float* undef
753753 %cmp20 = fcmp une float %val20, undef
754754 br i1 %cmp20, label %21, label %20
755 %val21 = volatile load float* undef
755 %val21 = load volatile float* undef
756756 %cmp21 = fcmp une float %val21, undef
757757 br i1 %cmp21, label %22, label %21
758 %val22 = volatile load float* undef
758 %val22 = load volatile float* undef
759759 %cmp22 = fcmp une float %val22, undef
760760 br i1 %cmp22, label %23, label %22
761 %val23 = volatile load float* undef
761 %val23 = load volatile float* undef
762762 %cmp23 = fcmp une float %val23, undef
763763 br i1 %cmp23, label %24, label %23
764 %val24 = volatile load float* undef
764 %val24 = load volatile float* undef
765765 %cmp24 = fcmp une float %val24, undef
766766 br i1 %cmp24, label %25, label %24
767 %val25 = volatile load float* undef
767 %val25 = load volatile float* undef
768768 %cmp25 = fcmp une float %val25, undef
769769 br i1 %cmp25, label %26, label %25
770 %val26 = volatile load float* undef
770 %val26 = load volatile float* undef
771771 %cmp26 = fcmp une float %val26, undef
772772 br i1 %cmp26, label %27, label %26
773 %val27 = volatile load float* undef
773 %val27 = load volatile float* undef
774774 %cmp27 = fcmp une float %val27, undef
775775 br i1 %cmp27, label %28, label %27
776 %val28 = volatile load float* undef
776 %val28 = load volatile float* undef
777777 %cmp28 = fcmp une float %val28, undef
778778 br i1 %cmp28, label %29, label %28
779 %val29 = volatile load float* undef
779 %val29 = load volatile float* undef
780780 %cmp29 = fcmp une float %val29, undef
781781 br i1 %cmp29, label %30, label %29
782 %val30 = volatile load float* undef
782 %val30 = load volatile float* undef
783783 %cmp30 = fcmp une float %val30, undef
784784 br i1 %cmp30, label %31, label %30
785 %val31 = volatile load float* undef
785 %val31 = load volatile float* undef
786786 %cmp31 = fcmp une float %val31, undef
787787 br i1 %cmp31, label %32, label %31
788 %val32 = volatile load float* undef
788 %val32 = load volatile float* undef
789789 %cmp32 = fcmp une float %val32, undef
790790 br i1 %cmp32, label %33, label %32
791 %val33 = volatile load float* undef
791 %val33 = load volatile float* undef
792792 %cmp33 = fcmp une float %val33, undef
793793 br i1 %cmp33, label %34, label %33
794 %val34 = volatile load float* undef
794 %val34 = load volatile float* undef
795795 %cmp34 = fcmp une float %val34, undef
796796 br i1 %cmp34, label %35, label %34
797 %val35 = volatile load float* undef
797 %val35 = load volatile float* undef
798798 %cmp35 = fcmp une float %val35, undef
799799 br i1 %cmp35, label %36, label %35
800 %val36 = volatile load float* undef
800 %val36 = load volatile float* undef
801801 %cmp36 = fcmp une float %val36, undef
802802 br i1 %cmp36, label %37, label %36
803 %val37 = volatile load float* undef
803 %val37 = load volatile float* undef
804804 %cmp37 = fcmp une float %val37, undef
805805 br i1 %cmp37, label %38, label %37
806 %val38 = volatile load float* undef
806 %val38 = load volatile float* undef
807807 %cmp38 = fcmp une float %val38, undef
808808 br i1 %cmp38, label %39, label %38
809 %val39 = volatile load float* undef
809 %val39 = load volatile float* undef
810810 %cmp39 = fcmp une float %val39, undef
811811 br i1 %cmp39, label %40, label %39
812 %val40 = volatile load float* undef
812 %val40 = load volatile float* undef
813813 %cmp40 = fcmp une float %val40, undef
814814 br i1 %cmp40, label %41, label %40
815 %val41 = volatile load float* undef
815 %val41 = load volatile float* undef
816816 %cmp41 = fcmp une float %val41, undef
817817 br i1 %cmp41, label %42, label %41
818 %val42 = volatile load float* undef
818 %val42 = load volatile float* undef
819819 %cmp42 = fcmp une float %val42, undef
820820 br i1 %cmp42, label %43, label %42
821 %val43 = volatile load float* undef
821 %val43 = load volatile float* undef
822822 %cmp43 = fcmp une float %val43, undef
823823 br i1 %cmp43, label %44, label %43
824 %val44 = volatile load float* undef
824 %val44 = load volatile float* undef
825825 %cmp44 = fcmp une float %val44, undef
826826 br i1 %cmp44, label %45, label %44
827 %val45 = volatile load float* undef
827 %val45 = load volatile float* undef
828828 %cmp45 = fcmp une float %val45, undef
829829 br i1 %cmp45, label %46, label %45
830 %val46 = volatile load float* undef
830 %val46 = load volatile float* undef
831831 %cmp46 = fcmp une float %val46, undef
832832 br i1 %cmp46, label %47, label %46
833 %val47 = volatile load float* undef
833 %val47 = load volatile float* undef
834834 %cmp47 = fcmp une float %val47, undef
835835 br i1 %cmp47, label %48, label %47
836 %val48 = volatile load float* undef
836 %val48 = load volatile float* undef
837837 %cmp48 = fcmp une float %val48, undef
838838 br i1 %cmp48, label %49, label %48
839 %val49 = volatile load float* undef
839 %val49 = load volatile float* undef
840840 %cmp49 = fcmp une float %val49, undef
841841 br i1 %cmp49, label %50, label %49
842 %val50 = volatile load float* undef
842 %val50 = load volatile float* undef
843843 %cmp50 = fcmp une float %val50, undef
844844 br i1 %cmp50, label %51, label %50
845 %val51 = volatile load float* undef
845 %val51 = load volatile float* undef
846846 %cmp51 = fcmp une float %val51, undef
847847 br i1 %cmp51, label %52, label %51
848 %val52 = volatile load float* undef
848 %val52 = load volatile float* undef
849849 %cmp52 = fcmp une float %val52, undef
850850 br i1 %cmp52, label %53, label %52
851 %val53 = volatile load float* undef
851 %val53 = load volatile float* undef
852852 %cmp53 = fcmp une float %val53, undef
853853 br i1 %cmp53, label %54, label %53
854 %val54 = volatile load float* undef
854 %val54 = load volatile float* undef
855855 %cmp54 = fcmp une float %val54, undef
856856 br i1 %cmp54, label %55, label %54
857 %val55 = volatile load float* undef
857 %val55 = load volatile float* undef
858858 %cmp55 = fcmp une float %val55, undef
859859 br i1 %cmp55, label %56, label %55
860 %val56 = volatile load float* undef
860 %val56 = load volatile float* undef
861861 %cmp56 = fcmp une float %val56, undef
862862 br i1 %cmp56, label %57, label %56
863 %val57 = volatile load float* undef
863 %val57 = load volatile float* undef
864864 %cmp57 = fcmp une float %val57, undef
865865 br i1 %cmp57, label %58, label %57
866 %val58 = volatile load float* undef
866 %val58 = load volatile float* undef
867867 %cmp58 = fcmp une float %val58, undef
868868 br i1 %cmp58, label %59, label %58
869 %val59 = volatile load float* undef
869 %val59 = load volatile float* undef
870870 %cmp59 = fcmp une float %val59, undef
871871 br i1 %cmp59, label %60, label %59
872 %val60 = volatile load float* undef
872 %val60 = load volatile float* undef
873873 %cmp60 = fcmp une float %val60, undef
874874 br i1 %cmp60, label %61, label %60
875 %val61 = volatile load float* undef
875 %val61 = load volatile float* undef
876876 %cmp61 = fcmp une float %val61, undef
877877 br i1 %cmp61, label %62, label %61
878 %val62 = volatile load float* undef
878 %val62 = load volatile float* undef
879879 %cmp62 = fcmp une float %val62, undef
880880 br i1 %cmp62, label %63, label %62
881 %val63 = volatile load float* undef
881 %val63 = load volatile float* undef
882882 %cmp63 = fcmp une float %val63, undef
883883 br i1 %cmp63, label %64, label %63
884 %val64 = volatile load float* undef
884 %val64 = load volatile float* undef
885885 %cmp64 = fcmp une float %val64, undef
886886 br i1 %cmp64, label %65, label %64
887887
8383 br i1 %3, label %func_4.exit.i, label %bb.i.i.i
8484
8585 bb.i.i.i: ; preds = %entry
86 %4 = volatile load i8* @g_100, align 1 ; [#uses=0]
86 %4 = load volatile i8* @g_100, align 1 ; [#uses=0]
8787 br label %func_4.exit.i
8888
8989 ; CHECK: test4:
100100 br i1 %brmerge.i, label %func_1.exit, label %bb.i.i
101101
102102 bb.i.i: ; preds = %func_4.exit.i
103 %5 = volatile load i8* @g_100, align 1 ; [#uses=0]
103 %5 = load volatile i8* @g_100, align 1 ; [#uses=0]
104104 br label %func_1.exit
105105
106106 func_1.exit: ; preds = %bb.i.i, %func_4.exit.i
2020 br i1 %exitcond, label %bb13, label %bb
2121
2222 bb13: ; preds = %bb
23 volatile store float %tmp6, float* @G, align 4
23 store volatile float %tmp6, float* @G, align 4
2424 ret void
2525 }
55 ; Chain and flag folding issues.
66 define i32 @test1() nounwind ssp {
77 entry:
8 %tmp5.i = volatile load i32* undef ; [#uses=1]
8 %tmp5.i = load volatile i32* undef ; [#uses=1]
99 %conv.i = zext i32 %tmp5.i to i64 ; [#uses=1]
10 %tmp12.i = volatile load i32* undef ; [#uses=1]
10 %tmp12.i = load volatile i32* undef ; [#uses=1]
1111 %conv13.i = zext i32 %tmp12.i to i64 ; [#uses=1]
1212 %shl.i = shl i64 %conv13.i, 32 ; [#uses=1]
1313 %or.i = or i64 %shl.i, %conv.i ; [#uses=1]
1414 %add16.i = add i64 %or.i, 256 ; [#uses=1]
1515 %shr.i = lshr i64 %add16.i, 8 ; [#uses=1]
1616 %conv19.i = trunc i64 %shr.i to i32 ; [#uses=1]
17 volatile store i32 %conv19.i, i32* undef
17 store volatile i32 %conv19.i, i32* undef
1818 ret i32 undef
1919 }
2020
99 %tmp13 = tail call double @foo()
1010 %tmp1314 = fptrunc double %tmp13 to float ; [#uses=1]
1111 %tmp3940 = fpext float %tmp1314 to double ; [#uses=1]
12 volatile store double %tmp3940, double* %b
12 store volatile double %tmp3940, double* %b
1313 ret void
1414 }
1515
1010 bb: ; preds = %bb, %entry
1111 %i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; [#uses=2]
1212 %tmp1 = trunc i32 %i.014.0 to i16 ; [#uses=2]
13 volatile store i16 %tmp1, i16* @X, align 2
13 store volatile i16 %tmp1, i16* @X, align 2
1414 %tmp34 = shl i16 %tmp1, 2 ; [#uses=1]
15 volatile store i16 %tmp34, i16* @Y, align 2
15 store volatile i16 %tmp34, i16* @Y, align 2
1616 %indvar.next = add i32 %i.014.0, 1 ; [#uses=2]
1717 %exitcond = icmp eq i32 %indvar.next, %N ; [#uses=1]
1818 br i1 %exitcond, label %return, label %bb
1818
1919 loop:
2020 %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
21 volatile store i64 %i, i64* %p
21 store volatile i64 %i, i64* %p
2222 %i.next = add i64 %i, %s
2323 %c = icmp slt i64 %i.next, %n
2424 br i1 %c, label %loop, label %exit
1111 bb: ; preds = %bb, %entry
1212 %i.03 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ] ; [#uses=2]
1313 %1 = trunc i32 %i.03 to i16 ; [#uses=1]
14 volatile store i16 %1, i16* @X, align 2
14 store volatile i16 %1, i16* @X, align 2
1515 %indvar.next = add i32 %i.03, 1 ; [#uses=2]
1616 %exitcond = icmp eq i32 %indvar.next, %N ; [#uses=1]
1717 br i1 %exitcond, label %return, label %bb
5151 %tmp17 = ashr i64 %tmp16, %.cast ; [#uses=1]
5252 %tmp1718 = trunc i64 %tmp17 to i32 ; [#uses=1]
5353 %tmp19 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; [#uses=1]
54 volatile store i32 %tmp1718, i32* @var
55 volatile store i32 %tmp13, i32* @var
54 store volatile i32 %tmp1718, i32* @var
55 store volatile i32 %tmp13, i32* @var
5656 %tmp21 = load i32* %i, align 4 ; [#uses=1]
5757 %tmp22 = add i32 %tmp21, 1 ; [#uses=1]
5858 store i32 %tmp22, i32* %i, align 4
8585 %tmp3940 = bitcast float* %tmp39 to i32* ; [#uses=1]
8686 %tmp41 = load i32* %tmp3940, align 4 ; [#uses=1]
8787 %tmp42 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; [#uses=1]
88 volatile store i32 %tmp41, i32* @var
88 store volatile i32 %tmp41, i32* @var
8989 %tmp44 = load i32* %i, align 4 ; [#uses=1]
9090 %tmp45 = add i32 %tmp44, 1 ; [#uses=1]
9191 store i32 %tmp45, i32* %i, align 4
126126 %tmp72 = ashr i64 %tmp70, %.cast71 ; [#uses=1]
127127 %tmp7273 = trunc i64 %tmp72 to i32 ; [#uses=1]
128128 %tmp74 = getelementptr [10 x i8]* @.str, i32 0, i32 0 ; [#uses=1]
129 volatile store i32 %tmp7273, i32* @var
130 volatile store i32 %tmp66, i32* @var
129 store volatile i32 %tmp7273, i32* @var
130 store volatile i32 %tmp66, i32* @var
131131 %tmp76 = load i32* %i, align 4 ; [#uses=1]
132132 %tmp77 = add i32 %tmp76, 1 ; [#uses=1]
133133 store i32 %tmp77, i32* %i, align 4
160160 %tmp9495 = bitcast float* %tmp94 to i32* ; [#uses=1]
161161 %tmp96 = load i32* %tmp9495, align 4 ; [#uses=1]
162162 %tmp97 = getelementptr [6 x i8]* @.str1, i32 0, i32 0 ; [#uses=1]
163 volatile store i32 %tmp96, i32* @var
163 store volatile i32 %tmp96, i32* @var
164164 %tmp99 = load i32* %i, align 4 ; [#uses=1]
165165 %tmp100 = add i32 %tmp99, 1 ; [#uses=1]
166166 store i32 %tmp100, i32* %i, align 4
6666 ; DAG Combiner can't fold this into a load of the 1'th byte.
6767 ; PR8757
6868 define i32 @test3(i32 *%P) nounwind ssp {
69 volatile store i32 128, i32* %P
69 store volatile i32 128, i32* %P
7070 %tmp4.pre = load i32* %P
7171 %phitmp = trunc i32 %tmp4.pre to i16
7272 %phitmp13 = shl i16 %phitmp, 8
1212
1313 define i32 @test1(i32 %X) {
1414 %Z = shl i32 %X, 2 ; [#uses=1]
15 volatile store i32 %Z, i32* @G
15 store volatile i32 %Z, i32* @G
1616 ret i32 %X
1717 }
1818
3232 define i32 @main() {
3333 entry:
3434 ; CHECK: flds
35 %tmp6 = volatile load float* @a ; [#uses=1]
35 %tmp6 = load volatile float* @a ; [#uses=1]
3636 ; CHECK: fstps (%esp)
3737 ; CHECK: tanf
3838 %tmp9 = tail call float @tanf( float %tmp6 ) ; [#uses=1]
4040 ; CHECK: fstp
4141
4242 ; CHECK: fldl
43 %tmp12 = volatile load double* @b ; [#uses=1]
43 %tmp12 = load volatile double* @b ; [#uses=1]
4444 ; CHECK: fstpl (%esp)
4545 ; CHECK: tan
4646 %tmp13 = tail call double @tan( double %tmp12 ) ; [#uses=1]
1414 ; CHECK-NEXT: addl $3, (%{{.*}})
1515 ; CHECK-NEXT: ret
1616
17 %tmp = volatile load i32* @x, align 4 ; [#uses=1]
17 %tmp = load volatile i32* @x, align 4 ; [#uses=1]
1818 %tmp1 = add i32 %tmp, 3 ; [#uses=1]
19 volatile store i32 %tmp1, i32* @x, align 4
20 %tmp.1 = volatile load i32* @x, align 4 ; [#uses=1]
19 store volatile i32 %tmp1, i32* @x, align 4
20 %tmp.1 = load volatile i32* @x, align 4 ; [#uses=1]
2121 %tmp1.1 = add i32 %tmp.1, 3 ; [#uses=1]
22 volatile store i32 %tmp1.1, i32* @x, align 4
23 %tmp.2 = volatile load i32* @x, align 4 ; [#uses=1]
22 store volatile i32 %tmp1.1, i32* @x, align 4
23 %tmp.2 = load volatile i32* @x, align 4 ; [#uses=1]
2424 %tmp1.2 = add i32 %tmp.2, 3 ; [#uses=1]
25 volatile store i32 %tmp1.2, i32* @x, align 4
26 %tmp.3 = volatile load i32* @x, align 4 ; [#uses=1]
25 store volatile i32 %tmp1.2, i32* @x, align 4
26 %tmp.3 = load volatile i32* @x, align 4 ; [#uses=1]
2727 %tmp1.3 = add i32 %tmp.3, 3 ; [#uses=1]
28 volatile store i32 %tmp1.3, i32* @x, align 4
28 store volatile i32 %tmp1.3, i32* @x, align 4
2929 ret void
3030 }
77 ; CHECK: movl %eax
88 %C = add i64 %A, %B
99 %D = trunc i64 %C to i32
10 volatile store i32 %D, i32* %P
10 store volatile i32 %D, i32* %P
1111 %E = shl i64 %C, 32
1212 %F = ashr i64 %E, 32
13 volatile store i64 %F, i64 *%P2
14 volatile store i32 %D, i32* %P
13 store volatile i64 %F, i64 *%P2
14 store volatile i32 %D, i32* %P
1515 ret i64 undef
1616 }
1010 entry:
1111 %tmp3 = load double* @G, align 16 ; [#uses=1]
1212 %tmp4 = tail call double @fabs( double %tmp3 ) ; [#uses=1]
13 volatile store double %tmp4, double* %P
13 store volatile double %tmp4, double* %P
1414 %tmp = getelementptr { double, double }* %z, i32 0, i32 0 ; [#uses=1]
15 %tmp1 = volatile load double* %tmp, align 8 ; [#uses=1]
15 %tmp1 = load volatile double* %tmp, align 8 ; [#uses=1]
1616 %tmp2 = tail call double @fabs( double %tmp1 ) ; [#uses=1]
1717 ; CHECK: andpd{{.*}}4(%esp), %xmm
1818 %tmp6 = fadd double %tmp4, %tmp2 ; [#uses=1]
19 volatile store double %tmp6, double* %P, align 8
19 store volatile double %tmp6, double* %P, align 8
2020 ret void
2121 }
2222
88
99 define void @foo() nounwind {
1010 %1 = alloca %testType
11 volatile store %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
11 store volatile %testType {i32 1, [0 x i32] zeroinitializer, i32 2}, %testType* %1
1212 ret void
1313 }
313313 ]
314314
315315 bb7:
316 volatile store i32 0, i32* @XYZ
316 store volatile i32 0, i32* @XYZ
317317 unreachable
318318
319319 bbx:
322322 ]
323323
324324 bb12:
325 volatile store i32 0, i32* @XYZ
325 store volatile i32 0, i32* @XYZ
326326 unreachable
327327
328328 return:
351351 ]
352352
353353 bb7:
354 volatile store i32 0, i32* @XYZ
355 volatile store i32 1, i32* @XYZ
354 store volatile i32 0, i32* @XYZ
355 store volatile i32 1, i32* @XYZ
356356 unreachable
357357
358358 bbx:
361361 ]
362362
363363 bb12:
364 volatile store i32 0, i32* @XYZ
365 volatile store i32 1, i32* @XYZ
364 store volatile i32 0, i32* @XYZ
365 store volatile i32 1, i32* @XYZ
366366 unreachable
367367
368368 return:
389389 ]
390390
391391 bb7:
392 volatile store i32 0, i32* @XYZ
393 volatile store i32 1, i32* @XYZ
392 store volatile i32 0, i32* @XYZ
393 store volatile i32 1, i32* @XYZ
394394 unreachable
395395
396396 bbx:
399399 ]
400400
401401 bb12:
402 volatile store i32 0, i32* @XYZ
403 volatile store i32 1, i32* @XYZ
402 store volatile i32 0, i32* @XYZ
403 store volatile i32 1, i32* @XYZ
404404 unreachable
405405
406406 return:
1313 ; CHECK-NOT: mov
1414 ; CHECK: leal 1(%rdi)
1515 %Z = add i32 %X, 1
16 volatile store i32 %Z, i32* @G
16 store volatile i32 %Z, i32* @G
1717 ret i32 %X
1818 }
1919
44 entry:
55 %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
66 %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
7 volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
7 store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
88 %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
99 store <4 x i32> %tmp, <4 x i32>* %b
1010 %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
44 ; CHECK: punpckldq
55 %a = alloca <4 x i32> ; <<4 x i32>*> [#uses=2]
66 %b = alloca <4 x i32> ; <<4 x i32>*> [#uses=5]
7 volatile store <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
7 store volatile <4 x i32> < i32 0, i32 1, i32 2, i32 3 >, <4 x i32>* %a
88 %tmp = load <4 x i32>* %a ; <<4 x i32>> [#uses=1]
99 store <4 x i32> %tmp, <4 x i32>* %b
1010 %tmp1 = load <4 x i32>* %b ; <<4 x i32>> [#uses=1]
33 @x = external global double
44
55 define void @foo() nounwind {
6 %a = volatile load double* @x
7 volatile store double 0.0, double* @x
8 volatile store double 0.0, double* @x
9 %b = volatile load double* @x
6 %a = load volatile double* @x
7 store volatile double 0.0, double* @x
8 store volatile double 0.0, double* @x
9 %b = load volatile double* @x
1010 ret void
1111 }
1212
1313 define void @bar() nounwind {
14 %c = volatile load double* @x
14 %c = load volatile double* @x
1515 ret void
1616 }
1212 br label %bb
1313
1414 bb: ; preds = %bb, %entry
15 volatile store i32 525509670, i32* %p, align 4
15 store volatile i32 525509670, i32* %p, align 4
1616 br label %bb
1717 }
1717 %x = alloca [100 x i32], align 4 ; <[100 x i32]*> [#uses=2]
1818 %0 = load i32* @size, align 4 ; [#uses=1]
1919 %1 = alloca i32, i32 %0, align 4 ; [#uses=1]
20 %2 = volatile load i32* @g0, align 4 ; [#uses=1]
21 %3 = volatile load i32* @g1, align 4 ; [#uses=1]
22 %4 = volatile load i32* @g2, align 4 ; [#uses=1]
23 %5 = volatile load i32* @g3, align 4 ; [#uses=1]
24 %6 = volatile load i32* @g4, align 4 ; [#uses=1]
25 %7 = volatile load i32* @g5, align 4 ; [#uses=1]
26 %8 = volatile load i32* @g6, align 4 ; [#uses=1]
27 %9 = volatile load i32* @g7, align 4 ; [#uses=1]
28 %10 = volatile load i32* @g8, align 4 ; [#uses=1]
29 %11 = volatile load i32* @g9, align 4 ; [#uses=1]
30 %12 = volatile load i32* @g10, align 4 ; [#uses=1]
31 %13 = volatile load i32* @g11, align 4 ; [#uses=2]
20 %2 = load volatile i32* @g0, align 4 ; [#uses=1]
21 %3 = load volatile i32* @g1, align 4 ; [#uses=1]
22 %4 = load volatile i32* @g2, align 4 ; [#uses=1]
23 %5 = load volatile i32* @g3, align 4 ; [#uses=1]
24 %6 = load volatile i32* @g4, align 4 ; [#uses=1]
25 %7 = load volatile i32* @g5, align 4 ; [#uses=1]
26 %8 = load volatile i32* @g6, align 4 ; [#uses=1]
27 %9 = load volatile i32* @g7, align 4 ; [#uses=1]
28 %10 = load volatile i32* @g8, align 4 ; [#uses=1]
29 %11 = load volatile i32* @g9, align 4 ; [#uses=1]
30 %12 = load volatile i32* @g10, align 4 ; [#uses=1]
31 %13 = load volatile i32* @g11, align 4 ; [#uses=2]
3232 %14 = getelementptr [100 x i32]* %x, i32 0, i32 50 ; [#uses=1]
3333 store i32 %13, i32* %14, align 4
34 volatile store i32 %13, i32* @g11, align 4
35 volatile store i32 %12, i32* @g10, align 4
36 volatile store i32 %11, i32* @g9, align 4
37 volatile store i32 %10, i32* @g8, align 4
38 volatile store i32 %9, i32* @g7, align 4
39 volatile store i32 %8, i32* @g6, align 4
40 volatile store i32 %7, i32* @g5, align 4
41 volatile store i32 %6, i32* @g4, align 4
42 volatile store i32 %5, i32* @g3, align 4
43 volatile store i32 %4, i32* @g2, align 4
44 volatile store i32 %3, i32* @g1, align 4
45 volatile store i32 %2, i32* @g0, align 4
34 store volatile i32 %13, i32* @g11, align 4
35 store volatile i32 %12, i32* @g10, align 4
36 store volatile i32 %11, i32* @g9, align 4
37 store volatile i32 %10, i32* @g8, align 4
38 store volatile i32 %9, i32* @g7, align 4
39 store volatile i32 %8, i32* @g6, align 4
40 store volatile i32 %7, i32* @g5, align 4
41 store volatile i32 %6, i32* @g4, align 4
42 store volatile i32 %5, i32* @g3, align 4
43 store volatile i32 %4, i32* @g2, align 4
44 store volatile i32 %3, i32* @g1, align 4
45 store volatile i32 %2, i32* @g0, align 4
4646 %x1 = getelementptr [100 x i32]* %x, i32 0, i32 0 ; [#uses=1]
4747 call void @g(i32* %x1, i32* %1) nounwind
4848 ret void
1818 %0 = getelementptr inbounds %struct.gpm_t* %gpm, i32 0, i32 2, i32 0 ; [#uses=1]
1919 %1 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 9, i32 0 ; [#uses=1]
2020 call void @uuid_LtoB(i8* %0, i8* %1) nounwind, !dbg !0
21 %a9 = volatile load i64* %data_addr.i18, align 8 ; [#uses=1]
21 %a9 = load volatile i64* %data_addr.i18, align 8 ; [#uses=1]
2222 %a10 = call i64 @llvm.bswap.i64(i64 %a9) nounwind ; [#uses=1]
2323 %a11 = getelementptr inbounds %struct.gpt_t* %gpt, i32 0, i32 8, !dbg !7 ; [#uses=1]
2424 %a12 = load i64* %a11, align 4, !dbg !7 ; [#uses=1]
2828 call void @llvm.dbg.value(metadata !18, i64 0, metadata !19) nounwind
2929 call void @llvm.dbg.declare(metadata !6, metadata !23) nounwind
3030 call void @llvm.dbg.value(metadata !{i64* %data_addr.i17}, i64 0, metadata !34) nounwind
31 %a13 = volatile load i64* %data_addr.i17, align 8 ; [#uses=1]
31 %a13 = load volatile i64* %data_addr.i17, align 8 ; [#uses=1]
3232 %a14 = call i64 @llvm.bswap.i64(i64 %a13) nounwind ; [#uses=2]
3333 %a15 = add i64 %a10, %a14, !dbg !7 ; [#uses=1]
3434 %a16 = sub i64 %a15, %a14 ; [#uses=1]
2929
3030 define void @tty_init() {
3131 entry:
32 volatile store void (%struct2*)* @func, void (%struct2*)** getelementptr (%struct1* @driver1, i64 0, i32 1)
32 store volatile void (%struct2*)* @func, void (%struct2*)** getelementptr (%struct1* @driver1, i64 0, i32 1)
3333 ret void
3434 }
88 define internal void @f1(%struct1* %tty) {
99 loopentry.preheader:
1010 %tmp.2.i.i = getelementptr %struct1* %tty, i64 0, i32 1 ; [#uses=1]
11 %tmp.3.i.i = volatile load void (%struct2*)** %tmp.2.i.i ; [#uses=0]
11 %tmp.3.i.i = load volatile void (%struct2*)** %tmp.2.i.i ; [#uses=0]
1212 ret void
1313 }
1414
4141 ]
4242
4343 bb: ; preds = %entry
44 volatile store i32 11, i32* @var_tls, align 4
45 volatile store double 2.200000e+01, double* @var_tls_double, align 8
46 volatile store i32 33, i32* @var_static, align 4
47 volatile store double 4.400000e+01, double* @var_static_double, align 8
48 volatile store i32 55, i32* @var_global, align 4
49 volatile store double 6.600000e+01, double* @var_global_double, align 8
44 store volatile i32 11, i32* @var_tls, align 4
45 store volatile double 2.200000e+01, double* @var_tls_double, align 8
46 store volatile i32 33, i32* @var_static, align 4
47 store volatile double 4.400000e+01, double* @var_static_double, align 8
48 store volatile i32 55, i32* @var_global, align 4
49 store volatile double 6.600000e+01, double* @var_global_double, align 8
5050 br label %bb3
5151
5252 bb2: ; preds = %entry
150150 exit:
151151 %t3 = phi i32* [ %t4, %exit ]
152152 %t4 = bitcast i32* %t3 to i32*
153 %x = volatile load i32* %t3
153 %x = load volatile i32* %t3
154154 br label %exit
155155 }
156156
2929 define void @h() {
3030 entry:
3131 %i = alloca i32, align 4
32 volatile store i32 10, i32* %i, align 4
32 store volatile i32 10, i32* %i, align 4
3333 ; CHECK: %tmp = load volatile i32* %i, align 4
3434 ; CHECK-NEXT: call void @f(i32 undef)
35 %tmp = volatile load i32* %i, align 4
35 %tmp = load volatile i32* %i, align 4
3636 call void @f(i32 %tmp)
3737 ret void
3838 }
99
1010 %C = zext i8 %V to i32
1111 %D = zext i8 %V to i32 ;; CSE
12 volatile store i32 %C, i32* %P
13 volatile store i32 %D, i32* %P
12 store volatile i32 %C, i32* %P
13 store volatile i32 %D, i32* %P
1414 ; CHECK-NEXT: %C = zext i8 %V to i32
1515 ; CHECK-NEXT: store volatile i32 %C
1616 ; CHECK-NEXT: store volatile i32 %C
1717
1818 %E = add i32 %C, %C
1919 %F = add i32 %C, %C
20 volatile store i32 %E, i32* %P
21 volatile store i32 %F, i32* %P
20 store volatile i32 %E, i32* %P
21 store volatile i32 %F, i32* %P
2222 ; CHECK-NEXT: %E = add i32 %C, %C
2323 ; CHECK-NEXT: store volatile i32 %E
2424 ; CHECK-NEXT: store volatile i32 %E
2525
2626 %G = add nuw i32 %C, %C ;; not a CSE with E
27 volatile store i32 %G, i32* %P
27 store volatile i32 %G, i32* %P
2828 ; CHECK-NEXT: %G = add nuw i32 %C, %C
2929 ; CHECK-NEXT: store volatile i32 %G
3030 ret void
33 @g = global i32 0 ; [#uses=1]
44
55 define i32 @f() {
6 %t = volatile load i32* @g ; [#uses=1]
6 %t = load volatile i32* @g ; [#uses=1]
77 ret i32 %t
88 }
44
55 define void @foo() {
66 ; CHECK: void @foo() {
7 %tmp = volatile load i32* @g
7 %tmp = load volatile i32* @g
88 ret void
99 }
22
33 define double @foo() nounwind {
44 entry:
5 %tmp1 = volatile load double* @t0.1441, align 8 ; [#uses=2]
5 %tmp1 = load volatile double* @t0.1441, align 8 ; [#uses=2]
66 %tmp4 = fmul double %tmp1, %tmp1 ; [#uses=1]
77 ret double %tmp4
88 }
1111 define void @foo(i32 %x) nounwind readnone {
1212 entry:
1313 %b = alloca i32, align 4 ; [#uses=1]
14 volatile store i32 -1, i32* %b
14 store volatile i32 -1, i32* %b
1515 ret void
1616 }
1717
8989 br label %bb4
9090
9191 bb: ; preds = %bb4
92 %0 = volatile load i32* @x, align 4 ; [#uses=1]
92 %0 = load volatile i32* @x, align 4 ; [#uses=1]
9393 store i32 %0, i32* %vol.0, align 4
9494 store i32 0, i32* %l_52, align 4
9595 br label %bb2
1313 %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
1414 %ip = add i64 %i, 1
1515 %p.2.ip.1 = getelementptr [3 x [3 x double]]* %p, i64 2, i64 %ip, i64 1
16 volatile store double 0.0, double* %p.2.ip.1
16 store volatile double 0.0, double* %p.2.ip.1
1717 %i.next = add i64 %i, 1
1818 br label %loop
1919 }
1717 br i1 %tobool, label %while.end, label %while.cond
1818
1919 while.end: ; preds = %while.cond
20 volatile store i32 0, i32* %result.i
21 %tmp.i = volatile load i32* %result.i ; [#uses=0]
20 store volatile i32 0, i32* %result.i
21 %tmp.i = load volatile i32* %result.i ; [#uses=0]
2222 ret i32 0
2323 }
2424 declare i32 @bar()
1616 bb: ; preds = %entry
1717 %1 = sub nsw i32 %x, 1 ; [#uses=1]
1818 call void @foo(i32 %1) nounwind ssp
19 volatile store i32 1, i32* @g, align 4
19 store volatile i32 1, i32* @g, align 4
2020 ret void
2121
2222 return: ; preds = %entry
4141 %0 = bitcast i8* %Bar to void (i32, i8*, i8*)*
4242 %1 = sub nsw i32 %x, 1
4343 call void %0(i32 %1, i8* %Foo, i8* %Bar) nounwind
44 volatile store i32 42, i32* @g, align 4
44 store volatile i32 42, i32* @g, align 4
4545 ret void
4646 }
4747
5353 bb: ; preds = %entry
5454 %1 = bitcast i8* %Foo to void (i32, i8*, i8*)* ; [#uses=1]
5555 call void %1(i32 %x, i8* %Foo, i8* %Bar) nounwind
56 volatile store i32 13, i32* @g, align 4
56 store volatile i32 13, i32* @g, align 4
5757 ret void
5858
5959 return: ; preds = %entry
11
22 define void @test(i32* %P) {
33 ; Dead but not deletable!
4 %X = volatile load i32* %P ; [#uses=0]
4 %X = load volatile i32* %P ; [#uses=0]
55 ret void
66 }
2525 %tmp21 = getelementptr i32* %tmp1819, i32 0 ; [#uses=1]
2626 store i32 1, i32* %tmp21, align 4
2727 %tmp2223 = bitcast i32* %tmp1819 to i8* ; [#uses=1]
28 volatile store i8* %tmp2223, i8** @p, align 4
28 store volatile i8* %tmp2223, i8** @p, align 4
2929 %tmp25 = add i32 %n.0, 1 ; [#uses=2]
3030 %tmp27 = icmp sle i32 %tmp25, 999999 ; [#uses=1]
3131 %tmp2728 = zext i1 %tmp27 to i8 ; [#uses=1]
11
22 define void @test() {
33 %votf = alloca <4 x float> ; <<4 x float>*> [#uses=1]
4 volatile store <4 x float> zeroinitializer, <4 x float>* %votf, align 16
4 store volatile <4 x float> zeroinitializer, <4 x float>* %votf, align 16
55 ret void
66 }
77
55 define i32 @main() nounwind {
66 entry:
77 %tmp93 = icmp slt i32 0, 10 ; [#uses=0]
8 %tmp34 = volatile load i32* @g_1, align 4 ; [#uses=1]
8 %tmp34 = load volatile i32* @g_1, align 4 ; [#uses=1]
99 br label %bb
1010
1111 bb: ; preds = %bb, %entry
1212 %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; [#uses=1]
1313 %tmp3.reg2mem.0 = phi i32 [ %tmp34, %entry ], [ %tmp3, %bb ] ; [#uses=1]
1414 %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; [#uses=1]
15 volatile store i32 %tmp4, i32* @g_1, align 4
15 store volatile i32 %tmp4, i32* @g_1, align 4
1616 %tmp6 = add i32 %b.0.reg2mem.0, 1 ; [#uses=2]
1717 %tmp9 = icmp slt i32 %tmp6, 10 ; [#uses=1]
18 %tmp3 = volatile load i32* @g_1, align 4 ; [#uses=1]
18 %tmp3 = load volatile i32* @g_1, align 4 ; [#uses=1]
1919 br i1 %tmp9, label %bb, label %bb11
2020
2121 bb11: ; preds = %bb
66 define i32 @main(i32 %i) nounwind {
77 entry:
88 %tmp93 = icmp slt i32 %i, 10 ; [#uses=0]
9 %tmp34 = volatile load i32* @g_1, align 4 ; [#uses=1]
9 %tmp34 = load volatile i32* @g_1, align 4 ; [#uses=1]
1010 br i1 %tmp93, label %bb11, label %bb
1111
1212 bb: ; preds = %bb, %entry
13 %tmp3 = volatile load i32* @g_1, align 4 ; [#uses=1]
13 %tmp3 = load volatile i32* @g_1, align 4 ; [#uses=1]
1414 br label %bb11
1515
1616 bb11: ; preds = %bb
99 %tmp2752 = alloca i32 ; [#uses=2]
1010 %tmpcast53 = bitcast i32* %tmp2752 to i8* ; [#uses=1]
1111 store i32 2, i32* %tmp2752, align 4
12 volatile store i8* %tmpcast53, i8** @p, align 4
12 store volatile i8* %tmpcast53, i8** @p, align 4
1313 br label %bb44
1414
1515 bb: ; preds = %bb44
2828 store i32 1, i32* %tmp27, align 4
2929 %tmp34 = getelementptr i32* %tmp27, i32 %tmp4 ; [#uses=1]
3030 store i32 2, i32* %tmp34, align 4
31 volatile store i8* %tmpcast, i8** @p, align 4
31 store volatile i8* %tmpcast, i8** @p, align 4
3232 %exitcond = icmp eq i32 %tmp3857, 999999 ; [#uses=1]
3333 br i1 %exitcond, label %bb, label %bb44
3434 }
66 define i32 @main() nounwind {
77 entry:
88 %tmp93 = icmp slt i32 0, 10 ; [#uses=0]
9 %tmp34 = volatile load i32* @g_1, align 4 ; [#uses=1]
9 %tmp34 = load volatile i32* @g_1, align 4 ; [#uses=1]
1010 br label %bb
1111
1212 bb: ; preds = %bb, %entry
1313 %b.0.reg2mem.0 = phi i32 [ 0, %entry ], [ %tmp6, %bb ] ; [#uses=1]
1414 %tmp3.reg2mem.0 = phi i32 [ %tmp3, %bb ], [ %tmp34, %entry ]
1515 %tmp4 = add i32 %tmp3.reg2mem.0, 5 ; [#uses=1]
16 volatile store i32 %tmp4, i32* @g_1, align 4
16 store volatile i32 %tmp4, i32* @g_1, align 4
1717 %tmp6 = add i32 %b.0.reg2mem.0, 1 ; [#uses=2]
1818 %tmp9 = icmp slt i32 %tmp6, 10 ; [#uses=1]
19 %tmp3 = volatile load i32* @g_1, align 4 ; [#uses=1]
19 %tmp3 = load volatile i32* @g_1, align 4 ; [#uses=1]
2020 br i1 %tmp9, label %bb, label %bb11
2121
2222 bb11: ; preds = %bb
8787 ; CHECK-NEXT: ret
8888 define i32 @nogep-multiuse({i32, i32}* %pair) {
8989 ; The load should be left unchanged since both parts are needed.
90 %L = volatile load {i32, i32}* %pair
90 %L = load volatile {i32, i32}* %pair
9191 %LHS = extractvalue {i32, i32} %L, 0
9292 %RHS = extractvalue {i32, i32} %L, 1
9393 %R = add i32 %LHS, %RHS
9999 ; CHECK-NEXT: extractvalue
100100 ; CHECK-NEXT: ret
101101 define i32 @nogep-volatile({i32, i32}* %pair) {
102 ; The volatile load should be left unchanged.
103 %L = volatile load {i32, i32}* %pair
102 ; The volatile load should be left unchanged.
103 %L = load volatile {i32, i32}* %pair
104104 %E = extractvalue {i32, i32} %L, 1
105105 ret i32 %E
106106 }
141141 define void @powi(double %V, double *%P) {
142142 entry:
143143 %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
144 volatile store double %A, double* %P
144 store volatile double %A, double* %P
145145
146146 %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
147 volatile store double %B, double* %P
147 store volatile double %B, double* %P
148148
149149 %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
150 volatile store double %C, double* %P
150 store volatile double %C, double* %P
151151 ret void
152152 ; CHECK: @powi
153153 ; CHECK: %A = fdiv double 1.0{{.*}}, %V
182182 entry:
183183 %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
184184 %lz.cmp = icmp eq i32 %lz, 32
185 volatile store i1 %lz.cmp, i1* %c
185 store volatile i1 %lz.cmp, i1* %c
186186 %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
187187 %tz.cmp = icmp ne i32 %tz, 32
188 volatile store i1 %tz.cmp, i1* %c
188 store volatile i1 %tz.cmp, i1* %c
189189 %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
190190 %pop.cmp = icmp eq i32 %pop, 0
191 volatile store i1 %pop.cmp, i1* %c
191 store volatile i1 %pop.cmp, i1* %c
192192 ret void
193193 ; CHECK: @cmp.simplify
194194 ; CHECK-NEXT: entry:
44
55 define void @self_assign_1() {
66 entry:
7 %tmp = volatile load i32* @x ; [#uses=1]
8 volatile store i32 %tmp, i32* @x
7 %tmp = load volatile i32* @x ; [#uses=1]
8 store volatile i32 %tmp, i32* @x
99 br label %return
1010
1111 return: ; preds = %entry
1414 br i1 %cmp1179, label %for.cond1177, label %land.rhs1320
1515
1616 land.rhs1320:
17 %tmp1324 = volatile load i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1, !tbaa !0
17 %tmp1324 = load volatile i64* getelementptr inbounds (%0* @g_338, i64 0, i32 2), align 1, !tbaa !0
1818 br label %if.end.i
1919
2020 if.end.i:
398398 br label %lbl_664
399399
400400 lbl_596: ; preds = %lbl_664, %for.end37
401 volatile store i64 undef, i64* undef, align 4
401 store volatile i64 undef, i64* undef, align 4
402402 br label %for.cond111
403403
404404 for.cond111: ; preds = %safe_sub_func_int64_t_s_s.exit, %lbl_596
1616 br i1 %0, label %bb1, label %bb2
1717
1818 bb1: ; preds = %bb
19 volatile store i32 1000, i32* @v1, align 4
19 store volatile i32 1000, i32* @v1, align 4
2020 br label %bb3
2121
2222 bb2: ; preds = %bb
23 volatile store i32 1001, i32* @v1, align 4
23 store volatile i32 1001, i32* @v1, align 4
2424 br label %bb3
2525
2626 bb3: ; preds = %bb2, %bb1
99 br label %bb6
1010
1111 bb: ; preds = %bb6
12 %tmp2 = volatile load i32* %DataIn ; [#uses=1]
12 %tmp2 = load volatile i32* %DataIn ; [#uses=1]
1313 %tmp3 = getelementptr [64 x i32]* %buffer, i32 0, i32 %i.0 ; [#uses=1]
1414 store i32 %tmp2, i32* %tmp3
1515 %tmp5 = add i32 %i.0, 1 ; [#uses=1]
2727 %tmp16 = add i32 %tmp14, %i.1 ; [#uses=1]
2828 %tmp17 = getelementptr [64 x i32]* %buffer, i32 0, i32 %tmp16 ; [#uses=1]
2929 %tmp18 = load i32* %tmp17 ; [#uses=1]
30 volatile store i32 %tmp18, i32* %DataOut
30 store volatile i32 %tmp18, i32* %DataOut
3131 %tmp21 = add i32 %j.1, 1 ; [#uses=1]
3232 br label %bb22
3333
1818
1919 for.body4:
2020 %l_612.11 = phi i32* [ undef, %for.body4.lr.ph ], [ %call19, %for.body4 ]
21 %tmp7 = volatile load i16* @g_39, align 2
21 %tmp7 = load volatile i16* @g_39, align 2
2222 %call = call i32** @func_108(i32*** undef)
2323 %call19 = call i32* @func_84(i32** %call)
2424 br i1 false, label %for.body4, label %for.cond.loopexit
6767 br label %1
6868
6969 ;
70 volatile store i32* @g_47, i32** undef, align 8
70 store volatile i32* @g_47, i32** undef, align 8
7171 store i32 undef, i32* @g_47, align 4
7272 br label %1
7373 }
5858 br label %Loop
5959 Loop:
6060 ; Should not promote this to a register
61 %x = volatile load i32* @X
61 %x = load volatile i32* @X
6262 %x2 = add i32 %x, 1
6363 store i32 %x2, i32* @X
6464 br i1 true, label %Out, label %Loop
132132 %x2 = add i32 %x, 1 ; [#uses=1]
133133 store i32 %x2, i32* @X
134134
135 volatile store i32* @X, i32** %P2
135 store volatile i32* @X, i32** %P2
136136
137137 %Next = add i32 %j, 1 ; [#uses=2]
138138 %cond = icmp eq i32 %Next, 0 ; [#uses=1]
99 bb: ; preds = %bb, %entry
1010 %l_2.0.reg2mem.0 = phi i16 [ 0, %entry ], [ %t1, %bb ] ; [#uses=2]
1111 %t0 = shl i16 %l_2.0.reg2mem.0, 1 ; :0 [#uses=1]
12 volatile store i16 %t0, i16* @g_3, align 2
12 store volatile i16 %t0, i16* @g_3, align 2
1313 %t1 = add i16 %l_2.0.reg2mem.0, -3 ; :1 [#uses=2]
1414 %t2 = icmp slt i16 %t1, 1 ; :2 [#uses=1]
1515 br i1 %t2, label %bb, label %return
2121 define i32 @main() nounwind {
2222 entry:
2323 tail call void @func_1( ) nounwind
24 volatile load i16* @g_3, align 2 ; :0 [#uses=1]
24 load volatile i16* @g_3, align 2 ; :0 [#uses=1]
2525 zext i16 %0 to i32 ; :1 [#uses=1]
2626 tail call i32 (i8*, ...)* @printf( i8* getelementptr ([4 x i8]* @"\01LC", i32 0, i32 0), i32 %1 ) nounwind ; :2 [#uses=0]
2727 ret i32 0
88 br label %no_exit
99 no_exit: ; preds = %no_exit, %entry
1010 %indvar = phi i32 [ 0, %entry ], [ %indvar.next, %no_exit ] ; [#uses=1]
11 volatile store float 0.000000e+00, float* %D
11 store volatile float 0.000000e+00, float* %D
1212 %indvar.next = add i32 %indvar, 1 ; [#uses=2]
1313 ; CHECK: icmp
1414 ; CHECK-NEXT: br i1
1212
1313 bb1: ; preds = %bb
1414 %l_2.0.reg2mem.0 = sub i32 0, %indvar ; [#uses=1]
15 %0 = volatile load i32* @g_53, align 4 ; [#uses=1]
15 %0 = load volatile i32* @g_53, align 4 ; [#uses=1]
1616 %1 = trunc i32 %l_2.0.reg2mem.0 to i16 ; [#uses=1]
1717 %2 = trunc i32 %0 to i16 ; [#uses=1]
1818 %3 = mul i16 %2, %1 ; [#uses=1]
0 ; RUN: opt < %s -std-compile-opts -S | grep volatile | count 3
11 ; PR1520
2 ; Don't promote volatile loads/stores. This is really needed to handle setjmp/lonjmp properly.
2 ; Don't promote volatile loads/stores. This is really needed to handle setjmp/longjmp properly.
33
44 target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32"
55 target triple = "i686-pc-linux-gnu"
1313 %v = alloca i32, align 4 ; [#uses=3]
1414 %tmp = alloca i32, align 4 ; [#uses=3]
1515 %"alloca point" = bitcast i32 0 to i32 ; [#uses=0]
16 volatile store i32 0, i32* %v, align 4
16 store volatile i32 0, i32* %v, align 4
1717 %tmp1 = call i32 @_setjmp( %struct.__jmp_buf_tag* getelementptr ([1 x %struct.__jmp_buf_tag]* @j, i32 0, i32 0) ) ; [#uses=1]
1818 %tmp2 = icmp ne i32 %tmp1, 0 ; [#uses=1]
1919 %tmp23 = zext i1 %tmp2 to i8 ; [#uses=1]
2121 br i1 %toBool, label %bb, label %bb5
2222
2323 bb: ; preds = %entry
24 %tmp4 = volatile load i32* %v, align 4 ; [#uses=1]
24 %tmp4 = load volatile i32* %v, align 4 ; [#uses=1]
2525 store i32 %tmp4, i32* %tmp, align 4
2626 br label %bb6
2727
2828 bb5: ; preds = %entry
29 volatile store i32 1, i32* %v, align 4
29 store volatile i32 1, i32* %v, align 4
3030 call void @g( )
3131 store i32 0, i32* %tmp, align 4
3232 br label %bb6
166166 loop:
167167 %c = bitcast i32* %x to i8*
168168 call void @objc_release(i8* %c) nounwind
169 %j = volatile load i1* %q
169 %j = load volatile i1* %q
170170 br i1 %j, label %loop, label %return
171171
172172 return:
189189 loop:
190190 %a = bitcast i32* %x to i8*
191191 %0 = call i8* @objc_retain(i8* %a) nounwind
192 %j = volatile load i1* %q
192 %j = load volatile i1* %q
193193 br i1 %j, label %loop, label %return
194194
195195 return:
3232 define void @test1(i8* %p) {
3333 entry:
3434 %0 = tail call i8* @objc_retain(i8* %p) nounwind
35 %tmp = volatile load i8** @x, align 8
35 %tmp = load volatile i8** @x, align 8
3636 store i8* %0, i8** @x, align 8
3737 tail call void @objc_release(i8* %tmp) nounwind
3838 ret void
5252 entry:
5353 %0 = tail call i8* @objc_retain(i8* %p) nounwind
5454 %tmp = load i8** @x, align 8
55 volatile store i8* %0, i8** @x, align 8
55 store volatile i8* %0, i8** @x, align 8
5656 tail call void @objc_release(i8* %tmp) nounwind
5757 ret void
5858 }
33 define i32 @voltest(i32 %T) {
44 %A = alloca {i32, i32}
55 %B = getelementptr {i32,i32}* %A, i32 0, i32 0
6 volatile store i32 %T, i32* %B
6 store volatile i32 %T, i32* %B
77
88 %C = getelementptr {i32,i32}* %A, i32 0, i32 1
9 %X = volatile load i32* %C
9 %X = load volatile i32* %C
1010 ret i32 %X
1111 }
2020 ret i32 0
2121 }
2222
23 ; But don't sink volatile loads...
23 ; But don't sink volatile loads...
2424
2525 ; CHECK: @foo2
2626 ; CHECK: load volatile
4545 }
4646
4747 ; This load can't be safely moved above the call because that would change the
48 ; order in which the volatile loads are performed.
48 ; order in which the volatile loads are performed.
4949 define fastcc i32 @no_tailrecelim_3(i32* %a_arg, i32 %a_len_arg, i32 %start_arg) nounwind {
5050 entry:
5151 %tmp2 = icmp sge i32 %start_arg, %a_len_arg ; [#uses=1]
5757 else: ; preds = %entry
5858 %tmp7 = add i32 %start_arg, 1 ; [#uses=1]
5959 %tmp8 = call fastcc i32 @no_tailrecelim_3(i32* %a_arg, i32 %a_len_arg, i32 %tmp7) ; [#uses=1]
60 %tmp9 = volatile load i32* %a_arg ; [#uses=1]
60 %tmp9 = load volatile i32* %a_arg ; [#uses=1]
6161 %tmp10 = add i32 %tmp9, %tmp8 ; [#uses=1]
6262 ret i32 %tmp10
6363 }