llvm.org GIT mirror llvm / b8cd3bb
[X86] Don't run combineSetCCAtomicArith() when the cmp has multiple uses

We would miscompile the following:

    void g(int);
    int f(volatile long long *p) {
      bool b = __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST) < 0;
      g(b ? 12 : 34);
      return b ? 56 : 78;
    }

into

    pushq   %rax
    lock    incq (%rdi)
    movl    $12, %eax
    movl    $34, %edi
    cmovlel %eax, %edi
    callq   g(int)
    testq   %rax, %rax      <---- Bad.
    movl    $56, %ecx
    movl    $78, %eax
    cmovsl  %ecx, %eax
    popq    %rcx
    retq

because the code failed to take into account that the cmp has multiple
uses, replaced one of them, and left the other one comparing garbage.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@291630 91177308-0d34-0410-b5e6-96231b3b80d8

Hans Wennborg
2 changed files with 22 additions and 0 deletions.
@@ -29403,6 +29403,12 @@
         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
     return SDValue();

+  // Can't replace the cmp if it has more uses than the one we're looking at.
+  // FIXME: We would like to be able to handle this, but would need to make sure
+  // all uses were updated.
+  if (!Cmp.hasOneUse())
+    return SDValue();
+
   // This only applies to variations of the common case:
   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
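Aside, not part of the diff: the new hasOneUse() check only bails out when the compare has more than one consumer. A single-use variant, sketched below with a hypothetical function name and body, still has exactly one user of %cmp, so the combine may keep folding the compare into the EFLAGS produced by the locked RMW instruction instead of re-testing the returned value. The test added to the test file below exercises the multi-use shape that now returns SDValue().

    ; Hypothetical single-use variant, for illustration only (not from this
    ; commit). With a single consumer of %cmp, the combine may still reuse the
    ; flags set by the atomic increment rather than testing the xadd result.
    define i8 @example_add_1_single_cmov(i64* %p) nounwind {
    entry:
      %add = atomicrmw add i64* %p, i64 1 seq_cst
      %cmp = icmp slt i64 %add, 0
      %s = select i1 %cmp, i8 12, i8 34
      ret i8 %s
    }

The new test_add_1_cmov_cmov below is the same pattern with a second select of %cmp, which is exactly the case the new check rejects.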
@@ -175,4 +175,20 @@
   ret i8 %tmp2
 }

+define i8 @test_add_1_cmov_cmov(i64* %p, i8* %q) #0 {
+; TODO: It's possible to use "lock inc" here, but both cmovs need to be updated.
+; CHECK-LABEL: test_add_1_cmov_cmov:
+; CHECK: # BB#0: # %entry
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: lock xaddq %rax, (%rdi)
+; CHECK-NEXT: testq %rax, %rax
+entry:
+  %add = atomicrmw add i64* %p, i64 1 seq_cst
+  %cmp = icmp slt i64 %add, 0
+  %s1 = select i1 %cmp, i8 12, i8 34
+  store i8 %s1, i8* %q
+  %s2 = select i1 %cmp, i8 56, i8 78
+  ret i8 %s2
+}
+
 attributes #0 = { nounwind }