llvm.org GIT mirror llvm / 85dc2a1
AMDGPU: Fix capitalized register names in asm constraints This was needed as a workaround a long time ago, but the canonical lowercase names work now. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363459 91177308-0d34-0410-b5e6-96231b3b80d8 Matt Arsenault 9 months ago
12 changed file(s) with 20 addition(s) and 20 deletion(s). Raw diff Collapse all Expand all
110110 %sgpr101 = tail call i32 asm sideeffect "s_mov_b32 s101, 0", "={s101}"() #0
111111 %sgpr102 = tail call i32 asm sideeffect "s_mov_b32 s102, 0", "={s102}"() #0
112112 %sgpr103 = tail call i32 asm sideeffect "s_mov_b32 s103, 0", "={s103}"() #0
113 %vcc_lo = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={VCC_LO}"() #0
114 %vcc_hi = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={VCC_HI}"() #0
113 %vcc_lo = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={vcc_lo}"() #0
114 %vcc_hi = tail call i32 asm sideeffect "s_mov_b32 $0, 0", "={vcc_hi}"() #0
115115 %cmp = icmp eq i32 %cnd, 0
116116 br i1 %cmp, label %bb3, label %bb2 ; +8 dword branch
117117
8484 ; GCN-NEXT: ;;#ASMEND
8585 ; GCN-NEXT: s_setpc_b64 s[30:31]
8686 define hidden void @void_func_void_clobber_vcc() #2 {
87 call void asm sideeffect "", "~{VCC}"() #0
87 call void asm sideeffect "", "~{vcc}"() #0
8888 ret void
8989 }
9090
6464 br i1 %cc, label %if, label %endif
6565
6666 if:
67 call void asm "; clobber $0", "~{VCC}"() #0
67 call void asm "; clobber $0", "~{vcc}"() #0
6868 %u = add i32 %v, %v
6969 br label %endif
7070
217217 define void @func_other_fi_user_non_inline_imm_offset_i32_vcc_live() #0 {
218218 %alloca0 = alloca [128 x i32], align 4, addrspace(5)
219219 %alloca1 = alloca [8 x i32], align 4, addrspace(5)
220 %vcc = call i64 asm sideeffect "; def $0", "={VCC}"()
220 %vcc = call i64 asm sideeffect "; def $0", "={vcc}"()
221221 %gep0 = getelementptr inbounds [128 x i32], [128 x i32] addrspace(5)* %alloca0, i32 0, i32 65
222222 %gep1 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca1, i32 0, i32 0
223223 store volatile i32 7, i32 addrspace(5)* %gep0
224 call void asm sideeffect "; use $0", "{VCC}"(i64 %vcc)
224 call void asm sideeffect "; use $0", "{vcc}"(i64 %vcc)
225225 %ptrtoint = ptrtoint i32 addrspace(5)* %gep1 to i32
226226 %mul = mul i32 %ptrtoint, 9
227227 store volatile i32 %mul, i32 addrspace(3)* undef
3535 ; GCN: s_mov_b32 [[COPY_M0:s[0-9]+]], m0
3636 ; GCN: ; use [[COPY_M0]]
3737 define amdgpu_kernel void @inline_sreg_constraint_m0() {
38 %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={M0}"()
38 %m0 = tail call i32 asm sideeffect "s_mov_b32 m0, -1", "={m0}"()
3939 tail call void asm sideeffect "; use $0", "s"(i32 %m0)
4040 ret void
4141 }
105105 ; GFX8-16BANK-NEXT: v_add_f16_e32 v0, s3, v0
106106 ; GFX8-16BANK-NEXT: ; return to shader part epilog
107107 main_body:
108 %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
108 %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
109109 %p1_0 = call float @llvm.amdgcn.interp.p1.f16(float %i, i32 1, i32 2, i1 0, i32 %m0)
110110 %p2_0 = call half @llvm.amdgcn.interp.p2.f16(float %p1_0, float %j, i32 1, i32 2, i1 0, i32 %m0)
111111 %my = trunc i32 %mx to i16
169169 ; GFX8-16BANK-NEXT: ; return to shader part epilog
170170 main_body:
171171 %p1_0 = call float @llvm.amdgcn.interp.p1.f16(float %i, i32 1, i32 2, i1 0, i32 %m0)
172 %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
172 %mx = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
173173 %p2_0 = call half @llvm.amdgcn.interp.p2.f16(float %p1_0, float %j, i32 1, i32 2, i1 0, i32 %m0)
174174 %my = trunc i32 %mx to i16
175175 %mh = bitcast i16 %my to half
2525 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
2626 ; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
2727 define amdgpu_kernel void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 {
28 %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
28 %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
2929 %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %m0)
3030 store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
3131 ret void
3939 ; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], [[COPY_M0]]
4040 ; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
4141 define amdgpu_kernel void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
42 %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
42 %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
4343 %readlane = call i32 @llvm.amdgcn.readlane(i32 %m0, i32 %src1)
4444 store i32 %readlane, i32 addrspace(1)* %out, align 4
4545 ret void
4141 ; CHECK: v_writelane_b32 v{{[0-9]+}}, [[COPY_M0]], s{{[0-9]+}}
4242 define amdgpu_kernel void @test_writelane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
4343 %oldval = load i32, i32 addrspace(1)* %out
44 %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
44 %m0 = call i32 asm "s_mov_b32 m0, -1", "={m0}"()
4545 %writelane = call i32 @llvm.amdgcn.writelane(i32 %m0, i32 %src1, i32 %oldval)
4646 store i32 %writelane, i32 addrspace(1)* %out, align 4
4747 ret void
4242 ; GCN: s_add_i32 s{{[0-9]+}}, m0, 1
4343 define amdgpu_kernel void @spill_m0(i32 %cond, i32 addrspace(1)* %out) #0 {
4444 entry:
45 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
45 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
4646 %cmp0 = icmp eq i32 %cond, 0
4747 br i1 %cmp0, label %if, label %endif
4848
5151 br label %endif
5252
5353 endif:
54 %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{M0}"(i32 %m0) #0
54 %foo = call i32 asm sideeffect "s_add_i32 $0, $1, 1", "=s,{m0}"(i32 %m0) #0
5555 store i32 %foo, i32 addrspace(1)* %out
5656 ret void
5757 }
137137 ; GCN-NOT: s_buffer_load_dword m0
138138 define amdgpu_kernel void @m0_unavailable_spill(i32 %m0.arg) #0 {
139139 main_body:
140 %m0 = call i32 asm sideeffect "; def $0, 1", "={M0}"() #0
140 %m0 = call i32 asm sideeffect "; def $0, 1", "={m0}"() #0
141141 %tmp = call float @llvm.amdgcn.interp.mov(i32 2, i32 0, i32 0, i32 %m0.arg)
142 call void asm sideeffect "; clobber $0", "~{M0}"() #0
142 call void asm sideeffect "; clobber $0", "~{m0}"() #0
143143 %cmp = fcmp ueq float 0.000000e+00, %tmp
144144 br i1 %cmp, label %if, label %else
145145
190190 ; TOSMEM: s_dcache_wb
191191 ; TOSMEM: s_endpgm
192192 define amdgpu_kernel void @restore_m0_lds(i32 %arg) {
193 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={M0}"() #0
193 %m0 = call i32 asm sideeffect "s_mov_b32 m0, 0", "={m0}"() #0
194194 %sval = load volatile i64, i64 addrspace(4)* undef
195195 %cmp = icmp eq i32 %arg, 0
196196 br i1 %cmp, label %ret, label %bb
197197
198198 bb:
199199 store volatile i64 %sval, i64 addrspace(3)* undef
200 call void asm sideeffect "; use $0", "{M0}"(i32 %m0) #0
200 call void asm sideeffect "; use $0", "{m0}"(i32 %m0) #0
201201 br label %ret
202202
203203 ret:
8282 %val = extractvalue { i32, i1 } %uadd, 0
8383 %carry = extractvalue { i32, i1 } %uadd, 1
8484 store volatile i32 %val, i32 addrspace(1)* %out, align 4
85 call void asm sideeffect "", "~{VCC}"() #0
85 call void asm sideeffect "", "~{vcc}"() #0
8686 store volatile i1 %carry, i1 addrspace(1)* %carryout
8787 ret void
8888 }
8383 %val = extractvalue { i32, i1 } %uadd, 0
8484 %carry = extractvalue { i32, i1 } %uadd, 1
8585 store volatile i32 %val, i32 addrspace(1)* %out, align 4
86 call void asm sideeffect "", "~{VCC}"() #0
86 call void asm sideeffect "", "~{vcc}"() #0
8787 store volatile i1 %carry, i1 addrspace(1)* %carryout
8888 ret void
8989 }