llvm.org GIT mirror llvm / d1c65e8
Merging r260599:
------------------------------------------------------------------------
r260599 | thomas.stellard | 2016-02-11 13:45:07 -0800 (Thu, 11 Feb 2016) | 14 lines

AMDGPU/SI: Make sure MIMG descriptors and samplers stay in SGPRs

Summary:
It's possible to have resource descriptors and samplers stored in VGPRs,
either by a VMEM instruction or, in the case of samplers, by floating-point
calculations. When this happens, we need to use v_readfirstlane to copy
these values back to SGPRs.

Reviewers: mareko, arsenm

Subscribers: arsenm, llvm-commits

Differential Revision: http://reviews.llvm.org/D17102
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_38@271642 91177308-0d34-0410-b5e6-96231b3b80d8

Tom Stellard, 3 years ago
7 changed files with 152 additions and 0 deletions.
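For context (not part of the patch): v_readfirstlane_b32 copies the value held by the first active lane of a VGPR into an SGPR, so it is only a faithful VGPR-to-SGPR copy when the value is wave-uniform, which is exactly what MIMG resource descriptors and samplers are required to be. A minimal sketch of that semantics in plain C++ (the 64-lane wave model and names here are illustrative, not an LLVM or hardware API):

#include <array>
#include <cstdint>
#include <iostream>

// Simulate a 64-wide wavefront: a VGPR holds one 32-bit value per lane,
// an SGPR holds a single 32-bit value shared by all lanes.
using VGPR = std::array<uint32_t, 64>;
using SGPR = uint32_t;

// v_readfirstlane_b32: copy the value from the first active lane of a
// VGPR into an SGPR. This is a correct VGPR-to-SGPR copy only when the
// VGPR is wave-uniform (all active lanes hold the same value).
SGPR readFirstLane(const VGPR &V, uint64_t ExecMask) {
  for (unsigned Lane = 0; Lane < 64; ++Lane)
    if (ExecMask & (1ull << Lane))
      return V[Lane];
  return 0; // No active lanes; the hardware result is undefined.
}

int main() {
  VGPR Desc;
  Desc.fill(0xCAFEBABEu); // Uniform across the wave, like a loaded descriptor.
  SGPR S = readFirstLane(Desc, ~0ull);
  std::cout << std::hex << S << '\n'; // cafebabe
}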
@@ -686,5 +686,6 @@
   let MIMG = 1;
   let Uses = [EXEC];
 
+  let UseNamedOperandTable = 1;
   let hasSideEffects = 0; // XXX ????
 }
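A side note on this TableGen change: setting UseNamedOperandTable makes TableGen emit a name-to-index operand table for MIMG instructions, which is what lets the legalizer below find srsrc and ssamp via getNamedOperand instead of hard-coded operand indices. Conceptually the generated lookup behaves like this sketch (plain C++; the table contents are invented for illustration and do not match the real generated tables):

#include <cstring>
#include <iostream>

// For instructions that set UseNamedOperandTable, TableGen emits a table
// mapping operand names to operand indices. Conceptually:
struct NamedOperandEntry { const char *Name; int Index; };

// Hypothetical operand layout for an image_sample-like instruction.
static const NamedOperandEntry MIMGOperands[] = {
    {"vdata", 0}, {"vaddr", 1}, {"srsrc", 2}, {"ssamp", 3},
};

// getNamedOperandIdx-style lookup: returns -1 when the operand is absent,
// which is why the legalizer null-checks getNamedOperand's result.
int getNamedOperandIdx(const char *Name) {
  for (const auto &E : MIMGOperands)
    if (std::strcmp(E.Name, Name) == 0)
      return E.Index;
  return -1;
}

int main() {
  std::cout << getNamedOperandIdx("srsrc") << '\n'; // 2
  std::cout << getNamedOperandIdx("vdst") << '\n';  // -1, operand not present
}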
@@ -1967,6 +1967,32 @@
   }
 }
 
+unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr *UseMI,
+                                         MachineRegisterInfo &MRI) const {
+  const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
+  const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
+  unsigned DstReg = MRI.createVirtualRegister(SRC);
+  unsigned SubRegs = VRC->getSize() / 4;
+
+  SmallVector<unsigned, 8> SRegs;
+  for (unsigned i = 0; i < SubRegs; ++i) {
+    unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
+    BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
+            get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
+            .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
+    SRegs.push_back(SGPR);
+  }
+
+  MachineInstrBuilder MIB = BuildMI(*UseMI->getParent(), UseMI,
+                                    UseMI->getDebugLoc(),
+                                    get(AMDGPU::REG_SEQUENCE), DstReg);
+  for (unsigned i = 0; i < SubRegs; ++i) {
+    MIB.addReg(SRegs[i]);
+    MIB.addImm(RI.getSubRegFromChannel(i));
+  }
+  return DstReg;
+}
+
 void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
   MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
 
@@ -2076,6 +2102,22 @@
       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
               .addReg(Src0);
       MI->getOperand(1).setReg(NewSrc0);
+    }
+    return;
+  }
+
+  // Legalize MIMG
+  if (isMIMG(*MI)) {
+    MachineOperand *SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
+    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
+      unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
+      SRsrc->setReg(SGPR);
+    }
+
+    MachineOperand *SSamp = getNamedOperand(*MI, AMDGPU::OpName::ssamp);
+    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
+      unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
+      SSamp->setReg(SGPR);
     }
     return;
   }
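Putting the two pieces together: when legalizeOperands sees a MIMG instruction whose srsrc or ssamp operand lives in a VGPR class, readlaneVGPRToSGPR splits the wide value into 32-bit channels (VRC->getSize() / 4 of them), emits one V_READFIRSTLANE_B32 per channel, and re-packs the scalar channels with REG_SEQUENCE. A wave-level simulation of that round trip for a 128-bit sampler (plain C++, reusing the toy wave model from the sketch above; all names are illustrative):

#include <array>
#include <cassert>
#include <cstdint>

constexpr unsigned WaveSize = 64;
using VGPR = std::array<uint32_t, WaveSize>; // one 32-bit value per lane

// A 128-bit sampler held in 4 VGPR channels (VRC->getSize() / 4 == 4).
using Sampler128InVGPRs = std::array<VGPR, 4>;
using Sampler128InSGPRs = std::array<uint32_t, 4>; // the REG_SEQUENCE result

// Lane 0 assumed active; see the exec-mask version in the earlier sketch.
uint32_t readFirstLane(const VGPR &V) { return V[0]; }

Sampler128InSGPRs legalizeSampler(const Sampler128InVGPRs &V) {
  Sampler128InSGPRs S{};
  for (unsigned i = 0; i < 4; ++i) // one V_READFIRSTLANE_B32 per channel
    S[i] = readFirstLane(V[i]);
  return S;                        // channels stitched back together
}

int main() {
  Sampler128InVGPRs V;
  for (unsigned c = 0; c < 4; ++c)
    V[c].fill(0x1000u + c);        // wave-uniform, as the contract requires
  Sampler128InSGPRs S = legalizeSampler(V);
  for (unsigned c = 0; c < 4; ++c)
    assert(S[c] == 0x1000u + c);   // value preserved channel by channel
}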
@@ -400,6 +400,13 @@
   /// \brief Fix operands in \p MI to satisfy constant bus requirements.
   void legalizeOperandsVOP3(MachineRegisterInfo &MRI, MachineInstr *MI) const;
 
+  /// Copy a value from a VGPR (\p SrcReg) to an SGPR. This function can only
+  /// be used when it is known that the value in \p SrcReg is the same across
+  /// all threads in the wave.
+  /// \returns The SGPR register that \p SrcReg was copied to.
+  unsigned readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr *UseMI,
+                              MachineRegisterInfo &MRI) const;
+
   /// \brief Legalize all operands in this instruction. This function may
   /// create new instructions and insert them before \p MI.
   void legalizeOperands(MachineInstr *MI) const;
@@ -473,6 +473,24 @@
     return &AMDGPU::VReg_256RegClass;
   case 64:
     return &AMDGPU::VReg_512RegClass;
+  default:
+    llvm_unreachable("Invalid register class size");
+  }
+}
+
+const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
+                                 const TargetRegisterClass *VRC) const {
+  switch (VRC->getSize()) {
+  case 4:
+    return &AMDGPU::SGPR_32RegClass;
+  case 8:
+    return &AMDGPU::SReg_64RegClass;
+  case 16:
+    return &AMDGPU::SReg_128RegClass;
+  case 32:
+    return &AMDGPU::SReg_256RegClass;
+  case 64:
+    return &AMDGPU::SReg_512RegClass;
   default:
     llvm_unreachable("Invalid register class size");
   }
@@ -88,6 +88,10 @@
   const TargetRegisterClass *getEquivalentVGPRClass(
                                           const TargetRegisterClass *SRC) const;
 
+  /// \returns An SGPR reg class with the same width as \p VRC
+  const TargetRegisterClass *getEquivalentSGPRClass(
+                                          const TargetRegisterClass *VRC) const;
+
   /// \returns The register class that is used for a sub-register of \p RC for
   /// the given \p SubIdx. If \p SubIdx equals NoSubRegister, \p RC will
   /// be returned.
@@ -374,5 +374,39 @@
   ret void
 }
 
+; Check that the resource descriptor is stored in an sgpr.
+; CHECK-LABEL: {{^}}mimg_srsrc_sgpr:
+; CHECK: image_sample v{{[0-9]+}}, 1, 0, 0, 0, 0, 0, 0, 0, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+define void @mimg_srsrc_sgpr([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
+  %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+  %tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(2)* %arg, i32 0, i32 %tid
+  %tmp8 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp7, align 32, !tbaa !0
+  %tmp_bit = bitcast <8 x i32> %tmp8 to <32 x i8>
+  %tmp9 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> <i32 undef, i32 undef>, <32 x i8> %tmp_bit, <16 x i8> undef, i32 15)
+  %tmp10 = extractelement <4 x float> %tmp9, i32 0
+  %tmp12 = call i32 @llvm.SI.packf16(float undef, float %tmp10)
+  %tmp13 = bitcast i32 %tmp12 to float
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp13, float undef, float undef, float undef)
+  ret void
+}
+
+; Check that the sampler is stored in an sgpr.
+; CHECK-LABEL: {{^}}mimg_ssamp_sgpr:
+; CHECK: image_sample v{{[0-9]+}}, 1, 0, 0, 0, 0, 0, 0, 0, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+define void @mimg_ssamp_sgpr([17 x <4 x i32>] addrspace(2)* byval %arg) #0 {
+  %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
+  %tmp7 = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(2)* %arg, i32 0, i32 %tid
+  %tmp8 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp7, align 16, !tbaa !0
+  %tmp_bit = bitcast <4 x i32> %tmp8 to <16 x i8>
+  %tmp9 = call <4 x float> @llvm.SI.sample.v2i32(<2 x i32> <i32 undef, i32 undef>, <32 x i8> undef, <16 x i8> %tmp_bit, i32 15)
+  %tmp10 = extractelement <4 x float> %tmp9, i32 0
+  %tmp12 = call i32 @llvm.SI.packf16(float %tmp10, float undef)
+  %tmp13 = bitcast i32 %tmp12 to float
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float %tmp13, float undef, float undef, float undef)
+  ret void
+}
+
+declare i32 @llvm.amdgcn.mbcnt.lo(i32, i32) #1
+
 attributes #0 = { "ShaderType"="0" "unsafe-fp-math"="true" }
 attributes #1 = { nounwind readnone }
@@ -0,0 +1,46 @@
+; RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs | FileCheck %s
+
+; FIXME: Move this to sgpr-copy.ll when this is fixed on VI.
+; Make sure that when we split an smrd instruction in order to move it to
+; the VALU, we are also moving its users to the VALU.
+; CHECK-LABEL: {{^}}split_smrd_add_worklist:
+; CHECK: image_sample v{{[0-9]+}}, 1, 0, 0, 0, 0, 0, 0, 0, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}]
+
+define void @split_smrd_add_worklist([34 x <8 x i32>] addrspace(2)* byval %arg) #0 {
+bb:
+  %tmp = call float @llvm.SI.load.const(<16 x i8> undef, i32 96)
+  %tmp1 = bitcast float %tmp to i32
+  br i1 undef, label %bb2, label %bb3
+
+bb2:                                              ; preds = %bb
+  unreachable
+
+bb3:                                              ; preds = %bb
+  %tmp4 = bitcast float %tmp to i32
+  %tmp5 = add i32 %tmp4, 4
+  %tmp6 = sext i32 %tmp5 to i64
+  %tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(2)* %arg, i64 0, i64 %tmp6
+  %tmp8 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp7, align 32, !tbaa !0
+  %tmp9 = call <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32> <i32 undef, i32 undef>, <8 x i32> %tmp8, <4 x i32> undef, i32 15, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0)
+  %tmp10 = extractelement <4 x float> %tmp9, i32 0
+  %tmp12 = call i32 @llvm.SI.packf16(float %tmp10, float undef)
+  %tmp13 = bitcast i32 %tmp12 to float
+  call void @llvm.SI.export(i32 15, i32 1, i32 1, i32 0, i32 1, float undef, float %tmp13, float undef, float undef)
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare float @llvm.SI.load.const(<16 x i8>, i32) #1
+
+declare void @llvm.SI.export(i32, i32, i32, i32, i32, float, float, float, float)
+
+declare <4 x float> @llvm.SI.image.sample.v2i32(<2 x i32>, <8 x i32>, <4 x i32>, i32, i32, i32, i32, i32, i32, i32, i32) #1
+
+declare i32 @llvm.SI.packf16(float, float) #1
+
+attributes #0 = { "ShaderType"="0" "unsafe-fp-math"="true" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0, i32 1}
+!1 = !{!"const", null}
+!2 = !{!1, !1, i64 0}