llvm.org GIT mirror, llvm / commit 32c7610

R600: Remove dead code from the CodeEmitter v2

v2:
  - Replace switch statement with TSFlags query

Reviewed-by: Vincent Lejeune <vljn@ovi.com>
Tested-By: Aaron Watry <awatry@gmail.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@181229 91177308-0d34-0410-b5e6-96231b3b80d8

Tom Stellard, 7 years ago

3 changed files, 75 additions and 411 deletions.
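In short, the emitter now keys on instruction-class bits in TSFlags (via the IS_VTX/IS_TEX helpers added in R600Defines.h below) instead of enumerating opcodes in a switch. The following is a minimal standalone sketch of that idiom; the R600_InstFlag bit positions and the stub instruction-description type are assumptions for illustration, not the real LLVM definitions.

#include <cstdint>
#include <iostream>

// Assumed stand-ins for llvm::MCInstrDesc and the R600_InstFlag bits; the
// real enum lives in R600Defines.h and TSFlags is populated from TableGen.
namespace R600_InstFlag {
  enum { VTX_INST = 1 << 12, TEX_INST = 1 << 13 };
}
struct FakeInstrDesc { uint64_t TSFlags; };

#define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)

// Dispatch on instruction-class flags rather than on individual opcodes,
// mirroring the shape of the new EncodeInstruction in the diff below.
const char *classify(const FakeInstrDesc &Desc) {
  if (IS_VTX(Desc))
    return "vertex fetch encoding";
  if (IS_TEX(Desc))
    return "texture fetch encoding";
  return "default encoding";
}

int main() {
  FakeInstrDesc Vtx = {R600_InstFlag::VTX_INST};
  std::cout << classify(Vtx) << "\n";  // prints "vertex fetch encoding"
  return 0;
}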
R600MCCodeEmitter.cpp:

@@ ... @@
 #include "llvm/Support/raw_ostream.h"
 #include
 
-#define SRC_BYTE_COUNT 11
-#define DST_BYTE_COUNT 5
-
 using namespace llvm;
 
 namespace {
@@ ... @@
                                  SmallVectorImpl<MCFixup> &Fixups) const;
 private:
 
-  void EmitALUInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
-                    raw_ostream &OS) const;
-  void EmitSrc(const MCInst &MI, unsigned OpIdx, raw_ostream &OS) const;
-  void EmitSrcISA(const MCInst &MI, unsigned RegOpIdx, unsigned SelOpIdx,
-                  raw_ostream &OS) const;
-  void EmitDst(const MCInst &MI, raw_ostream &OS) const;
-  void EmitFCInstr(const MCInst &MI, raw_ostream &OS) const;
-
-  void EmitNullBytes(unsigned int byteCount, raw_ostream &OS) const;
-
   void EmitByte(unsigned int byte, raw_ostream &OS) const;
-
-  void EmitTwoBytes(uint32_t bytes, raw_ostream &OS) const;
 
   void Emit(uint32_t value, raw_ostream &OS) const;
   void Emit(uint64_t value, raw_ostream &OS) const;
 
   unsigned getHWRegChan(unsigned reg) const;
   unsigned getHWReg(unsigned regNo) const;
-
-  bool isFCOp(unsigned opcode) const;
-  bool isTexOp(unsigned opcode) const;
-  bool isFlagSet(const MCInst &MI, unsigned Operand, unsigned Flag) const;
 
 };
 
 
 void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                                           SmallVectorImpl<MCFixup> &Fixups) const {
-  if (isFCOp(MI.getOpcode())){
-    EmitFCInstr(MI, OS);
-  } else if (MI.getOpcode() == AMDGPU::RETURN ||
+  const MCInstrDesc &Desc = MCII.get(MI.getOpcode());
+  if (MI.getOpcode() == AMDGPU::RETURN ||
       MI.getOpcode() == AMDGPU::FETCH_CLAUSE ||
       MI.getOpcode() == AMDGPU::ALU_CLAUSE ||
       MI.getOpcode() == AMDGPU::BUNDLE ||
       MI.getOpcode() == AMDGPU::KILL) {
     return;
+  } else if (IS_VTX(Desc)) {
+    uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
+    uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
+    InstWord2 |= 1 << 19;
+
+    Emit(InstWord01, OS);
+    Emit(InstWord2, OS);
+    Emit((u_int32_t) 0, OS);
+  } else if (IS_TEX(Desc)) {
+    unsigned Opcode = MI.getOpcode();
+    bool HasOffsets = (Opcode == AMDGPU::TEX_LD);
+    unsigned OpOffset = HasOffsets ? 3 : 0;
+    int64_t Sampler = MI.getOperand(OpOffset + 3).getImm();
+    int64_t TextureType = MI.getOperand(OpOffset + 4).getImm();
+
+    uint32_t SrcSelect[4] = {0, 1, 2, 3};
+    uint32_t Offsets[3] = {0, 0, 0};
+    uint64_t CoordType[4] = {1, 1, 1, 1};
+
+    if (HasOffsets)
+      for (unsigned i = 0; i < 3; i++) {
+        int SignedOffset = MI.getOperand(i + 2).getImm();
+        Offsets[i] = (SignedOffset & 0x1F);
+      }
+
+    if (TextureType == TEXTURE_RECT ||
+        TextureType == TEXTURE_SHADOWRECT) {
+      CoordType[ELEMENT_X] = 0;
+      CoordType[ELEMENT_Y] = 0;
+    }
+
+    if (TextureType == TEXTURE_1D_ARRAY ||
+        TextureType == TEXTURE_SHADOW1D_ARRAY) {
+      if (Opcode == AMDGPU::TEX_SAMPLE_C_L ||
+          Opcode == AMDGPU::TEX_SAMPLE_C_LB) {
+        CoordType[ELEMENT_Y] = 0;
+      } else {
+        CoordType[ELEMENT_Z] = 0;
+        SrcSelect[ELEMENT_Z] = ELEMENT_Y;
+      }
+    } else if (TextureType == TEXTURE_2D_ARRAY ||
+               TextureType == TEXTURE_SHADOW2D_ARRAY) {
+      CoordType[ELEMENT_Z] = 0;
+    }
+
+
+    if ((TextureType == TEXTURE_SHADOW1D ||
+         TextureType == TEXTURE_SHADOW2D ||
+         TextureType == TEXTURE_SHADOWRECT ||
+         TextureType == TEXTURE_SHADOW1D_ARRAY) &&
+        Opcode != AMDGPU::TEX_SAMPLE_C_L &&
+        Opcode != AMDGPU::TEX_SAMPLE_C_LB) {
+      SrcSelect[ELEMENT_W] = ELEMENT_Z;
+    }
+
+    uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) |
+        CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 |
+        CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63;
+    uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
+        SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
+        SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
+        Offsets[2] << 10;
+
+    Emit(Word01, OS);
+    Emit(Word2, OS);
+    Emit((u_int32_t) 0, OS);
   } else {
-    switch(MI.getOpcode()) {
-    case AMDGPU::RAT_WRITE_CACHELESS_32_eg:
-    case AMDGPU::RAT_WRITE_CACHELESS_128_eg: {
-      uint64_t inst = getBinaryCodeForInstr(MI, Fixups);
-      Emit(inst, OS);
-      break;
-    }
-    case AMDGPU::CONSTANT_LOAD_eg:
-    case AMDGPU::VTX_READ_PARAM_8_eg:
-    case AMDGPU::VTX_READ_PARAM_16_eg:
-    case AMDGPU::VTX_READ_PARAM_32_eg:
-    case AMDGPU::VTX_READ_PARAM_128_eg:
-    case AMDGPU::VTX_READ_GLOBAL_8_eg:
-    case AMDGPU::VTX_READ_GLOBAL_32_eg:
-    case AMDGPU::VTX_READ_GLOBAL_128_eg:
-    case AMDGPU::TEX_VTX_CONSTBUF:
-    case AMDGPU::TEX_VTX_TEXBUF : {
-      uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
-      uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
-      InstWord2 |= 1 << 19;
-
-      Emit(InstWord01, OS);
-      Emit(InstWord2, OS);
-      Emit((u_int32_t) 0, OS);
-      break;
-    }
-    case AMDGPU::TEX_LD:
-    case AMDGPU::TEX_GET_TEXTURE_RESINFO:
-    case AMDGPU::TEX_SAMPLE:
-    case AMDGPU::TEX_SAMPLE_C:
-    case AMDGPU::TEX_SAMPLE_L:
-    case AMDGPU::TEX_SAMPLE_C_L:
-    case AMDGPU::TEX_SAMPLE_LB:
-    case AMDGPU::TEX_SAMPLE_C_LB:
-    case AMDGPU::TEX_SAMPLE_G:
-    case AMDGPU::TEX_SAMPLE_C_G:
-    case AMDGPU::TEX_GET_GRADIENTS_H:
-    case AMDGPU::TEX_GET_GRADIENTS_V:
-    case AMDGPU::TEX_SET_GRADIENTS_H:
-    case AMDGPU::TEX_SET_GRADIENTS_V: {
-      unsigned Opcode = MI.getOpcode();
-      bool HasOffsets = (Opcode == AMDGPU::TEX_LD);
-      unsigned OpOffset = HasOffsets ? 3 : 0;
-      int64_t Sampler = MI.getOperand(OpOffset + 3).getImm();
-      int64_t TextureType = MI.getOperand(OpOffset + 4).getImm();
-
-      uint32_t SrcSelect[4] = {0, 1, 2, 3};
-      uint32_t Offsets[3] = {0, 0, 0};
-      uint64_t CoordType[4] = {1, 1, 1, 1};
-
-      if (HasOffsets)
-        for (unsigned i = 0; i < 3; i++) {
-          int SignedOffset = MI.getOperand(i + 2).getImm();
-          Offsets[i] = (SignedOffset & 0x1F);
-        }
-
-
-      if (TextureType == TEXTURE_RECT ||
-          TextureType == TEXTURE_SHADOWRECT) {
-        CoordType[ELEMENT_X] = 0;
-        CoordType[ELEMENT_Y] = 0;
-      }
-
-      if (TextureType == TEXTURE_1D_ARRAY ||
-          TextureType == TEXTURE_SHADOW1D_ARRAY) {
-        if (Opcode == AMDGPU::TEX_SAMPLE_C_L ||
-            Opcode == AMDGPU::TEX_SAMPLE_C_LB) {
-          CoordType[ELEMENT_Y] = 0;
-        } else {
-          CoordType[ELEMENT_Z] = 0;
-          SrcSelect[ELEMENT_Z] = ELEMENT_Y;
-        }
-      } else if (TextureType == TEXTURE_2D_ARRAY ||
-                 TextureType == TEXTURE_SHADOW2D_ARRAY) {
-        CoordType[ELEMENT_Z] = 0;
-      }
-
-
-      if ((TextureType == TEXTURE_SHADOW1D ||
-           TextureType == TEXTURE_SHADOW2D ||
-           TextureType == TEXTURE_SHADOWRECT ||
-           TextureType == TEXTURE_SHADOW1D_ARRAY) &&
-          Opcode != AMDGPU::TEX_SAMPLE_C_L &&
-          Opcode != AMDGPU::TEX_SAMPLE_C_LB) {
-        SrcSelect[ELEMENT_W] = ELEMENT_Z;
-      }
-
-      uint64_t Word01 = getBinaryCodeForInstr(MI, Fixups) |
-          CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 |
-          CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63;
-      uint32_t Word2 = Sampler << 15 | SrcSelect[ELEMENT_X] << 20 |
-          SrcSelect[ELEMENT_Y] << 23 | SrcSelect[ELEMENT_Z] << 26 |
-          SrcSelect[ELEMENT_W] << 29 | Offsets[0] << 0 | Offsets[1] << 5 |
-          Offsets[2] << 10;
-
-      Emit(Word01, OS);
-      Emit(Word2, OS);
-      Emit((u_int32_t) 0, OS);
-      break;
-    }
-    case AMDGPU::CF_ALU:
-    case AMDGPU::CF_ALU_PUSH_BEFORE: {
-      uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
-      Emit(Inst, OS);
-      break;
-    }
-    case AMDGPU::CF_CALL_FS_EG:
-    case AMDGPU::CF_CALL_FS_R600:
-    case AMDGPU::CF_TC_EG:
-    case AMDGPU::CF_VC_EG:
-    case AMDGPU::CF_TC_R600:
-    case AMDGPU::CF_VC_R600:
-    case AMDGPU::WHILE_LOOP_EG:
-    case AMDGPU::END_LOOP_EG:
-    case AMDGPU::LOOP_BREAK_EG:
-    case AMDGPU::CF_CONTINUE_EG:
-    case AMDGPU::CF_JUMP_EG:
-    case AMDGPU::CF_ELSE_EG:
-    case AMDGPU::POP_EG:
-    case AMDGPU::WHILE_LOOP_R600:
-    case AMDGPU::END_LOOP_R600:
-    case AMDGPU::LOOP_BREAK_R600:
-    case AMDGPU::CF_CONTINUE_R600:
-    case AMDGPU::CF_JUMP_R600:
-    case AMDGPU::CF_ELSE_R600:
-    case AMDGPU::POP_R600:
-    case AMDGPU::EG_ExportSwz:
-    case AMDGPU::R600_ExportSwz:
-    case AMDGPU::EG_ExportBuf:
-    case AMDGPU::R600_ExportBuf:
-    case AMDGPU::PAD:
-    case AMDGPU::CF_END_R600:
-    case AMDGPU::CF_END_EG:
-    case AMDGPU::CF_END_CM: {
-      uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
-      Emit(Inst, OS);
-      break;
-    }
-    default:
-      uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
-      Emit(Inst, OS);
-      break;
-    }
-  }
-}
-
-void R600MCCodeEmitter::EmitALUInstr(const MCInst &MI,
-                                     SmallVectorImpl<MCFixup> &Fixups,
-                                     raw_ostream &OS) const {
-  const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
-
-  uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
-
-  //older alu have different encoding for instructions with one or two src
-  //parameters.
-  if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) &&
-      !(MCDesc.TSFlags & R600_InstFlag::OP3)) {
-    uint64_t ISAOpCode = InstWord01 & (0x3FFULL << 39);
-    InstWord01 &= ~(0x3FFULL << 39);
-    InstWord01 |= ISAOpCode << 1;
-  }
-
-  unsigned SrcNum = MCDesc.TSFlags & R600_InstFlag::OP3 ? 3 :
-      MCDesc.TSFlags & R600_InstFlag::OP2 ? 2 : 1;
-
-  const unsigned SrcOps[3][2] = {
-      {R600Operands::SRC0, R600Operands::SRC0_SEL},
-      {R600Operands::SRC1, R600Operands::SRC1_SEL},
-      {R600Operands::SRC2, R600Operands::SRC2_SEL}
-  };
-
-  for (unsigned SrcIdx = 0; SrcIdx < SrcNum; ++SrcIdx) {
-    unsigned RegOpIdx = R600Operands::ALUOpTable[SrcNum-1][SrcOps[SrcIdx][0]];
-    unsigned SelOpIdx = R600Operands::ALUOpTable[SrcNum-1][SrcOps[SrcIdx][1]];
-  }
-
-  Emit(InstWord01, OS);
-  return;
-}
-
-void R600MCCodeEmitter::EmitSrc(const MCInst &MI, unsigned OpIdx,
-                                raw_ostream &OS) const {
-  const MCOperand &MO = MI.getOperand(OpIdx);
-  union {
-    float f;
-    uint32_t i;
-  } Value;
-  Value.i = 0;
-  // Emit the source select (2 bytes). For GPRs, this is the register index.
-  // For other potential instruction operands, (e.g. constant registers) the
-  // value of the source select is defined in the r600isa docs.
-  if (MO.isReg()) {
-    unsigned reg = MO.getReg();
-    EmitTwoBytes(getHWReg(reg), OS);
-    if (reg == AMDGPU::ALU_LITERAL_X) {
-      unsigned ImmOpIndex = MI.getNumOperands() - 1;
-      MCOperand ImmOp = MI.getOperand(ImmOpIndex);
-      if (ImmOp.isFPImm()) {
-        Value.f = ImmOp.getFPImm();
-      } else {
-        assert(ImmOp.isImm());
-        Value.i = ImmOp.getImm();
-      }
-    }
-  } else {
-    // XXX: Handle other operand types.
-    EmitTwoBytes(0, OS);
-  }
-
-  // Emit the source channel (1 byte)
-  if (MO.isReg()) {
-    EmitByte(getHWRegChan(MO.getReg()), OS);
-  } else {
-    EmitByte(0, OS);
-  }
-
-  // XXX: Emit isNegated (1 byte)
-  if ((!(isFlagSet(MI, OpIdx, MO_FLAG_ABS)))
-      && (isFlagSet(MI, OpIdx, MO_FLAG_NEG) ||
-         (MO.isReg() &&
-          (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))){
-    EmitByte(1, OS);
-  } else {
-    EmitByte(0, OS);
-  }
-
-  // Emit isAbsolute (1 byte)
-  if (isFlagSet(MI, OpIdx, MO_FLAG_ABS)) {
-    EmitByte(1, OS);
-  } else {
-    EmitByte(0, OS);
-  }
-
-  // XXX: Emit relative addressing mode (1 byte)
-  EmitByte(0, OS);
-
-  // Emit kc_bank, This will be adjusted later by r600_asm
-  EmitByte(0, OS);
-
-  // Emit the literal value, if applicable (4 bytes).
-  Emit(Value.i, OS);
-
-}
-
-void R600MCCodeEmitter::EmitSrcISA(const MCInst &MI, unsigned RegOpIdx,
-                                   unsigned SelOpIdx, raw_ostream &OS) const {
-  const MCOperand &RegMO = MI.getOperand(RegOpIdx);
-  const MCOperand &SelMO = MI.getOperand(SelOpIdx);
-
-  union {
-    float f;
-    uint32_t i;
-  } InlineConstant;
-  InlineConstant.i = 0;
-  // Emit source type (1 byte) and source select (4 bytes). For GPRs type is 0
-  // and select is 0 (GPR index is encoded in the instr encoding. For constants
-  // type is 1 and select is the original const select passed from the driver.
-  unsigned Reg = RegMO.getReg();
-  if (Reg == AMDGPU::ALU_CONST) {
-    EmitByte(1, OS);
-    uint32_t Sel = SelMO.getImm();
-    Emit(Sel, OS);
-  } else {
-    EmitByte(0, OS);
-    Emit((uint32_t)0, OS);
-  }
-
-  if (Reg == AMDGPU::ALU_LITERAL_X) {
-    unsigned ImmOpIndex = MI.getNumOperands() - 2;
-    MCOperand ImmOp = MI.getOperand(ImmOpIndex);
-    if (ImmOp.isFPImm()) {
-      InlineConstant.f = ImmOp.getFPImm();
-    } else {
-      assert(ImmOp.isImm());
-      InlineConstant.i = ImmOp.getImm();
-    }
-  }
-
-  // Emit the literal value, if applicable (4 bytes).
-  Emit(InlineConstant.i, OS);
-}
-
-void R600MCCodeEmitter::EmitFCInstr(const MCInst &MI, raw_ostream &OS) const {
-
-  // Emit SRC
-  unsigned NumOperands = MI.getNumOperands();
-  if (NumOperands > 0) {
-    assert(NumOperands == 1);
-    EmitSrc(MI, 0, OS);
-  } else {
-    EmitNullBytes(SRC_BYTE_COUNT, OS);
-  }
-
-  // Emit FC Instruction
-  enum FCInstr instr;
-  switch (MI.getOpcode()) {
-  case AMDGPU::PREDICATED_BREAK:
-    instr = FC_BREAK_PREDICATE;
-    break;
-  case AMDGPU::CONTINUE:
-    instr = FC_CONTINUE;
-    break;
-  case AMDGPU::IF_PREDICATE_SET:
-    instr = FC_IF_PREDICATE;
-    break;
-  case AMDGPU::ELSE:
-    instr = FC_ELSE;
-    break;
-  case AMDGPU::ENDIF:
-    instr = FC_ENDIF;
-    break;
-  case AMDGPU::ENDLOOP:
-    instr = FC_ENDLOOP;
-    break;
-  case AMDGPU::WHILELOOP:
-    instr = FC_BGNLOOP;
-    break;
-  default:
-    abort();
-    break;
-  }
-  EmitByte(instr, OS);
-}
-
-void R600MCCodeEmitter::EmitNullBytes(unsigned int ByteCount,
-                                      raw_ostream &OS) const {
-
-  for (unsigned int i = 0; i < ByteCount; i++) {
-    EmitByte(0, OS);
+    uint64_t Inst = getBinaryCodeForInstr(MI, Fixups);
+    Emit(Inst, OS);
   }
 }
 
 void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const {
   OS.write((uint8_t) Byte & 0xff);
-}
-
-void R600MCCodeEmitter::EmitTwoBytes(unsigned int Bytes,
-                                     raw_ostream &OS) const {
-  OS.write((uint8_t) (Bytes & 0xff));
-  OS.write((uint8_t) ((Bytes >> 8) & 0xff));
 }
 
 void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const {
@@ ... @@
   }
 }
 
-//===----------------------------------------------------------------------===//
-// Encoding helper functions
-//===----------------------------------------------------------------------===//
-
-bool R600MCCodeEmitter::isFCOp(unsigned opcode) const {
-  switch(opcode) {
-  default: return false;
-  case AMDGPU::PREDICATED_BREAK:
-  case AMDGPU::CONTINUE:
-  case AMDGPU::IF_PREDICATE_SET:
-  case AMDGPU::ELSE:
-  case AMDGPU::ENDIF:
-  case AMDGPU::ENDLOOP:
-  case AMDGPU::WHILELOOP:
-    return true;
-  }
-}
-
-bool R600MCCodeEmitter::isTexOp(unsigned opcode) const {
-  switch(opcode) {
-  default: return false;
-  case AMDGPU::TEX_LD:
-  case AMDGPU::TEX_GET_TEXTURE_RESINFO:
-  case AMDGPU::TEX_SAMPLE:
-  case AMDGPU::TEX_SAMPLE_C:
-  case AMDGPU::TEX_SAMPLE_L:
-  case AMDGPU::TEX_SAMPLE_C_L:
-  case AMDGPU::TEX_SAMPLE_LB:
-  case AMDGPU::TEX_SAMPLE_C_LB:
-  case AMDGPU::TEX_SAMPLE_G:
-  case AMDGPU::TEX_SAMPLE_C_G:
-  case AMDGPU::TEX_GET_GRADIENTS_H:
-  case AMDGPU::TEX_GET_GRADIENTS_V:
-  case AMDGPU::TEX_SET_GRADIENTS_H:
-  case AMDGPU::TEX_SET_GRADIENTS_V:
-    return true;
-  }
-}
-
-bool R600MCCodeEmitter::isFlagSet(const MCInst &MI, unsigned Operand,
-                                  unsigned Flag) const {
-  const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
-  unsigned FlagIndex = GET_FLAG_OPERAND_IDX(MCDesc.TSFlags);
-  if (FlagIndex == 0) {
-    return false;
-  }
-  assert(MI.getOperand(FlagIndex).isImm());
-  return !!((MI.getOperand(FlagIndex).getImm() >>
-      (NUM_MO_FLAGS * Operand)) & Flag);
-}
-
 #include "AMDGPUGenMCCodeEmitter.inc"
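A note on the TEX path added in EncodeInstruction above: the per-element coordinate-type bits land in bits 60-63 of the 64-bit instruction word, while the sampler id, per-element source selects, and offsets are packed into a separate 32-bit word. Below is a self-contained sketch of that packing, using the shift amounts from the diff; the concrete field values are made up for illustration, and the real Word01 would start from getBinaryCodeForInstr() rather than zero.

#include <cstdint>
#include <cstdio>

enum { ELEMENT_X, ELEMENT_Y, ELEMENT_Z, ELEMENT_W };

int main() {
  uint64_t CoordType[4] = {1, 1, 1, 1};  // normalized coordinates on X/Y/Z/W
  uint32_t SrcSelect[4] = {0, 1, 2, 3};  // identity swizzle
  uint32_t Offsets[3]   = {0, 0, 0};     // no texel offsets
  int64_t  Sampler      = 2;             // illustrative sampler id

  // Coordinate-type bits go into the top four bits of the instruction word.
  uint64_t Word01 = 0;
  Word01 |= CoordType[ELEMENT_X] << 60 | CoordType[ELEMENT_Y] << 61 |
            CoordType[ELEMENT_Z] << 62 | CoordType[ELEMENT_W] << 63;

  // Sampler, source selects, and offsets share the third 32-bit word.
  uint32_t Word2 = Sampler << 15 |
                   SrcSelect[ELEMENT_X] << 20 | SrcSelect[ELEMENT_Y] << 23 |
                   SrcSelect[ELEMENT_Z] << 26 | SrcSelect[ELEMENT_W] << 29 |
                   Offsets[0] << 0 | Offsets[1] << 5 | Offsets[2] << 10;

  std::printf("Word01 = 0x%016llx  Word2 = 0x%08x\n",
              (unsigned long long)Word01, (unsigned)Word2);
  return 0;
}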
R600Defines.h:

@@ ... @@
 
 #define GET_REG_CHAN(reg) ((reg) >> HW_CHAN_SHIFT)
 #define GET_REG_INDEX(reg) ((reg) & HW_REG_MASK)
+
+#define IS_VTX(desc) ((desc).TSFlags & R600_InstFlag::VTX_INST)
+#define IS_TEX(desc) ((desc).TSFlags & R600_InstFlag::TEX_INST)
 
 namespace R600Operands {
   enum Ops {
R600InstrInfo.cpp:

@@ ... @@
 }
 
 bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
-  return ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST;
+  return ST.hasVertexCache() && IS_VTX(get(Opcode));
 }
 
 bool R600InstrInfo::usesVertexCache(const MachineInstr *MI) const {
@@ ... @@
 }
 
 bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
-  return (!ST.hasVertexCache() && get(Opcode).TSFlags & R600_InstFlag::VTX_INST) ||
-         (get(Opcode).TSFlags & R600_InstFlag::TEX_INST);
+  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
 }
 
 bool R600InstrInfo::usesTextureCache(const MachineInstr *MI) const {
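For completeness, the cache predicates changed above boil down to one rule: vertex fetches go through the vertex cache when the subtarget has one and through the texture cache otherwise, while texture instructions always use the texture cache. The sketch below restates that logic with plain values; the hasVertexCache parameter and the flag constants are stand-ins for the real subtarget query and the R600_InstFlag bits.

#include <cassert>
#include <cstdint>

// Assumed flag bits; the real ones come from R600_InstFlag in R600Defines.h.
const uint64_t VTX_INST = 1ull << 12;
const uint64_t TEX_INST = 1ull << 13;

// Mirrors R600InstrInfo::usesVertexCache / usesTextureCache from the diff,
// with the subtarget query replaced by a plain bool for illustration.
bool usesVertexCache(bool hasVertexCache, uint64_t TSFlags) {
  return hasVertexCache && (TSFlags & VTX_INST);
}
bool usesTextureCache(bool hasVertexCache, uint64_t TSFlags) {
  return (!hasVertexCache && (TSFlags & VTX_INST)) || (TSFlags & TEX_INST);
}

int main() {
  // A VTX instruction on a chip without a vertex cache is fetched through
  // the texture cache instead.
  assert(!usesVertexCache(/*hasVertexCache=*/false, VTX_INST));
  assert(usesTextureCache(/*hasVertexCache=*/false, VTX_INST));
  // With a vertex cache present, it stays on the vertex-cache path.
  assert(usesVertexCache(/*hasVertexCache=*/true, VTX_INST));
  assert(!usesTextureCache(/*hasVertexCache=*/true, VTX_INST));
  return 0;
}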