34c4b396c7796448d3e52a4194e306fe64fa09df
[mesa.git] / src / gallium / drivers / radeon / MCTargetDesc / R600MCCodeEmitter.cpp
1 //===- R600MCCodeEmitter.cpp - Code Emitter for R600->Cayman GPU families -===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
// This code emitter outputs bytecode that is understood by the r600g driver
11 // in the Mesa [1] project. The bytecode is very similar to the hardware's ISA,
12 // except that the size of the instruction fields are rounded up to the
13 // nearest byte.
14 //
15 // [1] http://www.mesa3d.org/
16 //
17 //===----------------------------------------------------------------------===//
18
19 #include "R600Defines.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
22 #include "llvm/MC/MCCodeEmitter.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCInst.h"
25 #include "llvm/MC/MCInstrInfo.h"
26 #include "llvm/MC/MCRegisterInfo.h"
27 #include "llvm/MC/MCSubtargetInfo.h"
28 #include "llvm/Support/raw_ostream.h"
29
30 #include <stdio.h>
31
32 #define SRC_BYTE_COUNT 11
33 #define DST_BYTE_COUNT 5
34
35 using namespace llvm;
36
namespace {

/// R600MCCodeEmitter - Emits r600g-style bytecode for the R600->Cayman GPU
/// families. Every instruction is written as a type byte (see InstrTypes)
/// followed by a fixed-layout, byte-aligned encoding of its fields.
class R600MCCodeEmitter : public AMDGPUMCCodeEmitter {
  R600MCCodeEmitter(const R600MCCodeEmitter &); // DO NOT IMPLEMENT
  void operator=(const R600MCCodeEmitter &); // DO NOT IMPLEMENT
  const MCInstrInfo &MCII;       // Instruction descriptions (TSFlags, predicates).
  const MCSubtargetInfo &STI;    // Subtarget information.
  MCContext &Ctx;                // MC context.

public:

  R600MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
                    MCContext &ctx)
    : MCII(mcii), STI(sti), Ctx(ctx) { }

  /// EncodeInstruction - Encode the instruction and write it to the OS.
  virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return the encoding for an MCOperand.
  virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                     SmallVectorImpl<MCFixup> &Fixups) const;
private:

  // Emit one complete ALU instruction (type byte, sources, dest, ALU word).
  void EmitALUInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                    raw_ostream &OS) const;
  // Emit the SRC_BYTE_COUNT-byte encoding of source operand OpIdx.
  void EmitSrc(const MCInst &MI, unsigned OpIdx, raw_ostream &OS) const;
  // Emit the DST_BYTE_COUNT-byte encoding of the destination (operand 0).
  void EmitDst(const MCInst &MI, raw_ostream &OS) const;
  // Emit the ALU opcode word and its modifier/predicate bytes.
  void EmitALU(const MCInst &MI, unsigned numSrc,
               SmallVectorImpl<MCFixup> &Fixups,
               raw_ostream &OS) const;
  // Emit a texture-fetch instruction.
  void EmitTexInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                    raw_ostream &OS) const;
  // Emit a flow-control instruction (see FCInstr ids).
  void EmitFCInstr(const MCInst &MI, raw_ostream &OS) const;

  // Emit byteCount zero bytes (used to pad unused fields).
  void EmitNullBytes(unsigned int byteCount, raw_ostream &OS) const;

  // Emit the low 8 bits of byte.
  void EmitByte(unsigned int byte, raw_ostream &OS) const;

  // Emit the low 16 bits of bytes, least-significant byte first.
  void EmitTwoBytes(uint32_t bytes, raw_ostream &OS) const;

  // Emit a 32-/64-bit value, least-significant byte first.
  void Emit(uint32_t value, raw_ostream &OS) const;
  void Emit(uint64_t value, raw_ostream &OS) const;

  unsigned getHWRegIndex(unsigned reg) const;
  unsigned getHWRegChan(unsigned reg) const;
  unsigned getHWReg(unsigned regNo) const;

  bool isFCOp(unsigned opcode) const;
  bool isTexOp(unsigned opcode) const;
  bool isFlagSet(const MCInst &MI, unsigned Operand, unsigned Flag) const;

  /// getHWRegIndexGen - Get the register's hardware index. Implemented in
  /// R600HwRegInfo.include.
  unsigned getHWRegIndexGen(unsigned int Reg) const;

  /// getHWRegChanGen - Get the register's channel. Implemented in
  /// R600HwRegInfo.include.
  unsigned getHWRegChanGen(unsigned int Reg) const;
};

} // End anonymous namespace
99
// Channel indices within a four-element register.
enum RegElement {
  ELEMENT_X = 0,
  ELEMENT_Y,
  ELEMENT_Z,
  ELEMENT_W
};

// Instruction type tags: the first byte of every emitted instruction,
// telling the r600g driver how to decode the bytes that follow.
enum InstrTypes {
  INSTR_ALU = 0,
  INSTR_TEX,
  INSTR_FC,
  INSTR_NATIVE,
  INSTR_VTX
};

// Flow-control instruction ids as expected by the r600g bytecode reader.
// NOTE(review): these values are an ABI shared with Mesa — do not reorder.
enum FCInstr {
  FC_IF = 0,
  FC_IF_INT,
  FC_ELSE,
  FC_ENDIF,
  FC_BGNLOOP,
  FC_ENDLOOP,
  FC_BREAK,
  FC_BREAK_NZ_INT,
  FC_CONTINUE,
  FC_BREAK_Z_INT,
  FC_BREAK_NZ
};

// Texture target kinds carried as an immediate operand of TEX instructions
// (starts at 1; see EmitTexInstr for how each kind adjusts coords/selects).
enum TextureTypes {
  TEXTURE_1D = 1,
  TEXTURE_2D,
  TEXTURE_3D,
  TEXTURE_CUBE,
  TEXTURE_RECT,
  TEXTURE_SHADOW1D,
  TEXTURE_SHADOW2D,
  TEXTURE_SHADOWRECT,
  TEXTURE_1D_ARRAY,
  TEXTURE_2D_ARRAY,
  TEXTURE_SHADOW1D_ARRAY,
  TEXTURE_SHADOW2D_ARRAY
};
143
144 MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
145 const MCSubtargetInfo &STI,
146 MCContext &Ctx) {
147 return new R600MCCodeEmitter(MCII, STI, Ctx);
148 }
149
150 void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
151 SmallVectorImpl<MCFixup> &Fixups) const {
152 if (isTexOp(MI.getOpcode())) {
153 EmitTexInstr(MI, Fixups, OS);
154 } else if (isFCOp(MI.getOpcode())){
155 EmitFCInstr(MI, OS);
156 } else if (MI.getOpcode() == AMDGPU::RETURN ||
157 MI.getOpcode() == AMDGPU::BUNDLE ||
158 MI.getOpcode() == AMDGPU::KILL) {
159 return;
160 } else {
161 switch(MI.getOpcode()) {
162 case AMDGPU::RAT_WRITE_CACHELESS_eg:
163 {
164 uint64_t inst = getBinaryCodeForInstr(MI, Fixups);
165 EmitByte(INSTR_NATIVE, OS);
166 Emit(inst, OS);
167 break;
168 }
169 case AMDGPU::VTX_READ_PARAM_i32_eg:
170 case AMDGPU::VTX_READ_PARAM_f32_eg:
171 case AMDGPU::VTX_READ_GLOBAL_i32_eg:
172 case AMDGPU::VTX_READ_GLOBAL_f32_eg:
173 case AMDGPU::VTX_READ_GLOBAL_v4i32_eg:
174 case AMDGPU::VTX_READ_GLOBAL_v4f32_eg:
175 {
176 uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
177 uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset
178
179 EmitByte(INSTR_VTX, OS);
180 Emit(InstWord01, OS);
181 Emit(InstWord2, OS);
182 break;
183 }
184
185 default:
186 EmitALUInstr(MI, Fixups, OS);
187 break;
188 }
189 }
190 }
191
192 void R600MCCodeEmitter::EmitALUInstr(const MCInst &MI,
193 SmallVectorImpl<MCFixup> &Fixups,
194 raw_ostream &OS) const {
195 const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
196 unsigned NumOperands = MI.getNumOperands();
197
198 if(MCDesc.findFirstPredOperandIdx() > -1)
199 NumOperands--;
200
201 if (GET_FLAG_OPERAND_IDX(MCDesc.TSFlags) != 0)
202 NumOperands--;
203
204 if(MI.getOpcode() == AMDGPU::PRED_X)
205 NumOperands = 2;
206
207 // XXX Check if instruction writes a result
208 if (NumOperands < 1) {
209 return;
210 }
211
212 // Emit instruction type
213 EmitByte(0, OS);
214
215 unsigned int OpIndex;
216 for (OpIndex = 1; OpIndex < NumOperands; OpIndex++) {
217 // Literal constants are always stored as the last operand.
218 if (MI.getOperand(OpIndex).isImm() || MI.getOperand(OpIndex).isFPImm()) {
219 break;
220 }
221 EmitSrc(MI, OpIndex, OS);
222 }
223
224 // Emit zeros for unused sources
225 for ( ; OpIndex < 4; OpIndex++) {
226 EmitNullBytes(SRC_BYTE_COUNT, OS);
227 }
228
229 EmitDst(MI, OS);
230
231 EmitALU(MI, NumOperands - 1, Fixups, OS);
232 }
233
234 void R600MCCodeEmitter::EmitSrc(const MCInst &MI, unsigned OpIdx,
235 raw_ostream &OS) const {
236 const MCOperand &MO = MI.getOperand(OpIdx);
237 union {
238 float f;
239 uint32_t i;
240 } Value;
241 Value.i = 0;
242 // Emit the source select (2 bytes). For GPRs, this is the register index.
243 // For other potential instruction operands, (e.g. constant registers) the
244 // value of the source select is defined in the r600isa docs.
245 if (MO.isReg()) {
246 unsigned reg = MO.getReg();
247 EmitTwoBytes(getHWReg(reg), OS);
248 if (reg == AMDGPU::ALU_LITERAL_X) {
249 unsigned ImmOpIndex = MI.getNumOperands() - 1;
250 MCOperand ImmOp = MI.getOperand(ImmOpIndex);
251 if (ImmOp.isFPImm()) {
252 Value.f = ImmOp.getFPImm();
253 } else {
254 assert(ImmOp.isImm());
255 Value.i = ImmOp.getImm();
256 }
257 }
258 } else {
259 // XXX: Handle other operand types.
260 EmitTwoBytes(0, OS);
261 }
262
263 // Emit the source channel (1 byte)
264 if (MO.isReg()) {
265 EmitByte(getHWRegChan(MO.getReg()), OS);
266 } else {
267 EmitByte(0, OS);
268 }
269
270 // XXX: Emit isNegated (1 byte)
271 if ((!(isFlagSet(MI, OpIdx, MO_FLAG_ABS)))
272 && (isFlagSet(MI, OpIdx, MO_FLAG_NEG) ||
273 (MO.isReg() &&
274 (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))){
275 EmitByte(1, OS);
276 } else {
277 EmitByte(0, OS);
278 }
279
280 // Emit isAbsolute (1 byte)
281 if (isFlagSet(MI, OpIdx, MO_FLAG_ABS)) {
282 EmitByte(1, OS);
283 } else {
284 EmitByte(0, OS);
285 }
286
287 // XXX: Emit relative addressing mode (1 byte)
288 EmitByte(0, OS);
289
290 // Emit kc_bank, This will be adjusted later by r600_asm
291 EmitByte(0, OS);
292
293 // Emit the literal value, if applicable (4 bytes).
294 Emit(Value.i, OS);
295
296 }
297
298 void R600MCCodeEmitter::EmitDst(const MCInst &MI, raw_ostream &OS) const {
299
300 const MCOperand &MO = MI.getOperand(0);
301 if (MO.isReg() && MO.getReg() != AMDGPU::PREDICATE_BIT) {
302 // Emit the destination register index (1 byte)
303 EmitByte(getHWReg(MO.getReg()), OS);
304
305 // Emit the element of the destination register (1 byte)
306 EmitByte(getHWRegChan(MO.getReg()), OS);
307
308 // Emit isClamped (1 byte)
309 if (isFlagSet(MI, 0, MO_FLAG_CLAMP)) {
310 EmitByte(1, OS);
311 } else {
312 EmitByte(0, OS);
313 }
314
315 // Emit writemask (1 byte).
316 if (isFlagSet(MI, 0, MO_FLAG_MASK)) {
317 EmitByte(0, OS);
318 } else {
319 EmitByte(1, OS);
320 }
321
322 // XXX: Emit relative addressing mode
323 EmitByte(0, OS);
324 } else {
325 // XXX: Handle other operand types. Are there any for destination regs?
326 EmitNullBytes(DST_BYTE_COUNT, OS);
327 }
328 }
329
330 void R600MCCodeEmitter::EmitALU(const MCInst &MI, unsigned numSrc,
331 SmallVectorImpl<MCFixup> &Fixups,
332 raw_ostream &OS) const {
333 const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
334
335 // Emit the instruction (2 bytes)
336 EmitTwoBytes(getBinaryCodeForInstr(MI, Fixups), OS);
337
338 // Emit IsLast (for this instruction group) (1 byte)
339 if (isFlagSet(MI, 0, MO_FLAG_NOT_LAST)) {
340 EmitByte(0, OS);
341 } else {
342 EmitByte(1, OS);
343 }
344
345 // Emit isOp3 (1 byte)
346 if (numSrc == 3) {
347 EmitByte(1, OS);
348 } else {
349 EmitByte(0, OS);
350 }
351
352 // XXX: Emit push modifier
353 if(isFlagSet(MI, 1, MO_FLAG_PUSH)) {
354 EmitByte(1, OS);
355 } else {
356 EmitByte(0, OS);
357 }
358
359 // XXX: Emit predicate (1 byte)
360 int PredIdx = MCDesc.findFirstPredOperandIdx();
361 if (PredIdx > -1)
362 switch(MI.getOperand(PredIdx).getReg()) {
363 case AMDGPU::PRED_SEL_ZERO:
364 EmitByte(2, OS);
365 break;
366 case AMDGPU::PRED_SEL_ONE:
367 EmitByte(3, OS);
368 break;
369 default:
370 EmitByte(0, OS);
371 break;
372 }
373 else {
374 EmitByte(0, OS);
375 }
376
377
378 // XXX: Emit bank swizzle. (1 byte) Do we need this? It looks like
379 // r600_asm.c sets it.
380 EmitByte(0, OS);
381
382 // XXX: Emit bank_swizzle_force (1 byte) Not sure what this is for.
383 EmitByte(0, OS);
384
385 // XXX: Emit OMOD (1 byte) Not implemented.
386 EmitByte(0, OS);
387
388 // XXX: Emit index_mode. I think this is for indirect addressing, so we
389 // don't need to worry about it.
390 EmitByte(0, OS);
391 }
392
393 void R600MCCodeEmitter::EmitTexInstr(const MCInst &MI,
394 SmallVectorImpl<MCFixup> &Fixups,
395 raw_ostream &OS) const {
396
397 unsigned opcode = MI.getOpcode();
398 bool hasOffsets = (opcode == AMDGPU::TEX_LD);
399 unsigned op_offset = hasOffsets ? 3 : 0;
400 int64_t sampler = MI.getOperand(op_offset+2).getImm();
401 int64_t textureType = MI.getOperand(op_offset+3).getImm();
402 unsigned srcSelect[4] = {0, 1, 2, 3};
403
404 // Emit instruction type
405 EmitByte(1, OS);
406
407 // Emit instruction
408 EmitByte(getBinaryCodeForInstr(MI, Fixups), OS);
409
410 // XXX: Emit resource id r600_shader.c uses sampler + 1. Why?
411 EmitByte(sampler + 1 + 1, OS);
412
413 // Emit source register
414 EmitByte(getHWReg(MI.getOperand(1).getReg()), OS);
415
416 // XXX: Emit src isRelativeAddress
417 EmitByte(0, OS);
418
419 // Emit destination register
420 EmitByte(getHWReg(MI.getOperand(0).getReg()), OS);
421
422 // XXX: Emit dst isRealtiveAddress
423 EmitByte(0, OS);
424
425 // XXX: Emit dst select
426 EmitByte(0, OS); // X
427 EmitByte(1, OS); // Y
428 EmitByte(2, OS); // Z
429 EmitByte(3, OS); // W
430
431 // XXX: Emit lod bias
432 EmitByte(0, OS);
433
434 // XXX: Emit coord types
435 unsigned coordType[4] = {1, 1, 1, 1};
436
437 if (textureType == TEXTURE_RECT
438 || textureType == TEXTURE_SHADOWRECT) {
439 coordType[ELEMENT_X] = 0;
440 coordType[ELEMENT_Y] = 0;
441 }
442
443 if (textureType == TEXTURE_1D_ARRAY
444 || textureType == TEXTURE_SHADOW1D_ARRAY) {
445 if (opcode == AMDGPU::TEX_SAMPLE_C_L || opcode == AMDGPU::TEX_SAMPLE_C_LB) {
446 coordType[ELEMENT_Y] = 0;
447 } else {
448 coordType[ELEMENT_Z] = 0;
449 srcSelect[ELEMENT_Z] = ELEMENT_Y;
450 }
451 } else if (textureType == TEXTURE_2D_ARRAY
452 || textureType == TEXTURE_SHADOW2D_ARRAY) {
453 coordType[ELEMENT_Z] = 0;
454 }
455
456 for (unsigned i = 0; i < 4; i++) {
457 EmitByte(coordType[i], OS);
458 }
459
460 // XXX: Emit offsets
461 if (hasOffsets)
462 for (unsigned i = 2; i < 5; i++)
463 EmitByte(MI.getOperand(i).getImm()<<1, OS);
464 else
465 EmitNullBytes(3, OS);
466
467 // Emit sampler id
468 EmitByte(sampler, OS);
469
470 // XXX:Emit source select
471 if ((textureType == TEXTURE_SHADOW1D
472 || textureType == TEXTURE_SHADOW2D
473 || textureType == TEXTURE_SHADOWRECT
474 || textureType == TEXTURE_SHADOW1D_ARRAY)
475 && opcode != AMDGPU::TEX_SAMPLE_C_L
476 && opcode != AMDGPU::TEX_SAMPLE_C_LB) {
477 srcSelect[ELEMENT_W] = ELEMENT_Z;
478 }
479
480 for (unsigned i = 0; i < 4; i++) {
481 EmitByte(srcSelect[i], OS);
482 }
483 }
484
485 void R600MCCodeEmitter::EmitFCInstr(const MCInst &MI, raw_ostream &OS) const {
486
487 // Emit instruction type
488 EmitByte(INSTR_FC, OS);
489
490 // Emit SRC
491 unsigned NumOperands = MI.getNumOperands();
492 if (NumOperands > 0) {
493 assert(NumOperands == 1);
494 EmitSrc(MI, 0, OS);
495 } else {
496 EmitNullBytes(SRC_BYTE_COUNT, OS);
497 }
498
499 // Emit FC Instruction
500 enum FCInstr instr;
501 switch (MI.getOpcode()) {
502 case AMDGPU::BREAK_LOGICALZ_f32:
503 instr = FC_BREAK;
504 break;
505 case AMDGPU::BREAK_LOGICALNZ_f32:
506 instr = FC_BREAK_NZ;
507 break;
508 case AMDGPU::BREAK_LOGICALNZ_i32:
509 instr = FC_BREAK_NZ_INT;
510 break;
511 case AMDGPU::BREAK_LOGICALZ_i32:
512 instr = FC_BREAK_Z_INT;
513 break;
514 case AMDGPU::CONTINUE_LOGICALNZ_f32:
515 case AMDGPU::CONTINUE_LOGICALNZ_i32:
516 instr = FC_CONTINUE;
517 break;
518 case AMDGPU::IF_LOGICALNZ_f32:
519 instr = FC_IF;
520 case AMDGPU::IF_LOGICALNZ_i32:
521 instr = FC_IF_INT;
522 break;
523 case AMDGPU::IF_LOGICALZ_f32:
524 abort();
525 break;
526 case AMDGPU::ELSE:
527 instr = FC_ELSE;
528 break;
529 case AMDGPU::ENDIF:
530 instr = FC_ENDIF;
531 break;
532 case AMDGPU::ENDLOOP:
533 instr = FC_ENDLOOP;
534 break;
535 case AMDGPU::WHILELOOP:
536 instr = FC_BGNLOOP;
537 break;
538 default:
539 abort();
540 break;
541 }
542 EmitByte(instr, OS);
543 }
544
545 void R600MCCodeEmitter::EmitNullBytes(unsigned int ByteCount,
546 raw_ostream &OS) const {
547
548 for (unsigned int i = 0; i < ByteCount; i++) {
549 EmitByte(0, OS);
550 }
551 }
552
553 void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const {
554 OS.write((uint8_t) Byte & 0xff);
555 }
556
557 void R600MCCodeEmitter::EmitTwoBytes(unsigned int Bytes,
558 raw_ostream &OS) const {
559 OS.write((uint8_t) (Bytes & 0xff));
560 OS.write((uint8_t) ((Bytes >> 8) & 0xff));
561 }
562
563 void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const {
564 for (unsigned i = 0; i < 4; i++) {
565 OS.write((uint8_t) ((Value >> (8 * i)) & 0xff));
566 }
567 }
568
569 void R600MCCodeEmitter::Emit(uint64_t Value, raw_ostream &OS) const {
570 for (unsigned i = 0; i < 8; i++) {
571 EmitByte((Value >> (8 * i)) & 0xff, OS);
572 }
573 }
574
// getHWRegIndex - Map a register to its hardware source-select index.
// Special constant registers use fixed selects; negated variants share the
// select of their positive counterpart (the negate bit is emitted
// separately in EmitSrc). Predicate registers encode as 0. Everything else
// is looked up in the generated table from R600HwRegInfo.include.
unsigned R600MCCodeEmitter::getHWRegIndex(unsigned reg) const {
  switch(reg) {
  case AMDGPU::ZERO: return 248;
  case AMDGPU::ONE:
  case AMDGPU::NEG_ONE: return 249;
  case AMDGPU::ONE_INT: return 250;
  case AMDGPU::HALF:
  case AMDGPU::NEG_HALF: return 252;
  case AMDGPU::ALU_LITERAL_X: return 253;
  case AMDGPU::PREDICATE_BIT:
  case AMDGPU::PRED_SEL_OFF:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PRED_SEL_ONE:
    return 0;
  default: return getHWRegIndexGen(reg);
  }
}
592
// getHWRegChan - Map a register to its hardware channel (x/y/z/w).
// Special constant and predicate registers have no channel and return 0;
// everything else is looked up in the generated table from
// R600HwRegInfo.include.
unsigned R600MCCodeEmitter::getHWRegChan(unsigned reg) const {
  switch(reg) {
  case AMDGPU::ZERO:
  case AMDGPU::ONE:
  case AMDGPU::ONE_INT:
  case AMDGPU::NEG_ONE:
  case AMDGPU::HALF:
  case AMDGPU::NEG_HALF:
  case AMDGPU::ALU_LITERAL_X:
  case AMDGPU::PREDICATE_BIT:
  case AMDGPU::PRED_SEL_OFF:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PRED_SEL_ONE:
    return 0;
  default: return getHWRegChanGen(reg);
  }
}
610 unsigned R600MCCodeEmitter::getHWReg(unsigned RegNo) const {
611 unsigned HWReg;
612
613 HWReg = getHWRegIndex(RegNo);
614 if (AMDGPUMCRegisterClasses[AMDGPU::R600_CReg32RegClassID].contains(RegNo)) {
615 HWReg += 512;
616 }
617 return HWReg;
618 }
619
620 uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
621 const MCOperand &MO,
622 SmallVectorImpl<MCFixup> &Fixup) const {
623 if (MO.isReg()) {
624 return getHWReg(MO.getReg());
625 } else {
626 return MO.getImm();
627 }
628 }
629
630 //===----------------------------------------------------------------------===//
631 // Encoding helper functions
632 //===----------------------------------------------------------------------===//
633
634 bool R600MCCodeEmitter::isFCOp(unsigned opcode) const {
635 switch(opcode) {
636 default: return false;
637 case AMDGPU::BREAK_LOGICALZ_f32:
638 case AMDGPU::BREAK_LOGICALNZ_i32:
639 case AMDGPU::BREAK_LOGICALZ_i32:
640 case AMDGPU::BREAK_LOGICALNZ_f32:
641 case AMDGPU::CONTINUE_LOGICALNZ_f32:
642 case AMDGPU::IF_LOGICALNZ_i32:
643 case AMDGPU::IF_LOGICALZ_f32:
644 case AMDGPU::ELSE:
645 case AMDGPU::ENDIF:
646 case AMDGPU::ENDLOOP:
647 case AMDGPU::IF_LOGICALNZ_f32:
648 case AMDGPU::WHILELOOP:
649 return true;
650 }
651 }
652
653 bool R600MCCodeEmitter::isTexOp(unsigned opcode) const {
654 switch(opcode) {
655 default: return false;
656 case AMDGPU::TEX_LD:
657 case AMDGPU::TEX_GET_TEXTURE_RESINFO:
658 case AMDGPU::TEX_SAMPLE:
659 case AMDGPU::TEX_SAMPLE_C:
660 case AMDGPU::TEX_SAMPLE_L:
661 case AMDGPU::TEX_SAMPLE_C_L:
662 case AMDGPU::TEX_SAMPLE_LB:
663 case AMDGPU::TEX_SAMPLE_C_LB:
664 case AMDGPU::TEX_SAMPLE_G:
665 case AMDGPU::TEX_SAMPLE_C_G:
666 case AMDGPU::TEX_GET_GRADIENTS_H:
667 case AMDGPU::TEX_GET_GRADIENTS_V:
668 case AMDGPU::TEX_SET_GRADIENTS_H:
669 case AMDGPU::TEX_SET_GRADIENTS_V:
670 return true;
671 }
672 }
673
// isFlagSet - Test operand-modifier Flag for instruction operand Operand.
// All per-operand flags are packed into one immediate operand, NUM_MO_FLAGS
// bits per operand; GET_FLAG_OPERAND_IDX(TSFlags) yields that operand's
// index, with 0 meaning the instruction carries no flag operand.
bool R600MCCodeEmitter::isFlagSet(const MCInst &MI, unsigned Operand,
                                  unsigned Flag) const {
  const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
  unsigned FlagIndex = GET_FLAG_OPERAND_IDX(MCDesc.TSFlags);
  if (FlagIndex == 0) {
    // No flag operand: no modifier can be set.
    return false;
  }
  assert(MI.getOperand(FlagIndex).isImm());
  return !!((MI.getOperand(FlagIndex).getImm() >>
            (NUM_MO_FLAGS * Operand)) & Flag);
}
685 #define R600RegisterInfo R600MCCodeEmitter
686 #include "R600HwRegInfo.include"
687 #undef R600RegisterInfo
688
689 #include "AMDGPUGenMCCodeEmitter.inc"