//===- R600MCCodeEmitter.cpp - Code Emitter for R600->Cayman GPU families -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This code emitter outputs bytecode that is understood by the r600g driver
// in the Mesa [1] project. The bytecode is very similar to the hardware's ISA,
// except that the sizes of the instruction fields are rounded up to the
// nearest byte.
//
// [1] http://www.mesa3d.org/
//
//===----------------------------------------------------------------------===//
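
// Each instruction in the output stream is prefixed with a one-byte tag
// (see enum InstrTypes below) that tells r600g how to interpret the bytes
// that follow: ALU instructions carry per-source data plus a 64-bit
// instruction word, while TEX, VTX, and flow-control instructions use their
// own fixed layouts (see the Emit* helpers in this file).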

#include "R600Defines.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUMCCodeEmitter.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/raw_ostream.h"

#include <stdio.h>

#define SRC_BYTE_COUNT 11
#define DST_BYTE_COUNT 5
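// SRC_BYTE_COUNT matches the number of bytes EmitSrc() writes per source
// operand: 2 (source select) + 1 (channel) + 1 (negate) + 1 (absolute) +
// 1 (relative addressing) + 1 (kc_bank) + 4 (literal value) = 11.
// DST_BYTE_COUNT presumably describes the equivalent destination block,
// though it is not referenced elsewhere in this file.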

using namespace llvm;

namespace {

class R600MCCodeEmitter : public AMDGPUMCCodeEmitter {
  R600MCCodeEmitter(const R600MCCodeEmitter &);   // DO NOT IMPLEMENT
  void operator=(const R600MCCodeEmitter &);      // DO NOT IMPLEMENT
  const MCInstrInfo &MCII;
  const MCSubtargetInfo &STI;
  MCContext &Ctx;

public:

  R600MCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti,
                    MCContext &ctx)
    : MCII(mcii), STI(sti), Ctx(ctx) { }

  /// EncodeInstruction - Encode the instruction and write it to the OS.
  virtual void EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                                 SmallVectorImpl<MCFixup> &Fixups) const;

  /// getMachineOpValue - Return the encoding for an MCOperand.
  virtual uint64_t getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                     SmallVectorImpl<MCFixup> &Fixups) const;
private:

  void EmitALUInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                    raw_ostream &OS) const;
  void EmitSrc(const MCInst &MI, unsigned OpIdx, raw_ostream &OS) const;
  void EmitSrcISA(const MCInst &MI, unsigned OpIdx, uint64_t &Value,
                  raw_ostream &OS) const;
  void EmitDst(const MCInst &MI, raw_ostream &OS) const;
  void EmitTexInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
                    raw_ostream &OS) const;
  void EmitFCInstr(const MCInst &MI, raw_ostream &OS) const;

  void EmitNullBytes(unsigned int byteCount, raw_ostream &OS) const;

  void EmitByte(unsigned int byte, raw_ostream &OS) const;

  void EmitTwoBytes(uint32_t bytes, raw_ostream &OS) const;

  void Emit(uint32_t value, raw_ostream &OS) const;
  void Emit(uint64_t value, raw_ostream &OS) const;

  unsigned getHWRegIndex(unsigned reg) const;
  unsigned getHWRegChan(unsigned reg) const;
  unsigned getHWReg(unsigned regNo) const;

  bool isFCOp(unsigned opcode) const;
  bool isTexOp(unsigned opcode) const;
  bool isFlagSet(const MCInst &MI, unsigned Operand, unsigned Flag) const;

  /// getHWRegIndexGen - Get the register's hardware index. Implemented in
  /// R600HwRegInfo.include.
  unsigned getHWRegIndexGen(unsigned int Reg) const;

  /// getHWRegChanGen - Get the register's channel. Implemented in
  /// R600HwRegInfo.include.
  unsigned getHWRegChanGen(unsigned int Reg) const;
};

} // End anonymous namespace

enum RegElement {
  ELEMENT_X = 0,
  ELEMENT_Y,
  ELEMENT_Z,
  ELEMENT_W
};

enum InstrTypes {
  INSTR_ALU = 0,
  INSTR_TEX,
  INSTR_FC,
  INSTR_NATIVE,
  INSTR_VTX
};

enum FCInstr {
  FC_IF = 0,
  FC_IF_INT,
  FC_ELSE,
  FC_ENDIF,
  FC_BGNLOOP,
  FC_ENDLOOP,
  FC_BREAK,
  FC_BREAK_NZ_INT,
  FC_CONTINUE,
  FC_BREAK_Z_INT,
  FC_BREAK_NZ
};

enum TextureTypes {
  TEXTURE_1D = 1,
  TEXTURE_2D,
  TEXTURE_3D,
  TEXTURE_CUBE,
  TEXTURE_RECT,
  TEXTURE_SHADOW1D,
  TEXTURE_SHADOW2D,
  TEXTURE_SHADOWRECT,
  TEXTURE_1D_ARRAY,
  TEXTURE_2D_ARRAY,
  TEXTURE_SHADOW1D_ARRAY,
  TEXTURE_SHADOW2D_ARRAY
};

MCCodeEmitter *llvm::createR600MCCodeEmitter(const MCInstrInfo &MCII,
                                             const MCSubtargetInfo &STI,
                                             MCContext &Ctx) {
  return new R600MCCodeEmitter(MCII, STI, Ctx);
}

void R600MCCodeEmitter::EncodeInstruction(const MCInst &MI, raw_ostream &OS,
                                          SmallVectorImpl<MCFixup> &Fixups) const {
  if (isTexOp(MI.getOpcode())) {
    EmitTexInstr(MI, Fixups, OS);
  } else if (isFCOp(MI.getOpcode())) {
    EmitFCInstr(MI, OS);
  } else if (MI.getOpcode() == AMDGPU::RETURN ||
             MI.getOpcode() == AMDGPU::BUNDLE ||
             MI.getOpcode() == AMDGPU::KILL) {
    return;
  } else {
    switch(MI.getOpcode()) {
    case AMDGPU::RAT_WRITE_CACHELESS_eg:
      {
        uint64_t inst = getBinaryCodeForInstr(MI, Fixups);
        EmitByte(INSTR_NATIVE, OS);
        Emit(inst, OS);
        break;
      }
    case AMDGPU::VTX_READ_PARAM_i32_eg:
    case AMDGPU::VTX_READ_PARAM_f32_eg:
    case AMDGPU::VTX_READ_GLOBAL_i8_eg:
    case AMDGPU::VTX_READ_GLOBAL_i32_eg:
    case AMDGPU::VTX_READ_GLOBAL_f32_eg:
    case AMDGPU::VTX_READ_GLOBAL_v4i32_eg:
    case AMDGPU::VTX_READ_GLOBAL_v4f32_eg:
      {
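        // A VTX fetch is written as the INSTR_VTX tag byte, the 64-bit
        // encoding produced by TableGen, and a third 32-bit word holding
        // the fetch offset taken from operand 2.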
        uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);
        uint32_t InstWord2 = MI.getOperand(2).getImm(); // Offset

        EmitByte(INSTR_VTX, OS);
        Emit(InstWord01, OS);
        Emit(InstWord2, OS);
        break;
      }

    default:
      EmitALUInstr(MI, Fixups, OS);
      break;
    }
  }
}

void R600MCCodeEmitter::EmitALUInstr(const MCInst &MI,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     raw_ostream &OS) const {
  const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
  unsigned NumOperands = MI.getNumOperands();

  if (MCDesc.findFirstPredOperandIdx() > -1)
    NumOperands--;

  if (GET_FLAG_OPERAND_IDX(MCDesc.TSFlags) != 0)
    NumOperands--;

  if (MI.getOpcode() == AMDGPU::PRED_X)
    NumOperands = 2;

  // XXX Check if instruction writes a result
  if (NumOperands < 1) {
    return;
  }

  // Emit instruction type
  EmitByte(INSTR_ALU, OS);

  uint64_t InstWord01 = getBinaryCodeForInstr(MI, Fixups);

  // Older (R600-family) ALUs encode instructions with one or two source
  // operands with the opcode field one bit higher than the layout TableGen
  // produced, so move the 10-bit field at bit 39 up by one bit.
  if ((STI.getFeatureBits() & AMDGPU::FeatureR600ALUInst) &&
      MI.getNumOperands() < 4) {
    uint64_t ISAOpCode = InstWord01 & (0x3FFULL << 39);
    InstWord01 &= ~(0x3FFULL << 39);
    InstWord01 |= ISAOpCode << 1;
  }

  unsigned int OpIndex;
  for (OpIndex = 1; OpIndex < NumOperands; OpIndex++) {
    // Literal constants are always stored as the last operand.
    if (MI.getOperand(OpIndex).isImm() || MI.getOperand(OpIndex).isFPImm()) {
      break;
    }
    EmitSrcISA(MI, OpIndex, InstWord01, OS);
  }

  // Emit zeros for unused sources
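  // Each source emitted by EmitSrcISA() occupies SRC_BYTE_COUNT - 6 = 5
  // bytes in the stream (a one-byte constant-register flag plus a four-byte
  // literal); the remaining source fields are packed into InstWord01 itself.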
  for ( ; OpIndex < 4; OpIndex++) {
    EmitNullBytes(SRC_BYTE_COUNT - 6, OS);
  }

  // Emit destination register
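  // The destination is not emitted as separate bytes; its channel, clamp,
  // and write-mask bits are folded directly into the 64-bit ALU word below
  // (bits 62:61, 63, and 36, which appear to correspond to the hardware's
  // DST_CHAN, CLAMP, and WRITE_MASK fields).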
  const MCOperand &dstOp = MI.getOperand(0);
  if (dstOp.isReg() && dstOp.getReg() != AMDGPU::PREDICATE_BIT) {
    // Element of destination register
    InstWord01 |= uint64_t(getHWRegChan(dstOp.getReg())) << 61;

    // isClamped
    if (isFlagSet(MI, 0, MO_FLAG_CLAMP)) {
      InstWord01 |= 1ULL << 63;
    }

    // Write mask
    if (!isFlagSet(MI, 0, MO_FLAG_MASK) && NumOperands < 4) {
      InstWord01 |= 1ULL << 36;
    }

    // XXX: Emit relative addressing mode
  }

  // Emit ALU

  // Set the IsLast bit (for this instruction group)
  if (!isFlagSet(MI, 0, MO_FLAG_NOT_LAST)) {
    InstWord01 |= 1ULL << 31;
  }

  // XXX: Emit push modifier
  if (isFlagSet(MI, 1, MO_FLAG_PUSH)) {
    InstWord01 |= 1ULL << 34;
  }

  // Set the predicate select bits
  int PredIdx = MCDesc.findFirstPredOperandIdx();
  if (PredIdx != -1) {
    switch(MI.getOperand(PredIdx).getReg()) {
    case AMDGPU::PRED_SEL_ZERO:
      InstWord01 |= 2ULL << 29;
      break;
    case AMDGPU::PRED_SEL_ONE:
      InstWord01 |= 3ULL << 29;
      break;
    }
  }

  // XXX: predicate
  // XXX: bank swizzle
  // XXX: OMOD
  // XXX: index mode

  Emit(InstWord01, OS);
}

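// EmitSrc writes one source operand as SRC_BYTE_COUNT (11) bytes: a two-byte
// source select, one byte each for the channel, negate, absolute, and
// relative-addressing flags, one byte for the constant (kcache) bank, and a
// four-byte literal value that is only meaningful for ALU_LITERAL_X sources.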
void R600MCCodeEmitter::EmitSrc(const MCInst &MI, unsigned OpIdx,
                                raw_ostream &OS) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  union {
    float f;
    uint32_t i;
  } Value;
  Value.i = 0;
  // Emit the source select (2 bytes). For GPRs, this is the register index.
  // For other potential instruction operands (e.g. constant registers), the
  // value of the source select is defined in the r600isa docs.
  if (MO.isReg()) {
    unsigned reg = MO.getReg();
    EmitTwoBytes(getHWReg(reg), OS);
    if (reg == AMDGPU::ALU_LITERAL_X) {
      unsigned ImmOpIndex = MI.getNumOperands() - 1;
      MCOperand ImmOp = MI.getOperand(ImmOpIndex);
      if (ImmOp.isFPImm()) {
        Value.f = ImmOp.getFPImm();
      } else {
        assert(ImmOp.isImm());
        Value.i = ImmOp.getImm();
      }
    }
  } else {
    // XXX: Handle other operand types.
    EmitTwoBytes(0, OS);
  }

  // Emit the source channel (1 byte)
  if (MO.isReg()) {
    EmitByte(getHWRegChan(MO.getReg()), OS);
  } else {
    EmitByte(0, OS);
  }

  // XXX: Emit isNegated (1 byte)
  if ((!(isFlagSet(MI, OpIdx, MO_FLAG_ABS)))
      && (isFlagSet(MI, OpIdx, MO_FLAG_NEG) ||
         (MO.isReg() &&
          (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))) {
    EmitByte(1, OS);
  } else {
    EmitByte(0, OS);
  }

  // Emit isAbsolute (1 byte)
  if (isFlagSet(MI, OpIdx, MO_FLAG_ABS)) {
    EmitByte(1, OS);
  } else {
    EmitByte(0, OS);
  }

  // XXX: Emit relative addressing mode (1 byte)
  EmitByte(0, OS);

  // Emit kc_bank. This will be adjusted later by r600_asm.
  EmitByte(0, OS);

  // Emit the literal value, if applicable (4 bytes).
  Emit(Value.i, OS);

}

void R600MCCodeEmitter::EmitSrcISA(const MCInst &MI, unsigned OpIdx,
                                   uint64_t &Value, raw_ostream &OS) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  union {
    float f;
    uint32_t i;
  } InlineConstant;
  InlineConstant.i = 0;
  // The source select itself is already part of the 64-bit instruction word
  // built by TableGen; here we only emit a one-byte flag that tells r600g
  // whether this source is a constant register.
  if (MO.isReg()) {
    unsigned Reg = MO.getReg();
    if (AMDGPUMCRegisterClasses[AMDGPU::R600_CReg32RegClassID].contains(Reg)) {
      EmitByte(1, OS);
    } else {
      EmitByte(0, OS);
    }

    if (Reg == AMDGPU::ALU_LITERAL_X) {
      unsigned ImmOpIndex = MI.getNumOperands() - 1;
      MCOperand ImmOp = MI.getOperand(ImmOpIndex);
      if (ImmOp.isFPImm()) {
        InlineConstant.f = ImmOp.getFPImm();
      } else {
        assert(ImmOp.isImm());
        InlineConstant.i = ImmOp.getImm();
      }
    }
  } else {
    // XXX: Handle other operand types.
    EmitTwoBytes(0, OS);
  }

  // Source channel
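  // The channel is packed into the SRC0_CHAN (bits 11:10), SRC1_CHAN
  // (bits 24:23), or SRC2_CHAN (bits 43:42) field of the 64-bit ALU word,
  // depending on which source operand this is.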
  uint64_t sourceChannelValue = getHWRegChan(MO.getReg());
  if (OpIdx == 1)
    Value |= sourceChannelValue << 10;
  if (OpIdx == 2)
    Value |= sourceChannelValue << 23;
  if (OpIdx == 3)
    Value |= sourceChannelValue << 42;

  // isNegated
  if ((!(isFlagSet(MI, OpIdx, MO_FLAG_ABS)))
      && (isFlagSet(MI, OpIdx, MO_FLAG_NEG) ||
         (MO.isReg() &&
          (MO.getReg() == AMDGPU::NEG_ONE || MO.getReg() == AMDGPU::NEG_HALF)))) {
    if (OpIdx == 1)
      Value |= 1ULL << 12;
    else if (OpIdx == 2)
      Value |= 1ULL << 25;
    else if (OpIdx == 3)
      Value |= 1ULL << 44;
  }

  // isAbsolute
  if (isFlagSet(MI, OpIdx, MO_FLAG_ABS)) {
    assert(OpIdx < 3);
    Value |= 1ULL << (32 + OpIdx - 1);
  }

  // XXX: relative addressing mode
  // XXX: kc_bank

  // Emit the literal value, if applicable (4 bytes).
  Emit(InlineConstant.i, OS);

}

void R600MCCodeEmitter::EmitTexInstr(const MCInst &MI,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     raw_ostream &OS) const {

  unsigned opcode = MI.getOpcode();
  bool hasOffsets = (opcode == AMDGPU::TEX_LD);
  unsigned op_offset = hasOffsets ? 3 : 0;
  int64_t sampler = MI.getOperand(op_offset+2).getImm();
  int64_t textureType = MI.getOperand(op_offset+3).getImm();
  unsigned srcSelect[4] = {0, 1, 2, 3};
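  // srcSelect starts as the identity swizzle (x, y, z, w); it is adjusted
  // below for array and shadow texture types before being emitted.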

  // Emit instruction type
  EmitByte(INSTR_TEX, OS);

  // Emit instruction
  EmitByte(getBinaryCodeForInstr(MI, Fixups), OS);

  // XXX: Emit resource id. r600_shader.c uses sampler + 1. Why?
  EmitByte(sampler + 1 + 1, OS);

  // Emit source register
  EmitByte(getHWReg(MI.getOperand(1).getReg()), OS);

  // XXX: Emit src isRelativeAddress
  EmitByte(0, OS);

  // Emit destination register
  EmitByte(getHWReg(MI.getOperand(0).getReg()), OS);

  // XXX: Emit dst isRelativeAddress
  EmitByte(0, OS);

  // XXX: Emit dst select
  EmitByte(0, OS); // X
  EmitByte(1, OS); // Y
  EmitByte(2, OS); // Z
  EmitByte(3, OS); // W

  // XXX: Emit lod bias
  EmitByte(0, OS);

  // XXX: Emit coord types
  unsigned coordType[4] = {1, 1, 1, 1};

  if (textureType == TEXTURE_RECT
      || textureType == TEXTURE_SHADOWRECT) {
    coordType[ELEMENT_X] = 0;
    coordType[ELEMENT_Y] = 0;
  }

  if (textureType == TEXTURE_1D_ARRAY
      || textureType == TEXTURE_SHADOW1D_ARRAY) {
    if (opcode == AMDGPU::TEX_SAMPLE_C_L || opcode == AMDGPU::TEX_SAMPLE_C_LB) {
      coordType[ELEMENT_Y] = 0;
    } else {
      coordType[ELEMENT_Z] = 0;
      srcSelect[ELEMENT_Z] = ELEMENT_Y;
    }
  } else if (textureType == TEXTURE_2D_ARRAY
             || textureType == TEXTURE_SHADOW2D_ARRAY) {
    coordType[ELEMENT_Z] = 0;
  }

  for (unsigned i = 0; i < 4; i++) {
    EmitByte(coordType[i], OS);
  }

  // XXX: Emit offsets
  if (hasOffsets)
    for (unsigned i = 2; i < 5; i++)
      EmitByte(MI.getOperand(i).getImm() << 1, OS);
  else
    EmitNullBytes(3, OS);

  // Emit sampler id
  EmitByte(sampler, OS);

  // XXX: Emit source select
  if ((textureType == TEXTURE_SHADOW1D
       || textureType == TEXTURE_SHADOW2D
       || textureType == TEXTURE_SHADOWRECT
       || textureType == TEXTURE_SHADOW1D_ARRAY)
      && opcode != AMDGPU::TEX_SAMPLE_C_L
      && opcode != AMDGPU::TEX_SAMPLE_C_LB) {
    srcSelect[ELEMENT_W] = ELEMENT_Z;
  }

  for (unsigned i = 0; i < 4; i++) {
    EmitByte(srcSelect[i], OS);
  }
}

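// A flow-control instruction is emitted as the INSTR_FC tag byte, an
// 11-byte source block (or null bytes if the instruction has no operand),
// and a single byte from enum FCInstr identifying the flow-control operation.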
void R600MCCodeEmitter::EmitFCInstr(const MCInst &MI, raw_ostream &OS) const {

  // Emit instruction type
  EmitByte(INSTR_FC, OS);

  // Emit SRC
  unsigned NumOperands = MI.getNumOperands();
  if (NumOperands > 0) {
    assert(NumOperands == 1);
    EmitSrc(MI, 0, OS);
  } else {
    EmitNullBytes(SRC_BYTE_COUNT, OS);
  }

  // Emit FC Instruction
  enum FCInstr instr;
  switch (MI.getOpcode()) {
  case AMDGPU::BREAK_LOGICALZ_f32:
    instr = FC_BREAK;
    break;
  case AMDGPU::BREAK_LOGICALNZ_f32:
    instr = FC_BREAK_NZ;
    break;
  case AMDGPU::BREAK_LOGICALNZ_i32:
    instr = FC_BREAK_NZ_INT;
    break;
  case AMDGPU::BREAK_LOGICALZ_i32:
    instr = FC_BREAK_Z_INT;
    break;
  case AMDGPU::CONTINUE_LOGICALNZ_f32:
  case AMDGPU::CONTINUE_LOGICALNZ_i32:
    instr = FC_CONTINUE;
    break;
  case AMDGPU::IF_LOGICALNZ_f32:
    instr = FC_IF;
    break;
  case AMDGPU::IF_LOGICALNZ_i32:
    instr = FC_IF_INT;
    break;
  case AMDGPU::IF_LOGICALZ_f32:
    abort();
    break;
  case AMDGPU::ELSE:
    instr = FC_ELSE;
    break;
  case AMDGPU::ENDIF:
    instr = FC_ENDIF;
    break;
  case AMDGPU::ENDLOOP:
    instr = FC_ENDLOOP;
    break;
  case AMDGPU::WHILELOOP:
    instr = FC_BGNLOOP;
    break;
  default:
    abort();
    break;
  }
  EmitByte(instr, OS);
}

void R600MCCodeEmitter::EmitNullBytes(unsigned int ByteCount,
                                      raw_ostream &OS) const {

  for (unsigned int i = 0; i < ByteCount; i++) {
    EmitByte(0, OS);
  }
}

void R600MCCodeEmitter::EmitByte(unsigned int Byte, raw_ostream &OS) const {
  OS.write((uint8_t) Byte & 0xff);
}

void R600MCCodeEmitter::EmitTwoBytes(unsigned int Bytes,
                                     raw_ostream &OS) const {
  OS.write((uint8_t) (Bytes & 0xff));
  OS.write((uint8_t) ((Bytes >> 8) & 0xff));
}

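// The multi-byte emitters write values least-significant byte first
// (little-endian), matching the byte order EmitTwoBytes() uses above.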
void R600MCCodeEmitter::Emit(uint32_t Value, raw_ostream &OS) const {
  for (unsigned i = 0; i < 4; i++) {
    OS.write((uint8_t) ((Value >> (8 * i)) & 0xff));
  }
}

void R600MCCodeEmitter::Emit(uint64_t Value, raw_ostream &OS) const {
  for (unsigned i = 0; i < 8; i++) {
    EmitByte((Value >> (8 * i)) & 0xff, OS);
  }
}

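// Special pseudo-registers map to the hardware's inline-constant source
// selects (which appear to correspond to the ALU_SRC_* values in the R600
// ISA documents, e.g. 248 = ALU_SRC_0, 253 = ALU_SRC_LITERAL); ordinary
// registers fall through to the generated lookup in R600HwRegInfo.include.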
unsigned R600MCCodeEmitter::getHWRegIndex(unsigned reg) const {
  switch(reg) {
  case AMDGPU::ZERO: return 248;
  case AMDGPU::ONE:
  case AMDGPU::NEG_ONE: return 249;
  case AMDGPU::ONE_INT: return 250;
  case AMDGPU::HALF:
  case AMDGPU::NEG_HALF: return 252;
  case AMDGPU::ALU_LITERAL_X: return 253;
  case AMDGPU::PREDICATE_BIT:
  case AMDGPU::PRED_SEL_OFF:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PRED_SEL_ONE:
    return 0;
  default: return getHWRegIndexGen(reg);
  }
}

unsigned R600MCCodeEmitter::getHWRegChan(unsigned reg) const {
  switch(reg) {
  case AMDGPU::ZERO:
  case AMDGPU::ONE:
  case AMDGPU::ONE_INT:
  case AMDGPU::NEG_ONE:
  case AMDGPU::HALF:
  case AMDGPU::NEG_HALF:
  case AMDGPU::ALU_LITERAL_X:
  case AMDGPU::PREDICATE_BIT:
  case AMDGPU::PRED_SEL_OFF:
  case AMDGPU::PRED_SEL_ZERO:
  case AMDGPU::PRED_SEL_ONE:
    return 0;
  default: return getHWRegChanGen(reg);
  }
}
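
// getHWReg() biases constant (CReg) register indices by 512 so the source
// select emitted for them is distinct from ordinary GPR indices.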
unsigned R600MCCodeEmitter::getHWReg(unsigned RegNo) const {
  unsigned HWReg;

  HWReg = getHWRegIndex(RegNo);
  if (AMDGPUMCRegisterClasses[AMDGPU::R600_CReg32RegClassID].contains(RegNo)) {
    HWReg += 512;
  }
  return HWReg;
}

uint64_t R600MCCodeEmitter::getMachineOpValue(const MCInst &MI,
                                              const MCOperand &MO,
                                              SmallVectorImpl<MCFixup> &Fixup) const {
  if (MO.isReg()) {
    return getHWRegIndex(MO.getReg());
  } else if (MO.isImm()) {
    return MO.getImm();
  } else {
    assert(0);
    return 0;
  }
}

//===----------------------------------------------------------------------===//
// Encoding helper functions
//===----------------------------------------------------------------------===//

bool R600MCCodeEmitter::isFCOp(unsigned opcode) const {
  switch(opcode) {
  default: return false;
  case AMDGPU::BREAK_LOGICALZ_f32:
  case AMDGPU::BREAK_LOGICALNZ_i32:
  case AMDGPU::BREAK_LOGICALZ_i32:
  case AMDGPU::BREAK_LOGICALNZ_f32:
  case AMDGPU::CONTINUE_LOGICALNZ_f32:
  case AMDGPU::IF_LOGICALNZ_i32:
  case AMDGPU::IF_LOGICALZ_f32:
  case AMDGPU::ELSE:
  case AMDGPU::ENDIF:
  case AMDGPU::ENDLOOP:
  case AMDGPU::IF_LOGICALNZ_f32:
  case AMDGPU::WHILELOOP:
    return true;
  }
}

bool R600MCCodeEmitter::isTexOp(unsigned opcode) const {
  switch(opcode) {
  default: return false;
  case AMDGPU::TEX_LD:
  case AMDGPU::TEX_GET_TEXTURE_RESINFO:
  case AMDGPU::TEX_SAMPLE:
  case AMDGPU::TEX_SAMPLE_C:
  case AMDGPU::TEX_SAMPLE_L:
  case AMDGPU::TEX_SAMPLE_C_L:
  case AMDGPU::TEX_SAMPLE_LB:
  case AMDGPU::TEX_SAMPLE_C_LB:
  case AMDGPU::TEX_SAMPLE_G:
  case AMDGPU::TEX_SAMPLE_C_G:
  case AMDGPU::TEX_GET_GRADIENTS_H:
  case AMDGPU::TEX_GET_GRADIENTS_V:
  case AMDGPU::TEX_SET_GRADIENTS_H:
  case AMDGPU::TEX_SET_GRADIENTS_V:
    return true;
  }
}

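// Operand flags (MO_FLAG_*) are packed NUM_MO_FLAGS bits per operand into a
// single immediate "flag operand" whose index is encoded in the instruction's
// TSFlags; a flag index of 0 means the instruction carries no flag operand.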
bool R600MCCodeEmitter::isFlagSet(const MCInst &MI, unsigned Operand,
                                  unsigned Flag) const {
  const MCInstrDesc &MCDesc = MCII.get(MI.getOpcode());
  unsigned FlagIndex = GET_FLAG_OPERAND_IDX(MCDesc.TSFlags);
  if (FlagIndex == 0) {
    return false;
  }
  assert(MI.getOperand(FlagIndex).isImm());
  return !!((MI.getOperand(FlagIndex).getImm() >>
            (NUM_MO_FLAGS * Operand)) & Flag);
}

#define R600RegisterInfo R600MCCodeEmitter
#include "R600HwRegInfo.include"
#undef R600RegisterInfo

#include "AMDGPUGenMCCodeEmitter.inc"