5 #include "common/sid.h"
6 #include "ac_shader_util.h"
7 #include "util/u_math.h"
// NOTE(review): this span is the interior of the asm_context struct; the
// struct header and closing brace lie outside this extract, and the
// extraction elided some original lines, so only comments are added here.
// Target GPU generation; selects which opcode translation table is used.
13 enum chip_class chip_class
;
// Branch fixups: (code-word position, branch instruction) pairs recorded at
// emit time and patched later in fix_branches().
14 std::vector
<std::pair
<int, SOPP_instruction
*>> branches
;
// Code-word positions of p_constaddr literals, rebased in fix_constaddrs().
15 std::vector
<unsigned> constaddrs
;
// Per-chip opcode table: indexed by (int)aco_opcode, -1 means unsupported.
16 const int16_t* opcode
;
17 // TODO: keep track of branch instructions referring blocks
18 // and, when emitting the block, correct the offset in instr
// Constructor: pick the opcode table matching the program's chip generation.
19 asm_context(Program
* program
) : program(program
), chip_class(program
->chip_class
) {
20 if (chip_class
<= GFX7
)
21 opcode
= &instr_info
.opcode_gfx7
[0];
22 else if (chip_class
<= GFX9
)
23 opcode
= &instr_info
.opcode_gfx9
[0];
24 else if (chip_class
== GFX10
)
25 opcode
= &instr_info
.opcode_gfx10
[0];
// Code-word position of the pending s_subvector_loop_begin; -1 when not
// currently inside a subvector loop (see the SOPK handling below).
28 int subvector_begin_pos
= -1;
// Translate an ACO SDWA byte/word select (sel) into the hardware SDWA sel
// field, using the physical register's byte offset when the select is
// register-aligned (sdwa_isra).
// NOTE(review): the opening brace and the size==1 branch of the original were
// elided by extraction; code is left untouched, comments only.
31 static uint32_t get_sdwa_sel(unsigned sel
, PhysReg reg
)
// Register-aligned select: derive the hw select from the register itself.
33 if (sel
& sdwa_isra
) {
34 unsigned size
= sdwa_rasize
& sel
;
// Word-sized access: the word index is the register's byte offset halved.
38 return sdwa_isword
| (reg
.byte() >> 1);
// Otherwise the ACO sel value maps directly onto the hw encoding bits.
40 return sel
& sdwa_asuint
;
// Encode one ACO Instruction into GFX6-GFX10 machine-code dwords appended to
// 'out'. Dispatches on instr->format; p_constaddr is lowered inline into a
// s_getpc_b64 / s_add_u32 / s_addc_u32 sequence. May recurse once for
// DPP/SDWA (the base instruction is emitted first, then the extra dword).
// NOTE(review): many structural lines (case labels, braces, breaks) were
// elided by extraction; comments only, all code bytes left untouched.
43 void emit_instruction(asm_context
& ctx
, std::vector
<uint32_t>& out
, Instruction
* instr
)
// Byte offset of this instruction from the start of the code, used by the
// PC-relative constant-address fixup below.
45 uint32_t instr_offset
= out
.size() * 4u;
47 /* lower remaining pseudo-instructions */
48 if (instr
->opcode
== aco_opcode::p_constaddr
) {
49 unsigned dest
= instr
->definitions
[0].physReg();
50 unsigned offset
= instr
->operands
[0].constantValue();
52 /* s_getpc_b64 dest[0:1] */
53 uint32_t encoding
= (0b101111101 << 23);
54 uint32_t opcode
= ctx
.opcode
[(int)aco_opcode::s_getpc_b64
];
// Opcodes 55..59 need special handling on GFX9 (handling elided here).
55 if (opcode
>= 55 && ctx
.chip_class
<= GFX9
) {
56 assert(ctx
.chip_class
== GFX9
&& opcode
< 60);
59 encoding
|= dest
<< 16;
60 encoding
|= opcode
<< 8;
61 out
.push_back(encoding
);
63 /* s_add_u32 dest[0], dest[0], ... */
64 encoding
= (0b10 << 30);
65 encoding
|= ctx
.opcode
[(int)aco_opcode::s_add_u32
] << 23;
66 encoding
|= dest
<< 16;
69 out
.push_back(encoding
);
// Record the literal's position so fix_constaddrs() can rebase it after the
// final code size is known.
70 ctx
.constaddrs
.push_back(out
.size());
// PC-relative delta: PC after s_getpc_b64 is instr_offset + 4.
71 out
.push_back(-(instr_offset
+ 4) + offset
);
73 /* s_addc_u32 dest[1], dest[1], 0 */
74 encoding
= (0b10 << 30);
75 encoding
|= ctx
.opcode
[(int)aco_opcode::s_addc_u32
] << 23;
76 encoding
|= (dest
+ 1) << 16;
79 out
.push_back(encoding
);
// Translate the ACO opcode to the chip-specific hardware opcode.
83 uint32_t opcode
= ctx
.opcode
[(int)instr
->opcode
];
// -1 in the table marks an opcode this chip generation does not support.
84 if (opcode
== (uint32_t)-1) {
85 fprintf(stderr
, "Unsupported opcode: ");
86 aco_print_instr(instr
, stderr
);
// Per-format encoders follow; several case labels were elided by extraction.
90 switch (instr
->format
) {
// Appears to be the SOP2 arm (its case label was elided): [31:30]=0b10.
92 uint32_t encoding
= (0b10 << 30);
93 encoding
|= opcode
<< 23;
94 encoding
|= !instr
->definitions
.empty() ? instr
->definitions
[0].physReg() << 16 : 0;
95 encoding
|= instr
->operands
.size() >= 2 ? instr
->operands
[1].physReg() << 8 : 0;
96 encoding
|= !instr
->operands
.empty() ? instr
->operands
[0].physReg() : 0;
97 out
.push_back(encoding
);
// SOPK arm: also maintains the subvector-loop begin/end fixup pair.
101 SOPK_instruction
*sopk
= static_cast<SOPK_instruction
*>(instr
);
103 if (instr
->opcode
== aco_opcode::s_subvector_loop_begin
) {
104 assert(ctx
.chip_class
>= GFX10
);
105 assert(ctx
.subvector_begin_pos
== -1);
106 ctx
.subvector_begin_pos
= out
.size();
107 } else if (instr
->opcode
== aco_opcode::s_subvector_loop_end
) {
108 assert(ctx
.chip_class
>= GFX10
);
109 assert(ctx
.subvector_begin_pos
!= -1);
110 /* Adjust s_subvector_loop_begin instruction to the address after the end  */
111 out
[ctx
.subvector_begin_pos
] |= (out
.size() - ctx
.subvector_begin_pos
);
112 /* Adjust s_subvector_loop_end instruction to the address after the beginning */
113 sopk
->imm
= (uint16_t)(ctx
.subvector_begin_pos
- (int)out
.size());
114 ctx
.subvector_begin_pos
= -1;
117 uint32_t encoding
= (0b1011 << 28);
118 encoding
|= opcode
<< 23;
// SDST field: prefer the non-scc definition, else a low-SGPR first operand.
120 !instr
->definitions
.empty() && !(instr
->definitions
[0].physReg() == scc
) ?
121 instr
->definitions
[0].physReg() << 16 :
122 !instr
->operands
.empty() && instr
->operands
[0].physReg() <= 127 ?
123 instr
->operands
[0].physReg() << 16 : 0;
124 encoding
|= sopk
->imm
;
125 out
.push_back(encoding
);
// Appears to be the SOP1 arm (case label elided): [31:23]=0b101111101.
129 uint32_t encoding
= (0b101111101 << 23);
130 if (opcode
>= 55 && ctx
.chip_class
<= GFX9
) {
131 assert(ctx
.chip_class
== GFX9
&& opcode
< 60);
134 encoding
|= !instr
->definitions
.empty() ? instr
->definitions
[0].physReg() << 16 : 0;
135 encoding
|= opcode
<< 8;
136 encoding
|= !instr
->operands
.empty() ? instr
->operands
[0].physReg() : 0;
137 out
.push_back(encoding
);
// Appears to be the SOPC arm (case label elided): [31:23]=0b101111110.
141 uint32_t encoding
= (0b101111110 << 23);
142 encoding
|= opcode
<< 16;
143 encoding
|= instr
->operands
.size() == 2 ? instr
->operands
[1].physReg() << 8 : 0;
144 encoding
|= !instr
->operands
.empty() ? instr
->operands
[0].physReg() : 0;
145 out
.push_back(encoding
);
// SOPP arm: branches record their position for later offset patching.
149 SOPP_instruction
* sopp
= static_cast<SOPP_instruction
*>(instr
);
150 uint32_t encoding
= (0b101111111 << 23);
151 encoding
|= opcode
<< 16;
152 encoding
|= (uint16_t) sopp
->imm
;
// block != -1 means this SOPP is a branch to a block; fix_branches() patches
// its 16-bit immediate once block offsets are final.
153 if (sopp
->block
!= -1)
154 ctx
.branches
.emplace_back(out
.size(), sopp
);
155 out
.push_back(encoding
);
// SMEM arm (GFX6/7 SMRD layout vs GFX8+ two-dword SMEM layout).
159 SMEM_instruction
* smem
= static_cast<SMEM_instruction
*>(instr
);
// soe: an extra SGPR offset operand is present (position depends on whether
// the instruction is a load or a store).
160 bool soe
= instr
->operands
.size() >= (!instr
->definitions
.empty() ? 3 : 4);
161 bool is_load
= !instr
->definitions
.empty();
162 uint32_t encoding
= 0;
// GFX6/7: single-dword SMRD encoding, optional literal follows.
164 if (ctx
.chip_class
<= GFX7
) {
165 encoding
= (0b11000 << 27);
166 encoding
|= opcode
<< 22;
167 encoding
|= instr
->definitions
.size() ? instr
->definitions
[0].physReg() << 15 : 0;
168 encoding
|= instr
->operands
.size() ? (instr
->operands
[0].physReg() >> 1) << 9 : 0;
169 if (instr
->operands
.size() >= 2) {
// Large constants (>= 1024) don't fit the 8-bit field and go as a literal.
170 if (!instr
->operands
[1].isConstant() || instr
->operands
[1].constantValue() >= 1024) {
171 encoding
|= instr
->operands
[1].physReg().reg();
173 encoding
|= instr
->operands
[1].constantValue() >> 2;
177 out
.push_back(encoding
);
178 /* SMRD instructions can take a literal on GFX6 & GFX7 */
179 if (instr
->operands
.size() >= 2 && instr
->operands
[1].isConstant() && instr
->operands
[1].constantValue() >= 1024)
180 out
.push_back(instr
->operands
[1].constantValue() >> 2);
// GFX8/9 vs GFX10 first-dword SMEM layouts differ in opcode prefix and flags.
184 if (ctx
.chip_class
<= GFX9
) {
185 encoding
= (0b110000 << 26);
186 assert(!smem
->dlc
); /* Device-level coherent is not supported on GFX9 and lower */
187 encoding
|= smem
->nv
? 1 << 15 : 0;
189 encoding
= (0b111101 << 26);
190 assert(!smem
->nv
); /* Non-volatile is not supported on GFX10 */
191 encoding
|= smem
->dlc
? 1 << 14 : 0;
194 encoding
|= opcode
<< 18;
195 encoding
|= smem
->glc
? 1 << 16 : 0;
197 if (ctx
.chip_class
<= GFX9
) {
198 if (instr
->operands
.size() >= 2)
199 encoding
|= instr
->operands
[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
201 if (ctx
.chip_class
== GFX9
) {
202 encoding
|= soe
? 1 << 14 : 0;
205 if (is_load
|| instr
->operands
.size() >= 3) { /* SDATA */
206 encoding
|= (is_load
? instr
->definitions
[0].physReg() : instr
->operands
[2].physReg()) << 6;
208 if (instr
->operands
.size() >= 1) { /* SBASE */
209 encoding
|= instr
->operands
[0].physReg() >> 1;
212 out
.push_back(encoding
);
// Second SMEM dword: OFFSET/SOFFSET fields.
216 uint32_t soffset
= ctx
.chip_class
>= GFX10
217 ? sgpr_null
/* On GFX10 this is disabled by specifying SGPR_NULL */
218 : 0; /* On GFX9, it is disabled by the SOE bit (and it's not present on GFX8 and below) */
219 if (instr
->operands
.size() >= 2) {
220 const Operand
&op_off1
= instr
->operands
[1];
221 if (ctx
.chip_class
<= GFX9
) {
222 offset
= op_off1
.isConstant() ? op_off1
.constantValue() : op_off1
.physReg();
224 /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an SGPR */
225 if (op_off1
.isConstant()) {
226 offset
= op_off1
.constantValue();
228 soffset
= op_off1
.physReg();
229 assert(!soe
); /* There is no place to put the other SGPR offset, if any */
234 const Operand
&op_off2
= instr
->operands
.back();
235 assert(ctx
.chip_class
>= GFX9
); /* GFX8 and below don't support specifying a constant and an SGPR at the same time */
236 assert(!op_off2
.isConstant());
237 soffset
= op_off2
.physReg();
241 encoding
|= soffset
<< 25;
243 out
.push_back(encoding
);
// Appears to be the VOP2 arm (case label elided): bit 31 = 0, opcode at [30:25].
247 uint32_t encoding
= 0;
248 encoding
|= opcode
<< 25;
249 encoding
|= (0xFF & instr
->definitions
[0].physReg()) << 17;
250 encoding
|= (0xFF & instr
->operands
[1].physReg()) << 9;
251 encoding
|= instr
->operands
[0].physReg();
252 out
.push_back(encoding
);
// Appears to be the VOP1 arm (case label elided): [31:25]=0b0111111.
256 uint32_t encoding
= (0b0111111 << 25);
257 if (!instr
->definitions
.empty())
258 encoding
|= (0xFF & instr
->definitions
[0].physReg()) << 17;
259 encoding
|= opcode
<< 9;
260 if (!instr
->operands
.empty())
261 encoding
|= instr
->operands
[0].physReg();
262 out
.push_back(encoding
);
// Appears to be the VOPC arm (case label elided): [31:25]=0b0111110.
266 uint32_t encoding
= (0b0111110 << 25);
267 encoding
|= opcode
<< 17;
268 encoding
|= (0xFF & instr
->operands
[1].physReg()) << 9;
269 encoding
|= instr
->operands
[0].physReg();
270 out
.push_back(encoding
);
// VINTRP arm: f16 interpolation uses a two-dword VOP3-like form, otherwise
// the single-dword VINTRP form.
273 case Format::VINTRP
: {
274 Interp_instruction
* interp
= static_cast<Interp_instruction
*>(instr
);
275 uint32_t encoding
= 0;
277 if (instr
->opcode
== aco_opcode::v_interp_p1ll_f16
||
278 instr
->opcode
== aco_opcode::v_interp_p1lv_f16
||
279 instr
->opcode
== aco_opcode::v_interp_p2_legacy_f16
||
280 instr
->opcode
== aco_opcode::v_interp_p2_f16
) {
281 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
282 encoding
= (0b110100 << 26);
283 } else if (ctx
.chip_class
== GFX10
) {
284 encoding
= (0b110101 << 26);
286 unreachable("Unknown chip_class.");
289 encoding
|= opcode
<< 16;
290 encoding
|= (0xFF & instr
->definitions
[0].physReg());
291 out
.push_back(encoding
);
// Second dword of the f16 interp form.
294 encoding
|= interp
->attribute
;
295 encoding
|= interp
->component
<< 6;
296 encoding
|= instr
->operands
[0].physReg() << 9;
297 if (instr
->opcode
== aco_opcode::v_interp_p2_f16
||
298 instr
->opcode
== aco_opcode::v_interp_p2_legacy_f16
||
299 instr
->opcode
== aco_opcode::v_interp_p1lv_f16
) {
300 encoding
|= instr
->operands
[2].physReg() << 18;
302 out
.push_back(encoding
);
// Plain VINTRP single-dword form; prefix differs between GFX8/9 and others.
304 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
305 encoding
= (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
307 encoding
= (0b110010 << 26);
311 encoding
|= (0xFF & instr
->definitions
[0].physReg()) << 18;
312 encoding
|= opcode
<< 16;
313 encoding
|= interp
->attribute
<< 10;
314 encoding
|= interp
->component
<< 8;
// v_interp_mov_f32 encodes a 2-bit parameter constant instead of a VGPR.
315 if (instr
->opcode
== aco_opcode::v_interp_mov_f32
)
316 encoding
|= (0x3 & instr
->operands
[0].constantValue());
318 encoding
|= (0xFF & instr
->operands
[0].physReg());
319 out
.push_back(encoding
);
// DS (LDS/GDS) arm: opcode/gds bit positions shifted by one on GFX10.
324 DS_instruction
* ds
= static_cast<DS_instruction
*>(instr
);
325 uint32_t encoding
= (0b110110 << 26);
326 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
327 encoding
|= opcode
<< 17;
328 encoding
|= (ds
->gds
? 1 : 0) << 16;
330 encoding
|= opcode
<< 18;
331 encoding
|= (ds
->gds
? 1 : 0) << 17;
333 encoding
|= ((0xFF & ds
->offset1
) << 8);
334 encoding
|= (0xFFFF & ds
->offset0
);
335 out
.push_back(encoding
);
// Second DS dword: VDST / DATA1 / DATA0 / ADDR; m0 operands encode as 0.
337 unsigned reg
= !instr
->definitions
.empty() ? instr
->definitions
[0].physReg() : 0;
338 encoding
|= (0xFF & reg
) << 24;
339 reg
= instr
->operands
.size() >= 3 && !(instr
->operands
[2].physReg() == m0
) ? instr
->operands
[2].physReg() : 0;
340 encoding
|= (0xFF & reg
) << 16;
341 reg
= instr
->operands
.size() >= 2 && !(instr
->operands
[1].physReg() == m0
) ? instr
->operands
[1].physReg() : 0;
342 encoding
|= (0xFF & reg
) << 8;
343 encoding
|= (0xFF & instr
->operands
[0].physReg());
344 out
.push_back(encoding
);
// MUBUF arm: flag bit positions vary per generation (addr64/slc/dlc).
347 case Format::MUBUF
: {
348 MUBUF_instruction
* mubuf
= static_cast<MUBUF_instruction
*>(instr
);
349 uint32_t encoding
= (0b111000 << 26);
350 encoding
|= opcode
<< 18;
351 encoding
|= (mubuf
->lds
? 1 : 0) << 16;
352 encoding
|= (mubuf
->glc
? 1 : 0) << 14;
353 encoding
|= (mubuf
->idxen
? 1 : 0) << 13;
354 assert(!mubuf
->addr64
|| ctx
.chip_class
<= GFX7
);
355 if (ctx
.chip_class
== GFX6
|| ctx
.chip_class
== GFX7
)
356 encoding
|= (mubuf
->addr64
? 1 : 0) << 15;
357 encoding
|= (mubuf
->offen
? 1 : 0) << 12;
358 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
359 assert(!mubuf
->dlc
); /* Device-level coherent is not supported on GFX9 and lower */
360 encoding
|= (mubuf
->slc
? 1 : 0) << 17;
361 } else if (ctx
.chip_class
>= GFX10
) {
362 encoding
|= (mubuf
->dlc
? 1 : 0) << 15;
364 encoding
|= 0x0FFF & mubuf
->offset
;
365 out
.push_back(encoding
);
// Second MUBUF dword: SOFFSET / TFE / SRSRC / VDATA / VADDR.
367 if (ctx
.chip_class
<= GFX7
|| ctx
.chip_class
>= GFX10
) {
368 encoding
|= (mubuf
->slc
? 1 : 0) << 22;
370 encoding
|= instr
->operands
[2].physReg() << 24;
371 encoding
|= (mubuf
->tfe
? 1 : 0) << 23;
372 encoding
|= (instr
->operands
[0].physReg() >> 2) << 16;
373 unsigned reg
= instr
->operands
.size() > 3 ? instr
->operands
[3].physReg() : instr
->definitions
[0].physReg();
374 encoding
|= (0xFF & reg
) << 8;
375 encoding
|= (0xFF & instr
->operands
[1].physReg());
376 out
.push_back(encoding
);
// MTBUF arm: typed buffer access with an explicit data format field.
379 case Format::MTBUF
: {
380 MTBUF_instruction
* mtbuf
= static_cast<MTBUF_instruction
*>(instr
);
382 uint32_t img_format
= ac_get_tbuffer_format(ctx
.chip_class
, mtbuf
->dfmt
, mtbuf
->nfmt
);
383 uint32_t encoding
= (0b111010 << 26);
384 assert(img_format
<= 0x7F);
385 assert(!mtbuf
->dlc
|| ctx
.chip_class
>= GFX10
);
386 encoding
|= (mtbuf
->dlc
? 1 : 0) << 15; /* DLC bit replaces one bit of the OPCODE on GFX10 */
387 encoding
|= (mtbuf
->glc
? 1 : 0) << 14;
388 encoding
|= (mtbuf
->idxen
? 1 : 0) << 13;
389 encoding
|= (mtbuf
->offen
? 1 : 0) << 12;
390 encoding
|= 0x0FFF & mtbuf
->offset
;
391 encoding
|= (img_format
<< 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
393 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
394 encoding
|= opcode
<< 15;
396 encoding
|= (opcode
& 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
399 out
.push_back(encoding
);
// Second MTBUF dword.
402 encoding
|= instr
->operands
[2].physReg() << 24;
403 encoding
|= (mtbuf
->tfe
? 1 : 0) << 23;
404 encoding
|= (mtbuf
->slc
? 1 : 0) << 22;
405 encoding
|= (instr
->operands
[0].physReg() >> 2) << 16;
406 unsigned reg
= instr
->operands
.size() > 3 ? instr
->operands
[3].physReg() : instr
->definitions
[0].physReg();
407 encoding
|= (0xFF & reg
) << 8;
408 encoding
|= (0xFF & instr
->operands
[1].physReg());
410 if (ctx
.chip_class
>= GFX10
) {
// NOTE(review): (opcode & 0x08) >> 4 is always 0 — to extract bit 3 the
// shift should be >> 3, so the opcode MSB is never emitted here.
411 encoding
|= (((opcode
& 0x08) >> 4) << 21); /* MSB of 4-bit OPCODE */
414 out
.push_back(encoding
);
// MIMG arm (image ops); flag layout differs between GFX9- and GFX10.
418 MIMG_instruction
* mimg
= static_cast<MIMG_instruction
*>(instr
);
419 uint32_t encoding
= (0b111100 << 26);
420 encoding
|= mimg
->slc
? 1 << 25 : 0;
421 encoding
|= opcode
<< 18;
422 encoding
|= mimg
->lwe
? 1 << 17 : 0;
423 encoding
|= mimg
->tfe
? 1 << 16 : 0;
424 encoding
|= mimg
->glc
? 1 << 13 : 0;
425 encoding
|= mimg
->unrm
? 1 << 12 : 0;
426 if (ctx
.chip_class
<= GFX9
) {
427 assert(!mimg
->dlc
); /* Device-level coherent is not supported on GFX9 and lower */
429 encoding
|= mimg
->a16
? 1 << 15 : 0;
430 encoding
|= mimg
->da
? 1 << 14 : 0;
432 encoding
|= mimg
->r128
? 1 << 15 : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
433 encoding
|= mimg
->dim
<< 3; /* GFX10: dimensionality instead of declare array */
434 encoding
|= mimg
->dlc
? 1 << 7 : 0;
436 encoding
|= (0xF & mimg
->dmask
) << 8;
437 out
.push_back(encoding
);
// Second MIMG dword: VADDR / VDATA / resource / sampler descriptors.
438 encoding
= (0xFF & instr
->operands
[2].physReg()); /* VADDR */
439 if (!instr
->definitions
.empty()) {
440 encoding
|= (0xFF & instr
->definitions
[0].physReg()) << 8; /* VDATA */
441 } else if (instr
->operands
[1].regClass().type() == RegType::vgpr
) {
442 encoding
|= (0xFF & instr
->operands
[1].physReg()) << 8; /* VDATA */
444 encoding
|= (0x1F & (instr
->operands
[0].physReg() >> 2)) << 16; /* T# (resource) */
445 if (instr
->operands
[1].regClass().type() == RegType::sgpr
)
446 encoding
|= (0x1F & (instr
->operands
[1].physReg() >> 2)) << 21; /* sampler */
448 assert(!mimg
->d16
|| ctx
.chip_class
>= GFX9
);
449 encoding
|= mimg
->d16
? 1 << 15 : 0;
450 if (ctx
.chip_class
>= GFX10
) {
451 encoding
|= mimg
->a16
? 1 << 14 : 0; /* GFX10: A16 still exists, but is in a different place */
454 out
.push_back(encoding
);
// FLAT/SCRATCH/GLOBAL arm (FLAT case label appears elided by extraction).
458 case Format::SCRATCH
:
459 case Format::GLOBAL
: {
460 FLAT_instruction
*flat
= static_cast<FLAT_instruction
*>(instr
);
461 uint32_t encoding
= (0b110111 << 26);
462 encoding
|= opcode
<< 18;
463 if (ctx
.chip_class
<= GFX9
) {
464 assert(flat
->offset
<= 0x1fff);
465 encoding
|= flat
->offset
& 0x1fff;
466 } else if (instr
->format
== Format::FLAT
) {
467 /* GFX10 has a 12-bit immediate OFFSET field,
468 * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
470 assert(flat
->offset
== 0);
472 assert(flat
->offset
<= 0xfff);
473 encoding
|= flat
->offset
& 0xfff;
// SEG field selection (the assignments for these branches were elided).
475 if (instr
->format
== Format::SCRATCH
)
477 else if (instr
->format
== Format::GLOBAL
)
479 encoding
|= flat
->lds
? 1 << 13 : 0;
480 encoding
|= flat
->glc
? 1 << 16 : 0;
481 encoding
|= flat
->slc
? 1 << 17 : 0;
482 if (ctx
.chip_class
>= GFX10
) {
484 encoding
|= flat
->dlc
? 1 << 12 : 0;
488 out
.push_back(encoding
);
// Second FLAT dword: ADDR / VDST / DATA / SADDR.
489 encoding
= (0xFF & instr
->operands
[0].physReg());
490 if (!instr
->definitions
.empty())
491 encoding
|= (0xFF & instr
->definitions
[0].physReg()) << 24;
492 if (instr
->operands
.size() >= 3)
493 encoding
|= (0xFF & instr
->operands
[2].physReg()) << 8;
494 if (!instr
->operands
[1].isUndefined()) {
495 assert(ctx
.chip_class
>= GFX10
|| instr
->operands
[1].physReg() != 0x7F);
496 assert(instr
->format
!= Format::FLAT
);
497 encoding
|= instr
->operands
[1].physReg() << 16;
498 } else if (instr
->format
!= Format::FLAT
|| ctx
.chip_class
>= GFX10
) { /* SADDR is actually used with FLAT on GFX10 */
// Disabled SADDR sentinel: 0x7F on GFX9-, SGPR_NULL on GFX10.
499 if (ctx
.chip_class
<= GFX9
)
500 encoding
|= 0x7F << 16;
502 encoding
|= sgpr_null
<< 16;
504 encoding
|= flat
->nv
? 1 << 23 : 0;
505 out
.push_back(encoding
);
// EXP arm: export to position/param/MRT targets; two dwords.
509 Export_instruction
* exp
= static_cast<Export_instruction
*>(instr
);
511 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
) {
512 encoding
= (0b110001 << 26);
514 encoding
= (0b111110 << 26);
517 encoding
|= exp
->valid_mask
? 0b1 << 12 : 0;
518 encoding
|= exp
->done
? 0b1 << 11 : 0;
519 encoding
|= exp
->compressed
? 0b1 << 10 : 0;
520 encoding
|= exp
->dest
<< 4;
521 encoding
|= exp
->enabled_mask
;
522 out
.push_back(encoding
);
523 encoding
= 0xFF & exp
->operands
[0].physReg();
524 encoding
|= (0xFF & exp
->operands
[1].physReg()) << 8;
525 encoding
|= (0xFF & exp
->operands
[2].physReg()) << 16;
526 encoding
|= (0xFF & exp
->operands
[3].physReg()) << 24;
527 out
.push_back(encoding
);
531 case Format::PSEUDO_BARRIER
:
532 unreachable("Pseudo instructions should be lowered before assembly.");
// Default arm: composite formats — VOP3A (with a VOP1/VOP2/VOPC/VINTRP base
// opcode remapped into the VOP3 opcode space), VOP3P, DPP, SDWA.
534 if ((uint16_t) instr
->format
& (uint16_t) Format::VOP3A
) {
535 VOP3A_instruction
* vop3
= static_cast<VOP3A_instruction
*>(instr
);
// Remap the base-format opcode into the chip's VOP3 opcode range.
537 if ((uint16_t) instr
->format
& (uint16_t) Format::VOP2
) {
538 opcode
= opcode
+ 0x100;
539 } else if ((uint16_t) instr
->format
& (uint16_t) Format::VOP1
) {
540 if (ctx
.chip_class
== GFX8
|| ctx
.chip_class
== GFX9
)
541 opcode
= opcode
+ 0x140;
543 opcode
= opcode
+ 0x180;
544 } else if ((uint16_t) instr
->format
& (uint16_t) Format::VOPC
) {
545 opcode
= opcode
+ 0x0;
546 } else if ((uint16_t) instr
->format
& (uint16_t) Format::VINTRP
) {
547 opcode
= opcode
+ 0x270;
551 if (ctx
.chip_class
<= GFX9
) {
552 encoding
= (0b110100 << 26);
553 } else if (ctx
.chip_class
== GFX10
) {
554 encoding
= (0b110101 << 26);
556 unreachable("Unknown chip_class.");
559 if (ctx
.chip_class
<= GFX7
) {
560 encoding
|= opcode
<< 17;
561 encoding
|= (vop3
->clamp
? 1 : 0) << 11;
563 encoding
|= opcode
<< 16;
564 encoding
|= (vop3
->clamp
? 1 : 0) << 15;
566 encoding
|= vop3
->opsel
<< 11;
567 for (unsigned i
= 0; i
< 3; i
++)
568 encoding
|= vop3
->abs
[i
] << (8+i
);
569 if (instr
->definitions
.size() == 2)
570 encoding
|= instr
->definitions
[1].physReg() << 8;
571 encoding
|= (0xFF & instr
->definitions
[0].physReg());
572 out
.push_back(encoding
);
// Second VOP3 dword: source operands, output modifier, negate bits.
574 if (instr
->opcode
== aco_opcode::v_interp_mov_f32
) {
575 encoding
= 0x3 & instr
->operands
[0].constantValue();
577 for (unsigned i
= 0; i
< instr
->operands
.size(); i
++)
578 encoding
|= instr
->operands
[i
].physReg() << (i
* 9);
580 encoding
|= vop3
->omod
<< 27;
581 for (unsigned i
= 0; i
< 3; i
++)
582 encoding
|= vop3
->neg
[i
] << (29+i
);
583 out
.push_back(encoding
);
585 } else if (instr
->format
== Format::VOP3P
) {
586 VOP3P_instruction
* vop3
= static_cast<VOP3P_instruction
*>(instr
);
589 if (ctx
.chip_class
== GFX9
) {
590 encoding
= (0b110100111 << 23);
591 } else if (ctx
.chip_class
== GFX10
) {
592 encoding
= (0b110011 << 26);
594 unreachable("Unknown chip_class.");
597 encoding
|= opcode
<< 16;
598 encoding
|= (vop3
->clamp
? 1 : 0) << 15;
599 encoding
|= vop3
->opsel_lo
<< 11;
// NOTE(review): precedence bug — '<<' binds tighter than '?:', so this is
// '(... & 0x4) ? 1 : (0 << 14)' and ORs into bit 0, never setting bit 14;
// intended: ((vop3->opsel_hi & 0x4) ? 1 : 0) << 14.
600 encoding
|= (vop3
->opsel_hi
& 0x4) ? 1 : 0 << 14;
601 for (unsigned i
= 0; i
< 3; i
++)
602 encoding
|= vop3
->neg_hi
[i
] << (8+i
);
603 encoding
|= (0xFF & instr
->definitions
[0].physReg());
604 out
.push_back(encoding
);
// Second VOP3P dword.
606 for (unsigned i
= 0; i
< instr
->operands
.size(); i
++)
607 encoding
|= instr
->operands
[i
].physReg() << (i
* 9);
// NOTE(review): precedence bug — '<<' binds tighter than '&', so this is
// 'opsel_hi & (0x3 << 27)' instead of '(opsel_hi & 0x3) << 27'.
608 encoding
|= vop3
->opsel_hi
& 0x3 << 27;
609 for (unsigned i
= 0; i
< 3; i
++)
610 encoding
|= vop3
->neg_lo
[i
] << (29+i
);
611 out
.push_back(encoding
);
// DPP: emit the base instruction with src0 replaced by the DPP sentinel
// (reg 250), then append the DPP control dword.
613 } else if (instr
->isDPP()){
614 assert(ctx
.chip_class
>= GFX8
);
615 /* first emit the instruction without the DPP operand */
616 Operand dpp_op
= instr
->operands
[0];
617 instr
->operands
[0] = Operand(PhysReg
{250}, v1
);
618 instr
->format
= (Format
) ((uint16_t) instr
->format
& ~(uint16_t)Format::DPP
);
619 emit_instruction(ctx
, out
, instr
);
620 DPP_instruction
* dpp
= static_cast<DPP_instruction
*>(instr
);
621 uint32_t encoding
= (0xF & dpp
->row_mask
) << 28;
622 encoding
|= (0xF & dpp
->bank_mask
) << 24;
623 encoding
|= dpp
->abs
[1] << 23;
624 encoding
|= dpp
->neg
[1] << 22;
625 encoding
|= dpp
->abs
[0] << 21;
626 encoding
|= dpp
->neg
[0] << 20;
627 encoding
|= dpp
->bound_ctrl
<< 19;
628 encoding
|= dpp
->dpp_ctrl
<< 8;
629 encoding
|= (0xFF) & dpp_op
.physReg();
630 out
.push_back(encoding
);
// SDWA: same pattern as DPP, with sentinel reg 249 and an SDWA dword.
632 } else if (instr
->isSDWA()) {
633 /* first emit the instruction without the SDWA operand */
634 Operand sdwa_op
= instr
->operands
[0];
635 instr
->operands
[0] = Operand(PhysReg
{249}, v1
);
636 instr
->format
= (Format
) ((uint16_t) instr
->format
& ~(uint16_t)Format::SDWA
);
637 emit_instruction(ctx
, out
, instr
);
639 SDWA_instruction
* sdwa
= static_cast<SDWA_instruction
*>(instr
);
640 uint32_t encoding
= 0;
// VOPC SDWA encodes the SDST instead of destination select/unused bits.
642 if ((uint16_t)instr
->format
& (uint16_t)Format::VOPC
) {
643 if (instr
->definitions
[0].physReg() != vcc
) {
644 encoding
|= instr
->definitions
[0].physReg() << 8;
647 encoding
|= (sdwa
->clamp
? 1 : 0) << 13;
649 encoding
|= get_sdwa_sel(sdwa
->dst_sel
, instr
->definitions
[0].physReg()) << 8;
650 uint32_t dst_u
= sdwa
->dst_sel
& sdwa_sext
? 1 : 0;
651 if (sdwa
->dst_preserve
|| (sdwa
->dst_sel
& sdwa_isra
))
653 encoding
|= dst_u
<< 11;
654 encoding
|= (sdwa
->clamp
? 1 : 0) << 13;
655 encoding
|= sdwa
->omod
<< 14;
// Source 0 select/modifier bits.
658 encoding
|= get_sdwa_sel(sdwa
->sel
[0], sdwa_op
.physReg()) << 16;
659 encoding
|= sdwa
->sel
[0] & sdwa_sext
? 1 << 19 : 0;
660 encoding
|= sdwa
->abs
[0] << 21;
661 encoding
|= sdwa
->neg
[0] << 20;
// Source 1 select/modifier bits, when present.
663 if (instr
->operands
.size() >= 2) {
664 encoding
|= get_sdwa_sel(sdwa
->sel
[1], instr
->operands
[1].physReg()) << 24;
665 encoding
|= sdwa
->sel
[1] & sdwa_sext
? 1 << 27 : 0;
666 encoding
|= sdwa
->abs
[1] << 29;
667 encoding
|= sdwa
->neg
[1] << 28;
670 encoding
|= 0xFF & sdwa_op
.physReg();
671 encoding
|= (sdwa_op
.physReg() < 256) << 23;
672 if (instr
->operands
.size() >= 2)
673 encoding
|= (instr
->operands
[1].physReg() < 256) << 31;
674 out
.push_back(encoding
);
676 unreachable("unimplemented instruction format");
681 /* append literal dword */
682 for (const Operand
& op
: instr
->operands
) {
683 if (op
.isLiteral()) {
684 out
.push_back(op
.constantValue());
// Encode every instruction of one basic block into 'out', dumping each
// instruction and its encoded dwords to stderr (debug output).
690 void emit_block(asm_context
& ctx
, std::vector
<uint32_t>& out
, Block
& block
)
692 for (aco_ptr
<Instruction
>& instr
: block
.instructions
) {
// Remember where this instruction's encoding starts so it can be dumped.
694 int start_idx
= out
.size();
695 std::cerr
<< "Encoding:\t" << std::endl
;
696 aco_print_instr(&*instr
, stderr
);
697 std::cerr
<< std::endl
;
699 emit_instruction(ctx
, out
, instr
.get());
// NOTE(review): signed/unsigned comparison — 'i' is int, out.size() size_t.
701 for (int i
= start_idx
; i
< out
.size(); i
++)
702 std::cerr
<< "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex
<< out
[i
] << std::endl
;
// Walk each export-ending block backwards and mark the final position export
// as done/valid_mask (VS/NGG-GS hardware stages); if no export is found the
// shader would hang the GPU, so print the program and abort.
// NOTE(review): several original lines (loop bodies, the abort call, braces)
// were elided by extraction; comments only, code left untouched.
707 void fix_exports(asm_context
& ctx
, std::vector
<uint32_t>& out
, Program
* program
)
709 bool exported
= false;
710 for (Block
& block
: program
->blocks
) {
// Only blocks flagged as ending the export sequence are inspected.
711 if (!(block
.kind
& block_kind_export_end
))
// Scan the block's instructions from the end to find the last export.
713 std::vector
<aco_ptr
<Instruction
>>::reverse_iterator it
= block
.instructions
.rbegin();
714 while ( it
!= block
.instructions
.rend())
716 if ((*it
)->format
== Format::EXP
) {
717 Export_instruction
* exp
= static_cast<Export_instruction
*>((*it
).get());
// For VS/NGG-GS stages, only position exports terminate the sequence.
718 if (program
->stage
& (hw_vs
| hw_ngg_gs
)) {
719 if (exp
->dest
>= V_008DFC_SQ_EXP_POS
&& exp
->dest
<= (V_008DFC_SQ_EXP_POS
+ 3)) {
726 exp
->valid_mask
= true;
// Stop searching once exec is overwritten before any export.
730 } else if ((*it
)->definitions
.size() && (*it
)->definitions
[0].physReg() == exec
)
737 /* Abort in order to avoid a GPU hang. */
738 fprintf(stderr
, "Missing export in %s shader:\n", (program
->stage
& hw_vs
) ? "vertex" : "fragment");
739 aco_print_program(program
, stderr
);
// GFX10 workaround: a branch whose encoded offset equals 0x3f triggers a hw
// bug, so an s_nop is inserted after such a branch and all affected block
// offsets, later branches, and constant-address positions are shifted.
// Repeats until no branch has the buggy offset.
// NOTE(review): loop-body increment statements and some braces were elided
// by extraction; comments only, code left untouched.
744 static void fix_branches_gfx10(asm_context
& ctx
, std::vector
<uint32_t>& out
)
746 /* Branches with an offset of 0x3f are buggy on GFX10, we workaround by inserting NOPs if needed. */
747 bool gfx10_3f_bug
= false;
// Find the first branch whose target offset encodes to exactly 0x3f.
750 auto buggy_branch_it
= std::find_if(ctx
.branches
.begin(), ctx
.branches
.end(), [&ctx
](const auto &branch
) -> bool {
751 return ((int)ctx
.program
->blocks
[branch
.second
->block
].offset
- branch
.first
- 1) == 0x3f;
754 gfx10_3f_bug
= buggy_branch_it
!= ctx
.branches
.end();
757 /* Insert an s_nop after the branch */
758 constexpr uint32_t s_nop_0
= 0xbf800000u
;
759 int s_nop_pos
= buggy_branch_it
->first
+ 1;
760 auto out_pos
= std::next(out
.begin(), s_nop_pos
);
761 out
.insert(out_pos
, s_nop_0
);
763 /* Update the offset of each affected block */
764 for (Block
& block
: ctx
.program
->blocks
) {
765 if (block
.offset
> (unsigned)buggy_branch_it
->first
)
769 /* Update the branches following the current one */
770 for (auto branch_it
= std::next(buggy_branch_it
); branch_it
!= ctx
.branches
.end(); ++branch_it
)
773 /* Find first constant address after the inserted instruction */
774 auto caddr_it
= std::find_if(ctx
.constaddrs
.begin(), ctx
.constaddrs
.end(), [s_nop_pos
](const int &caddr_pos
) -> bool {
775 return caddr_pos
>= s_nop_pos
;
778 /* Update the locations of constant addresses */
779 for (; caddr_it
!= ctx
.constaddrs
.end(); ++caddr_it
)
783 } while (gfx10_3f_bug
);
// Patch the 16-bit immediate of every recorded branch with the dword offset
// from the instruction after the branch to the target block's final offset.
// On GFX10 the 0x3f-offset hardware bug workaround runs first.
786 void fix_branches(asm_context
& ctx
, std::vector
<uint32_t>& out
)
788 if (ctx
.chip_class
>= GFX10
)
789 fix_branches_gfx10(ctx
, out
);
791 for (std::pair
<int, SOPP_instruction
*> &branch
: ctx
.branches
) {
// Offset is relative to the dword following the branch instruction.
792 int offset
= (int)ctx
.program
->blocks
[branch
.second
->block
].offset
- branch
.first
- 1;
793 out
[branch
.first
] |= (uint16_t) offset
;
// Rebase every p_constaddr literal: constant data is appended after the code,
// so each recorded literal gets the final code size (in bytes) added.
797 void fix_constaddrs(asm_context
& ctx
, std::vector
<uint32_t>& out
)
799 for (unsigned addr
: ctx
.constaddrs
)
800 out
[addr
] += out
.size() * 4u;
// Top-level entry: assemble a whole Program into 'code' — fix exports, emit
// each block (recording its dword offset), patch branches, pad with
// s_code_end on GFX10, rebase constant addresses, then append constant data.
// NOTE(review): this definition runs past the end of the visible extract
// (its return statement and closing brace are not shown); comments only.
803 unsigned emit_program(Program
* program
,
804 std::vector
<uint32_t>& code
)
806 asm_context
ctx(program
);
// Only hardware stages that export (VS/FS/NGG-GS) need export fixing.
808 if (program
->stage
& (hw_vs
| hw_fs
| hw_ngg_gs
))
809 fix_exports(ctx
, code
, program
);
811 for (Block
& block
: program
->blocks
) {
// Record each block's starting dword offset for branch patching below.
812 block
.offset
= code
.size();
813 emit_block(ctx
, code
, block
);
816 fix_branches(ctx
, code
);
// Size of the executable portion in bytes, before padding/constant data.
818 unsigned exec_size
= code
.size() * sizeof(uint32_t);
820 if (program
->chip_class
>= GFX10
) {
821 /* Pad output with s_code_end so instruction prefetching doesn't cause
823 unsigned final_size
= align(code
.size() + 3 * 16, 16);
824 while (code
.size() < final_size
)
825 code
.push_back(0xbf9f0000u
);
828 fix_constaddrs(ctx
, code
);
// Pad constant data to a dword multiple before appending it to the code.
830 while (program
->constant_data
.size() % 4u)
831 program
->constant_data
.push_back(0);
832 /* Copy constant data */
833 code
.insert(code
.end(), (uint32_t*)program
->constant_data
.data(),
834 (uint32_t*)(program
->constant_data
.data() + program
->constant_data
.size()));