aco: add ACO_DEBUG=force-waitcnt to emit wait-states
[mesa.git] / src / amd / compiler / aco_assembler.cpp
#include <vector>
#include <algorithm>

#include "aco_ir.h"
#include "common/sid.h"
#include "ac_shader_util.h"
#include "util/u_math.h"

namespace aco {

struct asm_context {
   Program *program;
   enum chip_class chip_class;
   std::vector<std::pair<int, SOPP_instruction*>> branches;
   std::vector<unsigned> constaddrs;
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring to blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program) : program(program), chip_class(program->chip_class) {
      if (chip_class <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (chip_class <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (chip_class >= GFX10)
         opcode = &instr_info.opcode_gfx10[0];
   }

   int subvector_begin_pos = -1;
};

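/* Translate an SDWA source/destination selector into its hardware encoding.
 * When the sdwa_isra flag is set, the selection is derived from the
 * register's byte offset: byte-sized accesses pick one of the four byte
 * lanes, word-sized accesses pick the low or high word (hence the >> 1).
 */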
static uint32_t get_sdwa_sel(unsigned sel, PhysReg reg)
{
   if (sel & sdwa_isra) {
      unsigned size = sdwa_rasize & sel;
      if (size == 1)
         return reg.byte();
      else /* size == 2 */
         return sdwa_isword | (reg.byte() >> 1);
   }
   return sel & sdwa_asuint;
}

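/* Encode a single instruction into raw dwords. Most formats produce one or
 * two dwords; positions that need patching later (branch immediates,
 * p_constaddr literals) are recorded in the asm_context so fix_branches()
 * and fix_constaddrs() can rewrite them once all offsets are known, and any
 * literal operand is appended as an extra dword at the end.
 */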
void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   /* lower remaining pseudo-instructions */
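   /* p_constaddr computes the address of the constant data that emit_program()
    * appends after the shader code: it expands to s_getpc_b64 followed by a
    * 64-bit add of a literal offset, and the literal's position is recorded in
    * ctx.constaddrs so fix_constaddrs() can patch it once the final code size
    * is known.
    */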
   if (instr->opcode == aco_opcode::p_constaddr) {
      unsigned dest = instr->definitions[0].physReg();
      unsigned offset = instr->operands[0].constantValue();

      /* s_getpc_b64 dest[0:1] */
      uint32_t encoding = (0b101111101 << 23);
      uint32_t opcode = ctx.opcode[(int)aco_opcode::s_getpc_b64];
      if (opcode >= 55 && ctx.chip_class <= GFX9) {
         assert(ctx.chip_class == GFX9 && opcode < 60);
         opcode = opcode - 4;
      }
      encoding |= dest << 16;
      encoding |= opcode << 8;
      out.push_back(encoding);

      /* s_add_u32 dest[0], dest[0], ... */
      encoding = (0b10 << 30);
      encoding |= ctx.opcode[(int)aco_opcode::s_add_u32] << 23;
      encoding |= dest << 16;
      encoding |= dest;
      encoding |= 255 << 8;
      out.push_back(encoding);
      ctx.constaddrs.push_back(out.size());
      out.push_back(offset);

      /* s_addc_u32 dest[1], dest[1], 0 */
      encoding = (0b10 << 30);
      encoding |= ctx.opcode[(int)aco_opcode::s_addc_u32] << 23;
      encoding |= (dest + 1) << 16;
      encoding |= dest + 1;
      encoding |= 128 << 8;
      out.push_back(encoding);
      return;
   }

   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   if (opcode == (uint32_t)-1) {
      char *out;
      size_t outsize;
      FILE *memf = open_memstream(&out, &outsize);

      fprintf(memf, "Unsupported opcode: ");
      aco_print_instr(instr, memf);
      fclose(memf);

      aco_err(ctx.program, out);
      free(out);

      abort();
   }

   switch (instr->format) {
   case Format::SOP2: {
      uint32_t encoding = (0b10 << 30);
      encoding |= opcode << 23;
      encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
      encoding |= instr->operands.size() >= 2 ? instr->operands[1].physReg() << 8 : 0;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPK: {
      SOPK_instruction *sopk = static_cast<SOPK_instruction*>(instr);

      if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
         assert(ctx.chip_class >= GFX10);
         assert(ctx.subvector_begin_pos == -1);
         ctx.subvector_begin_pos = out.size();
      } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
         assert(ctx.chip_class >= GFX10);
         assert(ctx.subvector_begin_pos != -1);
         /* Adjust s_subvector_loop_begin instruction to the address after the end */
         out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
         /* Adjust s_subvector_loop_end instruction to the address after the beginning */
         sopk->imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
         ctx.subvector_begin_pos = -1;
      }

      uint32_t encoding = (0b1011 << 28);
      encoding |= opcode << 23;
      encoding |=
         !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc) ?
         instr->definitions[0].physReg() << 16 :
         !instr->operands.empty() && instr->operands[0].physReg() <= 127 ?
         instr->operands[0].physReg() << 16 : 0;
      encoding |= sopk->imm;
      out.push_back(encoding);
      break;
   }
   case Format::SOP1: {
      uint32_t encoding = (0b101111101 << 23);
      if (opcode >= 55 && ctx.chip_class <= GFX9) {
         assert(ctx.chip_class == GFX9 && opcode < 60);
         opcode = opcode - 4;
      }
      encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
      encoding |= opcode << 8;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPC: {
      uint32_t encoding = (0b101111110 << 23);
      encoding |= opcode << 16;
      encoding |= instr->operands.size() == 2 ? instr->operands[1].physReg() << 8 : 0;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPP: {
      SOPP_instruction* sopp = static_cast<SOPP_instruction*>(instr);
      uint32_t encoding = (0b101111111 << 23);
      encoding |= opcode << 16;
      encoding |= (uint16_t) sopp->imm;
      if (sopp->block != -1)
         ctx.branches.emplace_back(out.size(), sopp);
      out.push_back(encoding);
      break;
   }
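   /* Scalar memory. GFX6/GFX7 use the 32-bit SMRD encoding (with an optional
    * literal dword for large constant offsets); GFX8+ use the 64-bit SMEM
    * encoding, where GFX10 moves SGPR offsets into SOFFSET and keeps the
    * OFFSET field for constants only.
    */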
   case Format::SMEM: {
      SMEM_instruction* smem = static_cast<SMEM_instruction*>(instr);
      bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
      bool is_load = !instr->definitions.empty();
      uint32_t encoding = 0;

      if (ctx.chip_class <= GFX7) {
         encoding = (0b11000 << 27);
         encoding |= opcode << 22;
         encoding |= instr->definitions.size() ? instr->definitions[0].physReg() << 15 : 0;
         encoding |= instr->operands.size() ? (instr->operands[0].physReg() >> 1) << 9 : 0;
         if (instr->operands.size() >= 2) {
            if (!instr->operands[1].isConstant() || instr->operands[1].constantValue() >= 1024) {
               encoding |= instr->operands[1].physReg().reg();
            } else {
               encoding |= instr->operands[1].constantValue() >> 2;
               encoding |= 1 << 8;
            }
         }
         out.push_back(encoding);
         /* SMRD instructions can take a literal on GFX6 & GFX7 */
         if (instr->operands.size() >= 2 && instr->operands[1].isConstant() && instr->operands[1].constantValue() >= 1024)
            out.push_back(instr->operands[1].constantValue() >> 2);
         return;
      }

      if (ctx.chip_class <= GFX9) {
         encoding = (0b110000 << 26);
         assert(!smem->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         encoding |= smem->nv ? 1 << 15 : 0;
      } else {
         encoding = (0b111101 << 26);
         assert(!smem->nv); /* Non-volatile is not supported on GFX10 */
         encoding |= smem->dlc ? 1 << 14 : 0;
      }

      encoding |= opcode << 18;
      encoding |= smem->glc ? 1 << 16 : 0;

      if (ctx.chip_class <= GFX9) {
         if (instr->operands.size() >= 2)
            encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
      }
      if (ctx.chip_class == GFX9) {
         encoding |= soe ? 1 << 14 : 0;
      }

      if (is_load || instr->operands.size() >= 3) { /* SDATA */
         encoding |= (is_load ? instr->definitions[0].physReg() : instr->operands[2].physReg()) << 6;
      }
      if (instr->operands.size() >= 1) { /* SBASE */
         encoding |= instr->operands[0].physReg() >> 1;
      }

      out.push_back(encoding);
      encoding = 0;

      int32_t offset = 0;
      uint32_t soffset = ctx.chip_class >= GFX10
                         ? sgpr_null /* On GFX10 this is disabled by specifying SGPR_NULL */
                         : 0;        /* On GFX9, it is disabled by the SOE bit (and it's not present on GFX8 and below) */
      if (instr->operands.size() >= 2) {
         const Operand &op_off1 = instr->operands[1];
         if (ctx.chip_class <= GFX9) {
            offset = op_off1.isConstant() ? op_off1.constantValue() : op_off1.physReg();
         } else {
            /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an SGPR */
            if (op_off1.isConstant()) {
               offset = op_off1.constantValue();
            } else {
               soffset = op_off1.physReg();
               assert(!soe); /* There is no place to put the other SGPR offset, if any */
            }
         }

         if (soe) {
            const Operand &op_off2 = instr->operands.back();
            assert(ctx.chip_class >= GFX9); /* GFX8 and below don't support specifying a constant and an SGPR at the same time */
            assert(!op_off2.isConstant());
            soffset = op_off2.physReg();
         }
      }
      encoding |= offset;
      encoding |= soffset << 25;

      out.push_back(encoding);
      return;
   }
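   /* The basic VALU encodings (VOP2/VOP1/VOPC) fit in a single dword: src0
    * uses the wide 9-bit source encoding while the VGPR-only fields are
    * masked to 8 bits (the 0xFF masks below). VOP3, DPP and SDWA variants are
    * handled in the default case at the end of this switch.
    */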
   case Format::VOP2: {
      uint32_t encoding = 0;
      encoding |= opcode << 25;
      encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
      encoding |= (0xFF & instr->operands[1].physReg()) << 9;
      encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VOP1: {
      uint32_t encoding = (0b0111111 << 25);
      if (!instr->definitions.empty())
         encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
      encoding |= opcode << 9;
      if (!instr->operands.empty())
         encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VOPC: {
      uint32_t encoding = (0b0111110 << 25);
      encoding |= opcode << 17;
      encoding |= (0xFF & instr->operands[1].physReg()) << 9;
      encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VINTRP: {
      Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
      uint32_t encoding = 0;

      if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
          instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_f16) {
         if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
            encoding = (0b110100 << 26);
         } else if (ctx.chip_class >= GFX10) {
            encoding = (0b110101 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         encoding |= opcode << 16;
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);

         encoding = 0;
         encoding |= interp->attribute;
         encoding |= interp->component << 6;
         encoding |= instr->operands[0].physReg() << 9;
         if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
             instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
             instr->opcode == aco_opcode::v_interp_p1lv_f16) {
            encoding |= instr->operands[2].physReg() << 18;
         }
         out.push_back(encoding);
      } else {
         if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
            encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
         } else {
            encoding = (0b110010 << 26);
         }

         assert(encoding);
         encoding |= (0xFF & instr->definitions[0].physReg()) << 18;
         encoding |= opcode << 16;
         encoding |= interp->attribute << 10;
         encoding |= interp->component << 8;
         if (instr->opcode == aco_opcode::v_interp_mov_f32)
            encoding |= (0x3 & instr->operands[0].constantValue());
         else
            encoding |= (0xFF & instr->operands[0].physReg());
         out.push_back(encoding);
      }
      break;
   }
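   /* LDS/GDS operations. The second dword holds ADDR, DATA0, DATA1 and VDST;
    * operands that merely carry m0 are not real data sources and are encoded
    * as 0.
    */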
   case Format::DS: {
      DS_instruction* ds = static_cast<DS_instruction*>(instr);
      uint32_t encoding = (0b110110 << 26);
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding |= opcode << 17;
         encoding |= (ds->gds ? 1 : 0) << 16;
      } else {
         encoding |= opcode << 18;
         encoding |= (ds->gds ? 1 : 0) << 17;
      }
      encoding |= ((0xFF & ds->offset1) << 8);
      encoding |= (0xFFFF & ds->offset0);
      out.push_back(encoding);
      encoding = 0;
      unsigned reg = !instr->definitions.empty() ? instr->definitions[0].physReg() : 0;
      encoding |= (0xFF & reg) << 24;
      reg = instr->operands.size() >= 3 && !(instr->operands[2].physReg() == m0) ? instr->operands[2].physReg() : 0;
      encoding |= (0xFF & reg) << 16;
      reg = instr->operands.size() >= 2 && !(instr->operands[1].physReg() == m0) ? instr->operands[1].physReg() : 0;
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[0].physReg());
      out.push_back(encoding);
      break;
   }
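   /* Buffer accesses (untyped MUBUF, typed MTBUF). Field placement varies by
    * generation: ADDR64 only exists on GFX6/GFX7, MUBUF's SLC bit sits in the
    * first dword on GFX8/GFX9 but in the second dword otherwise, and DLC is
    * GFX10+ only.
    */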
   case Format::MUBUF: {
      MUBUF_instruction* mubuf = static_cast<MUBUF_instruction*>(instr);
      uint32_t encoding = (0b111000 << 26);
      encoding |= opcode << 18;
      encoding |= (mubuf->lds ? 1 : 0) << 16;
      encoding |= (mubuf->glc ? 1 : 0) << 14;
      encoding |= (mubuf->idxen ? 1 : 0) << 13;
      assert(!mubuf->addr64 || ctx.chip_class <= GFX7);
      if (ctx.chip_class == GFX6 || ctx.chip_class == GFX7)
         encoding |= (mubuf->addr64 ? 1 : 0) << 15;
      encoding |= (mubuf->offen ? 1 : 0) << 12;
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         assert(!mubuf->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         encoding |= (mubuf->slc ? 1 : 0) << 17;
      } else if (ctx.chip_class >= GFX10) {
         encoding |= (mubuf->dlc ? 1 : 0) << 15;
      }
      encoding |= 0x0FFF & mubuf->offset;
      out.push_back(encoding);
      encoding = 0;
      if (ctx.chip_class <= GFX7 || ctx.chip_class >= GFX10) {
         encoding |= (mubuf->slc ? 1 : 0) << 22;
      }
      encoding |= instr->operands[2].physReg() << 24;
      encoding |= (mubuf->tfe ? 1 : 0) << 23;
      encoding |= (instr->operands[0].physReg() >> 2) << 16;
      unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[1].physReg());
      out.push_back(encoding);
      break;
   }
   case Format::MTBUF: {
      MTBUF_instruction* mtbuf = static_cast<MTBUF_instruction*>(instr);

      uint32_t img_format = ac_get_tbuffer_format(ctx.chip_class, mtbuf->dfmt, mtbuf->nfmt);
      uint32_t encoding = (0b111010 << 26);
      assert(img_format <= 0x7F);
      assert(!mtbuf->dlc || ctx.chip_class >= GFX10);
      encoding |= (mtbuf->dlc ? 1 : 0) << 15; /* DLC bit replaces one bit of the OPCODE on GFX10 */
      encoding |= (mtbuf->glc ? 1 : 0) << 14;
      encoding |= (mtbuf->idxen ? 1 : 0) << 13;
      encoding |= (mtbuf->offen ? 1 : 0) << 12;
      encoding |= 0x0FFF & mtbuf->offset;
      encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */

      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding |= opcode << 15;
      } else {
         encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
      }

      out.push_back(encoding);
      encoding = 0;

      encoding |= instr->operands[2].physReg() << 24;
      encoding |= (mtbuf->tfe ? 1 : 0) << 23;
      encoding |= (mtbuf->slc ? 1 : 0) << 22;
      encoding |= (instr->operands[0].physReg() >> 2) << 16;
      unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[1].physReg());

      if (ctx.chip_class >= GFX10) {
         encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
      }

      out.push_back(encoding);
      break;
   }
   case Format::MIMG: {
      MIMG_instruction* mimg = static_cast<MIMG_instruction*>(instr);
      uint32_t encoding = (0b111100 << 26);
      encoding |= mimg->slc ? 1 << 25 : 0;
      encoding |= opcode << 18;
      encoding |= mimg->lwe ? 1 << 17 : 0;
      encoding |= mimg->tfe ? 1 << 16 : 0;
      encoding |= mimg->glc ? 1 << 13 : 0;
      encoding |= mimg->unrm ? 1 << 12 : 0;
      if (ctx.chip_class <= GFX9) {
         assert(!mimg->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         assert(!mimg->r128);
         encoding |= mimg->a16 ? 1 << 15 : 0;
         encoding |= mimg->da ? 1 << 14 : 0;
      } else {
         encoding |= mimg->r128 ? 1 << 15 : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
         encoding |= mimg->dim << 3; /* GFX10: dimensionality instead of declare array */
         encoding |= mimg->dlc ? 1 << 7 : 0;
      }
      encoding |= (0xF & mimg->dmask) << 8;
      out.push_back(encoding);
      encoding = (0xFF & instr->operands[2].physReg()); /* VADDR */
      if (!instr->definitions.empty()) {
         encoding |= (0xFF & instr->definitions[0].physReg()) << 8; /* VDATA */
      } else if (instr->operands[1].regClass().type() == RegType::vgpr) {
         encoding |= (0xFF & instr->operands[1].physReg()) << 8; /* VDATA */
      }
      encoding |= (0x1F & (instr->operands[0].physReg() >> 2)) << 16; /* T# (resource) */
      if (instr->operands[1].regClass().type() == RegType::sgpr)
         encoding |= (0x1F & (instr->operands[1].physReg() >> 2)) << 21; /* sampler */

      assert(!mimg->d16 || ctx.chip_class >= GFX9);
      encoding |= mimg->d16 ? 1 << 15 : 0;
      if (ctx.chip_class >= GFX10) {
         encoding |= mimg->a16 ? 1 << 14 : 0; /* GFX10: A16 still exists, but is in a different place */
      }

      out.push_back(encoding);
      break;
   }
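   /* FLAT, SCRATCH and GLOBAL share one encoding; bits 14-15 select the
    * address space (0 = flat, 1 = scratch, 2 = global). When no SADDR operand
    * is given, the field is set to the "none" value: 0x7F on GFX9 and older,
    * SGPR_NULL on GFX10.
    */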
   case Format::FLAT:
   case Format::SCRATCH:
   case Format::GLOBAL: {
      FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
      uint32_t encoding = (0b110111 << 26);
      encoding |= opcode << 18;
      if (ctx.chip_class <= GFX9) {
         assert(flat->offset <= 0x1fff);
         encoding |= flat->offset & 0x1fff;
      } else if (instr->format == Format::FLAT) {
         /* GFX10 has a 12-bit immediate OFFSET field,
          * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
          */
         assert(flat->offset == 0);
      } else {
         assert(flat->offset <= 0xfff);
         encoding |= flat->offset & 0xfff;
      }
      if (instr->format == Format::SCRATCH)
         encoding |= 1 << 14;
      else if (instr->format == Format::GLOBAL)
         encoding |= 2 << 14;
      encoding |= flat->lds ? 1 << 13 : 0;
      encoding |= flat->glc ? 1 << 16 : 0;
      encoding |= flat->slc ? 1 << 17 : 0;
      if (ctx.chip_class >= GFX10) {
         assert(!flat->nv);
         encoding |= flat->dlc ? 1 << 12 : 0;
      } else {
         assert(!flat->dlc);
      }
      out.push_back(encoding);
      encoding = (0xFF & instr->operands[0].physReg());
      if (!instr->definitions.empty())
         encoding |= (0xFF & instr->definitions[0].physReg()) << 24;
      if (instr->operands.size() >= 3)
         encoding |= (0xFF & instr->operands[2].physReg()) << 8;
      if (!instr->operands[1].isUndefined()) {
         assert(ctx.chip_class >= GFX10 || instr->operands[1].physReg() != 0x7F);
         assert(instr->format != Format::FLAT);
         encoding |= instr->operands[1].physReg() << 16;
      } else if (instr->format != Format::FLAT || ctx.chip_class >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
         if (ctx.chip_class <= GFX9)
            encoding |= 0x7F << 16;
         else
            encoding |= sgpr_null << 16;
      }
      encoding |= flat->nv ? 1 << 23 : 0;
      out.push_back(encoding);
      break;
   }
   case Format::EXP: {
      Export_instruction* exp = static_cast<Export_instruction*>(instr);
      uint32_t encoding;
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding = (0b110001 << 26);
      } else {
         encoding = (0b111110 << 26);
      }

      encoding |= exp->valid_mask ? 0b1 << 12 : 0;
      encoding |= exp->done ? 0b1 << 11 : 0;
      encoding |= exp->compressed ? 0b1 << 10 : 0;
      encoding |= exp->dest << 4;
      encoding |= exp->enabled_mask;
      out.push_back(encoding);
      encoding = 0xFF & exp->operands[0].physReg();
      encoding |= (0xFF & exp->operands[1].physReg()) << 8;
      encoding |= (0xFF & exp->operands[2].physReg()) << 16;
      encoding |= (0xFF & exp->operands[3].physReg()) << 24;
      out.push_back(encoding);
      break;
   }
   case Format::PSEUDO:
   case Format::PSEUDO_BARRIER:
      if (instr->opcode != aco_opcode::p_unit_test)
         unreachable("Pseudo instructions should be lowered before assembly.");
      break;
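   /* Everything else is a modifier OR'd onto a base VALU format: VOP3
    * versions of VOP2/VOP1/VOPC/VINTRP get their opcode remapped into the
    * VOP3 opcode space, while DPP and SDWA re-emit the base instruction with
    * src0 replaced by a marker register (250 resp. 249) and then append an
    * extra control dword.
    */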
   default:
      if ((uint16_t) instr->format & (uint16_t) Format::VOP3A) {
         VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);

         if ((uint16_t) instr->format & (uint16_t) Format::VOP2) {
            opcode = opcode + 0x100;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VOP1) {
            if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9)
               opcode = opcode + 0x140;
            else
               opcode = opcode + 0x180;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VOPC) {
            opcode = opcode + 0x0;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VINTRP) {
            opcode = opcode + 0x270;
         }

         uint32_t encoding;
         if (ctx.chip_class <= GFX9) {
            encoding = (0b110100 << 26);
         } else if (ctx.chip_class >= GFX10) {
            encoding = (0b110101 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         if (ctx.chip_class <= GFX7) {
            encoding |= opcode << 17;
            encoding |= (vop3->clamp ? 1 : 0) << 11;
         } else {
            encoding |= opcode << 16;
            encoding |= (vop3->clamp ? 1 : 0) << 15;
         }
         encoding |= vop3->opsel << 11;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->abs[i] << (8+i);
         if (instr->definitions.size() == 2)
            encoding |= instr->definitions[1].physReg() << 8;
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);
         encoding = 0;
         if (instr->opcode == aco_opcode::v_interp_mov_f32) {
            encoding = 0x3 & instr->operands[0].constantValue();
         } else {
            for (unsigned i = 0; i < instr->operands.size(); i++)
               encoding |= instr->operands[i].physReg() << (i * 9);
         }
         encoding |= vop3->omod << 27;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg[i] << (29+i);
         out.push_back(encoding);

      } else if (instr->format == Format::VOP3P) {
         VOP3P_instruction* vop3 = static_cast<VOP3P_instruction*>(instr);

         uint32_t encoding;
         if (ctx.chip_class == GFX9) {
            encoding = (0b110100111 << 23);
         } else if (ctx.chip_class >= GFX10) {
            encoding = (0b110011 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         encoding |= opcode << 16;
         encoding |= (vop3->clamp ? 1 : 0) << 15;
         encoding |= vop3->opsel_lo << 11;
         encoding |= ((vop3->opsel_hi & 0x4) ? 1 : 0) << 14;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg_hi[i] << (8+i);
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);
         encoding = 0;
         for (unsigned i = 0; i < instr->operands.size(); i++)
            encoding |= instr->operands[i].physReg() << (i * 9);
         encoding |= (vop3->opsel_hi & 0x3) << 27;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg_lo[i] << (29+i);
         out.push_back(encoding);

      } else if (instr->isDPP()) {
         assert(ctx.chip_class >= GFX8);
         /* first emit the instruction without the DPP operand */
         Operand dpp_op = instr->operands[0];
         instr->operands[0] = Operand(PhysReg{250}, v1);
         instr->format = (Format) ((uint16_t) instr->format & ~(uint16_t)Format::DPP);
         emit_instruction(ctx, out, instr);
         DPP_instruction* dpp = static_cast<DPP_instruction*>(instr);
         uint32_t encoding = (0xF & dpp->row_mask) << 28;
         encoding |= (0xF & dpp->bank_mask) << 24;
         encoding |= dpp->abs[1] << 23;
         encoding |= dpp->neg[1] << 22;
         encoding |= dpp->abs[0] << 21;
         encoding |= dpp->neg[0] << 20;
         if (ctx.chip_class >= GFX10)
            encoding |= 1 << 18; /* set Fetch Inactive to match GFX9 behaviour */
         encoding |= dpp->bound_ctrl << 19;
         encoding |= dpp->dpp_ctrl << 8;
         encoding |= (0xFF) & dpp_op.physReg();
         out.push_back(encoding);
         return;
      } else if (instr->isSDWA()) {
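         /* SDWA appends a control dword: the base instruction is emitted with
          * src0 replaced by the special value 249, and the dword below carries
          * the actual src0 register together with the byte/word selects, sign
          * extension, abs/neg and clamp/omod fields.
          */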
         /* first emit the instruction without the SDWA operand */
         Operand sdwa_op = instr->operands[0];
         instr->operands[0] = Operand(PhysReg{249}, v1);
         instr->format = (Format) ((uint16_t) instr->format & ~(uint16_t)Format::SDWA);
         emit_instruction(ctx, out, instr);

         SDWA_instruction* sdwa = static_cast<SDWA_instruction*>(instr);
         uint32_t encoding = 0;

         if ((uint16_t)instr->format & (uint16_t)Format::VOPC) {
            if (instr->definitions[0].physReg() != vcc) {
               encoding |= instr->definitions[0].physReg() << 8;
               encoding |= 1 << 15;
            }
            encoding |= (sdwa->clamp ? 1 : 0) << 13;
         } else {
            encoding |= get_sdwa_sel(sdwa->dst_sel, instr->definitions[0].physReg()) << 8;
            uint32_t dst_u = sdwa->dst_sel & sdwa_sext ? 1 : 0;
            if (sdwa->dst_preserve || (sdwa->dst_sel & sdwa_isra))
               dst_u = 2;
            encoding |= dst_u << 11;
            encoding |= (sdwa->clamp ? 1 : 0) << 13;
            encoding |= sdwa->omod << 14;
         }

         encoding |= get_sdwa_sel(sdwa->sel[0], sdwa_op.physReg()) << 16;
         encoding |= sdwa->sel[0] & sdwa_sext ? 1 << 19 : 0;
         encoding |= sdwa->abs[0] << 21;
         encoding |= sdwa->neg[0] << 20;

         if (instr->operands.size() >= 2) {
            encoding |= get_sdwa_sel(sdwa->sel[1], instr->operands[1].physReg()) << 24;
            encoding |= sdwa->sel[1] & sdwa_sext ? 1 << 27 : 0;
            encoding |= sdwa->abs[1] << 29;
            encoding |= sdwa->neg[1] << 28;
         }

         encoding |= 0xFF & sdwa_op.physReg();
         encoding |= (sdwa_op.physReg() < 256) << 23;
         if (instr->operands.size() >= 2)
            encoding |= (instr->operands[1].physReg() < 256) << 31;
         out.push_back(encoding);
      } else {
         unreachable("unimplemented instruction format");
      }
      break;
   }

   /* append literal dword */
   for (const Operand& op : instr->operands) {
      if (op.isLiteral()) {
         out.push_back(op.constantValue());
         break;
      }
   }
}

void emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
{
   for (aco_ptr<Instruction>& instr : block.instructions) {
#if 0
      int start_idx = out.size();
      std::cerr << "Encoding:\t" << std::endl;
      aco_print_instr(&*instr, stderr);
      std::cerr << std::endl;
#endif
      emit_instruction(ctx, out, instr.get());
#if 0
      for (int i = start_idx; i < out.size(); i++)
         std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
#endif
   }
}

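/* The hardware expects the final export of a shader to be marked "done"
 * (position exports for VS/NGG waves, and with the valid mask set for
 * fragment shaders). A shader without any export would hang the GPU, so
 * abort instead of emitting such a binary.
 */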
void fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   bool exported = false;
   for (Block& block : program->blocks) {
      if (!(block.kind & block_kind_export_end))
         continue;
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      while (it != block.instructions.rend())
      {
         if ((*it)->format == Format::EXP) {
            Export_instruction* exp = static_cast<Export_instruction*>((*it).get());
            if (program->stage & (hw_vs | hw_ngg_gs)) {
               if (exp->dest >= V_008DFC_SQ_EXP_POS && exp->dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp->done = true;
                  exported = true;
                  break;
               }
            } else {
               exp->done = true;
               exp->valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec)
            break;
         ++it;
      }
   }

   if (!exported) {
      /* Abort in order to avoid a GPU hang. */
      aco_err(program, "Missing export in %s shader:", (program->stage & hw_vs) ? "vertex" : "fragment");
      aco_print_program(program, stderr);
      abort();
   }
}

static void fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10; we work around this by inserting NOPs if needed. */
   bool gfx10_3f_bug = false;

   do {
      auto buggy_branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(), [&ctx](const auto &branch) -> bool {
         return ((int)ctx.program->blocks[branch.second->block].offset - branch.first - 1) == 0x3f;
      });

      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         int s_nop_pos = buggy_branch_it->first + 1;
         auto out_pos = std::next(out.begin(), s_nop_pos);
         out.insert(out_pos, s_nop_0);

         /* Update the offset of each affected block */
         for (Block& block : ctx.program->blocks) {
            if (block.offset > (unsigned)buggy_branch_it->first)
               block.offset++;
         }

         /* Update the branches following the current one */
         for (auto branch_it = std::next(buggy_branch_it); branch_it != ctx.branches.end(); ++branch_it)
            branch_it->first++;

         /* Find first constant address after the inserted instruction */
         auto caddr_it = std::find_if(ctx.constaddrs.begin(), ctx.constaddrs.end(), [s_nop_pos](const int &caddr_pos) -> bool {
            return caddr_pos >= s_nop_pos;
         });

         /* Update the locations of constant addresses */
         for (; caddr_it != ctx.constaddrs.end(); ++caddr_it)
            (*caddr_it)++;

      }
   } while (gfx10_3f_bug);
}

void fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
{
   if (ctx.chip_class == GFX10)
      fix_branches_gfx10(ctx, out);

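   /* Branch immediates are signed 16-bit offsets in dwords, relative to the
    * instruction following the branch, hence the "- branch.first - 1".
    */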
   for (std::pair<int, SOPP_instruction*> &branch : ctx.branches) {
      int offset = (int)ctx.program->blocks[branch.second->block].offset - branch.first - 1;
      out[branch.first] |= (uint16_t) offset;
   }
}

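/* Patch the literals recorded for p_constaddr: s_getpc_b64 yields the address
 * of the following s_add_u32, which sits one dword before its literal, and
 * the constant data is appended right after the code, so the literal gains
 * (code size - literal position + 1) dwords, converted to bytes.
 */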
void fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
{
   for (unsigned addr : ctx.constaddrs)
      out[addr] += (out.size() - addr + 1u) * 4u;
}

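/* Assemble the whole program: mark the final exports, emit each block while
 * recording its code offset, resolve branch targets, pad GFX10 binaries with
 * s_code_end, patch the p_constaddr literals and append the constant data.
 * The returned size covers only the emitted instructions, not the padding or
 * the constant data.
 */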
unsigned emit_program(Program* program,
                      std::vector<uint32_t>& code)
{
   asm_context ctx(program);

   if (program->stage & (hw_vs | hw_fs | hw_ngg_gs))
      fix_exports(ctx, code, program);

   for (Block& block : program->blocks) {
      block.offset = code.size();
      emit_block(ctx, code, block);
   }

   fix_branches(ctx, code);

   unsigned exec_size = code.size() * sizeof(uint32_t);

   if (program->chip_class >= GFX10) {
      /* Pad output with s_code_end so instruction prefetching doesn't cause
       * page faults */
      unsigned final_size = align(code.size() + 3 * 16, 16);
      while (code.size() < final_size)
         code.push_back(0xbf9f0000u);
   }

   fix_constaddrs(ctx, code);

   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   /* Copy constant data */
   code.insert(code.end(), (uint32_t*)program->constant_data.data(),
               (uint32_t*)(program->constant_data.data() + program->constant_data.size()));

   return exec_size;
}

}