aco: emit v_interp_*_f16 instructions as VOP3 instead of VINTRP
[mesa.git] / src / amd / compiler / aco_assembler.cpp
#include <vector>
#include <algorithm>

#include "aco_ir.h"
#include "common/sid.h"
#include "ac_shader_util.h"
#include "util/u_math.h"

namespace aco {

struct asm_context {
   Program *program;
   enum chip_class chip_class;
   std::vector<std::pair<int, SOPP_instruction*>> branches;
   std::vector<unsigned> constaddrs;
   const int16_t* opcode;
   // TODO: keep track of branch instructions referring blocks
   // and, when emitting the block, correct the offset in instr
   asm_context(Program* program) : program(program), chip_class(program->chip_class) {
      if (chip_class <= GFX7)
         opcode = &instr_info.opcode_gfx7[0];
      else if (chip_class <= GFX9)
         opcode = &instr_info.opcode_gfx9[0];
      else if (chip_class == GFX10)
         opcode = &instr_info.opcode_gfx10[0];
   }

   int subvector_begin_pos = -1;
};

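/* Compute the hardware SDWA 'sel' value. For register-relative selections
 * (sdwa_isra) the byte/word index is derived from the register's byte offset;
 * otherwise the selector value is used directly. */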
static uint32_t get_sdwa_sel(unsigned sel, PhysReg reg)
{
   if (sel & sdwa_isra) {
      unsigned size = sdwa_rasize & sel;
      if (size == 1)
         return reg.byte();
      else /* size == 2 */
         return sdwa_isword | (reg.byte() >> 1);
   }
   return sel & sdwa_asuint;
}

void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
{
   uint32_t instr_offset = out.size() * 4u;

   /* lower remaining pseudo-instructions */
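   /* p_constaddr computes the address of the constant data appended after the
    * shader binary: s_getpc_b64 yields the address of the following instruction
    * and the 64-bit add applies (code_size - pc + offset). Only the relative
    * part is known here, so the literal's position is recorded in
    * ctx.constaddrs and the final code size is added in fix_constaddrs(). */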
   if (instr->opcode == aco_opcode::p_constaddr) {
      unsigned dest = instr->definitions[0].physReg();
      unsigned offset = instr->operands[0].constantValue();

      /* s_getpc_b64 dest[0:1] */
      uint32_t encoding = (0b101111101 << 23);
      uint32_t opcode = ctx.opcode[(int)aco_opcode::s_getpc_b64];
      if (opcode >= 55 && ctx.chip_class <= GFX9) {
         assert(ctx.chip_class == GFX9 && opcode < 60);
         opcode = opcode - 4;
      }
      encoding |= dest << 16;
      encoding |= opcode << 8;
      out.push_back(encoding);

      /* s_add_u32 dest[0], dest[0], ... */
      encoding = (0b10 << 30);
      encoding |= ctx.opcode[(int)aco_opcode::s_add_u32] << 23;
      encoding |= dest << 16;
      encoding |= dest;
      encoding |= 255 << 8;
      out.push_back(encoding);
      ctx.constaddrs.push_back(out.size());
      out.push_back(-(instr_offset + 4) + offset);

      /* s_addc_u32 dest[1], dest[1], 0 */
      encoding = (0b10 << 30);
      encoding |= ctx.opcode[(int)aco_opcode::s_addc_u32] << 23;
      encoding |= (dest + 1) << 16;
      encoding |= dest + 1;
      encoding |= 128 << 8;
      out.push_back(encoding);
      return;
   }

   uint32_t opcode = ctx.opcode[(int)instr->opcode];
   if (opcode == (uint32_t)-1) {
      fprintf(stderr, "Unsupported opcode: ");
      aco_print_instr(instr, stderr);
      abort();
   }

   switch (instr->format) {
   case Format::SOP2: {
      uint32_t encoding = (0b10 << 30);
      encoding |= opcode << 23;
      encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
      encoding |= instr->operands.size() >= 2 ? instr->operands[1].physReg() << 8 : 0;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPK: {
      SOPK_instruction *sopk = static_cast<SOPK_instruction*>(instr);

      if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
         assert(ctx.chip_class >= GFX10);
         assert(ctx.subvector_begin_pos == -1);
         ctx.subvector_begin_pos = out.size();
      } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
         assert(ctx.chip_class >= GFX10);
         assert(ctx.subvector_begin_pos != -1);
         /* Adjust s_subvector_loop_begin instruction to the address after the end */
         out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
         /* Adjust s_subvector_loop_end instruction to the address after the beginning */
         sopk->imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
         ctx.subvector_begin_pos = -1;
      }

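      /* The SDST field (bits 22:16) holds the definition unless it is SCC; for
       * instructions without an SGPR definition (e.g. s_cmpk_*) it holds the
       * first source instead. */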
      uint32_t encoding = (0b1011 << 28);
      encoding |= opcode << 23;
      encoding |=
         !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc) ?
         instr->definitions[0].physReg() << 16 :
         !instr->operands.empty() && instr->operands[0].physReg() <= 127 ?
         instr->operands[0].physReg() << 16 : 0;
      encoding |= sopk->imm;
      out.push_back(encoding);
      break;
   }
   case Format::SOP1: {
      uint32_t encoding = (0b101111101 << 23);
      if (opcode >= 55 && ctx.chip_class <= GFX9) {
         assert(ctx.chip_class == GFX9 && opcode < 60);
         opcode = opcode - 4;
      }
      encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
      encoding |= opcode << 8;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPC: {
      uint32_t encoding = (0b101111110 << 23);
      encoding |= opcode << 16;
      encoding |= instr->operands.size() == 2 ? instr->operands[1].physReg() << 8 : 0;
      encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
      out.push_back(encoding);
      break;
   }
   case Format::SOPP: {
      SOPP_instruction* sopp = static_cast<SOPP_instruction*>(instr);
      uint32_t encoding = (0b101111111 << 23);
      encoding |= opcode << 16;
      encoding |= (uint16_t) sopp->imm;
      if (sopp->block != -1)
         ctx.branches.emplace_back(out.size(), sopp);
      out.push_back(encoding);
      break;
   }
   case Format::SMEM: {
      SMEM_instruction* smem = static_cast<SMEM_instruction*>(instr);
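      /* SOE ("scalar offset enable"): an extra SGPR offset operand is present
       * when there is one operand more than the base form needs, i.e. a third
       * operand for loads (sbase, offset, soffset) or a fourth for stores
       * (sbase, offset, sdata, soffset). */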
      bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
      bool is_load = !instr->definitions.empty();
      uint32_t encoding = 0;

      if (ctx.chip_class <= GFX7) {
         encoding = (0b11000 << 27);
         encoding |= opcode << 22;
         encoding |= instr->definitions.size() ? instr->definitions[0].physReg() << 15 : 0;
         encoding |= instr->operands.size() ? (instr->operands[0].physReg() >> 1) << 9 : 0;
         if (instr->operands.size() >= 2) {
            if (!instr->operands[1].isConstant() || instr->operands[1].constantValue() >= 1024) {
               encoding |= instr->operands[1].physReg().reg();
            } else {
               encoding |= instr->operands[1].constantValue() >> 2;
               encoding |= 1 << 8;
            }
         }
         out.push_back(encoding);
         /* SMRD instructions can take a literal on GFX6 & GFX7 */
         if (instr->operands.size() >= 2 && instr->operands[1].isConstant() && instr->operands[1].constantValue() >= 1024)
            out.push_back(instr->operands[1].constantValue() >> 2);
         return;
      }

      if (ctx.chip_class <= GFX9) {
         encoding = (0b110000 << 26);
         assert(!smem->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         encoding |= smem->nv ? 1 << 15 : 0;
      } else {
         encoding = (0b111101 << 26);
         assert(!smem->nv); /* Non-volatile is not supported on GFX10 */
         encoding |= smem->dlc ? 1 << 14 : 0;
      }

      encoding |= opcode << 18;
      encoding |= smem->glc ? 1 << 16 : 0;

      if (ctx.chip_class <= GFX9) {
         if (instr->operands.size() >= 2)
            encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
      }
      if (ctx.chip_class == GFX9) {
         encoding |= soe ? 1 << 14 : 0;
      }

      if (is_load || instr->operands.size() >= 3) { /* SDATA */
         encoding |= (is_load ? instr->definitions[0].physReg() : instr->operands[2].physReg()) << 6;
      }
      if (instr->operands.size() >= 1) { /* SBASE */
         encoding |= instr->operands[0].physReg() >> 1;
      }

      out.push_back(encoding);
      encoding = 0;

      int32_t offset = 0;
      uint32_t soffset = ctx.chip_class >= GFX10
                         ? sgpr_null /* On GFX10 this is disabled by specifying SGPR_NULL */
                         : 0;        /* On GFX9, it is disabled by the SOE bit (and it's not present on GFX8 and below) */
      if (instr->operands.size() >= 2) {
         const Operand &op_off1 = instr->operands[1];
         if (ctx.chip_class <= GFX9) {
            offset = op_off1.isConstant() ? op_off1.constantValue() : op_off1.physReg();
         } else {
            /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an SGPR */
            if (op_off1.isConstant()) {
               offset = op_off1.constantValue();
            } else {
               soffset = op_off1.physReg();
               assert(!soe); /* There is no place to put the other SGPR offset, if any */
            }
         }

         if (soe) {
            const Operand &op_off2 = instr->operands.back();
            assert(ctx.chip_class >= GFX9); /* GFX8 and below don't support specifying a constant and an SGPR at the same time */
            assert(!op_off2.isConstant());
            soffset = op_off2.physReg();
         }
      }
      encoding |= offset;
      encoding |= soffset << 25;

      out.push_back(encoding);
      return;
   }
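   /* VOP2: bit 31 = 0, OP at [30:25], VDST at [24:17], VSRC1 at [16:9], SRC0 at [8:0] */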
   case Format::VOP2: {
      uint32_t encoding = 0;
      encoding |= opcode << 25;
      encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
      encoding |= (0xFF & instr->operands[1].physReg()) << 9;
      encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VOP1: {
      uint32_t encoding = (0b0111111 << 25);
      if (!instr->definitions.empty())
         encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
      encoding |= opcode << 9;
      if (!instr->operands.empty())
         encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VOPC: {
      uint32_t encoding = (0b0111110 << 25);
      encoding |= opcode << 17;
      encoding |= (0xFF & instr->operands[1].physReg()) << 9;
      encoding |= instr->operands[0].physReg();
      out.push_back(encoding);
      break;
   }
   case Format::VINTRP: {
      Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
      uint32_t encoding = 0;

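      /* v_interp_*_f16 are emitted with the two-dword VOP3-style encoding
       * rather than the single-dword VINTRP one; the second dword carries the
       * attribute, channel and VGPR sources. */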
      if (instr->opcode == aco_opcode::v_interp_p1ll_f16 ||
          instr->opcode == aco_opcode::v_interp_p1lv_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
          instr->opcode == aco_opcode::v_interp_p2_f16) {
         if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
            encoding = (0b110100 << 26);
         } else if (ctx.chip_class == GFX10) {
            encoding = (0b110101 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         encoding |= opcode << 16;
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);

         encoding = 0;
         encoding |= interp->attribute;
         encoding |= interp->component << 6;
         encoding |= instr->operands[0].physReg() << 9;
         if (instr->opcode == aco_opcode::v_interp_p2_f16 ||
             instr->opcode == aco_opcode::v_interp_p2_legacy_f16 ||
             instr->opcode == aco_opcode::v_interp_p1lv_f16) {
            encoding |= instr->operands[2].physReg() << 18;
         }
         out.push_back(encoding);
      } else {
         if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
            encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
         } else {
            encoding = (0b110010 << 26);
         }

         assert(encoding);
         encoding |= (0xFF & instr->definitions[0].physReg()) << 18;
         encoding |= opcode << 16;
         encoding |= interp->attribute << 10;
         encoding |= interp->component << 8;
         if (instr->opcode == aco_opcode::v_interp_mov_f32)
            encoding |= (0x3 & instr->operands[0].constantValue());
         else
            encoding |= (0xFF & instr->operands[0].physReg());
         out.push_back(encoding);
      }
      break;
   }
   case Format::DS: {
      DS_instruction* ds = static_cast<DS_instruction*>(instr);
      uint32_t encoding = (0b110110 << 26);
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding |= opcode << 17;
         encoding |= (ds->gds ? 1 : 0) << 16;
      } else {
         encoding |= opcode << 18;
         encoding |= (ds->gds ? 1 : 0) << 17;
      }
      encoding |= ((0xFF & ds->offset1) << 8);
      encoding |= (0xFFFF & ds->offset0);
      out.push_back(encoding);
      encoding = 0;
      unsigned reg = !instr->definitions.empty() ? instr->definitions[0].physReg() : 0;
      encoding |= (0xFF & reg) << 24;
      reg = instr->operands.size() >= 3 && !(instr->operands[2].physReg() == m0) ? instr->operands[2].physReg() : 0;
      encoding |= (0xFF & reg) << 16;
      reg = instr->operands.size() >= 2 && !(instr->operands[1].physReg() == m0) ? instr->operands[1].physReg() : 0;
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[0].physReg());
      out.push_back(encoding);
      break;
   }
   case Format::MUBUF: {
      MUBUF_instruction* mubuf = static_cast<MUBUF_instruction*>(instr);
      uint32_t encoding = (0b111000 << 26);
      encoding |= opcode << 18;
      encoding |= (mubuf->lds ? 1 : 0) << 16;
      encoding |= (mubuf->glc ? 1 : 0) << 14;
      encoding |= (mubuf->idxen ? 1 : 0) << 13;
      assert(!mubuf->addr64 || ctx.chip_class <= GFX7);
      if (ctx.chip_class == GFX6 || ctx.chip_class == GFX7)
         encoding |= (mubuf->addr64 ? 1 : 0) << 15;
      encoding |= (mubuf->offen ? 1 : 0) << 12;
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         assert(!mubuf->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         encoding |= (mubuf->slc ? 1 : 0) << 17;
      } else if (ctx.chip_class >= GFX10) {
         encoding |= (mubuf->dlc ? 1 : 0) << 15;
      }
      encoding |= 0x0FFF & mubuf->offset;
      out.push_back(encoding);
      encoding = 0;
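      /* Second dword: SOFFSET at [31:24], TFE bit 23, SLC bit 22 (GFX6-7 and
       * GFX10), SRSRC at [20:16], VDATA at [15:8], VADDR at [7:0]. */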
      if (ctx.chip_class <= GFX7 || ctx.chip_class >= GFX10) {
         encoding |= (mubuf->slc ? 1 : 0) << 22;
      }
      encoding |= instr->operands[2].physReg() << 24;
      encoding |= (mubuf->tfe ? 1 : 0) << 23;
      encoding |= (instr->operands[0].physReg() >> 2) << 16;
      unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[1].physReg());
      out.push_back(encoding);
      break;
   }
   case Format::MTBUF: {
      MTBUF_instruction* mtbuf = static_cast<MTBUF_instruction*>(instr);

      uint32_t img_format = ac_get_tbuffer_format(ctx.chip_class, mtbuf->dfmt, mtbuf->nfmt);
      uint32_t encoding = (0b111010 << 26);
      assert(img_format <= 0x7F);
      assert(!mtbuf->dlc || ctx.chip_class >= GFX10);
      encoding |= (mtbuf->dlc ? 1 : 0) << 15; /* DLC bit replaces one bit of the OPCODE on GFX10 */
      encoding |= (mtbuf->glc ? 1 : 0) << 14;
      encoding |= (mtbuf->idxen ? 1 : 0) << 13;
      encoding |= (mtbuf->offen ? 1 : 0) << 12;
      encoding |= 0x0FFF & mtbuf->offset;
      encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */

      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding |= opcode << 15;
      } else {
         encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
      }

      out.push_back(encoding);
      encoding = 0;

      encoding |= instr->operands[2].physReg() << 24;
      encoding |= (mtbuf->tfe ? 1 : 0) << 23;
      encoding |= (mtbuf->slc ? 1 : 0) << 22;
      encoding |= (instr->operands[0].physReg() >> 2) << 16;
      unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
      encoding |= (0xFF & reg) << 8;
      encoding |= (0xFF & instr->operands[1].physReg());

      if (ctx.chip_class >= GFX10) {
         encoding |= (((opcode & 0x08) >> 3) << 21); /* MSB of 4-bit OPCODE */
      }

      out.push_back(encoding);
      break;
   }
   case Format::MIMG: {
      MIMG_instruction* mimg = static_cast<MIMG_instruction*>(instr);
      uint32_t encoding = (0b111100 << 26);
      encoding |= mimg->slc ? 1 << 25 : 0;
      encoding |= opcode << 18;
      encoding |= mimg->lwe ? 1 << 17 : 0;
      encoding |= mimg->tfe ? 1 << 16 : 0;
      encoding |= mimg->glc ? 1 << 13 : 0;
      encoding |= mimg->unrm ? 1 << 12 : 0;
      if (ctx.chip_class <= GFX9) {
         assert(!mimg->dlc); /* Device-level coherent is not supported on GFX9 and lower */
         assert(!mimg->r128);
         encoding |= mimg->a16 ? 1 << 15 : 0;
         encoding |= mimg->da ? 1 << 14 : 0;
      } else {
         encoding |= mimg->r128 ? 1 << 15 : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
         encoding |= mimg->dim << 3; /* GFX10: dimensionality instead of declare array */
         encoding |= mimg->dlc ? 1 << 7 : 0;
      }
      encoding |= (0xF & mimg->dmask) << 8;
      out.push_back(encoding);
      encoding = (0xFF & instr->operands[2].physReg()); /* VADDR */
      if (!instr->definitions.empty()) {
         encoding |= (0xFF & instr->definitions[0].physReg()) << 8; /* VDATA */
      } else if (instr->operands[1].regClass().type() == RegType::vgpr) {
         encoding |= (0xFF & instr->operands[1].physReg()) << 8; /* VDATA */
      }
      encoding |= (0x1F & (instr->operands[0].physReg() >> 2)) << 16; /* T# (resource) */
      if (instr->operands[1].regClass().type() == RegType::sgpr)
         encoding |= (0x1F & (instr->operands[1].physReg() >> 2)) << 21; /* sampler */

      assert(!mimg->d16 || ctx.chip_class >= GFX9);
      encoding |= mimg->d16 ? 1 << 15 : 0;
      if (ctx.chip_class >= GFX10) {
         encoding |= mimg->a16 ? 1 << 14 : 0; /* GFX10: A16 still exists, but is in a different place */
      }

      out.push_back(encoding);
      break;
   }
   case Format::FLAT:
   case Format::SCRATCH:
   case Format::GLOBAL: {
      FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
      uint32_t encoding = (0b110111 << 26);
      encoding |= opcode << 18;
      if (ctx.chip_class <= GFX9) {
         assert(flat->offset <= 0x1fff);
         encoding |= flat->offset & 0x1fff;
      } else if (instr->format == Format::FLAT) {
         /* GFX10 has a 12-bit immediate OFFSET field,
          * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
          */
         assert(flat->offset == 0);
      } else {
         assert(flat->offset <= 0xfff);
         encoding |= flat->offset & 0xfff;
      }
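      /* SEG field (bits 15:14): 0 = flat, 1 = scratch, 2 = global */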
      if (instr->format == Format::SCRATCH)
         encoding |= 1 << 14;
      else if (instr->format == Format::GLOBAL)
         encoding |= 2 << 14;
      encoding |= flat->lds ? 1 << 13 : 0;
      encoding |= flat->glc ? 1 << 16 : 0;
      encoding |= flat->slc ? 1 << 17 : 0;
      if (ctx.chip_class >= GFX10) {
         assert(!flat->nv);
         encoding |= flat->dlc ? 1 << 12 : 0;
      } else {
         assert(!flat->dlc);
      }
      out.push_back(encoding);
      encoding = (0xFF & instr->operands[0].physReg());
      if (!instr->definitions.empty())
         encoding |= (0xFF & instr->definitions[0].physReg()) << 24;
      if (instr->operands.size() >= 3)
         encoding |= (0xFF & instr->operands[2].physReg()) << 8;
      if (!instr->operands[1].isUndefined()) {
         assert(ctx.chip_class >= GFX10 || instr->operands[1].physReg() != 0x7F);
         assert(instr->format != Format::FLAT);
         encoding |= instr->operands[1].physReg() << 16;
      } else if (instr->format != Format::FLAT || ctx.chip_class >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
         if (ctx.chip_class <= GFX9)
            encoding |= 0x7F << 16;
         else
            encoding |= sgpr_null << 16;
      }
      encoding |= flat->nv ? 1 << 23 : 0;
      out.push_back(encoding);
      break;
   }
   case Format::EXP: {
      Export_instruction* exp = static_cast<Export_instruction*>(instr);
      uint32_t encoding;
      if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
         encoding = (0b110001 << 26);
      } else {
         encoding = (0b111110 << 26);
      }

      encoding |= exp->valid_mask ? 0b1 << 12 : 0;
      encoding |= exp->done ? 0b1 << 11 : 0;
      encoding |= exp->compressed ? 0b1 << 10 : 0;
      encoding |= exp->dest << 4;
      encoding |= exp->enabled_mask;
      out.push_back(encoding);
      encoding = 0xFF & exp->operands[0].physReg();
      encoding |= (0xFF & exp->operands[1].physReg()) << 8;
      encoding |= (0xFF & exp->operands[2].physReg()) << 16;
      encoding |= (0xFF & exp->operands[3].physReg()) << 24;
      out.push_back(encoding);
      break;
   }
   case Format::PSEUDO:
   case Format::PSEUDO_BARRIER:
      unreachable("Pseudo instructions should be lowered before assembly.");
   default:
      if ((uint16_t) instr->format & (uint16_t) Format::VOP3A) {
         VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);

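         /* VOP2/VOP1/VOPC opcodes are moved into their VOP3 opcode ranges;
          * the VOP1 offset differs between GFX8/9 and GFX10. */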
         if ((uint16_t) instr->format & (uint16_t) Format::VOP2) {
            opcode = opcode + 0x100;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VOP1) {
            if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9)
               opcode = opcode + 0x140;
            else
               opcode = opcode + 0x180;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VOPC) {
            opcode = opcode + 0x0;
         } else if ((uint16_t) instr->format & (uint16_t) Format::VINTRP) {
            opcode = opcode + 0x270;
         }

         uint32_t encoding;
         if (ctx.chip_class <= GFX9) {
            encoding = (0b110100 << 26);
         } else if (ctx.chip_class == GFX10) {
            encoding = (0b110101 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         if (ctx.chip_class <= GFX7) {
            encoding |= opcode << 17;
            encoding |= (vop3->clamp ? 1 : 0) << 11;
         } else {
            encoding |= opcode << 16;
            encoding |= (vop3->clamp ? 1 : 0) << 15;
         }
         encoding |= vop3->opsel << 11;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->abs[i] << (8+i);
         if (instr->definitions.size() == 2)
            encoding |= instr->definitions[1].physReg() << 8;
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);
         encoding = 0;
         if (instr->opcode == aco_opcode::v_interp_mov_f32) {
            encoding = 0x3 & instr->operands[0].constantValue();
         } else {
            for (unsigned i = 0; i < instr->operands.size(); i++)
               encoding |= instr->operands[i].physReg() << (i * 9);
         }
         encoding |= vop3->omod << 27;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg[i] << (29+i);
         out.push_back(encoding);

      } else if (instr->format == Format::VOP3P) {
         VOP3P_instruction* vop3 = static_cast<VOP3P_instruction*>(instr);

         uint32_t encoding;
         if (ctx.chip_class == GFX9) {
            encoding = (0b110100111 << 23);
         } else if (ctx.chip_class == GFX10) {
            encoding = (0b110011 << 26);
         } else {
            unreachable("Unknown chip_class.");
         }

         encoding |= opcode << 16;
         encoding |= (vop3->clamp ? 1 : 0) << 15;
         encoding |= vop3->opsel_lo << 11;
         encoding |= ((vop3->opsel_hi & 0x4) ? 1 : 0) << 14;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg_hi[i] << (8+i);
         encoding |= (0xFF & instr->definitions[0].physReg());
         out.push_back(encoding);
         encoding = 0;
         for (unsigned i = 0; i < instr->operands.size(); i++)
            encoding |= instr->operands[i].physReg() << (i * 9);
         encoding |= (vop3->opsel_hi & 0x3) << 27;
         for (unsigned i = 0; i < 3; i++)
            encoding |= vop3->neg_lo[i] << (29+i);
         out.push_back(encoding);

      } else if (instr->isDPP()) {
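         /* DPP is encoded as the base VOP instruction with SRC0 set to the DPP
          * marker value 250, followed by an extra dword that carries the row
          * and bank masks, modifiers, the DPP control and the real SRC0. */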
         assert(ctx.chip_class >= GFX8);
         /* first emit the instruction without the DPP operand */
         Operand dpp_op = instr->operands[0];
         instr->operands[0] = Operand(PhysReg{250}, v1);
         instr->format = (Format) ((uint16_t) instr->format & ~(uint16_t)Format::DPP);
         emit_instruction(ctx, out, instr);
         DPP_instruction* dpp = static_cast<DPP_instruction*>(instr);
         uint32_t encoding = (0xF & dpp->row_mask) << 28;
         encoding |= (0xF & dpp->bank_mask) << 24;
         encoding |= dpp->abs[1] << 23;
         encoding |= dpp->neg[1] << 22;
         encoding |= dpp->abs[0] << 21;
         encoding |= dpp->neg[0] << 20;
         encoding |= dpp->bound_ctrl << 19;
         encoding |= dpp->dpp_ctrl << 8;
         encoding |= (0xFF) & dpp_op.physReg();
         out.push_back(encoding);
         return;
      } else if (instr->isSDWA()) {
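         /* SDWA works the same way: the base instruction is emitted with SRC0
          * set to the SDWA marker value 249, then an extra dword carries the
          * sub-dword selects, modifiers and the real SRC0. */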
         /* first emit the instruction without the SDWA operand */
         Operand sdwa_op = instr->operands[0];
         instr->operands[0] = Operand(PhysReg{249}, v1);
         instr->format = (Format) ((uint16_t) instr->format & ~(uint16_t)Format::SDWA);
         emit_instruction(ctx, out, instr);

         SDWA_instruction* sdwa = static_cast<SDWA_instruction*>(instr);
         uint32_t encoding = 0;

         if ((uint16_t)instr->format & (uint16_t)Format::VOPC) {
            if (instr->definitions[0].physReg() != vcc) {
               encoding |= instr->definitions[0].physReg() << 8;
               encoding |= 1 << 15;
            }
            encoding |= (sdwa->clamp ? 1 : 0) << 13;
         } else {
            encoding |= get_sdwa_sel(sdwa->dst_sel, instr->definitions[0].physReg()) << 8;
            uint32_t dst_u = sdwa->dst_sel & sdwa_sext ? 1 : 0;
            if (sdwa->dst_preserve || (sdwa->dst_sel & sdwa_isra))
               dst_u = 2;
            encoding |= dst_u << 11;
            encoding |= (sdwa->clamp ? 1 : 0) << 13;
            encoding |= sdwa->omod << 14;
         }

         encoding |= get_sdwa_sel(sdwa->sel[0], sdwa_op.physReg()) << 16;
         encoding |= sdwa->sel[0] & sdwa_sext ? 1 << 19 : 0;
         encoding |= sdwa->abs[0] << 21;
         encoding |= sdwa->neg[0] << 20;

         if (instr->operands.size() >= 2) {
            encoding |= get_sdwa_sel(sdwa->sel[1], instr->operands[1].physReg()) << 24;
            encoding |= sdwa->sel[1] & sdwa_sext ? 1 << 27 : 0;
            encoding |= sdwa->abs[1] << 29;
            encoding |= sdwa->neg[1] << 28;
         }

         encoding |= 0xFF & sdwa_op.physReg();
         encoding |= (sdwa_op.physReg() < 256) << 23;
         if (instr->operands.size() >= 2)
            encoding |= (instr->operands[1].physReg() < 256) << 31;
         out.push_back(encoding);
      } else {
         unreachable("unimplemented instruction format");
      }
      break;
   }

   /* append literal dword */
   for (const Operand& op : instr->operands) {
      if (op.isLiteral()) {
         out.push_back(op.constantValue());
         break;
      }
   }
}

void emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
{
   for (aco_ptr<Instruction>& instr : block.instructions) {
#if 0
      int start_idx = out.size();
      std::cerr << "Encoding:\t" << std::endl;
      aco_print_instr(&*instr, stderr);
      std::cerr << std::endl;
#endif
      emit_instruction(ctx, out, instr.get());
#if 0
      for (int i = start_idx; i < out.size(); i++)
         std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
#endif
   }
}

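/* Ensure the shader ends with a "done" export: for hw_vs/hw_ngg_gs the last
 * position export is marked done, for other stages the last export is marked
 * done and gets the valid mask set. If no export is found, abort to avoid a
 * GPU hang. */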
void fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   bool exported = false;
   for (Block& block : program->blocks) {
      if (!(block.kind & block_kind_export_end))
         continue;
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      while (it != block.instructions.rend())
      {
         if ((*it)->format == Format::EXP) {
            Export_instruction* exp = static_cast<Export_instruction*>((*it).get());
            if (program->stage & (hw_vs | hw_ngg_gs)) {
               if (exp->dest >= V_008DFC_SQ_EXP_POS && exp->dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp->done = true;
                  exported = true;
                  break;
               }
            } else {
               exp->done = true;
               exp->valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec)
            break;
         ++it;
      }
   }

   if (!exported) {
      /* Abort in order to avoid a GPU hang. */
      fprintf(stderr, "Missing export in %s shader:\n", (program->stage & hw_vs) ? "vertex" : "fragment");
      aco_print_program(program, stderr);
      abort();
   }
}

static void fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10, we workaround by inserting NOPs if needed. */
   bool gfx10_3f_bug = false;

   do {
      auto buggy_branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(), [&ctx](const auto &branch) -> bool {
         return ((int)ctx.program->blocks[branch.second->block].offset - branch.first - 1) == 0x3f;
      });

      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         int s_nop_pos = buggy_branch_it->first + 1;
         auto out_pos = std::next(out.begin(), s_nop_pos);
         out.insert(out_pos, s_nop_0);

         /* Update the offset of each affected block */
         for (Block& block : ctx.program->blocks) {
            if (block.offset > (unsigned)buggy_branch_it->first)
               block.offset++;
         }

         /* Update the branches following the current one */
         for (auto branch_it = std::next(buggy_branch_it); branch_it != ctx.branches.end(); ++branch_it)
            branch_it->first++;

         /* Find first constant address after the inserted instruction */
         auto caddr_it = std::find_if(ctx.constaddrs.begin(), ctx.constaddrs.end(), [s_nop_pos](const int &caddr_pos) -> bool {
            return caddr_pos >= s_nop_pos;
         });

         /* Update the locations of constant addresses */
         for (; caddr_it != ctx.constaddrs.end(); ++caddr_it)
            (*caddr_it)++;

      }
   } while (gfx10_3f_bug);
}

void fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
{
   if (ctx.chip_class >= GFX10)
      fix_branches_gfx10(ctx, out);

   for (std::pair<int, SOPP_instruction*> &branch : ctx.branches) {
      int offset = (int)ctx.program->blocks[branch.second->block].offset - branch.first - 1;
      out[branch.first] |= (uint16_t) offset;
   }
}

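/* Patch the literals recorded for p_constaddr: adding the final code size (in
 * bytes) turns the stored (offset - pc) value into the byte distance from the
 * PC to the requested location in the constant data appended after the code. */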
void fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
{
   for (unsigned addr : ctx.constaddrs)
      out[addr] += out.size() * 4u;
}

unsigned emit_program(Program* program,
                      std::vector<uint32_t>& code)
{
   asm_context ctx(program);

   if (program->stage & (hw_vs | hw_fs | hw_ngg_gs))
      fix_exports(ctx, code, program);

   for (Block& block : program->blocks) {
      block.offset = code.size();
      emit_block(ctx, code, block);
   }

   fix_branches(ctx, code);

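   /* exec_size covers only the instructions emitted so far; the s_code_end
    * padding and the constant data appended below are not included. */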
   unsigned exec_size = code.size() * sizeof(uint32_t);

   if (program->chip_class >= GFX10) {
      /* Pad output with s_code_end so instruction prefetching doesn't cause
       * page faults */
      unsigned final_size = align(code.size() + 3 * 16, 16);
      while (code.size() < final_size)
         code.push_back(0xbf9f0000u);
   }

   fix_constaddrs(ctx, code);

   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   /* Copy constant data */
   code.insert(code.end(), (uint32_t*)program->constant_data.data(),
               (uint32_t*)(program->constant_data.data() + program->constant_data.size()));

   return exec_size;
}

}