aco: compact various Instruction classes
[mesa.git] / src / amd / compiler / aco_assembler.cpp
1 #include <vector>
2 #include <algorithm>
3
4 #include "aco_ir.h"
5 #include "common/sid.h"
6 #include "ac_shader_util.h"
7 #include "util/u_math.h"
8
9 namespace aco {
10
11 struct asm_context {
12 Program *program;
13 enum chip_class chip_class;
14 std::vector<std::pair<int, SOPP_instruction*>> branches;
15 std::vector<unsigned> constaddrs;
16 const int16_t* opcode;
17 // TODO: keep track of branch instructions referring blocks
18 // and, when emitting the block, correct the offset in instr
19 asm_context(Program* program) : program(program), chip_class(program->chip_class) {
20 if (chip_class <= GFX7)
21 opcode = &instr_info.opcode_gfx7[0];
22 else if (chip_class <= GFX9)
23 opcode = &instr_info.opcode_gfx9[0];
24 else if (chip_class == GFX10)
25 opcode = &instr_info.opcode_gfx10[0];
26 }
27
28 int subvector_begin_pos = -1;
29 };
30
31 void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
32 {
33 uint32_t instr_offset = out.size() * 4u;
34
35 /* lower remaining pseudo-instructions */
36 if (instr->opcode == aco_opcode::p_constaddr) {
37 unsigned dest = instr->definitions[0].physReg();
38 unsigned offset = instr->operands[0].constantValue();
39
40 /* s_getpc_b64 dest[0:1] */
41 uint32_t encoding = (0b101111101 << 23);
42 uint32_t opcode = ctx.opcode[(int)aco_opcode::s_getpc_b64];
43 if (opcode >= 55 && ctx.chip_class <= GFX9) {
44 assert(ctx.chip_class == GFX9 && opcode < 60);
45 opcode = opcode - 4;
46 }
47 encoding |= dest << 16;
48 encoding |= opcode << 8;
49 out.push_back(encoding);
50
51 /* s_add_u32 dest[0], dest[0], ... */
52 encoding = (0b10 << 30);
53 encoding |= ctx.opcode[(int)aco_opcode::s_add_u32] << 23;
54 encoding |= dest << 16;
55 encoding |= dest;
56 encoding |= 255 << 8;
57 out.push_back(encoding);
58 ctx.constaddrs.push_back(out.size());
59 out.push_back(-(instr_offset + 4) + offset);
60
61 /* s_addc_u32 dest[1], dest[1], 0 */
62 encoding = (0b10 << 30);
63 encoding |= ctx.opcode[(int)aco_opcode::s_addc_u32] << 23;
64 encoding |= (dest + 1) << 16;
65 encoding |= dest + 1;
66 encoding |= 128 << 8;
67 out.push_back(encoding);
68 return;
69 }
70
71 uint32_t opcode = ctx.opcode[(int)instr->opcode];
72 if (opcode == (uint32_t)-1) {
73 fprintf(stderr, "Unsupported opcode: ");
74 aco_print_instr(instr, stderr);
75 abort();
76 }
77
78 switch (instr->format) {
79 case Format::SOP2: {
80 uint32_t encoding = (0b10 << 30);
81 encoding |= opcode << 23;
82 encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
83 encoding |= instr->operands.size() >= 2 ? instr->operands[1].physReg() << 8 : 0;
84 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
85 out.push_back(encoding);
86 break;
87 }
88 case Format::SOPK: {
89 SOPK_instruction *sopk = static_cast<SOPK_instruction*>(instr);
90
91 if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
92 assert(ctx.chip_class >= GFX10);
93 assert(ctx.subvector_begin_pos == -1);
94 ctx.subvector_begin_pos = out.size();
95 } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
96 assert(ctx.chip_class >= GFX10);
97 assert(ctx.subvector_begin_pos != -1);
98 /* Adjust s_subvector_loop_begin instruction to the address after the end */
99 out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
100 /* Adjust s_subvector_loop_end instruction to the address after the beginning */
101 sopk->imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
102 ctx.subvector_begin_pos = -1;
103 }
104
105 uint32_t encoding = (0b1011 << 28);
106 encoding |= opcode << 23;
107 encoding |=
108 !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc) ?
109 instr->definitions[0].physReg() << 16 :
110 !instr->operands.empty() && instr->operands[0].physReg() <= 127 ?
111 instr->operands[0].physReg() << 16 : 0;
112 encoding |= sopk->imm;
113 out.push_back(encoding);
114 break;
115 }
116 case Format::SOP1: {
117 uint32_t encoding = (0b101111101 << 23);
118 if (opcode >= 55 && ctx.chip_class <= GFX9) {
119 assert(ctx.chip_class == GFX9 && opcode < 60);
120 opcode = opcode - 4;
121 }
122 encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
123 encoding |= opcode << 8;
124 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
125 out.push_back(encoding);
126 break;
127 }
128 case Format::SOPC: {
129 uint32_t encoding = (0b101111110 << 23);
130 encoding |= opcode << 16;
131 encoding |= instr->operands.size() == 2 ? instr->operands[1].physReg() << 8 : 0;
132 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
133 out.push_back(encoding);
134 break;
135 }
136 case Format::SOPP: {
137 SOPP_instruction* sopp = static_cast<SOPP_instruction*>(instr);
138 uint32_t encoding = (0b101111111 << 23);
139 encoding |= opcode << 16;
140 encoding |= (uint16_t) sopp->imm;
141 if (sopp->block != -1)
142 ctx.branches.emplace_back(out.size(), sopp);
143 out.push_back(encoding);
144 break;
145 }
146 case Format::SMEM: {
147 SMEM_instruction* smem = static_cast<SMEM_instruction*>(instr);
148 bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
149 bool is_load = !instr->definitions.empty();
150 uint32_t encoding = 0;
151
152 if (ctx.chip_class <= GFX7) {
153 encoding = (0b11000 << 27);
154 encoding |= opcode << 22;
155 encoding |= instr->definitions.size() ? instr->definitions[0].physReg() << 15 : 0;
156 encoding |= instr->operands.size() ? (instr->operands[0].physReg() >> 1) << 9 : 0;
157 if (!instr->operands[1].isConstant() || instr->operands[1].constantValue() >= 1024) {
158 encoding |= instr->operands[1].physReg().reg;
159 } else {
160 encoding |= instr->operands[1].constantValue() >> 2;
161 encoding |= 1 << 8;
162 }
163 out.push_back(encoding);
164 /* SMRD instructions can take a literal on GFX6 & GFX7 */
165 if (instr->operands[1].isConstant() && instr->operands[1].constantValue() >= 1024)
166 out.push_back(instr->operands[1].constantValue() >> 2);
167 return;
168 }
169
170 if (ctx.chip_class <= GFX9) {
171 encoding = (0b110000 << 26);
172 assert(!smem->dlc); /* Device-level coherent is not supported on GFX9 and lower */
173 encoding |= smem->nv ? 1 << 15 : 0;
174 } else {
175 encoding = (0b111101 << 26);
176 assert(!smem->nv); /* Non-volatile is not supported on GFX10 */
177 encoding |= smem->dlc ? 1 << 14 : 0;
178 }
179
180 encoding |= opcode << 18;
181 encoding |= smem->glc ? 1 << 16 : 0;
182
183 if (ctx.chip_class <= GFX9) {
184 if (instr->operands.size() >= 2)
185 encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
186 }
187 if (ctx.chip_class == GFX9) {
188 encoding |= soe ? 1 << 14 : 0;
189 }
190
191 if (is_load || instr->operands.size() >= 3) { /* SDATA */
192 encoding |= (is_load ? instr->definitions[0].physReg() : instr->operands[2].physReg()) << 6;
193 }
194 if (instr->operands.size() >= 1) { /* SBASE */
195 encoding |= instr->operands[0].physReg() >> 1;
196 }
197
198 out.push_back(encoding);
199 encoding = 0;
200
201 int32_t offset = 0;
202 uint32_t soffset = ctx.chip_class >= GFX10
203 ? sgpr_null /* On GFX10 this is disabled by specifying SGPR_NULL */
204 : 0; /* On GFX9, it is disabled by the SOE bit (and it's not present on GFX8 and below) */
205 if (instr->operands.size() >= 2) {
206 const Operand &op_off1 = instr->operands[1];
207 if (ctx.chip_class <= GFX9) {
208 offset = op_off1.isConstant() ? op_off1.constantValue() : op_off1.physReg();
209 } else {
210 /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an SGPR */
211 if (op_off1.isConstant()) {
212 offset = op_off1.constantValue();
213 } else {
214 soffset = op_off1.physReg();
215 assert(!soe); /* There is no place to put the other SGPR offset, if any */
216 }
217 }
218
219 if (soe) {
220 const Operand &op_off2 = instr->operands.back();
221 assert(ctx.chip_class >= GFX9); /* GFX8 and below don't support specifying a constant and an SGPR at the same time */
222 assert(!op_off2.isConstant());
223 soffset = op_off2.physReg();
224 }
225 }
226 encoding |= offset;
227 encoding |= soffset << 25;
228
229 out.push_back(encoding);
230 return;
231 }
232 case Format::VOP2: {
233 uint32_t encoding = 0;
234 encoding |= opcode << 25;
235 encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
236 encoding |= (0xFF & instr->operands[1].physReg()) << 9;
237 encoding |= instr->operands[0].physReg();
238 out.push_back(encoding);
239 break;
240 }
241 case Format::VOP1: {
242 uint32_t encoding = (0b0111111 << 25);
243 if (!instr->definitions.empty())
244 encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
245 encoding |= opcode << 9;
246 if (!instr->operands.empty())
247 encoding |= instr->operands[0].physReg();
248 out.push_back(encoding);
249 break;
250 }
251 case Format::VOPC: {
252 uint32_t encoding = (0b0111110 << 25);
253 encoding |= opcode << 17;
254 encoding |= (0xFF & instr->operands[1].physReg()) << 9;
255 encoding |= instr->operands[0].physReg();
256 out.push_back(encoding);
257 break;
258 }
259 case Format::VINTRP: {
260 Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
261 uint32_t encoding = 0;
262
263 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
264 encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
265 } else {
266 encoding = (0b110010 << 26);
267 }
268
269 assert(encoding);
270 encoding |= (0xFF & instr->definitions[0].physReg()) << 18;
271 encoding |= opcode << 16;
272 encoding |= interp->attribute << 10;
273 encoding |= interp->component << 8;
274 if (instr->opcode == aco_opcode::v_interp_mov_f32)
275 encoding |= (0x3 & instr->operands[0].constantValue());
276 else
277 encoding |= (0xFF & instr->operands[0].physReg());
278 out.push_back(encoding);
279 break;
280 }
281 case Format::DS: {
282 DS_instruction* ds = static_cast<DS_instruction*>(instr);
283 uint32_t encoding = (0b110110 << 26);
284 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
285 encoding |= opcode << 17;
286 encoding |= (ds->gds ? 1 : 0) << 16;
287 } else {
288 encoding |= opcode << 18;
289 encoding |= (ds->gds ? 1 : 0) << 17;
290 }
291 encoding |= ((0xFF & ds->offset1) << 8);
292 encoding |= (0xFFFF & ds->offset0);
293 out.push_back(encoding);
294 encoding = 0;
295 unsigned reg = !instr->definitions.empty() ? instr->definitions[0].physReg() : 0;
296 encoding |= (0xFF & reg) << 24;
297 reg = instr->operands.size() >= 3 && !(instr->operands[2].physReg() == m0) ? instr->operands[2].physReg() : 0;
298 encoding |= (0xFF & reg) << 16;
299 reg = instr->operands.size() >= 2 && !(instr->operands[1].physReg() == m0) ? instr->operands[1].physReg() : 0;
300 encoding |= (0xFF & reg) << 8;
301 encoding |= (0xFF & instr->operands[0].physReg());
302 out.push_back(encoding);
303 break;
304 }
305 case Format::MUBUF: {
306 MUBUF_instruction* mubuf = static_cast<MUBUF_instruction*>(instr);
307 uint32_t encoding = (0b111000 << 26);
308 encoding |= opcode << 18;
309 encoding |= (mubuf->lds ? 1 : 0) << 16;
310 encoding |= (mubuf->glc ? 1 : 0) << 14;
311 encoding |= (mubuf->idxen ? 1 : 0) << 13;
312 encoding |= (mubuf->offen ? 1 : 0) << 12;
313 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
314 assert(!mubuf->dlc); /* Device-level coherent is not supported on GFX9 and lower */
315 encoding |= (mubuf->slc ? 1 : 0) << 17;
316 } else if (ctx.chip_class >= GFX10) {
317 encoding |= (mubuf->dlc ? 1 : 0) << 15;
318 }
319 encoding |= 0x0FFF & mubuf->offset;
320 out.push_back(encoding);
321 encoding = 0;
322 if (ctx.chip_class >= GFX10) {
323 encoding |= (mubuf->slc ? 1 : 0) << 22;
324 }
325 encoding |= instr->operands[2].physReg() << 24;
326 encoding |= (mubuf->tfe ? 1 : 0) << 23;
327 encoding |= (instr->operands[1].physReg() >> 2) << 16;
328 unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
329 encoding |= (0xFF & reg) << 8;
330 encoding |= (0xFF & instr->operands[0].physReg());
331 out.push_back(encoding);
332 break;
333 }
334 case Format::MTBUF: {
335 MTBUF_instruction* mtbuf = static_cast<MTBUF_instruction*>(instr);
336
337 uint32_t img_format = ac_get_tbuffer_format(ctx.chip_class, mtbuf->dfmt, mtbuf->nfmt);
338 uint32_t encoding = (0b111010 << 26);
339 assert(img_format <= 0x7F);
340 assert(!mtbuf->dlc || ctx.chip_class >= GFX10);
341 encoding |= (mtbuf->dlc ? 1 : 0) << 15; /* DLC bit replaces one bit of the OPCODE on GFX10 */
342 encoding |= (mtbuf->glc ? 1 : 0) << 14;
343 encoding |= (mtbuf->idxen ? 1 : 0) << 13;
344 encoding |= (mtbuf->offen ? 1 : 0) << 12;
345 encoding |= 0x0FFF & mtbuf->offset;
346 encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
347
348 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
349 encoding |= opcode << 15;
350 } else {
351 encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
352 }
353
354 out.push_back(encoding);
355 encoding = 0;
356
357 encoding |= instr->operands[2].physReg() << 24;
358 encoding |= (mtbuf->tfe ? 1 : 0) << 23;
359 encoding |= (mtbuf->slc ? 1 : 0) << 22;
360 encoding |= (instr->operands[1].physReg() >> 2) << 16;
361 unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
362 encoding |= (0xFF & reg) << 8;
363 encoding |= (0xFF & instr->operands[0].physReg());
364
365 if (ctx.chip_class >= GFX10) {
366 encoding |= (((opcode & 0x08) >> 4) << 21); /* MSB of 4-bit OPCODE */
367 }
368
369 out.push_back(encoding);
370 break;
371 }
372 case Format::MIMG: {
373 MIMG_instruction* mimg = static_cast<MIMG_instruction*>(instr);
374 uint32_t encoding = (0b111100 << 26);
375 encoding |= mimg->slc ? 1 << 25 : 0;
376 encoding |= opcode << 18;
377 encoding |= mimg->lwe ? 1 << 17 : 0;
378 encoding |= mimg->tfe ? 1 << 16 : 0;
379 encoding |= mimg->glc ? 1 << 13 : 0;
380 encoding |= mimg->unrm ? 1 << 12 : 0;
381 if (ctx.chip_class <= GFX9) {
382 assert(!mimg->dlc); /* Device-level coherent is not supported on GFX9 and lower */
383 assert(!mimg->r128);
384 encoding |= mimg->a16 ? 1 << 15 : 0;
385 encoding |= mimg->da ? 1 << 14 : 0;
386 } else {
387 encoding |= mimg->r128 ? 1 << 15 : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
388 encoding |= mimg->dim << 3; /* GFX10: dimensionality instead of declare array */
389 encoding |= mimg->dlc ? 1 << 7 : 0;
390 }
391 encoding |= (0xF & mimg->dmask) << 8;
392 out.push_back(encoding);
393 encoding = (0xFF & instr->operands[0].physReg()); /* VADDR */
394 if (!instr->definitions.empty()) {
395 encoding |= (0xFF & instr->definitions[0].physReg()) << 8; /* VDATA */
396 } else if (instr->operands.size() == 4) {
397 encoding |= (0xFF & instr->operands[3].physReg()) << 8; /* VDATA */
398 }
399 encoding |= (0x1F & (instr->operands[1].physReg() >> 2)) << 16; /* T# (resource) */
400 if (instr->operands.size() > 2)
401 encoding |= (0x1F & (instr->operands[2].physReg() >> 2)) << 21; /* sampler */
402
403 assert(!mimg->d16 || ctx.chip_class >= GFX9);
404 encoding |= mimg->d16 ? 1 << 15 : 0;
405 if (ctx.chip_class >= GFX10) {
406 encoding |= mimg->a16 ? 1 << 14 : 0; /* GFX10: A16 still exists, but is in a different place */
407 }
408
409 out.push_back(encoding);
410 break;
411 }
412 case Format::FLAT:
413 case Format::SCRATCH:
414 case Format::GLOBAL: {
415 FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
416 uint32_t encoding = (0b110111 << 26);
417 encoding |= opcode << 18;
418 if (ctx.chip_class <= GFX9) {
419 assert(flat->offset <= 0x1fff);
420 encoding |= flat->offset & 0x1fff;
421 } else if (instr->format == Format::FLAT) {
422 /* GFX10 has a 12-bit immediate OFFSET field,
423 * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
424 */
425 assert(flat->offset == 0);
426 } else {
427 assert(flat->offset <= 0xfff);
428 encoding |= flat->offset & 0xfff;
429 }
430 if (instr->format == Format::SCRATCH)
431 encoding |= 1 << 14;
432 else if (instr->format == Format::GLOBAL)
433 encoding |= 2 << 14;
434 encoding |= flat->lds ? 1 << 13 : 0;
435 encoding |= flat->glc ? 1 << 16 : 0;
436 encoding |= flat->slc ? 1 << 17 : 0;
437 if (ctx.chip_class >= GFX10) {
438 assert(!flat->nv);
439 encoding |= flat->dlc ? 1 << 12 : 0;
440 } else {
441 assert(!flat->dlc);
442 }
443 out.push_back(encoding);
444 encoding = (0xFF & instr->operands[0].physReg());
445 if (!instr->definitions.empty())
446 encoding |= (0xFF & instr->definitions[0].physReg()) << 24;
447 if (instr->operands.size() >= 3)
448 encoding |= (0xFF & instr->operands[2].physReg()) << 8;
449 if (!instr->operands[1].isUndefined()) {
450 assert(ctx.chip_class >= GFX10 || instr->operands[1].physReg() != 0x7F);
451 assert(instr->format != Format::FLAT);
452 encoding |= instr->operands[1].physReg() << 16;
453 } else if (instr->format != Format::FLAT || ctx.chip_class >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
454 if (ctx.chip_class <= GFX9)
455 encoding |= 0x7F << 16;
456 else
457 encoding |= sgpr_null << 16;
458 }
459 encoding |= flat->nv ? 1 << 23 : 0;
460 out.push_back(encoding);
461 break;
462 }
463 case Format::EXP: {
464 Export_instruction* exp = static_cast<Export_instruction*>(instr);
465 uint32_t encoding;
466 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
467 encoding = (0b110001 << 26);
468 } else {
469 encoding = (0b111110 << 26);
470 }
471
472 encoding |= exp->valid_mask ? 0b1 << 12 : 0;
473 encoding |= exp->done ? 0b1 << 11 : 0;
474 encoding |= exp->compressed ? 0b1 << 10 : 0;
475 encoding |= exp->dest << 4;
476 encoding |= exp->enabled_mask;
477 out.push_back(encoding);
478 encoding = 0xFF & exp->operands[0].physReg();
479 encoding |= (0xFF & exp->operands[1].physReg()) << 8;
480 encoding |= (0xFF & exp->operands[2].physReg()) << 16;
481 encoding |= (0xFF & exp->operands[3].physReg()) << 24;
482 out.push_back(encoding);
483 break;
484 }
485 case Format::PSEUDO:
486 case Format::PSEUDO_BARRIER:
487 unreachable("Pseudo instructions should be lowered before assembly.");
488 default:
489 if ((uint16_t) instr->format & (uint16_t) Format::VOP3A) {
490 VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);
491
492 if ((uint16_t) instr->format & (uint16_t) Format::VOP2) {
493 opcode = opcode + 0x100;
494 } else if ((uint16_t) instr->format & (uint16_t) Format::VOP1) {
495 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9)
496 opcode = opcode + 0x140;
497 else
498 opcode = opcode + 0x180;
499 } else if ((uint16_t) instr->format & (uint16_t) Format::VOPC) {
500 opcode = opcode + 0x0;
501 } else if ((uint16_t) instr->format & (uint16_t) Format::VINTRP) {
502 opcode = opcode + 0x270;
503 }
504
505 uint32_t encoding;
506 if (ctx.chip_class <= GFX9) {
507 encoding = (0b110100 << 26);
508 } else if (ctx.chip_class == GFX10) {
509 encoding = (0b110101 << 26);
510 }
511
512 if (ctx.chip_class <= GFX7) {
513 encoding |= opcode << 17;
514 encoding |= (vop3->clamp ? 1 : 0) << 11;
515 } else {
516 encoding |= opcode << 16;
517 encoding |= (vop3->clamp ? 1 : 0) << 15;
518 }
519 encoding |= vop3->opsel << 11;
520 for (unsigned i = 0; i < 3; i++)
521 encoding |= vop3->abs[i] << (8+i);
522 if (instr->definitions.size() == 2)
523 encoding |= instr->definitions[1].physReg() << 8;
524 encoding |= (0xFF & instr->definitions[0].physReg());
525 out.push_back(encoding);
526 encoding = 0;
527 if (instr->opcode == aco_opcode::v_interp_mov_f32) {
528 encoding = 0x3 & instr->operands[0].constantValue();
529 } else {
530 for (unsigned i = 0; i < instr->operands.size(); i++)
531 encoding |= instr->operands[i].physReg() << (i * 9);
532 }
533 encoding |= vop3->omod << 27;
534 for (unsigned i = 0; i < 3; i++)
535 encoding |= vop3->neg[i] << (29+i);
536 out.push_back(encoding);
537
538 } else if (instr->isDPP()){
539 assert(ctx.chip_class >= GFX8);
540 /* first emit the instruction without the DPP operand */
541 Operand dpp_op = instr->operands[0];
542 instr->operands[0] = Operand(PhysReg{250}, v1);
543 instr->format = (Format) ((uint32_t) instr->format & ~(1 << 14));
544 emit_instruction(ctx, out, instr);
545 DPP_instruction* dpp = static_cast<DPP_instruction*>(instr);
546 uint32_t encoding = (0xF & dpp->row_mask) << 28;
547 encoding |= (0xF & dpp->bank_mask) << 24;
548 encoding |= dpp->abs[1] << 23;
549 encoding |= dpp->neg[1] << 22;
550 encoding |= dpp->abs[0] << 21;
551 encoding |= dpp->neg[0] << 20;
552 encoding |= dpp->bound_ctrl << 19;
553 encoding |= dpp->dpp_ctrl << 8;
554 encoding |= (0xFF) & dpp_op.physReg();
555 out.push_back(encoding);
556 return;
557 } else {
558 unreachable("unimplemented instruction format");
559 }
560 break;
561 }
562
563 /* append literal dword */
564 for (const Operand& op : instr->operands) {
565 if (op.isLiteral()) {
566 out.push_back(op.constantValue());
567 break;
568 }
569 }
570 }
571
572 void emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
573 {
574 for (aco_ptr<Instruction>& instr : block.instructions) {
575 #if 0
576 int start_idx = out.size();
577 std::cerr << "Encoding:\t" << std::endl;
578 aco_print_instr(&*instr, stderr);
579 std::cerr << std::endl;
580 #endif
581 emit_instruction(ctx, out, instr.get());
582 #if 0
583 for (int i = start_idx; i < out.size(); i++)
584 std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
585 #endif
586 }
587 }
588
/* Ensure the program's final export is marked "done" (and "valid mask" for
 * non-VS hw stages), inserting a null export before s_endpgm when no suitable
 * export exists.  Scans blocks back-to-front; only the block(s) ending in
 * s_endpgm are considered. */
void fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
{
   for (int idx = program->blocks.size() - 1; idx >= 0; idx--) {
      Block& block = program->blocks[idx];
      std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
      bool endBlock = false;  /* true once s_endpgm was seen in this block */
      bool exported = false;  /* true once a suitable export was patched */
      while ( it != block.instructions.rend())
      {
         /* only exports that appear before the s_endpgm (i.e. after it in
          * reverse order) are candidates */
         if ((*it)->format == Format::EXP && endBlock) {
            Export_instruction* exp = static_cast<Export_instruction*>((*it).get());
            if (program->stage & hw_vs) {
               /* for VS, only position exports may carry the "done" bit */
               if (exp->dest >= V_008DFC_SQ_EXP_POS && exp->dest <= (V_008DFC_SQ_EXP_POS + 3)) {
                  exp->done = true;
                  exported = true;
                  break;
               }
            } else {
               exp->done = true;
               exp->valid_mask = true;
               exported = true;
               break;
            }
         } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec)
            /* an exec write above the export changes lane coverage; stop here
             * and fall through to inserting a null export */
            break;
         else if ((*it)->opcode == aco_opcode::s_endpgm) {
            if (endBlock)
               break;
            endBlock = true;
         }
         ++it;
      }
      if (!endBlock || exported)
         continue;
      /* we didn't find an Export instruction and have to insert a null export */
      aco_ptr<Export_instruction> exp{create_instruction<Export_instruction>(aco_opcode::exp, Format::EXP, 4, 0)};
      for (unsigned i = 0; i < 4; i++)
         exp->operands[i] = Operand(v1);
      exp->enabled_mask = 0;
      exp->compressed = false;
      exp->done = true;
      exp->valid_mask = program->stage & hw_fs;
      if (program->stage & hw_fs)
         exp->dest = 9; /* NULL */
      else
         exp->dest = V_008DFC_SQ_EXP_POS;
      /* insert the null export 1 instruction before endpgm */
      block.instructions.insert(block.instructions.end() - 1, std::move(exp));
   }
}
639
static void fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10, we workaround by inserting NOPs if needed. */
   bool gfx10_3f_bug = false;

   /* Loop until no branch has an offset of exactly 0x3f words; each inserted
    * s_nop shifts later code and may create a new offending branch. */
   do {
      /* find the first branch whose forward distance to its target is 0x3f */
      auto buggy_branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(), [&ctx](const auto &branch) -> bool {
         return ((int)ctx.program->blocks[branch.second->block].offset - branch.first - 1) == 0x3f;
      });

      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         int s_nop_pos = buggy_branch_it->first + 1;
         auto out_pos = std::next(out.begin(), s_nop_pos);
         out.insert(out_pos, s_nop_0);

         /* Update the offset of each affected block */
         for (Block& block : ctx.program->blocks) {
            if (block.offset > (unsigned)buggy_branch_it->first)
               block.offset++;
         }

         /* Update the branches following the current one */
         for (auto branch_it = std::next(buggy_branch_it); branch_it != ctx.branches.end(); ++branch_it)
            branch_it->first++;

         /* Find first constant address after the inserted instruction */
         auto caddr_it = std::find_if(ctx.constaddrs.begin(), ctx.constaddrs.end(), [s_nop_pos](const int &caddr_pos) -> bool {
            return caddr_pos >= s_nop_pos;
         });

         /* Update the locations of constant addresses */
         for (; caddr_it != ctx.constaddrs.end(); ++caddr_it)
            (*caddr_it)++;

      }
   } while (gfx10_3f_bug);
}
681
682 void fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
683 {
684 if (ctx.chip_class >= GFX10)
685 fix_branches_gfx10(ctx, out);
686
687 for (std::pair<int, SOPP_instruction*> &branch : ctx.branches) {
688 int offset = (int)ctx.program->blocks[branch.second->block].offset - branch.first - 1;
689 out[branch.first] |= (uint16_t) offset;
690 }
691 }
692
693 void fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
694 {
695 for (unsigned addr : ctx.constaddrs)
696 out[addr] += out.size() * 4u;
697 }
698
699 unsigned emit_program(Program* program,
700 std::vector<uint32_t>& code)
701 {
702 asm_context ctx(program);
703
704 if (program->stage & (hw_vs | hw_fs))
705 fix_exports(ctx, code, program);
706
707 for (Block& block : program->blocks) {
708 block.offset = code.size();
709 emit_block(ctx, code, block);
710 }
711
712 fix_branches(ctx, code);
713
714 unsigned exec_size = code.size() * sizeof(uint32_t);
715
716 if (program->chip_class >= GFX10) {
717 /* Pad output with s_code_end so instruction prefetching doesn't cause
718 * page faults */
719 unsigned final_size = align(code.size() + 3 * 16, 16);
720 while (code.size() < final_size)
721 code.push_back(0xbf9f0000u);
722 }
723
724 fix_constaddrs(ctx, code);
725
726 while (program->constant_data.size() % 4u)
727 program->constant_data.push_back(0);
728 /* Copy constant data */
729 code.insert(code.end(), (uint32_t*)program->constant_data.data(),
730 (uint32_t*)(program->constant_data.data() + program->constant_data.size()));
731
732 return exec_size;
733 }
734
735 }