aco: make PhysReg in units of bytes
[mesa.git] / src / amd / compiler / aco_assembler.cpp
1 #include <vector>
2 #include <algorithm>
3
4 #include "aco_ir.h"
5 #include "common/sid.h"
6 #include "ac_shader_util.h"
7 #include "util/u_math.h"
8
9 namespace aco {
10
11 struct asm_context {
12 Program *program;
13 enum chip_class chip_class;
14 std::vector<std::pair<int, SOPP_instruction*>> branches;
15 std::vector<unsigned> constaddrs;
16 const int16_t* opcode;
17 // TODO: keep track of branch instructions referring blocks
18 // and, when emitting the block, correct the offset in instr
19 asm_context(Program* program) : program(program), chip_class(program->chip_class) {
20 if (chip_class <= GFX7)
21 opcode = &instr_info.opcode_gfx7[0];
22 else if (chip_class <= GFX9)
23 opcode = &instr_info.opcode_gfx9[0];
24 else if (chip_class == GFX10)
25 opcode = &instr_info.opcode_gfx10[0];
26 }
27
28 int subvector_begin_pos = -1;
29 };
30
31 void emit_instruction(asm_context& ctx, std::vector<uint32_t>& out, Instruction* instr)
32 {
33 uint32_t instr_offset = out.size() * 4u;
34
35 /* lower remaining pseudo-instructions */
36 if (instr->opcode == aco_opcode::p_constaddr) {
37 unsigned dest = instr->definitions[0].physReg();
38 unsigned offset = instr->operands[0].constantValue();
39
40 /* s_getpc_b64 dest[0:1] */
41 uint32_t encoding = (0b101111101 << 23);
42 uint32_t opcode = ctx.opcode[(int)aco_opcode::s_getpc_b64];
43 if (opcode >= 55 && ctx.chip_class <= GFX9) {
44 assert(ctx.chip_class == GFX9 && opcode < 60);
45 opcode = opcode - 4;
46 }
47 encoding |= dest << 16;
48 encoding |= opcode << 8;
49 out.push_back(encoding);
50
51 /* s_add_u32 dest[0], dest[0], ... */
52 encoding = (0b10 << 30);
53 encoding |= ctx.opcode[(int)aco_opcode::s_add_u32] << 23;
54 encoding |= dest << 16;
55 encoding |= dest;
56 encoding |= 255 << 8;
57 out.push_back(encoding);
58 ctx.constaddrs.push_back(out.size());
59 out.push_back(-(instr_offset + 4) + offset);
60
61 /* s_addc_u32 dest[1], dest[1], 0 */
62 encoding = (0b10 << 30);
63 encoding |= ctx.opcode[(int)aco_opcode::s_addc_u32] << 23;
64 encoding |= (dest + 1) << 16;
65 encoding |= dest + 1;
66 encoding |= 128 << 8;
67 out.push_back(encoding);
68 return;
69 }
70
71 uint32_t opcode = ctx.opcode[(int)instr->opcode];
72 if (opcode == (uint32_t)-1) {
73 fprintf(stderr, "Unsupported opcode: ");
74 aco_print_instr(instr, stderr);
75 abort();
76 }
77
78 switch (instr->format) {
79 case Format::SOP2: {
80 uint32_t encoding = (0b10 << 30);
81 encoding |= opcode << 23;
82 encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
83 encoding |= instr->operands.size() >= 2 ? instr->operands[1].physReg() << 8 : 0;
84 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
85 out.push_back(encoding);
86 break;
87 }
88 case Format::SOPK: {
89 SOPK_instruction *sopk = static_cast<SOPK_instruction*>(instr);
90
91 if (instr->opcode == aco_opcode::s_subvector_loop_begin) {
92 assert(ctx.chip_class >= GFX10);
93 assert(ctx.subvector_begin_pos == -1);
94 ctx.subvector_begin_pos = out.size();
95 } else if (instr->opcode == aco_opcode::s_subvector_loop_end) {
96 assert(ctx.chip_class >= GFX10);
97 assert(ctx.subvector_begin_pos != -1);
98 /* Adjust s_subvector_loop_begin instruction to the address after the end */
99 out[ctx.subvector_begin_pos] |= (out.size() - ctx.subvector_begin_pos);
100 /* Adjust s_subvector_loop_end instruction to the address after the beginning */
101 sopk->imm = (uint16_t)(ctx.subvector_begin_pos - (int)out.size());
102 ctx.subvector_begin_pos = -1;
103 }
104
105 uint32_t encoding = (0b1011 << 28);
106 encoding |= opcode << 23;
107 encoding |=
108 !instr->definitions.empty() && !(instr->definitions[0].physReg() == scc) ?
109 instr->definitions[0].physReg() << 16 :
110 !instr->operands.empty() && instr->operands[0].physReg() <= 127 ?
111 instr->operands[0].physReg() << 16 : 0;
112 encoding |= sopk->imm;
113 out.push_back(encoding);
114 break;
115 }
116 case Format::SOP1: {
117 uint32_t encoding = (0b101111101 << 23);
118 if (opcode >= 55 && ctx.chip_class <= GFX9) {
119 assert(ctx.chip_class == GFX9 && opcode < 60);
120 opcode = opcode - 4;
121 }
122 encoding |= !instr->definitions.empty() ? instr->definitions[0].physReg() << 16 : 0;
123 encoding |= opcode << 8;
124 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
125 out.push_back(encoding);
126 break;
127 }
128 case Format::SOPC: {
129 uint32_t encoding = (0b101111110 << 23);
130 encoding |= opcode << 16;
131 encoding |= instr->operands.size() == 2 ? instr->operands[1].physReg() << 8 : 0;
132 encoding |= !instr->operands.empty() ? instr->operands[0].physReg() : 0;
133 out.push_back(encoding);
134 break;
135 }
136 case Format::SOPP: {
137 SOPP_instruction* sopp = static_cast<SOPP_instruction*>(instr);
138 uint32_t encoding = (0b101111111 << 23);
139 encoding |= opcode << 16;
140 encoding |= (uint16_t) sopp->imm;
141 if (sopp->block != -1)
142 ctx.branches.emplace_back(out.size(), sopp);
143 out.push_back(encoding);
144 break;
145 }
146 case Format::SMEM: {
147 SMEM_instruction* smem = static_cast<SMEM_instruction*>(instr);
148 bool soe = instr->operands.size() >= (!instr->definitions.empty() ? 3 : 4);
149 bool is_load = !instr->definitions.empty();
150 uint32_t encoding = 0;
151
152 if (ctx.chip_class <= GFX7) {
153 encoding = (0b11000 << 27);
154 encoding |= opcode << 22;
155 encoding |= instr->definitions.size() ? instr->definitions[0].physReg() << 15 : 0;
156 encoding |= instr->operands.size() ? (instr->operands[0].physReg() >> 1) << 9 : 0;
157 if (instr->operands.size() >= 2) {
158 if (!instr->operands[1].isConstant() || instr->operands[1].constantValue() >= 1024) {
159 encoding |= instr->operands[1].physReg().reg();
160 } else {
161 encoding |= instr->operands[1].constantValue() >> 2;
162 encoding |= 1 << 8;
163 }
164 }
165 out.push_back(encoding);
166 /* SMRD instructions can take a literal on GFX6 & GFX7 */
167 if (instr->operands.size() >= 2 && instr->operands[1].isConstant() && instr->operands[1].constantValue() >= 1024)
168 out.push_back(instr->operands[1].constantValue() >> 2);
169 return;
170 }
171
172 if (ctx.chip_class <= GFX9) {
173 encoding = (0b110000 << 26);
174 assert(!smem->dlc); /* Device-level coherent is not supported on GFX9 and lower */
175 encoding |= smem->nv ? 1 << 15 : 0;
176 } else {
177 encoding = (0b111101 << 26);
178 assert(!smem->nv); /* Non-volatile is not supported on GFX10 */
179 encoding |= smem->dlc ? 1 << 14 : 0;
180 }
181
182 encoding |= opcode << 18;
183 encoding |= smem->glc ? 1 << 16 : 0;
184
185 if (ctx.chip_class <= GFX9) {
186 if (instr->operands.size() >= 2)
187 encoding |= instr->operands[1].isConstant() ? 1 << 17 : 0; /* IMM - immediate enable */
188 }
189 if (ctx.chip_class == GFX9) {
190 encoding |= soe ? 1 << 14 : 0;
191 }
192
193 if (is_load || instr->operands.size() >= 3) { /* SDATA */
194 encoding |= (is_load ? instr->definitions[0].physReg() : instr->operands[2].physReg()) << 6;
195 }
196 if (instr->operands.size() >= 1) { /* SBASE */
197 encoding |= instr->operands[0].physReg() >> 1;
198 }
199
200 out.push_back(encoding);
201 encoding = 0;
202
203 int32_t offset = 0;
204 uint32_t soffset = ctx.chip_class >= GFX10
205 ? sgpr_null /* On GFX10 this is disabled by specifying SGPR_NULL */
206 : 0; /* On GFX9, it is disabled by the SOE bit (and it's not present on GFX8 and below) */
207 if (instr->operands.size() >= 2) {
208 const Operand &op_off1 = instr->operands[1];
209 if (ctx.chip_class <= GFX9) {
210 offset = op_off1.isConstant() ? op_off1.constantValue() : op_off1.physReg();
211 } else {
212 /* GFX10 only supports constants in OFFSET, so put the operand in SOFFSET if it's an SGPR */
213 if (op_off1.isConstant()) {
214 offset = op_off1.constantValue();
215 } else {
216 soffset = op_off1.physReg();
217 assert(!soe); /* There is no place to put the other SGPR offset, if any */
218 }
219 }
220
221 if (soe) {
222 const Operand &op_off2 = instr->operands.back();
223 assert(ctx.chip_class >= GFX9); /* GFX8 and below don't support specifying a constant and an SGPR at the same time */
224 assert(!op_off2.isConstant());
225 soffset = op_off2.physReg();
226 }
227 }
228 encoding |= offset;
229 encoding |= soffset << 25;
230
231 out.push_back(encoding);
232 return;
233 }
234 case Format::VOP2: {
235 uint32_t encoding = 0;
236 encoding |= opcode << 25;
237 encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
238 encoding |= (0xFF & instr->operands[1].physReg()) << 9;
239 encoding |= instr->operands[0].physReg();
240 out.push_back(encoding);
241 break;
242 }
243 case Format::VOP1: {
244 uint32_t encoding = (0b0111111 << 25);
245 if (!instr->definitions.empty())
246 encoding |= (0xFF & instr->definitions[0].physReg()) << 17;
247 encoding |= opcode << 9;
248 if (!instr->operands.empty())
249 encoding |= instr->operands[0].physReg();
250 out.push_back(encoding);
251 break;
252 }
253 case Format::VOPC: {
254 uint32_t encoding = (0b0111110 << 25);
255 encoding |= opcode << 17;
256 encoding |= (0xFF & instr->operands[1].physReg()) << 9;
257 encoding |= instr->operands[0].physReg();
258 out.push_back(encoding);
259 break;
260 }
261 case Format::VINTRP: {
262 Interp_instruction* interp = static_cast<Interp_instruction*>(instr);
263 uint32_t encoding = 0;
264
265 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
266 encoding = (0b110101 << 26); /* Vega ISA doc says 110010 but it's wrong */
267 } else {
268 encoding = (0b110010 << 26);
269 }
270
271 assert(encoding);
272 encoding |= (0xFF & instr->definitions[0].physReg()) << 18;
273 encoding |= opcode << 16;
274 encoding |= interp->attribute << 10;
275 encoding |= interp->component << 8;
276 if (instr->opcode == aco_opcode::v_interp_mov_f32)
277 encoding |= (0x3 & instr->operands[0].constantValue());
278 else
279 encoding |= (0xFF & instr->operands[0].physReg());
280 out.push_back(encoding);
281 break;
282 }
283 case Format::DS: {
284 DS_instruction* ds = static_cast<DS_instruction*>(instr);
285 uint32_t encoding = (0b110110 << 26);
286 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
287 encoding |= opcode << 17;
288 encoding |= (ds->gds ? 1 : 0) << 16;
289 } else {
290 encoding |= opcode << 18;
291 encoding |= (ds->gds ? 1 : 0) << 17;
292 }
293 encoding |= ((0xFF & ds->offset1) << 8);
294 encoding |= (0xFFFF & ds->offset0);
295 out.push_back(encoding);
296 encoding = 0;
297 unsigned reg = !instr->definitions.empty() ? instr->definitions[0].physReg() : 0;
298 encoding |= (0xFF & reg) << 24;
299 reg = instr->operands.size() >= 3 && !(instr->operands[2].physReg() == m0) ? instr->operands[2].physReg() : 0;
300 encoding |= (0xFF & reg) << 16;
301 reg = instr->operands.size() >= 2 && !(instr->operands[1].physReg() == m0) ? instr->operands[1].physReg() : 0;
302 encoding |= (0xFF & reg) << 8;
303 encoding |= (0xFF & instr->operands[0].physReg());
304 out.push_back(encoding);
305 break;
306 }
307 case Format::MUBUF: {
308 MUBUF_instruction* mubuf = static_cast<MUBUF_instruction*>(instr);
309 uint32_t encoding = (0b111000 << 26);
310 encoding |= opcode << 18;
311 encoding |= (mubuf->lds ? 1 : 0) << 16;
312 encoding |= (mubuf->glc ? 1 : 0) << 14;
313 encoding |= (mubuf->idxen ? 1 : 0) << 13;
314 assert(!mubuf->addr64 || ctx.chip_class <= GFX7);
315 if (ctx.chip_class == GFX6 || ctx.chip_class == GFX7)
316 encoding |= (mubuf->addr64 ? 1 : 0) << 15;
317 encoding |= (mubuf->offen ? 1 : 0) << 12;
318 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
319 assert(!mubuf->dlc); /* Device-level coherent is not supported on GFX9 and lower */
320 encoding |= (mubuf->slc ? 1 : 0) << 17;
321 } else if (ctx.chip_class >= GFX10) {
322 encoding |= (mubuf->dlc ? 1 : 0) << 15;
323 }
324 encoding |= 0x0FFF & mubuf->offset;
325 out.push_back(encoding);
326 encoding = 0;
327 if (ctx.chip_class <= GFX7 || ctx.chip_class >= GFX10) {
328 encoding |= (mubuf->slc ? 1 : 0) << 22;
329 }
330 encoding |= instr->operands[2].physReg() << 24;
331 encoding |= (mubuf->tfe ? 1 : 0) << 23;
332 encoding |= (instr->operands[0].physReg() >> 2) << 16;
333 unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
334 encoding |= (0xFF & reg) << 8;
335 encoding |= (0xFF & instr->operands[1].physReg());
336 out.push_back(encoding);
337 break;
338 }
339 case Format::MTBUF: {
340 MTBUF_instruction* mtbuf = static_cast<MTBUF_instruction*>(instr);
341
342 uint32_t img_format = ac_get_tbuffer_format(ctx.chip_class, mtbuf->dfmt, mtbuf->nfmt);
343 uint32_t encoding = (0b111010 << 26);
344 assert(img_format <= 0x7F);
345 assert(!mtbuf->dlc || ctx.chip_class >= GFX10);
346 encoding |= (mtbuf->dlc ? 1 : 0) << 15; /* DLC bit replaces one bit of the OPCODE on GFX10 */
347 encoding |= (mtbuf->glc ? 1 : 0) << 14;
348 encoding |= (mtbuf->idxen ? 1 : 0) << 13;
349 encoding |= (mtbuf->offen ? 1 : 0) << 12;
350 encoding |= 0x0FFF & mtbuf->offset;
351 encoding |= (img_format << 19); /* Handles both the GFX10 FORMAT and the old NFMT+DFMT */
352
353 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
354 encoding |= opcode << 15;
355 } else {
356 encoding |= (opcode & 0x07) << 16; /* 3 LSBs of 4-bit OPCODE */
357 }
358
359 out.push_back(encoding);
360 encoding = 0;
361
362 encoding |= instr->operands[2].physReg() << 24;
363 encoding |= (mtbuf->tfe ? 1 : 0) << 23;
364 encoding |= (mtbuf->slc ? 1 : 0) << 22;
365 encoding |= (instr->operands[0].physReg() >> 2) << 16;
366 unsigned reg = instr->operands.size() > 3 ? instr->operands[3].physReg() : instr->definitions[0].physReg();
367 encoding |= (0xFF & reg) << 8;
368 encoding |= (0xFF & instr->operands[1].physReg());
369
370 if (ctx.chip_class >= GFX10) {
371 encoding |= (((opcode & 0x08) >> 4) << 21); /* MSB of 4-bit OPCODE */
372 }
373
374 out.push_back(encoding);
375 break;
376 }
377 case Format::MIMG: {
378 MIMG_instruction* mimg = static_cast<MIMG_instruction*>(instr);
379 uint32_t encoding = (0b111100 << 26);
380 encoding |= mimg->slc ? 1 << 25 : 0;
381 encoding |= opcode << 18;
382 encoding |= mimg->lwe ? 1 << 17 : 0;
383 encoding |= mimg->tfe ? 1 << 16 : 0;
384 encoding |= mimg->glc ? 1 << 13 : 0;
385 encoding |= mimg->unrm ? 1 << 12 : 0;
386 if (ctx.chip_class <= GFX9) {
387 assert(!mimg->dlc); /* Device-level coherent is not supported on GFX9 and lower */
388 assert(!mimg->r128);
389 encoding |= mimg->a16 ? 1 << 15 : 0;
390 encoding |= mimg->da ? 1 << 14 : 0;
391 } else {
392 encoding |= mimg->r128 ? 1 << 15 : 0; /* GFX10: A16 moved to 2nd word, R128 replaces it in 1st word */
393 encoding |= mimg->dim << 3; /* GFX10: dimensionality instead of declare array */
394 encoding |= mimg->dlc ? 1 << 7 : 0;
395 }
396 encoding |= (0xF & mimg->dmask) << 8;
397 out.push_back(encoding);
398 encoding = (0xFF & instr->operands[2].physReg()); /* VADDR */
399 if (!instr->definitions.empty()) {
400 encoding |= (0xFF & instr->definitions[0].physReg()) << 8; /* VDATA */
401 } else if (instr->operands[1].regClass().type() == RegType::vgpr) {
402 encoding |= (0xFF & instr->operands[1].physReg()) << 8; /* VDATA */
403 }
404 encoding |= (0x1F & (instr->operands[0].physReg() >> 2)) << 16; /* T# (resource) */
405 if (instr->operands[1].regClass().type() == RegType::sgpr)
406 encoding |= (0x1F & (instr->operands[1].physReg() >> 2)) << 21; /* sampler */
407
408 assert(!mimg->d16 || ctx.chip_class >= GFX9);
409 encoding |= mimg->d16 ? 1 << 15 : 0;
410 if (ctx.chip_class >= GFX10) {
411 encoding |= mimg->a16 ? 1 << 14 : 0; /* GFX10: A16 still exists, but is in a different place */
412 }
413
414 out.push_back(encoding);
415 break;
416 }
417 case Format::FLAT:
418 case Format::SCRATCH:
419 case Format::GLOBAL: {
420 FLAT_instruction *flat = static_cast<FLAT_instruction*>(instr);
421 uint32_t encoding = (0b110111 << 26);
422 encoding |= opcode << 18;
423 if (ctx.chip_class <= GFX9) {
424 assert(flat->offset <= 0x1fff);
425 encoding |= flat->offset & 0x1fff;
426 } else if (instr->format == Format::FLAT) {
427 /* GFX10 has a 12-bit immediate OFFSET field,
428 * but it has a hw bug: it ignores the offset, called FlatSegmentOffsetBug
429 */
430 assert(flat->offset == 0);
431 } else {
432 assert(flat->offset <= 0xfff);
433 encoding |= flat->offset & 0xfff;
434 }
435 if (instr->format == Format::SCRATCH)
436 encoding |= 1 << 14;
437 else if (instr->format == Format::GLOBAL)
438 encoding |= 2 << 14;
439 encoding |= flat->lds ? 1 << 13 : 0;
440 encoding |= flat->glc ? 1 << 16 : 0;
441 encoding |= flat->slc ? 1 << 17 : 0;
442 if (ctx.chip_class >= GFX10) {
443 assert(!flat->nv);
444 encoding |= flat->dlc ? 1 << 12 : 0;
445 } else {
446 assert(!flat->dlc);
447 }
448 out.push_back(encoding);
449 encoding = (0xFF & instr->operands[0].physReg());
450 if (!instr->definitions.empty())
451 encoding |= (0xFF & instr->definitions[0].physReg()) << 24;
452 if (instr->operands.size() >= 3)
453 encoding |= (0xFF & instr->operands[2].physReg()) << 8;
454 if (!instr->operands[1].isUndefined()) {
455 assert(ctx.chip_class >= GFX10 || instr->operands[1].physReg() != 0x7F);
456 assert(instr->format != Format::FLAT);
457 encoding |= instr->operands[1].physReg() << 16;
458 } else if (instr->format != Format::FLAT || ctx.chip_class >= GFX10) { /* SADDR is actually used with FLAT on GFX10 */
459 if (ctx.chip_class <= GFX9)
460 encoding |= 0x7F << 16;
461 else
462 encoding |= sgpr_null << 16;
463 }
464 encoding |= flat->nv ? 1 << 23 : 0;
465 out.push_back(encoding);
466 break;
467 }
468 case Format::EXP: {
469 Export_instruction* exp = static_cast<Export_instruction*>(instr);
470 uint32_t encoding;
471 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9) {
472 encoding = (0b110001 << 26);
473 } else {
474 encoding = (0b111110 << 26);
475 }
476
477 encoding |= exp->valid_mask ? 0b1 << 12 : 0;
478 encoding |= exp->done ? 0b1 << 11 : 0;
479 encoding |= exp->compressed ? 0b1 << 10 : 0;
480 encoding |= exp->dest << 4;
481 encoding |= exp->enabled_mask;
482 out.push_back(encoding);
483 encoding = 0xFF & exp->operands[0].physReg();
484 encoding |= (0xFF & exp->operands[1].physReg()) << 8;
485 encoding |= (0xFF & exp->operands[2].physReg()) << 16;
486 encoding |= (0xFF & exp->operands[3].physReg()) << 24;
487 out.push_back(encoding);
488 break;
489 }
490 case Format::PSEUDO:
491 case Format::PSEUDO_BARRIER:
492 unreachable("Pseudo instructions should be lowered before assembly.");
493 default:
494 if ((uint16_t) instr->format & (uint16_t) Format::VOP3A) {
495 VOP3A_instruction* vop3 = static_cast<VOP3A_instruction*>(instr);
496
497 if ((uint16_t) instr->format & (uint16_t) Format::VOP2) {
498 opcode = opcode + 0x100;
499 } else if ((uint16_t) instr->format & (uint16_t) Format::VOP1) {
500 if (ctx.chip_class == GFX8 || ctx.chip_class == GFX9)
501 opcode = opcode + 0x140;
502 else
503 opcode = opcode + 0x180;
504 } else if ((uint16_t) instr->format & (uint16_t) Format::VOPC) {
505 opcode = opcode + 0x0;
506 } else if ((uint16_t) instr->format & (uint16_t) Format::VINTRP) {
507 opcode = opcode + 0x270;
508 }
509
510 uint32_t encoding;
511 if (ctx.chip_class <= GFX9) {
512 encoding = (0b110100 << 26);
513 } else if (ctx.chip_class == GFX10) {
514 encoding = (0b110101 << 26);
515 } else {
516 unreachable("Unknown chip_class.");
517 }
518
519 if (ctx.chip_class <= GFX7) {
520 encoding |= opcode << 17;
521 encoding |= (vop3->clamp ? 1 : 0) << 11;
522 } else {
523 encoding |= opcode << 16;
524 encoding |= (vop3->clamp ? 1 : 0) << 15;
525 }
526 encoding |= vop3->opsel << 11;
527 for (unsigned i = 0; i < 3; i++)
528 encoding |= vop3->abs[i] << (8+i);
529 if (instr->definitions.size() == 2)
530 encoding |= instr->definitions[1].physReg() << 8;
531 encoding |= (0xFF & instr->definitions[0].physReg());
532 out.push_back(encoding);
533 encoding = 0;
534 if (instr->opcode == aco_opcode::v_interp_mov_f32) {
535 encoding = 0x3 & instr->operands[0].constantValue();
536 } else {
537 for (unsigned i = 0; i < instr->operands.size(); i++)
538 encoding |= instr->operands[i].physReg() << (i * 9);
539 }
540 encoding |= vop3->omod << 27;
541 for (unsigned i = 0; i < 3; i++)
542 encoding |= vop3->neg[i] << (29+i);
543 out.push_back(encoding);
544
545 } else if (instr->isDPP()){
546 assert(ctx.chip_class >= GFX8);
547 /* first emit the instruction without the DPP operand */
548 Operand dpp_op = instr->operands[0];
549 instr->operands[0] = Operand(PhysReg{250}, v1);
550 instr->format = (Format) ((uint32_t) instr->format & ~(1 << 14));
551 emit_instruction(ctx, out, instr);
552 DPP_instruction* dpp = static_cast<DPP_instruction*>(instr);
553 uint32_t encoding = (0xF & dpp->row_mask) << 28;
554 encoding |= (0xF & dpp->bank_mask) << 24;
555 encoding |= dpp->abs[1] << 23;
556 encoding |= dpp->neg[1] << 22;
557 encoding |= dpp->abs[0] << 21;
558 encoding |= dpp->neg[0] << 20;
559 encoding |= dpp->bound_ctrl << 19;
560 encoding |= dpp->dpp_ctrl << 8;
561 encoding |= (0xFF) & dpp_op.physReg();
562 out.push_back(encoding);
563 return;
564 } else {
565 unreachable("unimplemented instruction format");
566 }
567 break;
568 }
569
570 /* append literal dword */
571 for (const Operand& op : instr->operands) {
572 if (op.isLiteral()) {
573 out.push_back(op.constantValue());
574 break;
575 }
576 }
577 }
578
579 void emit_block(asm_context& ctx, std::vector<uint32_t>& out, Block& block)
580 {
581 for (aco_ptr<Instruction>& instr : block.instructions) {
582 #if 0
583 int start_idx = out.size();
584 std::cerr << "Encoding:\t" << std::endl;
585 aco_print_instr(&*instr, stderr);
586 std::cerr << std::endl;
587 #endif
588 emit_instruction(ctx, out, instr.get());
589 #if 0
590 for (int i = start_idx; i < out.size(); i++)
591 std::cerr << "encoding: " << "0x" << std::setfill('0') << std::setw(8) << std::hex << out[i] << std::endl;
592 #endif
593 }
594 }
595
596 void fix_exports(asm_context& ctx, std::vector<uint32_t>& out, Program* program)
597 {
598 bool exported = false;
599 for (Block& block : program->blocks) {
600 if (!(block.kind & block_kind_export_end))
601 continue;
602 std::vector<aco_ptr<Instruction>>::reverse_iterator it = block.instructions.rbegin();
603 while ( it != block.instructions.rend())
604 {
605 if ((*it)->format == Format::EXP) {
606 Export_instruction* exp = static_cast<Export_instruction*>((*it).get());
607 if (program->stage & hw_vs) {
608 if (exp->dest >= V_008DFC_SQ_EXP_POS && exp->dest <= (V_008DFC_SQ_EXP_POS + 3)) {
609 exp->done = true;
610 exported = true;
611 break;
612 }
613 } else {
614 exp->done = true;
615 exp->valid_mask = true;
616 exported = true;
617 break;
618 }
619 } else if ((*it)->definitions.size() && (*it)->definitions[0].physReg() == exec)
620 break;
621 ++it;
622 }
623 }
624
625 if (!exported) {
626 /* Abort in order to avoid a GPU hang. */
627 fprintf(stderr, "Missing export in %s shader:\n", (program->stage & hw_vs) ? "vertex" : "fragment");
628 aco_print_program(program, stderr);
629 abort();
630 }
631 }
632
/* GFX10 hardware bug workaround: a branch whose 16-bit offset equals 0x3f
 * misbehaves. Insert an s_nop after any such branch, then re-scan, because
 * shifting the code by one dword can push another branch onto 0x3f. */
static void fix_branches_gfx10(asm_context& ctx, std::vector<uint32_t>& out)
{
   /* Branches with an offset of 0x3f are buggy on GFX10, we workaround by inserting NOPs if needed. */
   bool gfx10_3f_bug = false;

   do {
      /* Find the first branch whose word offset to its target is exactly 0x3f
       * (same offset formula as fix_branches()). */
      auto buggy_branch_it = std::find_if(ctx.branches.begin(), ctx.branches.end(), [&ctx](const auto &branch) -> bool {
         return ((int)ctx.program->blocks[branch.second->block].offset - branch.first - 1) == 0x3f;
      });

      gfx10_3f_bug = buggy_branch_it != ctx.branches.end();

      if (gfx10_3f_bug) {
         /* Insert an s_nop after the branch */
         constexpr uint32_t s_nop_0 = 0xbf800000u;
         int s_nop_pos = buggy_branch_it->first + 1;
         auto out_pos = std::next(out.begin(), s_nop_pos);
         out.insert(out_pos, s_nop_0);

         /* Update the offset of each affected block */
         for (Block& block : ctx.program->blocks) {
            if (block.offset > (unsigned)buggy_branch_it->first)
               block.offset++;
         }

         /* Update the branches following the current one */
         for (auto branch_it = std::next(buggy_branch_it); branch_it != ctx.branches.end(); ++branch_it)
            branch_it->first++;

         /* Find first constant address after the inserted instruction */
         /* NOTE(review): ctx.constaddrs holds unsigned, but the predicate takes
          * const int& — works via implicit conversion, yet the types should
          * probably match; confirm before changing. */
         auto caddr_it = std::find_if(ctx.constaddrs.begin(), ctx.constaddrs.end(), [s_nop_pos](const int &caddr_pos) -> bool {
            return caddr_pos >= s_nop_pos;
         });

         /* Update the locations of constant addresses */
         for (; caddr_it != ctx.constaddrs.end(); ++caddr_it)
            (*caddr_it)++;

      }
   } while (gfx10_3f_bug);
}
674
675 void fix_branches(asm_context& ctx, std::vector<uint32_t>& out)
676 {
677 if (ctx.chip_class >= GFX10)
678 fix_branches_gfx10(ctx, out);
679
680 for (std::pair<int, SOPP_instruction*> &branch : ctx.branches) {
681 int offset = (int)ctx.program->blocks[branch.second->block].offset - branch.first - 1;
682 out[branch.first] |= (uint16_t) offset;
683 }
684 }
685
686 void fix_constaddrs(asm_context& ctx, std::vector<uint32_t>& out)
687 {
688 for (unsigned addr : ctx.constaddrs)
689 out[addr] += out.size() * 4u;
690 }
691
/* Assemble the whole program into `code` and return the executable size in
 * bytes (excluding end-padding and constant data). The phase order matters:
 * branches are fixed after all blocks exist, constaddr literals after the
 * final code size (incl. padding) is known, constant data is appended last. */
unsigned emit_program(Program* program,
                      std::vector<uint32_t>& code)
{
   asm_context ctx(program);

   /* Ensure the final export carries the DONE flag (GPU hangs otherwise). */
   if (program->stage & (hw_vs | hw_fs))
      fix_exports(ctx, code, program);

   for (Block& block : program->blocks) {
      /* Record each block's dword offset so branch fixups can resolve it. */
      block.offset = code.size();
      emit_block(ctx, code, block);
   }

   fix_branches(ctx, code);

   /* Executable size is measured before padding and constant data. */
   unsigned exec_size = code.size() * sizeof(uint32_t);

   if (program->chip_class >= GFX10) {
      /* Pad output with s_code_end so instruction prefetching doesn't cause
       * page faults */
      unsigned final_size = align(code.size() + 3 * 16, 16);
      while (code.size() < final_size)
         code.push_back(0xbf9f0000u);
   }

   /* Must run after padding: the literals must point past ALL emitted code. */
   fix_constaddrs(ctx, code);

   /* Pad constant data to a dword multiple before the uint32_t-wise copy. */
   while (program->constant_data.size() % 4u)
      program->constant_data.push_back(0);
   /* Copy constant data */
   code.insert(code.end(), (uint32_t*)program->constant_data.data(),
               (uint32_t*)(program->constant_data.data() + program->constant_data.size()));

   return exec_size;
}
727
728 }