aco: use DPP instead of exec modification when lowering GFX10 shuffles
[mesa.git] / src / amd / compiler / aco_lower_to_hw_instr.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */

#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"
#include "vulkan/radv_shader.h"


namespace aco {

struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};

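/* Emits one instruction per dword that applies a DPP swizzle to src0 and
 * combines the result with src1. VOP1/VOP2 encodings can carry the DPP word
 * directly; VOP3 cannot, so for VOP3 the swizzled value is first copied into
 * vtmp with v_mov_b32_dpp and the op then reads vtmp. The optional identity is
 * pre-loaded into vtmp because lanes a DPP read cannot source leave the
 * destination unmodified ("sparse" writes), and the identity keeps those lanes
 * harmless for the reduction. */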
void emit_dpp_op(lower_context *ctx, PhysReg dst, PhysReg src0, PhysReg src1, PhysReg vtmp,
                 aco_opcode op, Format format, bool clobber_vcc, unsigned dpp_ctrl,
                 unsigned row_mask, unsigned bank_mask, bool bound_ctrl_zero, unsigned size,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   RegClass rc = RegClass(RegType::vgpr, size);
   if (format == Format::VOP3) {
      Builder bld(ctx->program, &ctx->instructions);

      if (identity)
         bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
      if (identity && size >= 2)
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

      for (unsigned i = 0; i < size; i++)
         bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0+i}, v1),
                      dpp_ctrl, row_mask, bank_mask, bound_ctrl_zero);

      if (clobber_vcc)
         bld.vop3(op, Definition(dst, rc), Definition(vcc, s2), Operand(vtmp, rc), Operand(src1, rc));
      else
         bld.vop3(op, Definition(dst, rc), Operand(vtmp, rc), Operand(src1, rc));
   } else {
      assert(format == Format::VOP2 || format == Format::VOP1);
      assert(size == 1 || (op == aco_opcode::v_mov_b32));

      for (unsigned i = 0; i < size; i++) {
         aco_ptr<DPP_instruction> dpp{create_instruction<DPP_instruction>(
            op, (Format) ((uint32_t) format | (uint32_t) Format::DPP),
            format == Format::VOP2 ? 2 : 1, clobber_vcc ? 2 : 1)};
         dpp->operands[0] = Operand(PhysReg{src0+i}, rc);
         if (format == Format::VOP2)
            dpp->operands[1] = Operand(PhysReg{src1+i}, rc);
         dpp->definitions[0] = Definition(PhysReg{dst+i}, rc);
         if (clobber_vcc)
            dpp->definitions[1] = Definition(vcc, s2);
         dpp->dpp_ctrl = dpp_ctrl;
         dpp->row_mask = row_mask;
         dpp->bank_mask = bank_mask;
         dpp->bound_ctrl = bound_ctrl_zero;
         ctx->instructions.emplace_back(std::move(dpp));
      }
   }
}

void emit_op(lower_context *ctx, PhysReg dst, PhysReg src0, PhysReg src1,
             aco_opcode op, Format format, bool clobber_vcc, unsigned size)
{
   aco_ptr<Instruction> instr;
   if (format == Format::VOP3)
      instr.reset(create_instruction<VOP3A_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
   else
      instr.reset(create_instruction<VOP2_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
   instr->operands[0] = Operand(src0, src0.reg >= 256 ? v1 : s1);
   instr->operands[1] = Operand(src1, v1);
   instr->definitions[0] = Definition(dst, v1);
   if (clobber_vcc)
      instr->definitions[1] = Definition(vcc, s2);
   ctx->instructions.emplace_back(std::move(instr));
}

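/* Returns the identity element e of the reduction, i.e. the value for which
 * op(e, x) == x for all x; idx selects the low (0) or high (1) dword of the
 * constant for 64-bit operations. For example, the identity of fmax32 is
 * -infinity (0xff800000), since max(-inf, x) == x. */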
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd32:
   case iadd64:
   case fadd32:
   case fadd64:
   case ior32:
   case ior64:
   case ixor32:
   case ixor64:
   case umax32:
   case umax64:
      return 0;
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin32:
   case umin64:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
      break;
   }
   return 0;
}

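/* Maps the reduction operation to the VALU opcode that combines two lanes,
 * along with its encoding format and whether it clobbers VCC. Pre-GFX9 chips
 * only have v_add_co_u32, which writes a carry-out to VCC; hence clobber_vcc
 * for iadd32 there. 64-bit integer reductions are not lowered here (see the
 * asserts below). */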
aco_opcode get_reduction_opcode(lower_context *ctx, ReduceOp op, bool *clobber_vcc, Format *format)
{
   *clobber_vcc = false;
   *format = Format::VOP2;
   switch (op) {
   case iadd32:
      *clobber_vcc = ctx->program->chip_class < GFX9;
      return ctx->program->chip_class < GFX9 ? aco_opcode::v_add_co_u32 : aco_opcode::v_add_u32;
   case imul32:
      *format = Format::VOP3;
      return aco_opcode::v_mul_lo_u32;
   case fadd32:
      return aco_opcode::v_add_f32;
   case fmul32:
      return aco_opcode::v_mul_f32;
   case imax32:
      return aco_opcode::v_max_i32;
   case imin32:
      return aco_opcode::v_min_i32;
   case umin32:
      return aco_opcode::v_min_u32;
   case umax32:
      return aco_opcode::v_max_u32;
   case fmin32:
      return aco_opcode::v_min_f32;
   case fmax32:
      return aco_opcode::v_max_f32;
   case iand32:
      return aco_opcode::v_and_b32;
   case ixor32:
      return aco_opcode::v_xor_b32;
   case ior32:
      return aco_opcode::v_or_b32;
   case iadd64:
   case imul64:
      assert(false);
      break;
   case fadd64:
      *format = Format::VOP3;
      return aco_opcode::v_add_f64;
   case fmul64:
      *format = Format::VOP3;
      return aco_opcode::v_mul_f64;
   case imin64:
   case imax64:
   case umin64:
   case umax64:
      assert(false);
      break;
   case fmin64:
      *format = Format::VOP3;
      return aco_opcode::v_min_f64;
   case fmax64:
      *format = Format::VOP3;
      return aco_opcode::v_max_f64;
   case iand64:
   case ior64:
   case ixor64:
      assert(false);
      break;
   default:
      unreachable("Invalid reduction operation");
      break;
   }
   return aco_opcode::v_min_u32;
}

void emit_vopn(lower_context *ctx, PhysReg dst, PhysReg src0, PhysReg src1,
               RegClass rc, aco_opcode op, Format format, bool clobber_vcc)
{
   aco_ptr<Instruction> instr;
   switch (format) {
   case Format::VOP2:
      instr.reset(create_instruction<VOP2_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
      break;
   case Format::VOP3:
      instr.reset(create_instruction<VOP3A_instruction>(op, format, 2, clobber_vcc ? 2 : 1));
      break;
   default:
      assert(false);
   }
   instr->operands[0] = Operand(src0, rc);
   instr->operands[1] = Operand(src1, rc);
   instr->definitions[0] = Definition(dst, rc);
   if (clobber_vcc)
      instr->definitions[1] = Definition(vcc, s2);
   ctx->instructions.emplace_back(std::move(instr));
}

void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == 64 || op == aco_opcode::p_reduce);

   Builder bld(ctx->program, &ctx->instructions);

   Format format;
   bool should_clobber_vcc;
   aco_opcode reduce_opcode = get_reduction_opcode(ctx, reduce_op, &should_clobber_vcc, &format);
   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(aco_opcode::s_or_saveexec_b64, Definition(stmp, s2), Definition(scc, s1), Definition(exec, s2), Operand(UINT64_MAX), Operand(exec, s2));
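   /* s_or_saveexec with a ~0 operand saves the old exec mask in stmp and then
    * enables all lanes (exec |= ~0), so that the v_cndmask below can write the
    * identity into the previously inactive lanes, selected via the saved mask. */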

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs it to be an sgpr or inline constant for the v_writelane_b32
       * except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, s2));
   }

   bool exec_restored = false;
   bool dst_written = false;
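
   /* The reduction combines lanes in a tree using increasingly wide DPP
    * swizzles: quad_perm(1,0,3,2) combines lanes at distance 1,
    * quad_perm(2,3,0,1) at distance 2, row_half_mirror at distance 4 and
    * row_mirror at distance 8. After these steps every 16-lane row holds its
    * row-wide result, and only the cross-row steps (ds_swizzle, row_bcast or
    * permlanex16/readlane) differ between cluster sizes and chips. */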
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false, src.size());
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false, src.size());
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_half_mirror, 0xf, 0xf, false, src.size());
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_mirror, 0xf, 0xf, false, src.size());
      if (cluster_size == 16) break;
      if (cluster_size == 32) {
         for (unsigned i = 0; i < src.size(); i++)
            bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), ds_pattern_bitmode(0x1f, 0, 0x10));
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));
         exec_restored = true;
         emit_vopn(ctx, dst.physReg(), vtmp, tmp, src.regClass(), reduce_opcode, format, should_clobber_vcc);
         dst_written = true;
      } else if (ctx->program->chip_class >= GFX10) {
         assert(cluster_size == 64);
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
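         /* v_permlanex16 exchanged the two 16-lane halves of each 32-lane
          * half-wave, so the next combine yields each half-wave's total in
          * every one of its lanes; the readlane of lane 31 plus one final
          * combine then folds the low half-wave's total in for all 64 lanes. */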
         emit_op(ctx, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, reduce_opcode, format, should_clobber_vcc, src.size());
      } else {
         assert(cluster_size == 64);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast15, 0xa, 0xf, false, src.size());
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast31, 0xc, 0xf, false, src.size());
      }
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         for (unsigned i = 0; i < src.size(); i++) {
            bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), dpp_row_sr(1), 0xf, 0xf, true);
         }

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));

         /* fill in the gap in row 2 */
         for (unsigned i = 0; i < src.size(); i++) {
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u));
         }
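         /* vtmp now holds the shifted values; swap the names so the code
          * below keeps operating on tmp. */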
         std::swap(tmp, vtmp);
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, aco_opcode::v_mov_b32, Format::VOP1, false,
                     dpp_wf_sr1, 0xf, 0xf, true, src.size());
      }
      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{tmp+i}, v1),
                     identity[i], Operand(0u));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == 64);
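      /* Inclusive scan via shift-and-combine (Hillis-Steele): each step
       * combines a lane with the lane 2^k below it within the row (row_sr(1),
       * row_sr(2), row_sr(4), row_sr(8)), using the identity for lanes the
       * shift leaves without a source; the cross-row steps then mirror the
       * p_reduce path. */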
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(1), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(2), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(4), 0xf, 0xf, false, src.size(), identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                  dpp_row_sr(8), 0xf, 0xf, false, src.size(), identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xffff0000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffff0000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc, src.size());

         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffffffffu));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, reduce_opcode, format, should_clobber_vcc, src.size());
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast15, 0xa, 0xf, false, src.size(), identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_opcode, format, should_clobber_vcc,
                     dpp_row_bcast31, 0xc, 0xf, false, src.size(), identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }

   if (!exec_restored)
      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));

   if (op == aco_opcode::p_reduce && cluster_size == 64) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{dst.physReg() + k}, s1),
                  Operand(PhysReg{tmp + k}, v1), Operand(63u));
      }
   } else if (!(dst.physReg() == tmp) && !dst_written) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}

struct copy_operation {
   Operand op;
   Definition def;
   unsigned uses;
   unsigned size;
};

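/* Lowers a set of parallel copies. The copies form a location transfer graph:
 * copies whose destination is not needed as the source of another copy
 * (uses == 0) are emitted first, which peels off all acyclic paths. Whatever
 * remains consists of cycles, which are broken by swapping a source with its
 * destination; constants are written last since they cannot be part of a
 * cycle. */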
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {
      if (it->second.op.isConstant()) {
         ++it;
         continue;
      }

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }
      /* check if the operand reg may be overwritten by another copy operation */
      target = copy_map.find(it->second.op.physReg());
      if (target != copy_map.end()) {
         target->second.uses++;
      }

      ++it;
   }

   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* the target reg is not used as operand for any other copy */
      if (it->second.uses == 0) {

         /* try to coalesce 32-bit sgpr copies to 64-bit copies */
         if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 &&
             !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

            PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
            PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
            std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

            if (other != copy_map.end() && !other->second.uses && other->second.size == 1 &&
                other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
               std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
               it = it->first % 2 ? other : it;
               copy_map.erase(to_erase);
               it->second.size = 2;
            }
         }

         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
            preserve_scc = true;
         } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
            bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
         } else {
            bld.copy(it->second.def, it->second.op);
         }

         /* reduce the number of uses of the operand reg by one */
         if (!it->second.op.isConstant()) {
            for (unsigned i = 0; i < it->second.size; i++) {
               target = copy_map.find(PhysReg{it->second.op.physReg() + i});
               if (target != copy_map.end())
                  target->second.uses--;
            }
         }

         copy_map.erase(it);
         it = copy_map.begin();
         continue;
      } else {
         /* the target reg is used as operand, check the next entry */
         ++it;
      }
   }

   if (copy_map.empty())
      return;

   /* all target regs are needed as operand somewhere, which means all entries are part of a cycle */
   bool constants = false;
   for (it = copy_map.begin(); it != copy_map.end(); ++it) {
      assert(it->second.op.isFixed());
      if (it->first == it->second.op.physReg())
         continue;
      /* do constants later */
      if (it->second.op.isConstant()) {
         constants = true;
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;
      assert(swap.op.regClass() == swap.def.regClass());
      Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass());
      Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass());
      if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) {
         bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op);
      } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
      } else if (swap.def.getTemp().type() == RegType::sgpr) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1));
         } else {
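            /* Swap without a scratch register using the triple-XOR trick
             * (a ^= b; b ^= a; a ^= b). Each s_xor_b32 also writes SCC, which
             * is why this path requires !preserve_scc; the VGPR fallback below
             * uses the same sequence with v_xor_b32. */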
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
         }
      } else {
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
      }

      /* change the operand reg of the target's use */
      assert(swap.uses == 1);
      target = it;
      for (++target; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == it->first) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }
      }
   }
   /* copy constants into registers which were operands */
   if (constants) {
      for (it = copy_map.begin(); it != copy_map.end(); ++it) {
         if (!it->second.op.isConstant())
            continue;
         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u));
         } else {
            bld.copy(it->second.def, it->second.op);
         }
      }
   }
}

void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size();
               RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1);
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               if (reg == instr->definitions[0].physReg())
                  break;

               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def);
                  copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               unsigned reg_idx = 0;
               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx};
                     const Definition def = Definition(reg, rc_def);
                     copy_operations[reg] = {op, def, 0, 1};
                     reg_idx++;
                     continue;
                  }

                  RegClass rc_op = RegClass(op.getTemp().type(), 1);
                  for (unsigned j = 0; j < op.size(); j++)
                  {
                     const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op);
                     const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def);
                     copy_operations[def.physReg()] = {copy_op, def, 0, 1};
                     reg_idx++;
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1);
               for (unsigned i = 0; i < instr->definitions.size(); i++) {
                  unsigned k = instr->definitions[i].size();
                  RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1);
                  for (unsigned j = 0; j < k; j++) {
                     Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op);
                     Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def);
                     copy_operations[def.physReg()] = {op, def, 0, 1};
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++)
               {
                  Operand operand = instr->operands[i];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[i].size() == 1);
                     copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1};
                  } else {
                     RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1);
                     RegClass op_rc = RegClass(operand.getTemp().type(), 1);
                     for (unsigned j = 0; j < operand.size(); j++)
                     {
                        Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc);
                        Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit at the end of the program */
               if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end &&
                   block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) {
                  break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++) {
                  bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1, instr->operands[0].physReg()),
                           Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                           Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  bld.vop3(aco_opcode::v_readlane_b32,
                           bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                           instr->operands[0], Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  Operand operand = instr->operands[0];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[0].size() == 1);
                     copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1};
                  } else {
                     for (unsigned i = 0; i < operand.size(); i++)
                     {
                        Operand op = Operand(PhysReg{operand.physReg() + i}, s1);
                        Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }

                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;

            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }

         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            if (reduce->reduce_op == gfx10_wave64_bpermute) {
               /* Only makes sense on GFX10 wave64 */
               assert(program->chip_class >= GFX10);
               assert(program->info->wave_size == 64);
               assert(instr->definitions[0].regClass() == v1); /* Destination */
               assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
               assert(instr->definitions[1].physReg() != vcc);
               assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
               assert(instr->operands[0].physReg() == vcc); /* Compare */
               assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
               assert(instr->operands[2].regClass() == v1); /* Indices x4 */
               assert(instr->operands[3].regClass() == v1); /* Input data */
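
               /* GFX10's ds_bpermute can only address lanes within the same
                * 32-lane half-wave, so for wave64 the data is mirrored into
                * shared VGPRs (addressed after the program's last allocated
                * VGPR, aligned to 4; the +256 is ACO's VGPR register offset)
                * to give each half access to the other half's values. The
                * copies select a half-wave via the DPP row_mask instead of
                * toggling exec; both the original and the swapped data are
                * permuted, and v_cndmask picks per lane whichever permute
                * read from the correct half. */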

               PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
               PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
               Operand compare = instr->operands[0];
               Operand tmp1(instr->operands[1].physReg(), v1);
               Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
               Operand index_x4 = instr->operands[2];
               Operand input_data = instr->operands[3];
               Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
               Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
               Definition def_temp1(tmp1.physReg(), v1);
               Definition def_temp2(tmp2.physReg(), v1);

               /* Save EXEC and set it for all lanes */
               bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
                        Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));

               /* HI: Copy data from high lanes 32-63 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* LO: Copy data from low lanes 0-31 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);

               /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* Permute the original input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
               /* Permute the swapped input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);

               /* Restore saved EXEC */
               bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
               /* Choose whether to use the original or swapped */
               bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
            } else {
               emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                              reduce->operands[1].physReg(), // tmp
                              reduce->definitions[1].physReg(), // stmp
                              reduce->operands[2].physReg(), // vtmp
                              reduce->definitions[2].physReg(), // sitmp
                              reduce->operands[0], reduce->definitions[0]);
            }
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }

      }
      block->instructions.swap(ctx.instructions);
   }
}

}