aco: refactor reduction lowering helpers
[mesa.git] src/amd/compiler/aco_lower_to_hw_instr.cpp
/*
 * Copyright © 2018 Valve Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
 *
 */

#include <map>

#include "aco_ir.h"
#include "aco_builder.h"
#include "util/u_math.h"
#include "sid.h"
#include "vulkan/radv_shader.h"


namespace aco {

struct lower_context {
   Program *program;
   std::vector<aco_ptr<Instruction>> instructions;
};

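/* Maps a reduction operation to the VALU opcode that combines two lanes.
 * aco_opcode::num_opcodes is returned when no single instruction exists
 * (e.g. the 64-bit integer reductions). */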
aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
   switch (op) {
   case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
   case imul32: return aco_opcode::v_mul_lo_u32;
   case fadd32: return aco_opcode::v_add_f32;
   case fmul32: return aco_opcode::v_mul_f32;
   case imax32: return aco_opcode::v_max_i32;
   case imin32: return aco_opcode::v_min_i32;
   case umin32: return aco_opcode::v_min_u32;
   case umax32: return aco_opcode::v_max_u32;
   case fmin32: return aco_opcode::v_min_f32;
   case fmax32: return aco_opcode::v_max_f32;
   case iand32: return aco_opcode::v_and_b32;
   case ixor32: return aco_opcode::v_xor_b32;
   case ior32: return aco_opcode::v_or_b32;
   case iadd64: return aco_opcode::num_opcodes;
   case imul64: return aco_opcode::num_opcodes;
   case fadd64: return aco_opcode::v_add_f64;
   case fmul64: return aco_opcode::v_mul_f64;
   case imin64: return aco_opcode::num_opcodes;
   case imax64: return aco_opcode::num_opcodes;
   case umin64: return aco_opcode::num_opcodes;
   case umax64: return aco_opcode::num_opcodes;
   case fmin64: return aco_opcode::v_min_f64;
   case fmax64: return aco_opcode::v_max_f64;
   case iand64: return aco_opcode::num_opcodes;
   case ior64: return aco_opcode::num_opcodes;
   case ixor64: return aco_opcode::num_opcodes;
   default: return aco_opcode::num_opcodes;
   }
}

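/* Emits the combining instruction with a DPP source modifier. Ops that can only be
 * encoded as VOP3 (imul32 and the 64-bit ops) cannot carry DPP directly, so the
 * shifted source is first copied into vtmp with v_mov_b32_dpp; if an identity is
 * provided, vtmp is pre-loaded with it so that lanes the DPP read does not write
 * contribute the neutral element. */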
void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
                 PhysReg vtmp, ReduceOp op, unsigned size,
                 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
                 Operand *identity=NULL) /* for VOP3 with sparse writes */
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, rc);
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (!vop3) {
      if (opcode == aco_opcode::v_add_co_u32)
         bld.vop2_dpp(opcode, dst, bld.def(s2, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      else
         bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
      return;
   }

   if (identity)
      bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
   if (identity && size >= 2)
      bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);

   for (unsigned i = 0; i < size; i++)
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);

   bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
}

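/* Plain (non-DPP) variant used when the data to combine already sits in the right
 * lanes. src0 may be an SGPR: VGPRs are encoded starting at register 256, which is
 * what the reg >= 256 check distinguishes. */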
void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
             ReduceOp op, unsigned size)
{
   Builder bld(ctx->program, &ctx->instructions);
   RegClass rc = RegClass(RegType::vgpr, size);
   Definition dst(dst_reg, rc);
   Operand src0(src0_reg, RegClass(src0_reg.reg >= 256 ? RegType::vgpr : RegType::sgpr, size));
   Operand src1(src1_reg, rc);

   aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
   bool vop3 = op == imul32 || size == 2;

   if (vop3) {
      bld.vop3(opcode, dst, src0, src1);
   } else if (opcode == aco_opcode::v_add_co_u32) {
      bld.vop2(opcode, dst, bld.def(s2, vcc), src0, src1);
   } else {
      bld.vop2(opcode, dst, src0, src1);
   }
}

void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
                  unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
{
   Builder bld(ctx->program, &ctx->instructions);
   for (unsigned i = 0; i < size; i++) {
      bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
                   dpp_ctrl, row_mask, bank_mask, bound_ctrl);
   }
}

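/* Returns one 32-bit half of the identity (neutral) element of the reduction;
 * idx selects the low (0) or high (1) dword for 64-bit operations. */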
uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
{
   switch (op) {
   case iadd32:
   case iadd64:
   case fadd32:
   case fadd64:
   case ior32:
   case ior64:
   case ixor32:
   case ixor64:
   case umax32:
   case umax64:
      return 0;
   case imul32:
   case imul64:
      return idx ? 0 : 1;
   case fmul32:
      return 0x3f800000u; /* 1.0 */
   case fmul64:
      return idx ? 0x3ff00000u : 0u; /* 1.0 */
   case imin32:
      return INT32_MAX;
   case imin64:
      return idx ? 0x7fffffffu : 0xffffffffu;
   case imax32:
      return INT32_MIN;
   case imax64:
      return idx ? 0x80000000u : 0;
   case umin32:
   case umin64:
   case iand32:
   case iand64:
      return 0xffffffffu;
   case fmin32:
      return 0x7f800000u; /* infinity */
   case fmin64:
      return idx ? 0x7ff00000u : 0u; /* infinity */
   case fmax32:
      return 0xff800000u; /* negative infinity */
   case fmax64:
      return idx ? 0xfff00000u : 0u; /* negative infinity */
   default:
      unreachable("Invalid reduction operation");
      break;
   }
   return 0;
}

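/* Lowers p_reduce / p_inclusive_scan / p_exclusive_scan. tmp holds the per-lane
 * partial results (VGPRs), vtmp is a scratch VGPR, stmp receives the saved exec
 * mask and sitmp is a scratch SGPR. Scans always operate on the full wave. */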
void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
                    PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
{
   assert(cluster_size == 64 || op == aco_opcode::p_reduce);

   Builder bld(ctx->program, &ctx->instructions);

   Operand identity[2];
   identity[0] = Operand(get_reduction_identity(reduce_op, 0));
   identity[1] = Operand(get_reduction_identity(reduce_op, 1));
   Operand vcndmask_identity[2] = {identity[0], identity[1]};

   /* First, copy the source to tmp and set inactive lanes to the identity */
   bld.sop1(aco_opcode::s_or_saveexec_b64, Definition(stmp, s2), Definition(scc, s1), Definition(exec, s2), Operand(UINT64_MAX), Operand(exec, s2));

   for (unsigned i = 0; i < src.size(); i++) {
      /* p_exclusive_scan needs the identity to be an SGPR or inline constant for
       * v_writelane_b32, except on GFX10, where v_writelane_b32 can take a literal. */
      if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
         identity[i] = Operand(PhysReg{sitmp+i}, s1);

         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      } else if (identity[i].isLiteral()) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
         vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
      }
   }

   for (unsigned i = 0; i < src.size(); i++) {
      bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
                   vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
                   Operand(stmp, s2));
   }

   bool exec_restored = false;
   bool dst_written = false;
   switch (op) {
   case aco_opcode::p_reduce:
      if (cluster_size == 1) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
      if (cluster_size == 2) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
      if (cluster_size == 4) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_half_mirror, 0xf, 0xf, false);
      if (cluster_size == 8) break;
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_mirror, 0xf, 0xf, false);
      if (cluster_size == 16) break;
      if (cluster_size == 32) {
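         /* swizzle so each lane reads the partial result of the lane 16 apart
          * (and_mask 0x1f, xor_mask 0x10), merging the two 16-lane clusters of
          * each 32-lane half */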
         for (unsigned i = 0; i < src.size(); i++)
            bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), ds_pattern_bitmode(0x1f, 0, 0x10));
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));
         exec_restored = true;
         emit_op(ctx, dst.physReg(), vtmp, tmp, reduce_op, src.size());
         dst_written = true;
      } else if (ctx->program->chip_class >= GFX10) {
         assert(cluster_size == 64);
         /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
         emit_op(ctx, tmp, tmp, vtmp, reduce_op, src.size());

         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, reduce_op, src.size());
      } else {
         assert(cluster_size == 64);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false);
      }
      break;
   case aco_opcode::p_exclusive_scan:
      if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
         /* shift rows right */
         emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);

         /* fill in the gaps in rows 1 and 3 */
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));

         /* fill in the gap in row 2 */
         for (unsigned i = 0; i < src.size(); i++) {
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u));
         }
         std::swap(tmp, vtmp);
      } else {
         emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
      }
      for (unsigned i = 0; i < src.size(); i++) {
         if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
            if (ctx->program->chip_class < GFX10)
               assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
            bld.vop3(aco_opcode::v_writelane_b32, Definition(PhysReg{tmp+i}, v1),
                     identity[i], Operand(0u));
         }
      }
      /* fall through */
   case aco_opcode::p_inclusive_scan:
      assert(cluster_size == 64);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(1), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(2), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(4), 0xf, 0xf, false, identity);
      emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                  dpp_row_sr(8), 0xf, 0xf, false, identity);
      if (ctx->program->chip_class >= GFX10) {
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xffff0000u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffff0000u));
         for (unsigned i = 0; i < src.size(); i++) {
            Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
                                         Definition(PhysReg{vtmp+i}, v1),
                                         Operand(PhysReg{tmp+i}, v1),
                                         Operand(0xffffffffu), Operand(0xffffffffu)).instr;
            static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
         }
         emit_op(ctx, tmp, tmp, vtmp, reduce_op, src.size());

         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffffffffu));
         for (unsigned i = 0; i < src.size(); i++)
            bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
         emit_op(ctx, tmp, sitmp, tmp, reduce_op, src.size());
      } else {
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast15, 0xa, 0xf, false, identity);
         emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
                     dpp_row_bcast31, 0xc, 0xf, false, identity);
      }
      break;
   default:
      unreachable("Invalid reduction mode");
   }

   if (!exec_restored)
      bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(stmp, s2));

   if (op == aco_opcode::p_reduce && cluster_size == 64) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop3(aco_opcode::v_readlane_b32, Definition(PhysReg{dst.physReg() + k}, s1),
                  Operand(PhysReg{tmp + k}, v1), Operand(63u));
      }
   } else if (!(dst.physReg() == tmp) && !dst_written) {
      for (unsigned k = 0; k < src.size(); k++) {
         bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
                  Operand(PhysReg{tmp + k}, v1));
      }
   }
}

struct copy_operation {
   Operand op;
   Definition def;
   unsigned uses;
   unsigned size;
};

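/* Emits a set of parallel copies: copies whose destination is not needed as a source
 * are emitted (and removed) first; the remaining entries form cycles that are broken
 * with swaps; constants are materialized last. */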
void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
{
   Builder bld(ctx->program, &ctx->instructions);
   aco_ptr<Instruction> mov;
   std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
   std::map<PhysReg, copy_operation>::iterator target;
   bool writes_scc = false;

   /* count the number of uses for each dst reg */
   while (it != copy_map.end()) {
      if (it->second.op.isConstant()) {
         ++it;
         continue;
      }

      if (it->second.def.physReg() == scc)
         writes_scc = true;

      assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));

      /* if src and dst reg are the same, remove operation */
      if (it->first == it->second.op.physReg()) {
         it = copy_map.erase(it);
         continue;
      }
      /* check if the operand reg may be overwritten by another copy operation */
      target = copy_map.find(it->second.op.physReg());
      if (target != copy_map.end()) {
         target->second.uses++;
      }

      ++it;
   }

   /* first, handle paths in the location transfer graph */
   bool preserve_scc = pi->tmp_in_scc && !writes_scc;
   it = copy_map.begin();
   while (it != copy_map.end()) {

      /* the target reg is not used as operand for any other copy */
      if (it->second.uses == 0) {

         /* try to coalesce 32-bit sgpr copies to 64-bit copies */
         if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 &&
             !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {

            PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
            PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
            std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);

            if (other != copy_map.end() && !other->second.uses && other->second.size == 1 &&
                other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
               std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
               it = it->first % 2 ? other : it;
               copy_map.erase(to_erase);
               it->second.size = 2;
            }
         }

         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
            preserve_scc = true;
         } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
            bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
         } else {
            bld.copy(it->second.def, it->second.op);
         }

         /* reduce the number of uses of the operand reg by one */
         if (!it->second.op.isConstant()) {
            for (unsigned i = 0; i < it->second.size; i++) {
               target = copy_map.find(PhysReg{it->second.op.physReg() + i});
               if (target != copy_map.end())
                  target->second.uses--;
            }
         }

         copy_map.erase(it);
         it = copy_map.begin();
         continue;
      } else {
         /* the target reg is used as operand, check the next entry */
         ++it;
      }
   }

   if (copy_map.empty())
      return;

   /* all target regs are needed as an operand somewhere, which means all entries are part of a cycle */
   bool constants = false;
   for (it = copy_map.begin(); it != copy_map.end(); ++it) {
      assert(it->second.op.isFixed());
      if (it->first == it->second.op.physReg())
         continue;
      /* do constants later */
      if (it->second.op.isConstant()) {
         constants = true;
         continue;
      }

      if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
         assert(!(it->second.def.physReg() == pi->scratch_sgpr));

      /* to resolve the cycle, we have to swap the src reg with the dst reg */
      copy_operation swap = it->second;
      assert(swap.op.regClass() == swap.def.regClass());
      Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass());
      Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass());
      if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) {
         bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op);
      } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) {
         /* we need to swap scc and another sgpr */
         assert(!preserve_scc);

         PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg();

         bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
         bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
         bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
      } else if (swap.def.getTemp().type() == RegType::sgpr) {
         if (preserve_scc) {
            bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op);
            bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
            bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1));
         } else {
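            /* swap the two SGPRs with the three-XOR trick: no scratch register is
             * needed, but SCC is clobbered (preserve_scc is false here) */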
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op);
            bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
         }
      } else {
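         /* same three-XOR swap for VGPRs on pre-GFX9 chips, which lack v_swap_b32 */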
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op);
         bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
      }

      /* change the operand reg of the target's use */
      assert(swap.uses == 1);
      target = it;
      for (++target; target != copy_map.end(); ++target) {
         if (target->second.op.physReg() == it->first) {
            target->second.op.setFixed(swap.op.physReg());
            break;
         }
      }
   }

   /* copy constants into registers which were used as operands */
   if (constants) {
      for (it = copy_map.begin(); it != copy_map.end(); ++it) {
         if (!it->second.op.isConstant())
            continue;
         if (it->second.def.physReg() == scc) {
            bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u));
         } else {
            bld.copy(it->second.def, it->second.op);
         }
      }
   }
}

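/* Entry point of the pass: replaces the remaining pseudo instructions of every block
 * with real machine instructions. */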
void lower_to_hw_instr(Program* program)
{
   Block *discard_block = NULL;

   for (size_t i = 0; i < program->blocks.size(); i++)
   {
      Block *block = &program->blocks[i];
      lower_context ctx;
      ctx.program = program;
      Builder bld(program, &ctx.instructions);

      bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
      for (unsigned pred : block->linear_preds) {
         if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
            set_mode = true;
            break;
         }
      }
      if (set_mode) {
         /* only allow changing modes at top-level blocks so this doesn't break
          * the "jump over empty blocks" optimization */
         assert(block->kind & block_kind_top_level);
         uint32_t mode = block->fp_mode.val;
         /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
         bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
      }

      for (size_t j = 0; j < block->instructions.size(); j++) {
         aco_ptr<Instruction>& instr = block->instructions[j];
         aco_ptr<Instruction> mov;
         if (instr->format == Format::PSEUDO) {
            Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();

            switch (instr->opcode)
            {
            case aco_opcode::p_extract_vector:
            {
               unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size();
               RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1);
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               if (reg == instr->definitions[0].physReg())
                  break;

               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def);
                  copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1};
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_create_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
               unsigned reg_idx = 0;
               for (const Operand& op : instr->operands) {
                  if (op.isConstant()) {
                     const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx};
                     const Definition def = Definition(reg, rc_def);
                     copy_operations[reg] = {op, def, 0, 1};
                     reg_idx++;
                     continue;
                  }

                  RegClass rc_op = RegClass(op.getTemp().type(), 1);
                  for (unsigned j = 0; j < op.size(); j++)
                  {
                     const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op);
                     const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def);
                     copy_operations[def.physReg()] = {copy_op, def, 0, 1};
                     reg_idx++;
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_split_vector:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1);
               for (unsigned i = 0; i < instr->definitions.size(); i++) {
                  unsigned k = instr->definitions[i].size();
                  RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1);
                  for (unsigned j = 0; j < k; j++) {
                     Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op);
                     Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def);
                     copy_operations[def.physReg()] = {op, def, 0, 1};
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_parallelcopy:
            case aco_opcode::p_wqm:
            {
               std::map<PhysReg, copy_operation> copy_operations;
               for (unsigned i = 0; i < instr->operands.size(); i++)
               {
                  Operand operand = instr->operands[i];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[i].size() == 1);
                     copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1};
                  } else {
                     RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1);
                     RegClass op_rc = RegClass(operand.getTemp().type(), 1);
                     for (unsigned j = 0; j < operand.size(); j++)
                     {
                        Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc);
                        Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }
               }
               handle_operands(copy_operations, &ctx, program->chip_class, pi);
               break;
            }
            case aco_opcode::p_exit_early_if:
            {
               /* don't bother with an early exit at the end of the program */
               if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end &&
                   block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) {
                  break;
               }

               if (!discard_block) {
                  discard_block = program->create_and_insert_block();
                  block = &program->blocks[i];

                  bld.reset(discard_block);
                  bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
                          0, V_008DFC_SQ_EXP_NULL, false, true, true);
                  if (program->wb_smem_l1_on_end)
                     bld.smem(aco_opcode::s_dcache_wb);
                  bld.sopp(aco_opcode::s_endpgm);

                  bld.reset(&ctx.instructions);
               }

               //TODO: exec can be zero here with block_kind_discard

               assert(instr->operands[0].physReg() == scc);
               bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);

               discard_block->linear_preds.push_back(block->index);
               block->linear_succs.push_back(discard_block->index);
               break;
            }
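            /* p_spill/p_reload move SGPRs to/from lanes of a linear VGPR using
             * v_writelane_b32/v_readlane_b32; operand 1 holds the base lane index. */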
            case aco_opcode::p_spill:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->operands[2].size(); i++) {
                  bld.vop3(aco_opcode::v_writelane_b32, bld.def(v1, instr->operands[0].physReg()),
                           Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
                           Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_reload:
            {
               assert(instr->operands[0].regClass() == v1.as_linear());
               for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                  bld.vop3(aco_opcode::v_readlane_b32,
                           bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                           instr->operands[0], Operand(instr->operands[1].constantValue() + i));
               }
               break;
            }
            case aco_opcode::p_as_uniform:
            {
               if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
                  std::map<PhysReg, copy_operation> copy_operations;
                  Operand operand = instr->operands[0];
                  if (operand.isConstant() || operand.size() == 1) {
                     assert(instr->definitions[0].size() == 1);
                     copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1};
                  } else {
                     for (unsigned i = 0; i < operand.size(); i++)
                     {
                        Operand op = Operand(PhysReg{operand.physReg() + i}, s1);
                        Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1);
                        copy_operations[def.physReg()] = {op, def, 0, 1};
                     }
                  }

                  handle_operands(copy_operations, &ctx, program->chip_class, pi);
               } else {
                  assert(instr->operands[0].regClass().type() == RegType::vgpr);
                  assert(instr->definitions[0].regClass().type() == RegType::sgpr);
                  assert(instr->operands[0].size() == instr->definitions[0].size());
                  for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
                     bld.vop1(aco_opcode::v_readfirstlane_b32,
                              bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
                              Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
                  }
               }
               break;
            }
            default:
               break;
            }
         } else if (instr->format == Format::PSEUDO_BRANCH) {
            Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
            /* check if all blocks from current to target are empty */
            bool can_remove = block->index < branch->target[0];
            for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
               if (program->blocks[i].instructions.size())
                  can_remove = false;
            }
            if (can_remove)
               continue;

            switch (instr->opcode) {
            case aco_opcode::p_branch:
               assert(block->linear_succs[0] == branch->target[0]);
               bld.sopp(aco_opcode::s_branch, branch->target[0]);
               break;
            case aco_opcode::p_cbranch_nz:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
               }
               break;
            case aco_opcode::p_cbranch_z:
               assert(block->linear_succs[1] == branch->target[0]);
               if (branch->operands[0].physReg() == exec)
                  bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
               else if (branch->operands[0].physReg() == vcc)
                  bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
               else {
                  assert(branch->operands[0].physReg() == scc);
                  bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
               }
               break;
            default:
               unreachable("Unknown Pseudo branch instruction!");
            }

         } else if (instr->format == Format::PSEUDO_REDUCTION) {
            Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
            if (reduce->reduce_op == gfx10_wave64_bpermute) {
               /* Only makes sense on GFX10 wave64 */
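               /* ds_bpermute_b32 only exchanges data within a 32-lane half on GFX10,
                * so each half first mirrors its input to the other half through a
                * shared VGPR; both versions are then permuted and v_cndmask_b32
                * picks the correct one. */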
               assert(program->chip_class >= GFX10);
               assert(program->info->wave_size == 64);
               assert(instr->definitions[0].regClass() == v1); /* Destination */
               assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
               assert(instr->definitions[1].physReg() != vcc);
               assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
               assert(instr->operands[0].physReg() == vcc); /* Compare */
               assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
               assert(instr->operands[2].regClass() == v1); /* Indices x4 */
               assert(instr->operands[3].regClass() == v1); /* Input data */

               PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
               PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
               Operand compare = instr->operands[0];
               Operand tmp1(instr->operands[1].physReg(), v1);
               Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
               Operand index_x4 = instr->operands[2];
               Operand input_data = instr->operands[3];
               Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
               Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
               Definition def_temp1(tmp1.physReg(), v1);
               Definition def_temp2(tmp2.physReg(), v1);

               /* Save EXEC and set it for all lanes */
               bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
                        Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));

               /* HI: Copy data from high lanes 32-63 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* LO: Copy data from low lanes 0-31 to shared vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
               /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);

               /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
               bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);

               /* Permute the original input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
               /* Permute the swapped input */
               bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);

               /* Restore saved EXEC */
               bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
               /* Choose whether to use the original or swapped */
               bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
            } else {
               emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
                              reduce->operands[1].physReg(), // tmp
                              reduce->definitions[1].physReg(), // stmp
                              reduce->operands[2].physReg(), // vtmp
                              reduce->definitions[2].physReg(), // sitmp
                              reduce->operands[0], reduce->definitions[0]);
            }
         } else {
            ctx.instructions.emplace_back(std::move(instr));
         }

      }
      block->instructions.swap(ctx.instructions);
   }
}

}