aco: don't use a scalar temporary for reductions on GFX10
src/amd/compiler/aco_lower_to_hw_instr.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
25 *
26 */
27
28 #include <map>
29
30 #include "aco_ir.h"
31 #include "aco_builder.h"
32 #include "util/u_math.h"
33 #include "sid.h"
34 #include "vulkan/radv_shader.h"
35
36
37 namespace aco {
38
39 struct lower_context {
40 Program *program;
41 std::vector<aco_ptr<Instruction>> instructions;
42 };
43
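/* Maps a ReduceOp to the VALU opcode that combines two lanes' values.
 * The 64-bit integer ops have no single VALU equivalent and return
 * num_opcodes; they are expanded by emit_int64_op()/emit_int64_dpp_op(). */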
44 aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
45 switch (op) {
46 case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
47 case imul32: return aco_opcode::v_mul_lo_u32;
48 case fadd32: return aco_opcode::v_add_f32;
49 case fmul32: return aco_opcode::v_mul_f32;
50 case imax32: return aco_opcode::v_max_i32;
51 case imin32: return aco_opcode::v_min_i32;
52 case umin32: return aco_opcode::v_min_u32;
53 case umax32: return aco_opcode::v_max_u32;
54 case fmin32: return aco_opcode::v_min_f32;
55 case fmax32: return aco_opcode::v_max_f32;
56 case iand32: return aco_opcode::v_and_b32;
57 case ixor32: return aco_opcode::v_xor_b32;
58 case ior32: return aco_opcode::v_or_b32;
59 case iadd64: return aco_opcode::num_opcodes;
60 case imul64: return aco_opcode::num_opcodes;
61 case fadd64: return aco_opcode::v_add_f64;
62 case fmul64: return aco_opcode::v_mul_f64;
63 case imin64: return aco_opcode::num_opcodes;
64 case imax64: return aco_opcode::num_opcodes;
65 case umin64: return aco_opcode::num_opcodes;
66 case umax64: return aco_opcode::num_opcodes;
67 case fmin64: return aco_opcode::v_min_f64;
68 case fmax64: return aco_opcode::v_max_f64;
69 case iand64: return aco_opcode::num_opcodes;
70 case ior64: return aco_opcode::num_opcodes;
71 case ixor64: return aco_opcode::num_opcodes;
72 default: return aco_opcode::num_opcodes;
73 }
74 }
75
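/* Wrapping 32-bit add helper. We are past register allocation here, so if the
 * chosen encoding produces a carry-out definition it is pinned to vcc. */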
76 void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
77 {
78 Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
79 if (instr->definitions.size() >= 2) {
80 assert(instr->definitions[1].regClass() == bld.lm);
81 instr->definitions[1].setFixed(vcc);
82 }
83 }
84
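/* Expands one 64-bit integer reduction step where src0 is read through a DPP
 * lane-shuffle. The operation is built from 32-bit halves; when the combining
 * instruction cannot carry the DPP modifier itself, the shuffled source is
 * first copied into vtmp with v_mov_b32_dpp. The optional identity preloads
 * vtmp so that lanes skipped by the sparse DPP write keep a neutral value. */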
85 void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
86 PhysReg vtmp_reg, ReduceOp op,
87 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
88 Operand *identity=NULL)
89 {
90 Builder bld(ctx->program, &ctx->instructions);
91 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
92 Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
93 Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
94 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
95 Operand src1_64 = Operand(src1_reg, v2);
96 Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
97 Operand vtmp_op64 = Operand(vtmp_reg, v2);
98 if (op == iadd64) {
99 if (ctx->program->chip_class >= GFX10) {
100 if (identity)
101 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
102 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
103 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
104 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
105 } else {
106 bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
107 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
108 }
109 bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
110 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
111 } else if (op == iand64) {
112 bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
113 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
114 bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
115 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
116 } else if (op == ior64) {
117 bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
118 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
119 bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
120 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
121 } else if (op == ixor64) {
122 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
123 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
124 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
125 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
126 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
127 aco_opcode cmp = aco_opcode::num_opcodes;
128 switch (op) {
129 case umin64:
130 cmp = aco_opcode::v_cmp_gt_u64;
131 break;
132 case umax64:
133 cmp = aco_opcode::v_cmp_lt_u64;
134 break;
135 case imin64:
136 cmp = aco_opcode::v_cmp_gt_i64;
137 break;
138 case imax64:
139 cmp = aco_opcode::v_cmp_lt_i64;
140 break;
141 default:
142 break;
143 }
144
145 if (identity) {
146 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
147 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
148 }
149 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
150 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
151 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
152 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
153
154 bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
155 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
156 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
157 } else if (op == imul64) {
158 /* t4 = dpp(x_hi)
159 * t1 = umul_lo(t4, y_lo)
160 * t3 = dpp(x_lo)
161 * t0 = umul_lo(t3, y_hi)
162 * t2 = iadd(t0, t1)
163 * t5 = umul_hi(t3, y_lo)
164 * res_hi = iadd(t2, t5)
165 * res_lo = umul_lo(t3, y_lo)
166 * Requires that res_hi != src0[0] and res_hi != src1[0]
167 * and that vtmp[0] != res_hi.
168 */
169 if (identity)
170 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
171 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
172 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
173 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
174 if (identity)
175 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
176 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
177 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
178 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
179 emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
180 if (identity)
181 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
182 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
183 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
184 bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
185 emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
186 if (identity)
187 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
188 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
189 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
190 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
191 }
192 }
193
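/* Non-DPP expansion of the 64-bit integer ops, used for the cluster-combining
 * steps where src0 may still live in SGPRs (e.g. a readlane result). SGPR
 * halves that the expansion cannot consume directly are copied to vtmp first. */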
194 void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
195 {
196 Builder bld(ctx->program, &ctx->instructions);
197 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
198 RegClass src0_rc = src0_reg.reg >= 256 ? v1 : s1;
199 Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
200 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
201 Operand src0_64 = Operand(src0_reg, src0_reg.reg >= 256 ? v2 : s2);
202 Operand src1_64 = Operand(src1_reg, v2);
203
204 if (src0_rc == s1 &&
205 (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
206 assert(vtmp.reg != 0);
207 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
208 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
209 src0_reg = vtmp;
210 src0[0] = Operand(vtmp, v1);
211 src0[1] = Operand(PhysReg{vtmp+1}, v1);
212 src0_64 = Operand(vtmp, v2);
213 } else if (src0_rc == s1 && op == iadd64) {
214 assert(vtmp.reg != 0);
215 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
216 src0[1] = Operand(PhysReg{vtmp+1}, v1);
217 }
218
219 if (op == iadd64) {
220 if (ctx->program->chip_class >= GFX10) {
221 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
222 } else {
223 bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
224 }
225 bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
226 } else if (op == iand64) {
227 bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
228 bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
229 } else if (op == ior64) {
230 bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
231 bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
232 } else if (op == ixor64) {
233 bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
234 bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
235 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
236 aco_opcode cmp = aco_opcode::num_opcodes;
237 switch (op) {
238 case umin64:
239 cmp = aco_opcode::v_cmp_gt_u64;
240 break;
241 case umax64:
242 cmp = aco_opcode::v_cmp_lt_u64;
243 break;
244 case imin64:
245 cmp = aco_opcode::v_cmp_gt_i64;
246 break;
247 case imax64:
248 cmp = aco_opcode::v_cmp_lt_i64;
249 break;
250 default:
251 break;
252 }
253
254 bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
255 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
256 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
257 } else if (op == imul64) {
258 if (src1_reg == dst_reg) {
259 /* it's fine if src0==dst but not if src1==dst */
260 std::swap(src0_reg, src1_reg);
261 std::swap(src0[0], src1[0]);
262 std::swap(src0[1], src1[1]);
263 std::swap(src0_64, src1_64);
264 }
265 assert(!(src0_reg == src1_reg));
266 /* t1 = umul_lo(x_hi, y_lo)
267 * t0 = umul_lo(x_lo, y_hi)
268 * t2 = iadd(t0, t1)
269 * t5 = umul_hi(x_lo, y_lo)
270 * res_hi = iadd(t2, t5)
271 * res_lo = umul_lo(x_lo, y_lo)
272 * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
273 */
274 Definition tmp0_def(PhysReg{src0_reg+1}, v1);
275 Definition tmp1_def(PhysReg{src1_reg+1}, v1);
276 Operand tmp0_op = src0[1];
277 Operand tmp1_op = src1[1];
278 bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
279 bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
280 emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
281 bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
282 emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
283 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
284 }
285 }
286
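/* Combines src1 with a DPP-shuffled src0 into dst. Plain VOP2 reductions take
 * the DPP modifier directly; VOP3-only ops (imul32 and the 64-bit ops) cannot,
 * so the shuffled value goes through a v_mov_b32_dpp into vtmp first. */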
287 void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
288 PhysReg vtmp, ReduceOp op, unsigned size,
289 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
290 Operand *identity=NULL) /* for VOP3 with sparse writes */
291 {
292 Builder bld(ctx->program, &ctx->instructions);
293 RegClass rc = RegClass(RegType::vgpr, size);
294 Definition dst(dst_reg, rc);
295 Operand src0(src0_reg, rc);
296 Operand src1(src1_reg, rc);
297
298 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
299 bool vop3 = op == imul32 || size == 2;
300
301 if (!vop3) {
302 if (opcode == aco_opcode::v_add_co_u32)
303 bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
304 else
305 bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
306 return;
307 }
308
309 if (opcode == aco_opcode::num_opcodes) {
310 emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
311 dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
312 return;
313 }
314
315 if (identity)
316 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
317 if (identity && size >= 2)
318 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);
319
320 for (unsigned i = 0; i < size; i++)
321 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
322 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
323
324 bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
325 }
326
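/* Non-DPP variant of the above: combines src0 (VGPR or SGPR) with src1 using
 * the plain VALU form of the reduction op. */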
327 void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
328 PhysReg vtmp, ReduceOp op, unsigned size)
329 {
330 Builder bld(ctx->program, &ctx->instructions);
331 RegClass rc = RegClass(RegType::vgpr, size);
332 Definition dst(dst_reg, rc);
333 Operand src0(src0_reg, RegClass(src0_reg.reg >= 256 ? RegType::vgpr : RegType::sgpr, size));
334 Operand src1(src1_reg, rc);
335
336 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
337 bool vop3 = op == imul32 || size == 2;
338
339 if (opcode == aco_opcode::num_opcodes) {
340 emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
341 return;
342 }
343
344 if (vop3) {
345 bld.vop3(opcode, dst, src0, src1);
346 } else if (opcode == aco_opcode::v_add_co_u32) {
347 bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
348 } else {
349 bld.vop2(opcode, dst, src0, src1);
350 }
351 }
352
353 void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
354 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
355 {
356 Builder bld(ctx->program, &ctx->instructions);
357 for (unsigned i = 0; i < size; i++) {
358 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
359 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
360 }
361 }
362
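/* Returns dword 'idx' of the operation's identity, i.e. the value that leaves
 * the reduction unchanged: 0 for add/or/xor, 1 for multiplies, all-ones for
 * and, and the appropriate extreme value for the min/max ops. */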
363 uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
364 {
365 switch (op) {
366 case iadd32:
367 case iadd64:
368 case fadd32:
369 case fadd64:
370 case ior32:
371 case ior64:
372 case ixor32:
373 case ixor64:
374 case umax32:
375 case umax64:
376 return 0;
377 case imul32:
378 case imul64:
379 return idx ? 0 : 1;
380 case fmul32:
381 return 0x3f800000u; /* 1.0 */
382 case fmul64:
383 return idx ? 0x3ff00000u : 0u; /* 1.0 */
384 case imin32:
385 return INT32_MAX;
386 case imin64:
387 return idx ? 0x7fffffffu : 0xffffffffu;
388 case imax32:
389 return INT32_MIN;
390 case imax64:
391 return idx ? 0x80000000u : 0;
392 case umin32:
393 case umin64:
394 case iand32:
395 case iand64:
396 return 0xffffffffu;
397 case fmin32:
398 return 0x7f800000u; /* infinity */
399 case fmin64:
400 return idx ? 0x7ff00000u : 0u; /* infinity */
401 case fmax32:
402 return 0xff800000u; /* negative infinity */
403 case fmax64:
404 return idx ? 0xfff00000u : 0u; /* negative infinity */
405 default:
406 unreachable("Invalid reduction operation");
407 break;
408 }
409 return 0;
410 }
411
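/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan:
 *  - save exec and set the inactive lanes of tmp to the identity value,
 *  - combine lanes with DPP/permlane/swizzle steps whose span doubles each
 *    time until cluster_size is covered (scans use row_sr shifts followed by
 *    row broadcasts or their GFX10 permlane/readlane emulation),
 *  - restore exec and move the result into dst (via readlane for SGPR
 *    destinations of p_reduce). */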
412 void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
413 PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
414 {
415 assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
416 assert(cluster_size <= ctx->program->wave_size);
417
418 Builder bld(ctx->program, &ctx->instructions);
419
420 Operand identity[2];
421 identity[0] = Operand(get_reduction_identity(reduce_op, 0));
422 identity[1] = Operand(get_reduction_identity(reduce_op, 1));
423 Operand vcndmask_identity[2] = {identity[0], identity[1]};
424
425 /* First, copy the source to tmp and set inactive lanes to the identity */
426 bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));
427
428 for (unsigned i = 0; i < src.size(); i++) {
429 /* p_exclusive_scan needs it to be an sgpr or inline constant for the v_writelane_b32
430 * except on GFX10, where v_writelane_b32 can take a literal. */
431 if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
432 bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
433 identity[i] = Operand(PhysReg{sitmp+i}, s1);
434
435 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
436 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
437 } else if (identity[i].isLiteral()) {
438 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
439 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
440 }
441 }
442
443 for (unsigned i = 0; i < src.size(); i++) {
444 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
445 vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
446 Operand(stmp, bld.lm));
447 }
448
449 bool exec_restored = false;
450 bool dst_written = false;
451 switch (op) {
452 case aco_opcode::p_reduce:
453 if (cluster_size == 1) break;
454 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
455 dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
456 if (cluster_size == 2) break;
457 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
458 dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
459 if (cluster_size == 4) break;
460 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
461 dpp_row_half_mirror, 0xf, 0xf, false);
462 if (cluster_size == 8) break;
463 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
464 dpp_row_mirror, 0xf, 0xf, false);
465 if (cluster_size == 16) break;
466
467 if (ctx->program->chip_class >= GFX10) {
468 /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
469
470 for (unsigned i = 0; i < src.size(); i++)
471 bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
472
473 if (cluster_size == 32 && dst.regClass().type() == RegType::vgpr) {
474 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
475 exec_restored = true;
476 emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
477 dst_written = true;
478 } else {
479 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
480 }
481
482 if (cluster_size == 64) {
483 for (unsigned i = 0; i < src.size(); i++)
484 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
485 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
486 }
487 } else if (cluster_size == 32) {
488 for (unsigned i = 0; i < src.size(); i++)
489 bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, s1), ds_pattern_bitmode(0x1f, 0, 0x10));
490 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
491 exec_restored = true;
492 emit_op(ctx, dst.physReg(), vtmp, tmp, PhysReg{0}, reduce_op, src.size());
493 dst_written = true;
494 } else {
495 assert(cluster_size == 64);
496 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
497 dpp_row_bcast15, 0xa, 0xf, false);
498 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
499 dpp_row_bcast31, 0xc, 0xf, false);
500 }
501 break;
502 case aco_opcode::p_exclusive_scan:
503 if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
504 /* shift rows right */
505 emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
506
507 /* fill in the gaps in rows 1 and 3 */
508 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
509 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
510 for (unsigned i = 0; i < src.size(); i++) {
511 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
512 Definition(PhysReg{vtmp+i}, v1),
513 Operand(PhysReg{tmp+i}, v1),
514 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
515 static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
516 }
517 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));
518
519 if (ctx->program->wave_size == 64) {
520 /* fill in the gap in row 2 */
521 for (unsigned i = 0; i < src.size(); i++) {
522 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
523 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
524 }
525 }
526 std::swap(tmp, vtmp);
527 } else {
528 emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
529 }
530 for (unsigned i = 0; i < src.size(); i++) {
531 if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
532 if (ctx->program->chip_class < GFX10)
533 assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
534 bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
535 }
536 }
537 /* fall through */
538 case aco_opcode::p_inclusive_scan:
539 assert(cluster_size == ctx->program->wave_size);
540 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
541 dpp_row_sr(1), 0xf, 0xf, false, identity);
542 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
543 dpp_row_sr(2), 0xf, 0xf, false, identity);
544 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
545 dpp_row_sr(4), 0xf, 0xf, false, identity);
546 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
547 dpp_row_sr(8), 0xf, 0xf, false, identity);
548 if (ctx->program->chip_class >= GFX10) {
549 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xffff0000u));
550 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffff0000u));
551 for (unsigned i = 0; i < src.size(); i++) {
552 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
553 Definition(PhysReg{vtmp+i}, v1),
554 Operand(PhysReg{tmp+i}, v1),
555 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
556 static_cast<VOP3A_instruction*>(perm)->opsel[0] = true; /* FI (Fetch Inactive) */
557 }
558 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
559
560 if (ctx->program->wave_size == 64) {
561 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0u));
562 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0xffffffffu));
563 for (unsigned i = 0; i < src.size(); i++)
564 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
565 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
566 }
567 } else {
568 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
569 dpp_row_bcast15, 0xa, 0xf, false, identity);
570 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
571 dpp_row_bcast31, 0xc, 0xf, false, identity);
572 }
573 break;
574 default:
575 unreachable("Invalid reduction mode");
576 }
577
578 if (!exec_restored)
579 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
580
581 if (op == aco_opcode::p_reduce && dst.regClass().type() == RegType::sgpr) {
582 for (unsigned k = 0; k < src.size(); k++) {
583 bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
584 Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
585 }
586 } else if (!(dst.physReg() == tmp) && !dst_written) {
587 for (unsigned k = 0; k < src.size(); k++) {
588 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
589 Operand(PhysReg{tmp + k}, v1));
590 }
591 }
592 }
593
594 struct copy_operation {
595 Operand op;
596 Definition def;
597 unsigned uses;
598 unsigned size;
599 };
600
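/* Lowers a set of parallel copies (keyed by destination register) to actual
 * moves: copies whose destination is not needed as a source elsewhere are
 * emitted first (coalescing adjacent 32-bit SGPR copies into s_mov_b64 where
 * possible); the remaining entries form cycles and are broken with swaps
 * (v_swap_b32 on GFX9+, xor swaps otherwise); constants are written last. */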
601 void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
602 {
603 Builder bld(ctx->program, &ctx->instructions);
604 aco_ptr<Instruction> mov;
605 std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
606 std::map<PhysReg, copy_operation>::iterator target;
607 bool writes_scc = false;
608
609 /* count the number of uses for each dst reg */
610 while (it != copy_map.end()) {
611 if (it->second.op.isConstant()) {
612 ++it;
613 continue;
614 }
615
616 if (it->second.def.physReg() == scc)
617 writes_scc = true;
618
619 assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
620
621 /* if src and dst reg are the same, remove operation */
622 if (it->first == it->second.op.physReg()) {
623 it = copy_map.erase(it);
624 continue;
625 }
626 /* check if the operand reg may be overwritten by another copy operation */
627 target = copy_map.find(it->second.op.physReg());
628 if (target != copy_map.end()) {
629 target->second.uses++;
630 }
631
632 ++it;
633 }
634
635 /* first, handle paths in the location transfer graph */
636 bool preserve_scc = pi->tmp_in_scc && !writes_scc;
637 it = copy_map.begin();
638 while (it != copy_map.end()) {
639
640 /* the target reg is not used as operand for any other copy */
641 if (it->second.uses == 0) {
642
643 /* try to coalesce 32-bit sgpr copies to 64-bit copies */
644 if (it->second.def.getTemp().type() == RegType::sgpr && it->second.size == 1 &&
645 !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {
646
647 PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
648 PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
649 std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);
650
651 if (other != copy_map.end() && !other->second.uses && other->second.size == 1 &&
652 other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
653 std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
654 it = it->first % 2 ? other : it;
655 copy_map.erase(to_erase);
656 it->second.size = 2;
657 }
658 }
659
660 if (it->second.def.physReg() == scc) {
661 bld.sopc(aco_opcode::s_cmp_lg_i32, it->second.def, it->second.op, Operand(0u));
662 preserve_scc = true;
663 } else if (it->second.size == 2 && it->second.def.getTemp().type() == RegType::sgpr) {
664 bld.sop1(aco_opcode::s_mov_b64, it->second.def, Operand(it->second.op.physReg(), s2));
665 } else {
666 bld.copy(it->second.def, it->second.op);
667 }
668
669 /* reduce the number of uses of the operand reg by one */
670 if (!it->second.op.isConstant()) {
671 for (unsigned i = 0; i < it->second.size; i++) {
672 target = copy_map.find(PhysReg{it->second.op.physReg() + i});
673 if (target != copy_map.end())
674 target->second.uses--;
675 }
676 }
677
678 copy_map.erase(it);
679 it = copy_map.begin();
680 continue;
681 } else {
682 /* the target reg is used as operand, check the next entry */
683 ++it;
684 }
685 }
686
687 if (copy_map.empty())
688 return;
689
690 /* all target regs are needed as an operand somewhere, which means all entries are part of a cycle */
691 bool constants = false;
692 for (it = copy_map.begin(); it != copy_map.end(); ++it) {
693 assert(it->second.op.isFixed());
694 if (it->first == it->second.op.physReg())
695 continue;
696 /* do constants later */
697 if (it->second.op.isConstant()) {
698 constants = true;
699 continue;
700 }
701
702 if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
703 assert(!(it->second.def.physReg() == pi->scratch_sgpr));
704
705 /* to resolve the cycle, we have to swap the src reg with the dst reg */
706 copy_operation swap = it->second;
707 assert(swap.op.regClass() == swap.def.regClass());
708 Operand def_as_op = Operand(swap.def.physReg(), swap.def.regClass());
709 Definition op_as_def = Definition(swap.op.physReg(), swap.op.regClass());
710 if (chip_class >= GFX9 && swap.def.getTemp().type() == RegType::vgpr) {
711 bld.vop1(aco_opcode::v_swap_b32, swap.def, op_as_def, swap.op, def_as_op);
712 } else if (swap.op.physReg() == scc || swap.def.physReg() == scc) {
713 /* we need to swap scc and another sgpr */
714 assert(!preserve_scc);
715
716 PhysReg other = swap.op.physReg() == scc ? swap.def.physReg() : swap.op.physReg();
717
718 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
719 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
720 bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
721 } else if (swap.def.getTemp().type() == RegType::sgpr) {
722 if (preserve_scc) {
723 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), swap.op);
724 bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
725 bld.sop1(aco_opcode::s_mov_b32, swap.def, Operand(pi->scratch_sgpr, s1));
726 } else {
727 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
728 bld.sop2(aco_opcode::s_xor_b32, swap.def, Definition(scc, s1), swap.op, def_as_op);
729 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), swap.op, def_as_op);
730 }
731 } else {
732 bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
733 bld.vop2(aco_opcode::v_xor_b32, swap.def, swap.op, def_as_op);
734 bld.vop2(aco_opcode::v_xor_b32, op_as_def, swap.op, def_as_op);
735 }
736
737 /* change the operand reg of the target's use */
738 assert(swap.uses == 1);
739 target = it;
740 for (++target; target != copy_map.end(); ++target) {
741 if (target->second.op.physReg() == it->first) {
742 target->second.op.setFixed(swap.op.physReg());
743 break;
744 }
745 }
746 }
747
748 /* copy constants into the registers that were operands */
749 if (constants) {
750 for (it = copy_map.begin(); it != copy_map.end(); ++it) {
751 if (!it->second.op.isConstant())
752 continue;
753 if (it->second.def.physReg() == scc) {
754 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(0u), Operand(it->second.op.constantValue() ? 1u : 0u));
755 } else {
756 bld.copy(it->second.def, it->second.op);
757 }
758 }
759 }
760 }
761
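/* Replaces all pseudo instructions (vector create/extract/split, parallel
 * copies, spills/reloads, branches and reductions) with real machine
 * instructions, and emits an s_setreg to switch the float mode wherever a
 * block's fp_mode differs from its predecessors'. */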
762 void lower_to_hw_instr(Program* program)
763 {
764 Block *discard_block = NULL;
765
766 for (size_t i = 0; i < program->blocks.size(); i++)
767 {
768 Block *block = &program->blocks[i];
769 lower_context ctx;
770 ctx.program = program;
771 Builder bld(program, &ctx.instructions);
772
773 bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
774 for (unsigned pred : block->linear_preds) {
775 if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
776 set_mode = true;
777 break;
778 }
779 }
780 if (set_mode) {
781 /* only allow changing modes at top-level blocks so this doesn't break
782 * the "jump over empty blocks" optimization */
783 assert(block->kind & block_kind_top_level);
784 uint32_t mode = block->fp_mode.val;
785 /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
786 bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
787 }
788
789 for (size_t j = 0; j < block->instructions.size(); j++) {
790 aco_ptr<Instruction>& instr = block->instructions[j];
791 aco_ptr<Instruction> mov;
792 if (instr->format == Format::PSEUDO) {
793 Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();
794
795 switch (instr->opcode)
796 {
797 case aco_opcode::p_extract_vector:
798 {
799 unsigned reg = instr->operands[0].physReg() + instr->operands[1].constantValue() * instr->definitions[0].size();
800 RegClass rc = RegClass(instr->operands[0].getTemp().type(), 1);
801 RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
802 if (reg == instr->definitions[0].physReg())
803 break;
804
805 std::map<PhysReg, copy_operation> copy_operations;
806 for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
807 Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, rc_def);
808 copy_operations[def.physReg()] = {Operand(PhysReg{reg + i}, rc), def, 0, 1};
809 }
810 handle_operands(copy_operations, &ctx, program->chip_class, pi);
811 break;
812 }
813 case aco_opcode::p_create_vector:
814 {
815 std::map<PhysReg, copy_operation> copy_operations;
816 RegClass rc_def = RegClass(instr->definitions[0].getTemp().type(), 1);
817 unsigned reg_idx = 0;
818 for (const Operand& op : instr->operands) {
819 if (op.isConstant()) {
820 const PhysReg reg = PhysReg{instr->definitions[0].physReg() + reg_idx};
821 const Definition def = Definition(reg, rc_def);
822 copy_operations[reg] = {op, def, 0, 1};
823 reg_idx++;
824 continue;
825 }
826
827 RegClass rc_op = RegClass(op.getTemp().type(), 1);
828 for (unsigned j = 0; j < op.size(); j++)
829 {
830 const Operand copy_op = Operand(PhysReg{op.physReg() + j}, rc_op);
831 const Definition def = Definition(PhysReg{instr->definitions[0].physReg() + reg_idx}, rc_def);
832 copy_operations[def.physReg()] = {copy_op, def, 0, 1};
833 reg_idx++;
834 }
835 }
836 handle_operands(copy_operations, &ctx, program->chip_class, pi);
837 break;
838 }
839 case aco_opcode::p_split_vector:
840 {
841 std::map<PhysReg, copy_operation> copy_operations;
842 RegClass rc_op = instr->operands[0].isConstant() ? s1 : RegClass(instr->operands[0].regClass().type(), 1);
843 for (unsigned i = 0; i < instr->definitions.size(); i++) {
844 unsigned k = instr->definitions[i].size();
845 RegClass rc_def = RegClass(instr->definitions[i].getTemp().type(), 1);
846 for (unsigned j = 0; j < k; j++) {
847 Operand op = Operand(PhysReg{instr->operands[0].physReg() + (i*k+j)}, rc_op);
848 Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, rc_def);
849 copy_operations[def.physReg()] = {op, def, 0, 1};
850 }
851 }
852 handle_operands(copy_operations, &ctx, program->chip_class, pi);
853 break;
854 }
855 case aco_opcode::p_parallelcopy:
856 case aco_opcode::p_wqm:
857 {
858 std::map<PhysReg, copy_operation> copy_operations;
859 for (unsigned i = 0; i < instr->operands.size(); i++)
860 {
861 Operand operand = instr->operands[i];
862 if (operand.isConstant() || operand.size() == 1) {
863 assert(instr->definitions[i].size() == 1);
864 copy_operations[instr->definitions[i].physReg()] = {operand, instr->definitions[i], 0, 1};
865 } else {
866 RegClass def_rc = RegClass(instr->definitions[i].regClass().type(), 1);
867 RegClass op_rc = RegClass(operand.getTemp().type(), 1);
868 for (unsigned j = 0; j < operand.size(); j++)
869 {
870 Operand op = Operand(PhysReg{instr->operands[i].physReg() + j}, op_rc);
871 Definition def = Definition(PhysReg{instr->definitions[i].physReg() + j}, def_rc);
872 copy_operations[def.physReg()] = {op, def, 0, 1};
873 }
874 }
875 }
876 handle_operands(copy_operations, &ctx, program->chip_class, pi);
877 break;
878 }
879 case aco_opcode::p_exit_early_if:
880 {
881 /* don't bother with an early exit at the end of the program */
882 if (block->instructions[j + 1]->opcode == aco_opcode::p_logical_end &&
883 block->instructions[j + 2]->opcode == aco_opcode::s_endpgm) {
884 break;
885 }
886
887 if (!discard_block) {
888 discard_block = program->create_and_insert_block();
889 block = &program->blocks[i];
890
891 bld.reset(discard_block);
892 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
893 0, V_008DFC_SQ_EXP_NULL, false, true, true);
894 if (program->wb_smem_l1_on_end)
895 bld.smem(aco_opcode::s_dcache_wb);
896 bld.sopp(aco_opcode::s_endpgm);
897
898 bld.reset(&ctx.instructions);
899 }
900
901 //TODO: exec can be zero here with block_kind_discard
902
903 assert(instr->operands[0].physReg() == scc);
904 bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);
905
906 discard_block->linear_preds.push_back(block->index);
907 block->linear_succs.push_back(discard_block->index);
908 break;
909 }
910 case aco_opcode::p_spill:
911 {
912 assert(instr->operands[0].regClass() == v1.as_linear());
913 for (unsigned i = 0; i < instr->operands[2].size(); i++)
914 bld.writelane(bld.def(v1, instr->operands[0].physReg()),
915 Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
916 Operand(instr->operands[1].constantValue() + i),
917 instr->operands[0]);
918 break;
919 }
920 case aco_opcode::p_reload:
921 {
922 assert(instr->operands[0].regClass() == v1.as_linear());
923 for (unsigned i = 0; i < instr->definitions[0].size(); i++)
924 bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
925 instr->operands[0],
926 Operand(instr->operands[1].constantValue() + i));
927 break;
928 }
929 case aco_opcode::p_as_uniform:
930 {
931 if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
932 std::map<PhysReg, copy_operation> copy_operations;
933 Operand operand = instr->operands[0];
934 if (operand.isConstant() || operand.size() == 1) {
935 assert(instr->definitions[0].size() == 1);
936 copy_operations[instr->definitions[0].physReg()] = {operand, instr->definitions[0], 0, 1};
937 } else {
938 for (unsigned i = 0; i < operand.size(); i++)
939 {
940 Operand op = Operand(PhysReg{operand.physReg() + i}, s1);
941 Definition def = Definition(PhysReg{instr->definitions[0].physReg() + i}, s1);
942 copy_operations[def.physReg()] = {op, def, 0, 1};
943 }
944 }
945
946 handle_operands(copy_operations, &ctx, program->chip_class, pi);
947 } else {
948 assert(instr->operands[0].regClass().type() == RegType::vgpr);
949 assert(instr->definitions[0].regClass().type() == RegType::sgpr);
950 assert(instr->operands[0].size() == instr->definitions[0].size());
951 for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
952 bld.vop1(aco_opcode::v_readfirstlane_b32,
953 bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
954 Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
955 }
956 }
957 break;
958 }
959 default:
960 break;
961 }
962 } else if (instr->format == Format::PSEUDO_BRANCH) {
963 Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
964 /* check if all blocks from current to target are empty */
965 bool can_remove = block->index < branch->target[0];
966 for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
967 if (program->blocks[i].instructions.size())
968 can_remove = false;
969 }
970 if (can_remove)
971 continue;
972
973 switch (instr->opcode) {
974 case aco_opcode::p_branch:
975 assert(block->linear_succs[0] == branch->target[0]);
976 bld.sopp(aco_opcode::s_branch, branch->target[0]);
977 break;
978 case aco_opcode::p_cbranch_nz:
979 assert(block->linear_succs[1] == branch->target[0]);
980 if (branch->operands[0].physReg() == exec)
981 bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
982 else if (branch->operands[0].physReg() == vcc)
983 bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
984 else {
985 assert(branch->operands[0].physReg() == scc);
986 bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
987 }
988 break;
989 case aco_opcode::p_cbranch_z:
990 assert(block->linear_succs[1] == branch->target[0]);
991 if (branch->operands[0].physReg() == exec)
992 bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
993 else if (branch->operands[0].physReg() == vcc)
994 bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
995 else {
996 assert(branch->operands[0].physReg() == scc);
997 bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
998 }
999 break;
1000 default:
1001 unreachable("Unknown Pseudo branch instruction!");
1002 }
1003
1004 } else if (instr->format == Format::PSEUDO_REDUCTION) {
1005 Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
1006 if (reduce->reduce_op == gfx10_wave64_bpermute) {
1007 /* Only makes sense on GFX10 wave64 */
1008 assert(program->chip_class >= GFX10);
1009 assert(program->info->wave_size == 64);
1010 assert(instr->definitions[0].regClass() == v1); /* Destination */
1011 assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
1012 assert(instr->definitions[1].physReg() != vcc);
1013 assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
1014 assert(instr->operands[0].physReg() == vcc); /* Compare */
1015 assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
1016 assert(instr->operands[2].regClass() == v1); /* Indices x4 */
1017 assert(instr->operands[3].regClass() == v1); /* Input data */
1018
1019 PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
1020 PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
1021 Operand compare = instr->operands[0];
1022 Operand tmp1(instr->operands[1].physReg(), v1);
1023 Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
1024 Operand index_x4 = instr->operands[2];
1025 Operand input_data = instr->operands[3];
1026 Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
1027 Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
1028 Definition def_temp1(tmp1.physReg(), v1);
1029 Definition def_temp2(tmp2.physReg(), v1);
1030
1031 /* Save EXEC and set it for all lanes */
1032 bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
1033 Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));
1034
1035 /* HI: Copy data from high lanes 32-63 to shared vgpr */
1036 bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
1037
1038 /* LO: Copy data from low lanes 0-31 to shared vgpr */
1039 bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
1040 /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
1041 bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
1042
1043 /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
1044 bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
1045
1046 /* Permute the original input */
1047 bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
1048 /* Permute the swapped input */
1049 bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);
1050
1051 /* Restore saved EXEC */
1052 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
1053 /* Choose whether to use the original or swapped */
1054 bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
1055 } else {
1056 emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
1057 reduce->operands[1].physReg(), // tmp
1058 reduce->definitions[1].physReg(), // stmp
1059 reduce->operands[2].physReg(), // vtmp
1060 reduce->definitions[2].physReg(), // sitmp
1061 reduce->operands[0], reduce->definitions[0]);
1062 }
1063 } else {
1064 ctx.instructions.emplace_back(std::move(instr));
1065 }
1066
1067 }
1068 block->instructions.swap(ctx.instructions);
1069 }
1070 }
1071
1072 }