aco: Use correct reference type in for-range-loop.
[mesa.git] / src / amd / compiler / aco_lower_to_hw_instr.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
25 *
26 */
27
28 #include <map>
29
30 #include "aco_ir.h"
31 #include "aco_builder.h"
32 #include "util/u_math.h"
33 #include "sid.h"
34 #include "vulkan/radv_shader.h"
35
36
37 namespace aco {
38
39 struct lower_context {
40 Program *program;
41 std::vector<aco_ptr<Instruction>> instructions;
42 };
43
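/* Maps a reduction operation to the VALU opcode used for a single combine
 * step on the given chip. aco_opcode::num_opcodes serves as a sentinel for
 * operations without a single hardware instruction (the 64-bit integer
 * reductions); callers lower those through emit_int64_op()/emit_int64_dpp_op().
 */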
44 aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
45 /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
46 * 32-bit opcodes (VOP2), which lets us remove the temporary VGPR and use
47 * DPP with the arithmetic instructions. This requires sign-extending the 16-bit inputs.
48 */
49 switch (op) {
50 case iadd8:
51 case iadd16: return chip >= GFX10 ? aco_opcode::v_add_u32 : aco_opcode::v_add_u16;
52 case imul8:
53 case imul16: return chip >= GFX10 ? aco_opcode::v_mul_lo_u16_e64 : aco_opcode::v_mul_lo_u16;
54 case fadd16: return aco_opcode::v_add_f16;
55 case fmul16: return aco_opcode::v_mul_f16;
56 case imax8:
57 case imax16: return chip >= GFX10 ? aco_opcode::v_max_i32 : aco_opcode::v_max_i16;
58 case imin8:
59 case imin16: return chip >= GFX10 ? aco_opcode::v_min_i32 : aco_opcode::v_min_i16;
60 case umin8:
61 case umin16: return chip >= GFX10 ? aco_opcode::v_min_u32 : aco_opcode::v_min_u16;
62 case umax8:
63 case umax16: return chip >= GFX10 ? aco_opcode::v_max_u32 : aco_opcode::v_max_u16;
64 case fmin16: return aco_opcode::v_min_f16;
65 case fmax16: return aco_opcode::v_max_f16;
66 case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
67 case imul32: return aco_opcode::v_mul_lo_u32;
68 case fadd32: return aco_opcode::v_add_f32;
69 case fmul32: return aco_opcode::v_mul_f32;
70 case imax32: return aco_opcode::v_max_i32;
71 case imin32: return aco_opcode::v_min_i32;
72 case umin32: return aco_opcode::v_min_u32;
73 case umax32: return aco_opcode::v_max_u32;
74 case fmin32: return aco_opcode::v_min_f32;
75 case fmax32: return aco_opcode::v_max_f32;
76 case iand8:
77 case iand16:
78 case iand32: return aco_opcode::v_and_b32;
79 case ixor8:
80 case ixor16:
81 case ixor32: return aco_opcode::v_xor_b32;
82 case ior8:
83 case ior16:
84 case ior32: return aco_opcode::v_or_b32;
85 case iadd64: return aco_opcode::num_opcodes;
86 case imul64: return aco_opcode::num_opcodes;
87 case fadd64: return aco_opcode::v_add_f64;
88 case fmul64: return aco_opcode::v_mul_f64;
89 case imin64: return aco_opcode::num_opcodes;
90 case imax64: return aco_opcode::num_opcodes;
91 case umin64: return aco_opcode::num_opcodes;
92 case umax64: return aco_opcode::num_opcodes;
93 case fmin64: return aco_opcode::v_min_f64;
94 case fmax64: return aco_opcode::v_max_f64;
95 case iand64: return aco_opcode::num_opcodes;
96 case ior64: return aco_opcode::num_opcodes;
97 case ixor64: return aco_opcode::num_opcodes;
98 default: return aco_opcode::num_opcodes;
99 }
100 }
101
102 bool is_vop3_reduce_opcode(aco_opcode opcode)
103 {
104 /* 64-bit reductions are VOP3. */
105 if (opcode == aco_opcode::num_opcodes)
106 return true;
107
108 return instr_info.format[(int)opcode] == Format::VOP3;
109 }
110
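/* Emits a 32-bit VGPR addition after register allocation. On chips without a
 * carry-less v_add_u32, the builder may have to use a carry-writing add
 * (v_add_co_u32); in that case the extra carry definition is pinned to vcc here.
 */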
111 void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
112 {
113 Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
114 if (instr->definitions.size() >= 2) {
115 assert(instr->definitions[1].regClass() == bld.lm);
116 instr->definitions[1].setFixed(vcc);
117 }
118 }
119
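/* One DPP combine step of a 64-bit reduction, operating on pairs of 32-bit
 * registers. Operations whose instructions cannot take a DPP modifier (the
 * VOP3 add on GFX10, the 64-bit compares, the imul64 expansion) first copy
 * the DPP-selected source into vtmp with v_mov_b32_dpp and operate on vtmp
 * instead. If 'identity' is given, vtmp is pre-filled with it so that lanes
 * skipped by the sparse DPP write still contribute the identity value.
 */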
120 void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
121 PhysReg vtmp_reg, ReduceOp op,
122 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
123 Operand *identity=NULL)
124 {
125 Builder bld(ctx->program, &ctx->instructions);
126 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
127 Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
128 Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
129 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
130 Operand src1_64 = Operand(src1_reg, v2);
131 Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
132 Operand vtmp_op64 = Operand(vtmp_reg, v2);
133 if (op == iadd64) {
134 if (ctx->program->chip_class >= GFX10) {
135 if (identity)
136 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
137 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
138 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
139 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
140 } else {
141 bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
142 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
143 }
144 bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
145 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
146 } else if (op == iand64) {
147 bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
148 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
149 bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
150 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
151 } else if (op == ior64) {
152 bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
153 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
154 bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
155 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
156 } else if (op == ixor64) {
157 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
158 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
159 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
160 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
161 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
162 aco_opcode cmp = aco_opcode::num_opcodes;
163 switch (op) {
164 case umin64:
165 cmp = aco_opcode::v_cmp_gt_u64;
166 break;
167 case umax64:
168 cmp = aco_opcode::v_cmp_lt_u64;
169 break;
170 case imin64:
171 cmp = aco_opcode::v_cmp_gt_i64;
172 break;
173 case imax64:
174 cmp = aco_opcode::v_cmp_lt_i64;
175 break;
176 default:
177 break;
178 }
179
180 if (identity) {
181 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
182 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
183 }
184 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
185 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
186 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
187 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
188
189 bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
190 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
191 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
192 } else if (op == imul64) {
193 /* t4 = dpp(x_hi)
194 * t1 = umul_lo(t4, y_lo)
195 * t3 = dpp(x_lo)
196 * t0 = umul_lo(t3, y_hi)
197 * t2 = iadd(t0, t1)
198 * t5 = umul_hi(t3, y_lo)
199 * res_hi = iadd(t2, t5)
200 * res_lo = umul_lo(t3, y_lo)
201 * Requires that res_hi != src0[0] and res_hi != src1[0]
202 * and that vtmp[0] != res_hi.
203 */
204 if (identity)
205 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
206 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
207 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
208 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
209 if (identity)
210 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
211 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
212 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
213 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
214 emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
215 if (identity)
216 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
217 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
218 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
219 bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
220 emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
221 if (identity)
222 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
223 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
224 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
225 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
226 }
227 }
228
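/* Non-DPP combine step of a 64-bit reduction, operating on pairs of 32-bit
 * registers. src0 may still live in SGPRs (e.g. a readlane result); it is
 * copied into vtmp first for the expansions that need VGPR operands.
 */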
229 void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
230 {
231 Builder bld(ctx->program, &ctx->instructions);
232 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
233 RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
234 Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
235 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
236 Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
237 Operand src1_64 = Operand(src1_reg, v2);
238
239 if (src0_rc == s1 &&
240 (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
241 assert(vtmp.reg() != 0);
242 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
243 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
244 src0_reg = vtmp;
245 src0[0] = Operand(vtmp, v1);
246 src0[1] = Operand(PhysReg{vtmp+1}, v1);
247 src0_64 = Operand(vtmp, v2);
248 } else if (src0_rc == s1 && op == iadd64) {
249 assert(vtmp.reg() != 0);
250 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
251 src0[1] = Operand(PhysReg{vtmp+1}, v1);
252 }
253
254 if (op == iadd64) {
255 if (ctx->program->chip_class >= GFX10) {
256 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
257 } else {
258 bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
259 }
260 bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
261 } else if (op == iand64) {
262 bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
263 bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
264 } else if (op == ior64) {
265 bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
266 bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
267 } else if (op == ixor64) {
268 bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
269 bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
270 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
271 aco_opcode cmp = aco_opcode::num_opcodes;
272 switch (op) {
273 case umin64:
274 cmp = aco_opcode::v_cmp_gt_u64;
275 break;
276 case umax64:
277 cmp = aco_opcode::v_cmp_lt_u64;
278 break;
279 case imin64:
280 cmp = aco_opcode::v_cmp_gt_i64;
281 break;
282 case imax64:
283 cmp = aco_opcode::v_cmp_lt_i64;
284 break;
285 default:
286 break;
287 }
288
289 bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
290 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
291 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
292 } else if (op == imul64) {
293 if (src1_reg == dst_reg) {
294 /* it's fine if src0==dst but not if src1==dst */
295 std::swap(src0_reg, src1_reg);
296 std::swap(src0[0], src1[0]);
297 std::swap(src0[1], src1[1]);
298 std::swap(src0_64, src1_64);
299 }
300 assert(!(src0_reg == src1_reg));
301 /* t1 = umul_lo(x_hi, y_lo)
302 * t0 = umul_lo(x_lo, y_hi)
303 * t2 = iadd(t0, t1)
304 * t5 = umul_hi(x_lo, y_lo)
305 * res_hi = iadd(t2, t5)
306 * res_lo = umul_lo(x_lo, y_lo)
307 * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
308 */
309 Definition tmp0_def(PhysReg{src0_reg+1}, v1);
310 Definition tmp1_def(PhysReg{src1_reg+1}, v1);
311 Operand tmp0_op = src0[1];
312 Operand tmp1_op = src1[1];
313 bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
314 bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
315 emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
316 bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
317 emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
318 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
319 }
320 }
321
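/* Emits one reduction combine step where src0 goes through a DPP lane
 * selector. VOP2-encodable opcodes take the DPP modifier directly; VOP3
 * opcodes cannot, so the selected lanes are first copied into vtmp with
 * v_mov_b32_dpp and the VOP3 instruction then reads vtmp. The 64-bit integer
 * operations are forwarded to emit_int64_dpp_op().
 */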
322 void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
323 PhysReg vtmp, ReduceOp op, unsigned size,
324 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
325 Operand *identity=NULL) /* for VOP3 with sparse writes */
326 {
327 Builder bld(ctx->program, &ctx->instructions);
328 RegClass rc = RegClass(RegType::vgpr, size);
329 Definition dst(dst_reg, rc);
330 Operand src0(src0_reg, rc);
331 Operand src1(src1_reg, rc);
332
333 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
334 bool vop3 = is_vop3_reduce_opcode(opcode);
335
336 if (!vop3) {
337 if (opcode == aco_opcode::v_add_co_u32)
338 bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
339 else
340 bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
341 return;
342 }
343
344 if (opcode == aco_opcode::num_opcodes) {
345 emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
346 dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
347 return;
348 }
349
350 if (identity)
351 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
352 if (identity && size >= 2)
353 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);
354
355 for (unsigned i = 0; i < size; i++)
356 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
357 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
358
359 bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
360 }
361
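/* Plain (non-DPP) combine step: dst = op(src0, src1), where src0 may live in
 * SGPRs (e.g. after a readlane). The 64-bit integer operations go through
 * emit_int64_op().
 */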
362 void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
363 PhysReg vtmp, ReduceOp op, unsigned size)
364 {
365 Builder bld(ctx->program, &ctx->instructions);
366 RegClass rc = RegClass(RegType::vgpr, size);
367 Definition dst(dst_reg, rc);
368 Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
369 Operand src1(src1_reg, rc);
370
371 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
372 bool vop3 = is_vop3_reduce_opcode(opcode);
373
374 if (opcode == aco_opcode::num_opcodes) {
375 emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
376 return;
377 }
378
379 if (vop3) {
380 bld.vop3(opcode, dst, src0, src1);
381 } else if (opcode == aco_opcode::v_add_co_u32) {
382 bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
383 } else {
384 bld.vop2(opcode, dst, src0, src1);
385 }
386 }
387
388 void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
389 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
390 {
391 Builder bld(ctx->program, &ctx->instructions);
392 for (unsigned i = 0; i < size; i++) {
393 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
394 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
395 }
396 }
397
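/* Returns the identity element of a reduction operation, i.e. the value
 * inactive lanes must contribute so they don't change the result. 'idx'
 * selects the low (0) or high (1) dword for 64-bit operations.
 */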
398 uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
399 {
400 switch (op) {
401 case iadd8:
402 case iadd16:
403 case iadd32:
404 case iadd64:
405 case fadd16:
406 case fadd32:
407 case fadd64:
408 case ior8:
409 case ior16:
410 case ior32:
411 case ior64:
412 case ixor8:
413 case ixor16:
414 case ixor32:
415 case ixor64:
416 case umax8:
417 case umax16:
418 case umax32:
419 case umax64:
420 return 0;
421 case imul8:
422 case imul16:
423 case imul32:
424 case imul64:
425 return idx ? 0 : 1;
426 case fmul16:
427 return 0x3c00u; /* 1.0 */
428 case fmul32:
429 return 0x3f800000u; /* 1.0 */
430 case fmul64:
431 return idx ? 0x3ff00000u : 0u; /* 1.0 */
432 case imin8:
433 return INT8_MAX;
434 case imin16:
435 return INT16_MAX;
436 case imin32:
437 return INT32_MAX;
438 case imin64:
439 return idx ? 0x7fffffffu : 0xffffffffu;
440 case imax8:
441 return INT8_MIN;
442 case imax16:
443 return INT16_MIN;
444 case imax32:
445 return INT32_MIN;
446 case imax64:
447 return idx ? 0x80000000u : 0;
448 case umin8:
449 case umin16:
450 case iand8:
451 case iand16:
452 return 0xffffffffu;
453 case umin32:
454 case umin64:
455 case iand32:
456 case iand64:
457 return 0xffffffffu;
458 case fmin16:
459 return 0x7c00u; /* infinity */
460 case fmin32:
461 return 0x7f800000u; /* infinity */
462 case fmin64:
463 return idx ? 0x7ff00000u : 0u; /* infinity */
464 case fmax16:
465 return 0xfc00u; /* negative infinity */
466 case fmax32:
467 return 0xff800000u; /* negative infinity */
468 case fmax64:
469 return idx ? 0xfff00000u : 0u; /* negative infinity */
470 default:
471 unreachable("Invalid reduction operation");
472 break;
473 }
474 return 0;
475 }
476
477 void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
478 {
479 for (unsigned i = 0; i < size; i++) {
480 bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
481 Operand(PhysReg{src+i}, v1), ds_pattern);
482 }
483 }
484
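/* Lowers p_reduce / p_inclusive_scan / p_exclusive_scan:
 * - exec is saved in stmp and all lanes are enabled, then the source is
 *   copied into tmp with v_cndmask_b32 so that previously inactive lanes
 *   receive the reduction identity,
 * - the lanes are combined with a ladder of lane swizzles (DPP where
 *   available, ds_swizzle on GFX6/7, permlanex16/readlane on GFX10) that
 *   doubles the cluster size at each step,
 * - finally exec is restored and the result is read or copied into dst.
 */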
485 void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
486 PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
487 {
488 assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
489 assert(cluster_size <= ctx->program->wave_size);
490
491 Builder bld(ctx->program, &ctx->instructions);
492
493 Operand identity[2];
494 identity[0] = Operand(get_reduction_identity(reduce_op, 0));
495 identity[1] = Operand(get_reduction_identity(reduce_op, 1));
496 Operand vcndmask_identity[2] = {identity[0], identity[1]};
497
498 /* First, copy the source to tmp and set inactive lanes to the identity */
499 bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));
500
501 for (unsigned i = 0; i < src.size(); i++) {
502 /* p_exclusive_scan needs it to be an sgpr or inline constant for v_writelane_b32,
503 * except on GFX10, where v_writelane_b32 can take a literal. */
504 if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
505 bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
506 identity[i] = Operand(PhysReg{sitmp+i}, s1);
507
508 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
509 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
510 } else if (identity[i].isLiteral()) {
511 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
512 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
513 }
514 }
515
516 for (unsigned i = 0; i < src.size(); i++) {
517 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
518 vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
519 Operand(stmp, bld.lm));
520 }
521
522 if (src.regClass() == v1b) {
523 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
524 sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
525 sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
526 if (reduce_op == imin8 || reduce_op == imax8)
527 sdwa->sel[0] = sdwa_sbyte;
528 else
529 sdwa->sel[0] = sdwa_ubyte;
530 sdwa->dst_sel = sdwa_udword;
531 bld.insert(std::move(sdwa));
532 } else if (src.regClass() == v2b) {
533 if (ctx->program->chip_class >= GFX10 &&
534 (reduce_op == iadd16 || reduce_op == imax16 ||
535 reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {
536 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
537 sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
538 sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
539 if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
540 sdwa->sel[0] = sdwa_sword;
541 else
542 sdwa->sel[0] = sdwa_uword;
543 sdwa->dst_sel = sdwa_udword;
544 bld.insert(std::move(sdwa));
545 }
546 }
547
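/* The ladder below combines lanes that are 1, 2, 4, ... apart so that
 * progressively larger clusters share a partial result, stopping once the
 * requested cluster_size is reached. reduction_needs_last_op marks the paths
 * where one more combine still has to happen after the switch.
 */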
548 bool reduction_needs_last_op = false;
549 switch (op) {
550 case aco_opcode::p_reduce:
551 if (cluster_size == 1) break;
552
553 if (ctx->program->chip_class <= GFX7) {
554 reduction_needs_last_op = true;
555 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
556 if (cluster_size == 2) break;
557 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
558 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
559 if (cluster_size == 4) break;
560 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
561 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
562 if (cluster_size == 8) break;
563 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
564 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
565 if (cluster_size == 16) break;
566 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
567 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
568 if (cluster_size == 32) break;
569 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
570 for (unsigned i = 0; i < src.size(); i++)
571 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
572 // TODO: it would be more effective to do the last reduction step on SALU
573 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
574 reduction_needs_last_op = false;
575 break;
576 }
577
578 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
579 if (cluster_size == 2) break;
580 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
581 if (cluster_size == 4) break;
582 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
583 if (cluster_size == 8) break;
584 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
585 if (cluster_size == 16) break;
586
587 if (ctx->program->chip_class >= GFX10) {
588 /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
589 for (unsigned i = 0; i < src.size(); i++)
590 bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
591
592 if (cluster_size == 32) {
593 reduction_needs_last_op = true;
594 break;
595 }
596
597 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
598 for (unsigned i = 0; i < src.size(); i++)
599 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
600 // TODO: it would be more effective to do the last reduction step on SALU
601 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
602 break;
603 }
604
605 if (cluster_size == 32) {
606 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
607 reduction_needs_last_op = true;
608 break;
609 }
610 assert(cluster_size == 64);
611 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
612 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
613 break;
614 case aco_opcode::p_exclusive_scan:
615 if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
616 /* shift rows right */
617 emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
618
619 /* fill in the gaps in rows 1 and 3 */
620 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
621 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
622 for (unsigned i = 0; i < src.size(); i++) {
623 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
624 Definition(PhysReg{vtmp+i}, v1),
625 Operand(PhysReg{tmp+i}, v1),
626 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
627 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
628 }
629 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));
630
631 if (ctx->program->wave_size == 64) {
632 /* fill in the gap in row 2 */
633 for (unsigned i = 0; i < src.size(); i++) {
634 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
635 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
636 }
637 }
638 std::swap(tmp, vtmp);
639 } else if (ctx->program->chip_class >= GFX8) {
640 emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
641 } else {
642 // TODO: use LDS on CS with a single write and shifted read
643 /* wavefront shift_right by 1 on SI/CI */
644 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
645 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
646 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
647 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
648 for (unsigned i = 0; i < src.size(); i++)
649 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
650
651 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
652 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
653 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
654 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
655 for (unsigned i = 0; i < src.size(); i++)
656 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
657
658 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
659 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
660 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
661 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
662 for (unsigned i = 0; i < src.size(); i++)
663 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
664
665 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
666 for (unsigned i = 0; i < src.size(); i++) {
667 bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
668 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
669 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
670 identity[i] = Operand(0u); /* prevent further uses of identity */
671 }
672 std::swap(tmp, vtmp);
673 }
674
675 for (unsigned i = 0; i < src.size(); i++) {
676 if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
677 if (ctx->program->chip_class < GFX10)
678 assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
679 bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
680 }
681 }
682 /* fall through */
683 case aco_opcode::p_inclusive_scan:
684 assert(cluster_size == ctx->program->wave_size);
685 if (ctx->program->chip_class <= GFX7) {
686 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
687 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
688 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
689 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
690
691 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
692 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
693 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
694 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
695 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
696
697 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
698 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
699 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
700 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
701 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
702
703 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
704 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
705 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
706 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
707 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
708
709 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
710 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
711 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
712 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
713 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
714
715 for (unsigned i = 0; i < src.size(); i++)
716 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
717 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
718 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
719 break;
720 }
721
722 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
723 dpp_row_sr(1), 0xf, 0xf, false, identity);
724 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
725 dpp_row_sr(2), 0xf, 0xf, false, identity);
726 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
727 dpp_row_sr(4), 0xf, 0xf, false, identity);
728 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
729 dpp_row_sr(8), 0xf, 0xf, false, identity);
730 if (ctx->program->chip_class >= GFX10) {
731 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
732 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
733 for (unsigned i = 0; i < src.size(); i++) {
734 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
735 Definition(PhysReg{vtmp+i}, v1),
736 Operand(PhysReg{tmp+i}, v1),
737 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
738 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
739 }
740 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
741
742 if (ctx->program->wave_size == 64) {
743 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
744 for (unsigned i = 0; i < src.size(); i++)
745 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
746 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
747 }
748 } else {
749 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
750 dpp_row_bcast15, 0xa, 0xf, false, identity);
751 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
752 dpp_row_bcast31, 0xc, 0xf, false, identity);
753 }
754 break;
755 default:
756 unreachable("Invalid reduction mode");
757 }
758
759
760 if (op == aco_opcode::p_reduce) {
761 if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
762 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
763 emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
764 return;
765 }
766
767 if (reduction_needs_last_op)
768 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
769 }
770
771 /* restore exec */
772 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
773
774 if (dst.regClass().type() == RegType::sgpr) {
775 for (unsigned k = 0; k < src.size(); k++) {
776 bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
777 Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
778 }
779 } else if (dst.physReg() != tmp) {
780 for (unsigned k = 0; k < src.size(); k++) {
781 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
782 Operand(PhysReg{tmp + k}, v1));
783 }
784 }
785 }
786
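/* Note: the temporaries used below are GFX10 "shared VGPRs", registers past
 * the wave's normal VGPR allocation that both halves of a wave64 can access;
 * shared_vgpr_reg_0 is the first such register (VGPR indices start at 256 in
 * ACO's PhysReg numbering).
 */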
787 void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
788 {
789 /* Emulates proper bpermute on GFX10 in wave64 mode.
790 *
791 * This is necessary because on GFX10 the bpermute instruction only works
792 * on half waves (you can think of it as having a cluster size of 32), so we
793 * manually swap the data between the two halves using two shared VGPRs.
794 */
795
796 assert(program->chip_class >= GFX10);
797 assert(program->info->wave_size == 64);
798
799 unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
800 Definition dst = instr->definitions[0];
801 Definition tmp_exec = instr->definitions[1];
802 Definition clobber_scc = instr->definitions[2];
803 Operand index_x4 = instr->operands[0];
804 Operand input_data = instr->operands[1];
805 Operand same_half = instr->operands[2];
806
807 assert(dst.regClass() == v1);
808 assert(tmp_exec.regClass() == bld.lm);
809 assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
810 assert(same_half.regClass() == bld.lm);
811 assert(index_x4.regClass() == v1);
812 assert(input_data.regClass().type() == RegType::vgpr);
813 assert(input_data.bytes() <= 4);
814 assert(dst.physReg() != index_x4.physReg());
815 assert(dst.physReg() != input_data.physReg());
816 assert(tmp_exec.physReg() != same_half.physReg());
817
818 PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
819 PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);
820
821 /* Permute the input within the same half-wave */
822 bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);
823
824 /* HI: Copy data from high lanes 32-63 to shared vgpr */
825 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
826 /* Save EXEC */
827 bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
828 /* Set EXEC to enable LO lanes only */
829 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u));
830 /* LO: Copy data from low lanes 0-31 to shared vgpr */
831 bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
832 /* LO: bpermute shared vgpr (high lanes' data) */
833 bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1));
834 /* Set EXEC to enable HI lanes only */
835 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
836 /* HI: bpermute shared vgpr (low lanes' data) */
837 bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1));
838
839 /* Only enable lanes which use the other half's data */
840 bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half);
841 /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
842 bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
843 /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
844 bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
845
846 /* Restore saved EXEC */
847 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));
848
849 /* RA assumes that the result is always in the low part of the register, so we have to shift if it's not there already */
850 if (input_data.physReg().byte()) {
851 unsigned right_shift = input_data.physReg().byte() * 8;
852 bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1));
853 }
854 }
855
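/* ds_bpermute_b32 is not available before GFX8, so the permutation is
 * emulated with one v_cmpx/readlane/mov sequence per lane: each iteration
 * enables only the lanes whose index selects lane N and broadcasts lane N's
 * data to them.
 */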
856 void emit_gfx6_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
857 {
858 /* Emulates bpermute using readlane instructions */
859
860 Operand index = instr->operands[0];
861 Operand input = instr->operands[1];
862 Definition dst = instr->definitions[0];
863 Definition temp_exec = instr->definitions[1];
864 Definition clobber_vcc = instr->definitions[2];
865
866 assert(dst.regClass() == v1);
867 assert(temp_exec.regClass() == bld.lm);
868 assert(clobber_vcc.regClass() == bld.lm);
869 assert(clobber_vcc.physReg() == vcc);
870 assert(index.regClass() == v1);
871 assert(index.physReg() != dst.physReg());
872 assert(input.regClass().type() == RegType::vgpr);
873 assert(input.bytes() <= 4);
874 assert(input.physReg() != dst.physReg());
875
876 /* Save original EXEC */
877 bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));
878
879 /* An "unrolled loop" that is executed for each lane.
880 * This takes only a few instructions per lane, as opposed to a "real" loop
881 * with branching, where the branch instruction alone would take 16+ cycles.
882 */
883 for (unsigned n = 0; n < program->wave_size; ++n) {
884 /* Activate the lane which has N for its source index */
885 bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand(n), index);
886 /* Read the data from lane N */
887 bld.readlane(Definition(vcc, s1), input, Operand(n));
888 /* On the active lane, move the data we read from lane N to the destination VGPR */
889 bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
890 /* Restore original EXEC */
891 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
892 }
893 }
894
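/* One pending copy of a parallelcopy: uses[i] counts how many other copies
 * still read byte i of the definition register; is_used views all eight
 * counters at once so a single compare tells whether any byte is still needed.
 */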
895 struct copy_operation {
896 Operand op;
897 Definition def;
898 unsigned bytes;
899 union {
900 uint8_t uses[8];
901 uint64_t is_used = 0;
902 };
903 };
904
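/* Splits off the largest naturally aligned, power-of-two sized piece of 'src'
 * starting at 'offset' (at most 4 bytes for VGPRs, 8 for SGPRs) so that it
 * can be emitted as a single mov/swap. Unless ignore_uses is set, the piece
 * must not mix used and unused destination bytes.
 */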
905 void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
906 {
907 PhysReg def_reg = src.def.physReg();
908 PhysReg op_reg = src.op.physReg();
909 def_reg.reg_b += offset;
910 op_reg.reg_b += offset;
911
912 max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);
913
914 /* make sure the size is a power of two and reg % bytes == 0 */
915 unsigned bytes = 1;
916 for (; bytes <= max_size; bytes *= 2) {
917 unsigned next = bytes * 2u;
918 bool can_increase = def_reg.reg_b % next == 0 &&
919 offset + next <= src.bytes && next <= max_size;
920 if (!src.op.isConstant() && can_increase)
921 can_increase = op_reg.reg_b % next == 0;
922 for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
923 can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
924 if (!can_increase)
925 break;
926 }
927
928 RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
929 RegClass(src.def.regClass().type(), bytes).as_subdword();
930 *def = Definition(src.def.tempId(), def_reg, def_cls);
931 if (src.op.isConstant()) {
932 assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
933 if (src.op.bytes() == 8 && bytes == 4)
934 *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
935 else
936 *op = src.op;
937 } else {
938 RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
939 RegClass(src.op.regClass().type(), bytes).as_subdword();
940 *op = Operand(op_reg, op_cls);
941 op->setTemp(Temp(src.op.tempId(), op_cls));
942 }
943 }
944
945 uint32_t get_intersection_mask(int a_start, int a_size,
946 int b_start, int b_size)
947 {
948 int intersection_start = MAX2(b_start - a_start, 0);
949 int intersection_end = MAX2(b_start + b_size - a_start, 0);
950 if (intersection_start >= a_size || intersection_end == 0)
951 return 0;
952
953 uint32_t mask = u_bit_consecutive(0, a_size);
954 return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
955 }
956
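/* Emits all pieces of 'copy' whose destination bytes are not read by another
 * pending copy. Returns true if anything was written; *preserve_scc is set
 * when scc itself is written (via s_cmp_lg_i32), so later swaps know not to
 * clobber it.
 */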
957 bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
958 {
959 bool did_copy = false;
960 for (unsigned offset = 0; offset < copy.bytes;) {
961 if (copy.uses[offset]) {
962 offset++;
963 continue;
964 }
965
966 Definition def;
967 Operand op;
968 split_copy(offset, &def, &op, copy, false, 8);
969
970 if (def.physReg() == scc) {
971 bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
972 *preserve_scc = true;
973 } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
974 bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
975 } else {
976 bld.copy(def, op);
977 }
978
979 ctx->program->statistics[statistic_copies]++;
980
981 did_copy = true;
982 offset += def.bytes();
983 }
984 return did_copy;
985 }
986
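/* Swaps the contents of copy.op and copy.def: v_swap_b32 on GFX9+, the
 * three-xor trick for older VGPR swaps and for SGPRs, a packed v_pk_add_u16
 * with opsel to exchange the two halves of a single VGPR, and the scratch
 * SGPR when scc is involved or must be preserved. The trailing do_copy()
 * fixes up bytes that were swapped but should only have been copied.
 */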
987 void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
988 {
989 unsigned offset = 0;
990
991 if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
992 (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
993 /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
994 PhysReg op = copy.op.physReg();
995 PhysReg def = copy.def.physReg();
996 op.reg_b &= ~0x3;
997 def.reg_b &= ~0x3;
998
999 copy_operation tmp;
1000 tmp.op = Operand(op, v1);
1001 tmp.def = Definition(def, v1);
1002 tmp.bytes = 4;
1003 memset(tmp.uses, 1, 4);
1004 do_swap(ctx, bld, tmp, preserve_scc, pi);
1005
1006 op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1007 def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1008 tmp.op = Operand(op, v1b);
1009 tmp.def = Definition(def, v1b);
1010 tmp.bytes = 1;
1011 tmp.uses[0] = 1;
1012 do_swap(ctx, bld, tmp, preserve_scc, pi);
1013
1014 offset = copy.bytes;
1015 }
1016
1017 for (; offset < copy.bytes;) {
1018 Definition def;
1019 Operand op;
1020 split_copy(offset, &def, &op, copy, true, 8);
1021
1022 assert(op.regClass() == def.regClass());
1023 Operand def_as_op = Operand(def.physReg(), def.regClass());
1024 Definition op_as_def = Definition(op.physReg(), op.regClass());
1025 if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
1026 bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
1027 ctx->program->statistics[statistic_copies]++;
1028 } else if (def.regClass() == v1) {
1029 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1030 bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
1031 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1032 ctx->program->statistics[statistic_copies] += 3;
1033 } else if (op.physReg() == scc || def.physReg() == scc) {
1034 /* we need to swap scc and another sgpr */
1035 assert(!preserve_scc);
1036
1037 PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();
1038
1039 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1040 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
1041 bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
1042 ctx->program->statistics[statistic_copies] += 3;
1043 } else if (def.regClass() == s1) {
1044 if (preserve_scc) {
1045 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
1046 bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
1047 bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
1048 } else {
1049 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1050 bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
1051 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1052 }
1053 ctx->program->statistics[statistic_copies] += 3;
1054 } else if (def.regClass() == s2) {
1055 if (preserve_scc)
1056 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1057 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1058 bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
1059 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1060 if (preserve_scc)
1061 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
1062 ctx->program->statistics[statistic_copies] += 3;
1063 } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
1064 aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
1065 vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
1066 vop3p->operands[1] = Operand(0u);
1067 vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
1068 vop3p->opsel_lo = 0x1;
1069 vop3p->opsel_hi = 0x2;
1070 bld.insert(std::move(vop3p));
1071 } else {
1072 assert(def.regClass().is_subdword());
1073 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1074 bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
1075 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1076 ctx->program->statistics[statistic_copies] += 3;
1077 }
1078
1079 offset += def.bytes();
1080 }
1081
1082 /* fixup in case we swapped bytes we shouldn't have */
1083 copy_operation tmp_copy = copy;
1084 tmp_copy.op.setFixed(copy.def.physReg());
1085 tmp_copy.def.setFixed(copy.op.physReg());
1086 do_copy(ctx, bld, tmp_copy, &preserve_scc);
1087 }
1088
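/* Lowers a set of parallel copies: first counts, for every destination byte,
 * how many other copies still read it, then emits all copies that form
 * acyclic paths in the location-transfer graph, and finally breaks the
 * remaining cycles with swaps, rewriting the surviving entries to read the
 * swapped-in register.
 */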
1089 void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
1090 {
1091 Builder bld(ctx->program, &ctx->instructions);
1092 aco_ptr<Instruction> mov;
1093 std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
1094 std::map<PhysReg, copy_operation>::iterator target;
1095 bool writes_scc = false;
1096
1097 /* count the number of uses for each dst reg */
1098 while (it != copy_map.end()) {
1099
1100 if (it->second.def.physReg() == scc)
1101 writes_scc = true;
1102
1103 assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
1104
1105 /* if src and dst reg are the same, remove operation */
1106 if (it->first == it->second.op.physReg()) {
1107 it = copy_map.erase(it);
1108 continue;
1109 }
1110
1111 /* split large copies */
1112 if (it->second.bytes > 8) {
1113 assert(!it->second.op.isConstant());
1114 assert(!it->second.def.regClass().is_subdword());
1115 RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
1116 Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
1117 rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
1118 Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
1119 copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
1120 copy_map[hi_def.physReg()] = copy;
1121 assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
1122 it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
1123 it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
1124 it->second.bytes = 8;
1125 }
1126
1127 /* check if the definition reg is used by another copy operation */
1128 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1129 if (copy.second.op.isConstant())
1130 continue;
1131 for (uint16_t i = 0; i < it->second.bytes; i++) {
1132 /* distance might underflow */
1133 unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
1134 if (distance < copy.second.bytes)
1135 it->second.uses[i] += 1;
1136 }
1137 }
1138
1139 ++it;
1140 }
1141
1142 /* first, handle paths in the location transfer graph */
1143 bool preserve_scc = pi->tmp_in_scc && !writes_scc;
1144 it = copy_map.begin();
1145 while (it != copy_map.end()) {
1146
1147 /* try to coalesce 32-bit sgpr copies to 64-bit copies */
1148 if (it->second.is_used == 0 &&
1149 it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
1150 !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {
1151
1152 PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
1153 PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
1154 std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);
1155
1156 if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
1157 other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
1158 std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
1159 it = it->first % 2 ? other : it;
1160 copy_map.erase(to_erase);
1161 it->second.bytes = 8;
1162 }
1163 }
1164 // TODO: try to coalesce subdword copies
1165
1166 /* find portions where the target reg is not used as operand for any other copy */
1167 if (it->second.is_used) {
1168 if (it->second.op.isConstant()) {
1169 /* we have to skip constants until is_used=0 */
1170 ++it;
1171 continue;
1172 }
1173
1174 unsigned has_zero_use_bytes = 0;
1175 for (unsigned i = 0; i < it->second.bytes; i++)
1176 has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
1177
1178 if (has_zero_use_bytes) {
1179 /* Skipping partial copying and doing a v_swap_b32 and then fixup
1180 * copies is usually beneficial for sub-dword copies, but if doing
1181 * a partial copy allows further copies, it should be done instead. */
1182 bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
1183 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1184 if (partial_copy)
1185 break;
1186 for (uint16_t i = 0; i < copy.second.bytes; i++) {
1187 /* distance might underflow */
1188 unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
1189 if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
1190 !it->second.uses[distance])
1191 partial_copy = true;
1192 }
1193 }
1194
1195 if (!partial_copy) {
1196 ++it;
1197 continue;
1198 }
1199 } else {
1200 /* full target reg is used: register swapping needed */
1201 ++it;
1202 continue;
1203 }
1204 }
1205
1206 bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);
1207
1208 std::pair<PhysReg, copy_operation> copy = *it;
1209
1210 if (it->second.is_used == 0) {
1211 /* the target reg is not used as operand for any other copy, so we
1212 * copied to all of it */
1213 copy_map.erase(it);
1214 it = copy_map.begin();
1215 } else {
1216 /* we only performed some portions of this copy, so split it to only
1217 * leave the portions that still need to be done */
1218 copy_operation original = it->second; /* the map insertion below can overwrite this */
1219 copy_map.erase(it);
1220 for (unsigned offset = 0; offset < original.bytes;) {
1221 if (original.uses[offset] == 0) {
1222 offset++;
1223 continue;
1224 }
1225 Definition def;
1226 Operand op;
1227 split_copy(offset, &def, &op, original, false, 8);
1228
1229 copy_operation copy = {op, def, def.bytes()};
1230 for (unsigned i = 0; i < copy.bytes; i++)
1231 copy.uses[i] = original.uses[i + offset];
1232 copy_map[def.physReg()] = copy;
1233
1234 offset += def.bytes();
1235 }
1236
1237 it = copy_map.begin();
1238 }
1239
1240 /* Reduce the number of uses of the operand reg by one. Do this after
1241 * splitting the copy or removing it in case the copy writes to its own
1242 * operand (for example, v[7:8] = v[8:9]) */
1243 if (did_copy && !copy.second.op.isConstant()) {
1244 for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
1245 for (uint16_t i = 0; i < other.second.bytes; i++) {
1246 /* distance might underflow */
1247 unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
1248 if (distance < copy.second.bytes && !copy.second.uses[distance])
1249 other.second.uses[i] -= 1;
1250 }
1251 }
1252 }
1253 }
1254
1255 if (copy_map.empty())
1256 return;
1257
1258 /* all target regs are needed as operand somewhere which means, all entries are part of a cycle */
1259 unsigned largest = 0;
1260 for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
1261 largest = MAX2(largest, op.second.bytes);
1262
1263 while (!copy_map.empty()) {
1264
1265 /* Perform larger swaps first, because larger swaps can make other
1266 * swaps unnecessary. */
1267 auto it = copy_map.begin();
1268 for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
1269 if (it2->second.bytes > it->second.bytes) {
1270 it = it2;
1271 if (it->second.bytes == largest)
1272 break;
1273 }
1274 }
1275
1276 /* should already be done */
1277 assert(!it->second.op.isConstant());
1278
1279 assert(it->second.op.isFixed());
1280 assert(it->second.def.regClass() == it->second.op.regClass());
1281
1282 if (it->first == it->second.op.physReg()) {
1283 copy_map.erase(it);
1284 continue;
1285 }
1286
1287 if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
1288 assert(!(it->second.def.physReg() == pi->scratch_sgpr));
1289
1290 /* to resolve the cycle, we have to swap the src reg with the dst reg */
1291 copy_operation swap = it->second;
1292
1293 /* if this is self-intersecting, we have to split it because
1294 * self-intersecting swaps don't make sense */
1295 PhysReg lower = swap.def.physReg();
1296 PhysReg higher = swap.op.physReg();
1297 if (lower.reg_b > higher.reg_b)
1298 std::swap(lower, higher);
1299 if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
1300 unsigned offset = higher.reg_b - lower.reg_b;
1301 RegType type = swap.def.regClass().type();
1302
1303 copy_operation middle;
1304 lower.reg_b += offset;
1305 higher.reg_b += offset;
1306 middle.bytes = swap.bytes - offset * 2;
1307 memcpy(middle.uses, swap.uses + offset, middle.bytes);
1308 middle.op = Operand(lower, RegClass::get(type, middle.bytes));
1309 middle.def = Definition(higher, RegClass::get(type, middle.bytes));
1310 copy_map[higher] = middle;
1311
1312 copy_operation end;
1313 lower.reg_b += middle.bytes;
1314 higher.reg_b += middle.bytes;
1315 end.bytes = swap.bytes - (offset + middle.bytes);
1316 memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
1317 end.op = Operand(lower, RegClass::get(type, end.bytes));
1318 end.def = Definition(higher, RegClass::get(type, end.bytes));
1319 copy_map[higher] = end;
1320
1321 memset(swap.uses + offset, 0, swap.bytes - offset);
1322 swap.bytes = offset;
1323 }
1324
1325 do_swap(ctx, bld, swap, preserve_scc, pi);
1326
1327 /* remove from map */
1328 copy_map.erase(it);
1329
1330 /* change the operand reg of the target's uses and split uses if needed */
1331 target = copy_map.begin();
1332 uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
1333 for (; target != copy_map.end(); ++target) {
1334 if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
1335 target->second.op.setFixed(swap.op.physReg());
1336 break;
1337 }
1338
1339 uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
1340 target->second.op.physReg().reg_b, target->second.bytes);
1341
1342 if (!imask)
1343 continue;
1344
1345 assert(target->second.bytes < swap.bytes);
1346
1347 int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;
1348
1349 /* split and update the middle (the portion that reads the swap's
1350 * definition) to read the swap's operand instead */
1351 int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
1352 int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
1353 int before_bytes = MAX2(-offset, 0);
1354 int after_bytes = MAX2(target_op_end - swap_def_end, 0);
1355 int middle_bytes = target->second.bytes - before_bytes - after_bytes;
1356
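/* the tail that extends past the swapped definition is unaffected:
 * it keeps reading from its original source register */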
1357 if (after_bytes) {
1358 unsigned after_offset = before_bytes + middle_bytes;
1359 assert(after_offset > 0);
1360 copy_operation copy;
1361 copy.bytes = after_bytes;
1362 memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
1363 RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
1364 copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
1365 copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
1366 copy_map[copy.def.physReg()] = copy;
1367 }
1368
1369 if (middle_bytes) {
1370 copy_operation copy;
1371 copy.bytes = middle_bytes;
1372 memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
1373 RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
1374 copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
1375 copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
1376 copy_map[copy.def.physReg()] = copy;
1377 }
1378
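/* the leading part that precedes the swapped definition also keeps its
 * original source; shrink the existing entry in place to cover only it */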
1379 if (before_bytes) {
1380 copy_operation copy;
1381 target->second.bytes = before_bytes;
1382 RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
1383 target->second.op = Operand(target->second.op.physReg(), rc);
1384 target->second.def = Definition(target->second.def.physReg(), rc);
1385 memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
1386 }
1387
1388 /* break early since we know each byte of the swap's definition is used
1389 * at most once */
1390 bytes_left &= ~imask;
1391 if (!bytes_left)
1392 break;
1393 }
1394 }
1395 }
1396
1397 void lower_to_hw_instr(Program* program)
1398 {
1399 Block *discard_block = NULL;
1400
1401 for (size_t i = 0; i < program->blocks.size(); i++)
1402 {
1403 Block *block = &program->blocks[i];
1404 lower_context ctx;
1405 ctx.program = program;
1406 Builder bld(program, &ctx.instructions);
1407
1408 bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
1409 for (unsigned pred : block->linear_preds) {
1410 if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
1411 set_mode = true;
1412 break;
1413 }
1414 }
1415 if (set_mode) {
1416 /* only allow changing modes at top-level blocks so this doesn't break
1417 * the "jump over empty blocks" optimization */
1418 assert(block->kind & block_kind_top_level);
1419 uint32_t mode = block->fp_mode.val;
1420 /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
1421 bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
1422 }
1423
1424 for (size_t j = 0; j < block->instructions.size(); j++) {
1425 aco_ptr<Instruction>& instr = block->instructions[j];
1426 aco_ptr<Instruction> mov;
1427 if (instr->format == Format::PSEUDO) {
1428 Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();
1429
1430 switch (instr->opcode)
1431 {
1432 case aco_opcode::p_extract_vector:
1433 {
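/* select one element: read from the source register advanced by
 * index * element size and emit it as an ordinary copy */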
1434 PhysReg reg = instr->operands[0].physReg();
1435 Definition& def = instr->definitions[0];
1436 reg.reg_b += instr->operands[1].constantValue() * def.bytes();
1437
1438 if (reg == def.physReg())
1439 break;
1440
1441 RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
1442 RegClass(instr->operands[0].getTemp().type(), def.size());
1443 std::map<PhysReg, copy_operation> copy_operations;
1444 copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
1445 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1446 break;
1447 }
1448 case aco_opcode::p_create_vector:
1449 {
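/* pack the operands back-to-back into the definition's registers:
 * constants are copied as immediates, undefined operands merely
 * advance the write position */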
1450 std::map<PhysReg, copy_operation> copy_operations;
1451 PhysReg reg = instr->definitions[0].physReg();
1452
1453 for (const Operand& op : instr->operands) {
1454 if (op.isConstant()) {
1455 const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
1456 copy_operations[reg] = {op, def, op.bytes()};
1457 reg.reg_b += op.bytes();
1458 continue;
1459 }
1460 if (op.isUndefined()) {
1461 // TODO: coalesce subdword copies if dst byte is 0
1462 reg.reg_b += op.bytes();
1463 continue;
1464 }
1465
1466 RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
1467 RegClass(instr->definitions[0].getTemp().type(), op.size());
1468 const Definition def = Definition(reg, rc_def);
1469 copy_operations[def.physReg()] = {op, def, op.bytes()};
1470 reg.reg_b += op.bytes();
1471 }
1472 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1473 break;
1474 }
1475 case aco_opcode::p_split_vector:
1476 {
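/* inverse of p_create_vector: each definition reads a consecutive
 * sub-range of the source vector */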
1477 std::map<PhysReg, copy_operation> copy_operations;
1478 PhysReg reg = instr->operands[0].physReg();
1479
1480 for (const Definition& def : instr->definitions) {
1481 RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
1482 RegClass(instr->operands[0].getTemp().type(), def.size());
1483 const Operand op = Operand(reg, rc_op);
1484 copy_operations[def.physReg()] = {op, def, def.bytes()};
1485 reg.reg_b += def.bytes();
1486 }
1487 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1488 break;
1489 }
1490 case aco_opcode::p_parallelcopy:
1491 case aco_opcode::p_wqm:
1492 {
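/* collect all element copies into one map so handle_operands can
 * order them and resolve overlapping and swapped registers */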
1493 std::map<PhysReg, copy_operation> copy_operations;
1494 for (unsigned i = 0; i < instr->operands.size(); i++) {
1495 assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
1496 copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
1497 }
1498 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1499 break;
1500 }
1501 case aco_opcode::p_exit_early_if:
1502 {
1503 /* don't bother with an early exit near the end of the program */
1504 if ((block->instructions.size() - 1 - j) <= 4 &&
1505 block->instructions.back()->opcode == aco_opcode::s_endpgm) {
1506 unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
1507 bool ignore_early_exit = true;
1508
1509 for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
1510 const aco_ptr<Instruction> &instr = block->instructions[k];
1511 if (instr->opcode == aco_opcode::s_endpgm ||
1512 instr->opcode == aco_opcode::p_logical_end)
1513 continue;
1514 else if (instr->opcode == aco_opcode::exp &&
1515 static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
1516 continue;
1517 else if (instr->opcode == aco_opcode::p_parallelcopy &&
1518 instr->definitions[0].isFixed() &&
1519 instr->definitions[0].physReg() == exec)
1520 continue;
1521
1522 ignore_early_exit = false;
1523 }
1524
1525 if (ignore_early_exit)
1526 break;
1527 }
1528
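/* the discard block is created once and shared by all early exits:
 * it performs a null export, writes back the SMEM cache if needed
 * and ends the program */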
1529 if (!discard_block) {
1530 discard_block = program->create_and_insert_block();
1531 block = &program->blocks[i];
1532
1533 bld.reset(discard_block);
1534 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
1535 0, V_008DFC_SQ_EXP_NULL, false, true, true);
1536 if (program->wb_smem_l1_on_end)
1537 bld.smem(aco_opcode::s_dcache_wb);
1538 bld.sopp(aco_opcode::s_endpgm);
1539
1540 bld.reset(&ctx.instructions);
1541 }
1542
1543 // TODO: exec can be zero here with block_kind_discard
1544
1545 assert(instr->operands[0].physReg() == scc);
1546 bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);
1547
1548 discard_block->linear_preds.push_back(block->index);
1549 block->linear_succs.push_back(discard_block->index);
1550 break;
1551 }
1552 case aco_opcode::p_spill:
1553 {
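/* spill: write each SGPR dword into one lane of the linear VGPR,
 * starting at the constant lane offset given in operand 1 */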
1554 assert(instr->operands[0].regClass() == v1.as_linear());
1555 for (unsigned i = 0; i < instr->operands[2].size(); i++)
1556 bld.writelane(bld.def(v1, instr->operands[0].physReg()),
1557 Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
1558 Operand(instr->operands[1].constantValue() + i),
1559 instr->operands[0]);
1560 break;
1561 }
1562 case aco_opcode::p_reload:
1563 {
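/* reload: read each dword back from the linear VGPR's lanes into
 * consecutive SGPRs */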
1564 assert(instr->operands[0].regClass() == v1.as_linear());
1565 for (unsigned i = 0; i < instr->definitions[0].size(); i++)
1566 bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1567 instr->operands[0],
1568 Operand(instr->operands[1].constantValue() + i));
1569 break;
1570 }
1571 case aco_opcode::p_as_uniform:
1572 {
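/* constants and SGPR sources can simply be copied; VGPR sources are
 * made uniform one dword at a time with v_readfirstlane_b32 */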
1573 if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
1574 std::map<PhysReg, copy_operation> copy_operations;
1575 copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
1576 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1577 } else {
1578 assert(instr->operands[0].regClass().type() == RegType::vgpr);
1579 assert(instr->definitions[0].regClass().type() == RegType::sgpr);
1580 assert(instr->operands[0].size() == instr->definitions[0].size());
1581 for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
1582 bld.vop1(aco_opcode::v_readfirstlane_b32,
1583 bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1584 Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
1585 }
1586 }
1587 break;
1588 }
1589 case aco_opcode::p_bpermute:
1590 {
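/* GFX6-7 lack ds_bpermute and GFX10 wave64 cannot shuffle across the
 * full wave with it, so both need dedicated lowering; any other
 * configuration emits ds_bpermute during instruction selection */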
1591 if (ctx.program->chip_class <= GFX7)
1592 emit_gfx6_bpermute(program, instr, bld);
1593 else if (ctx.program->chip_class == GFX10 && ctx.program->wave_size == 64)
1594 emit_gfx10_wave64_bpermute(program, instr, bld);
1595 else
1596 unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
1597 }
break;
1598 default:
1599 break;
1600 }
1601 } else if (instr->format == Format::PSEUDO_BRANCH) {
1602 Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
1603 /* check if all blocks from current to target are empty */
1604 bool can_remove = block->index < branch->target[0];
1605 for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
1606 if (program->blocks[i].instructions.size())
1607 can_remove = false;
1608 }
1609 if (can_remove)
1610 continue;
1611
1612 switch (instr->opcode) {
1613 case aco_opcode::p_branch:
1614 assert(block->linear_succs[0] == branch->target[0]);
1615 bld.sopp(aco_opcode::s_branch, branch->target[0]);
1616 break;
1617 case aco_opcode::p_cbranch_nz:
1618 assert(block->linear_succs[1] == branch->target[0]);
1619 if (branch->operands[0].physReg() == exec)
1620 bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
1621 else if (branch->operands[0].physReg() == vcc)
1622 bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
1623 else {
1624 assert(branch->operands[0].physReg() == scc);
1625 bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
1626 }
1627 break;
1628 case aco_opcode::p_cbranch_z:
1629 assert(block->linear_succs[1] == branch->target[0]);
1630 if (branch->operands[0].physReg() == exec)
1631 bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
1632 else if (branch->operands[0].physReg() == vcc)
1633 bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
1634 else {
1635 assert(branch->operands[0].physReg() == scc);
1636 bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
1637 }
1638 break;
1639 default:
1640 unreachable("Unknown Pseudo branch instruction!");
1641 }
1642
1643 } else if (instr->format == Format::PSEUDO_REDUCTION) {
1644 Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
1645 emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
1646 reduce->operands[1].physReg(), // tmp
1647 reduce->definitions[1].physReg(), // stmp
1648 reduce->operands[2].physReg(), // vtmp
1649 reduce->definitions[2].physReg(), // sitmp
1650 reduce->operands[0], reduce->definitions[0]);
1651 } else {
1652 ctx.instructions.emplace_back(std::move(instr));
1653 }
1654
1655 }
1656 block->instructions.swap(ctx.instructions);
1657 }
1658 }
1659
1660 }