aco: decrease the uses of other copy operations after splitting/removing
[mesa.git] / src / amd / compiler / aco_lower_to_hw_instr.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
25 *
26 */
27
28 #include <map>
29
30 #include "aco_ir.h"
31 #include "aco_builder.h"
32 #include "util/u_math.h"
33 #include "sid.h"
34 #include "vulkan/radv_shader.h"
35
36
37 namespace aco {
38
39 struct lower_context {
40 Program *program;
41 std::vector<aco_ptr<Instruction>> instructions;
42 };
43
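/* Maps a ReduceOp to the VALU opcode that performs one combine step.
 * Returns aco_opcode::num_opcodes when no single hardware opcode exists
 * (the 64-bit integer ops); callers then lower that step through
 * emit_int64_op()/emit_int64_dpp_op() instead. */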
44 aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
45 switch (op) {
46 case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
47 case imul32: return aco_opcode::v_mul_lo_u32;
48 case fadd32: return aco_opcode::v_add_f32;
49 case fmul32: return aco_opcode::v_mul_f32;
50 case imax32: return aco_opcode::v_max_i32;
51 case imin32: return aco_opcode::v_min_i32;
52 case umin32: return aco_opcode::v_min_u32;
53 case umax32: return aco_opcode::v_max_u32;
54 case fmin32: return aco_opcode::v_min_f32;
55 case fmax32: return aco_opcode::v_max_f32;
56 case iand32: return aco_opcode::v_and_b32;
57 case ixor32: return aco_opcode::v_xor_b32;
58 case ior32: return aco_opcode::v_or_b32;
59 case iadd64: return aco_opcode::num_opcodes;
60 case imul64: return aco_opcode::num_opcodes;
61 case fadd64: return aco_opcode::v_add_f64;
62 case fmul64: return aco_opcode::v_mul_f64;
63 case imin64: return aco_opcode::num_opcodes;
64 case imax64: return aco_opcode::num_opcodes;
65 case umin64: return aco_opcode::num_opcodes;
66 case umax64: return aco_opcode::num_opcodes;
67 case fmin64: return aco_opcode::v_min_f64;
68 case fmax64: return aco_opcode::v_max_f64;
69 case iand64: return aco_opcode::num_opcodes;
70 case ior64: return aco_opcode::num_opcodes;
71 case ixor64: return aco_opcode::num_opcodes;
72 default: return aco_opcode::num_opcodes;
73 }
74 }
75
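/* Emits a 32-bit VGPR addition; if the selected opcode produces a carry-out
 * definition, that definition is fixed to vcc. */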
76 void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
77 {
78 Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
79 if (instr->definitions.size() >= 2) {
80 assert(instr->definitions[1].regClass() == bld.lm);
81 instr->definitions[1].setFixed(vcc);
82 }
83 }
84
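/* One combine step of a 64-bit integer reduction where src0 is read through a
 * DPP lane shuffle. The operation is carried out per 32-bit half; when
 * `identity` is provided, the temporary is pre-loaded with it so that lanes
 * without a valid DPP source combine with the identity instead of stale data. */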
85 void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
86 PhysReg vtmp_reg, ReduceOp op,
87 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
88 Operand *identity=NULL)
89 {
90 Builder bld(ctx->program, &ctx->instructions);
91 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
92 Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
93 Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
94 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
95 Operand src1_64 = Operand(src1_reg, v2);
96 Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
97 Operand vtmp_op64 = Operand(vtmp_reg, v2);
98 if (op == iadd64) {
99 if (ctx->program->chip_class >= GFX10) {
100 if (identity)
101 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
102 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
103 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
104 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
105 } else {
106 bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
107 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
108 }
109 bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
110 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
111 } else if (op == iand64) {
112 bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
113 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
114 bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
115 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
116 } else if (op == ior64) {
117 bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
118 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
119 bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
120 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
121 } else if (op == ixor64) {
122 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
123 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
124 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
125 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
126 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
127 aco_opcode cmp = aco_opcode::num_opcodes;
128 switch (op) {
129 case umin64:
130 cmp = aco_opcode::v_cmp_gt_u64;
131 break;
132 case umax64:
133 cmp = aco_opcode::v_cmp_lt_u64;
134 break;
135 case imin64:
136 cmp = aco_opcode::v_cmp_gt_i64;
137 break;
138 case imax64:
139 cmp = aco_opcode::v_cmp_lt_i64;
140 break;
141 default:
142 break;
143 }
144
145 if (identity) {
146 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
147 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
148 }
149 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
150 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
151 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
152 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
153
154 bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
155 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
156 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
157 } else if (op == imul64) {
158 /* t4 = dpp(x_hi)
159 * t1 = umul_lo(t4, y_lo)
160 * t3 = dpp(x_lo)
161 * t0 = umul_lo(t3, y_hi)
162 * t2 = iadd(t0, t1)
163 * t5 = umul_hi(t3, y_lo)
164 * res_hi = iadd(t2, t5)
165 * res_lo = umul_lo(t3, y_lo)
166 * Requires that res_hi != src0[0] and res_hi != src1[0]
167 * and that vtmp[0] != res_hi.
168 */
169 if (identity)
170 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
171 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
172 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
173 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
174 if (identity)
175 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
176 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
177 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
178 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
179 emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
180 if (identity)
181 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
182 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
183 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
184 bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
185 emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
186 if (identity)
187 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
188 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
189 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
190 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
191 }
192 }
193
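/* One combine step of a 64-bit integer reduction without DPP: both sources are
 * read directly. When src0 lives in sgprs and the operation cannot take sgpr
 * operands directly, it is (partially) copied into vtmp first. */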
194 void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
195 {
196 Builder bld(ctx->program, &ctx->instructions);
197 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
198 RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
199 Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
200 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
201 Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
202 Operand src1_64 = Operand(src1_reg, v2);
203
204 if (src0_rc == s1 &&
205 (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
206 assert(vtmp.reg() != 0);
207 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
208 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
209 src0_reg = vtmp;
210 src0[0] = Operand(vtmp, v1);
211 src0[1] = Operand(PhysReg{vtmp+1}, v1);
212 src0_64 = Operand(vtmp, v2);
213 } else if (src0_rc == s1 && op == iadd64) {
214 assert(vtmp.reg() != 0);
215 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
216 src0[1] = Operand(PhysReg{vtmp+1}, v1);
217 }
218
219 if (op == iadd64) {
220 if (ctx->program->chip_class >= GFX10) {
221 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
222 } else {
223 bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
224 }
225 bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
226 } else if (op == iand64) {
227 bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
228 bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
229 } else if (op == ior64) {
230 bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
231 bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
232 } else if (op == ixor64) {
233 bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
234 bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
235 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
236 aco_opcode cmp = aco_opcode::num_opcodes;
237 switch (op) {
238 case umin64:
239 cmp = aco_opcode::v_cmp_gt_u64;
240 break;
241 case umax64:
242 cmp = aco_opcode::v_cmp_lt_u64;
243 break;
244 case imin64:
245 cmp = aco_opcode::v_cmp_gt_i64;
246 break;
247 case imax64:
248 cmp = aco_opcode::v_cmp_lt_i64;
249 break;
250 default:
251 break;
252 }
253
254 bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
255 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
256 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
257 } else if (op == imul64) {
258 if (src1_reg == dst_reg) {
259 /* it's fine if src0==dst but not if src1==dst */
260 std::swap(src0_reg, src1_reg);
261 std::swap(src0[0], src1[0]);
262 std::swap(src0[1], src1[1]);
263 std::swap(src0_64, src1_64);
264 }
265 assert(!(src0_reg == src1_reg));
266 /* t1 = umul_lo(x_hi, y_lo)
267 * t0 = umul_lo(x_lo, y_hi)
268 * t2 = iadd(t0, t1)
269 * t5 = umul_hi(x_lo, y_lo)
270 * res_hi = iadd(t2, t5)
271 * res_lo = umul_lo(x_lo, y_lo)
272 * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
273 */
274 Definition tmp0_def(PhysReg{src0_reg+1}, v1);
275 Definition tmp1_def(PhysReg{src1_reg+1}, v1);
276 Operand tmp0_op = src0[1];
277 Operand tmp1_op = src1[1];
278 bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
279 bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
280 emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
281 bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
282 emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
283 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
284 }
285 }
286
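/* One combine step where src0 is read through a DPP lane shuffle. VOP2-encodable
 * 32-bit ops use DPP directly; imul32 and 64-bit ops need VOP3 or a multi-
 * instruction sequence, so the shuffled source is first moved into vtmp. */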
287 void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
288 PhysReg vtmp, ReduceOp op, unsigned size,
289 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
290 Operand *identity=NULL) /* for VOP3 with sparse writes */
291 {
292 Builder bld(ctx->program, &ctx->instructions);
293 RegClass rc = RegClass(RegType::vgpr, size);
294 Definition dst(dst_reg, rc);
295 Operand src0(src0_reg, rc);
296 Operand src1(src1_reg, rc);
297
298 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
299 bool vop3 = op == imul32 || size == 2;
300
301 if (!vop3) {
302 if (opcode == aco_opcode::v_add_co_u32)
303 bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
304 else
305 bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
306 return;
307 }
308
309 if (opcode == aco_opcode::num_opcodes) {
310                emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
311 dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
312 return;
313 }
314
315 if (identity)
316 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
317 if (identity && size >= 2)
318 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);
319
320 for (unsigned i = 0; i < size; i++)
321 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
322 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
323
324 bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
325 }
326
327 void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
328 PhysReg vtmp, ReduceOp op, unsigned size)
329 {
330 Builder bld(ctx->program, &ctx->instructions);
331 RegClass rc = RegClass(RegType::vgpr, size);
332 Definition dst(dst_reg, rc);
333 Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
334 Operand src1(src1_reg, rc);
335
336 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
337 bool vop3 = op == imul32 || size == 2;
338
339 if (opcode == aco_opcode::num_opcodes) {
340 emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
341 return;
342 }
343
344 if (vop3) {
345 bld.vop3(opcode, dst, src0, src1);
346 } else if (opcode == aco_opcode::v_add_co_u32) {
347 bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
348 } else {
349 bld.vop2(opcode, dst, src0, src1);
350 }
351 }
352
353 void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
354 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
355 {
356 Builder bld(ctx->program, &ctx->instructions);
357 for (unsigned i = 0; i < size; i++) {
358 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
359 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
360 }
361 }
362
363 uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
364 {
365 switch (op) {
366 case iadd32:
367 case iadd64:
368 case fadd32:
369 case fadd64:
370 case ior32:
371 case ior64:
372 case ixor32:
373 case ixor64:
374 case umax32:
375 case umax64:
376 return 0;
377 case imul32:
378 case imul64:
379 return idx ? 0 : 1;
380 case fmul32:
381 return 0x3f800000u; /* 1.0 */
382 case fmul64:
383 return idx ? 0x3ff00000u : 0u; /* 1.0 */
384 case imin32:
385 return INT32_MAX;
386 case imin64:
387 return idx ? 0x7fffffffu : 0xffffffffu;
388 case imax32:
389 return INT32_MIN;
390 case imax64:
391 return idx ? 0x80000000u : 0;
392 case umin32:
393 case umin64:
394 case iand32:
395 case iand64:
396 return 0xffffffffu;
397 case fmin32:
398 return 0x7f800000u; /* infinity */
399 case fmin64:
400 return idx ? 0x7ff00000u : 0u; /* infinity */
401 case fmax32:
402 return 0xff800000u; /* negative infinity */
403 case fmax64:
404 return idx ? 0xfff00000u : 0u; /* negative infinity */
405 default:
406 unreachable("Invalid reduction operation");
407 break;
408 }
409 return 0;
410 }
411
412 void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
413 {
414 for (unsigned i = 0; i < size; i++) {
415 bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
416 Operand(PhysReg{src+i}, v1), ds_pattern);
417 }
418 }
419
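/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan: the source is first copied
 * to tmp with inactive lanes set to the identity, then combined step by step
 * using DPP row operations (GFX8+), ds_swizzle (GFX6/7), or v_permlanex16 plus
 * readlane/writelane on GFX10, which lacks the row_bcast DPP modes. */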
420 void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
421 PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
422 {
423 assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
424 assert(cluster_size <= ctx->program->wave_size);
425
426 Builder bld(ctx->program, &ctx->instructions);
427
428 Operand identity[2];
429 identity[0] = Operand(get_reduction_identity(reduce_op, 0));
430 identity[1] = Operand(get_reduction_identity(reduce_op, 1));
431 Operand vcndmask_identity[2] = {identity[0], identity[1]};
432
433 /* First, copy the source to tmp and set inactive lanes to the identity */
434 bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));
435
436 for (unsigned i = 0; i < src.size(); i++) {
437                /* p_exclusive_scan needs the identity to be an sgpr or inline constant for the v_writelane_b32
438 * except on GFX10, where v_writelane_b32 can take a literal. */
439 if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
440 bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
441 identity[i] = Operand(PhysReg{sitmp+i}, s1);
442
443 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
444 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
445 } else if (identity[i].isLiteral()) {
446 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
447 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
448 }
449 }
450
451 for (unsigned i = 0; i < src.size(); i++) {
452 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
453 vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
454 Operand(stmp, bld.lm));
455 }
456
457 bool reduction_needs_last_op = false;
458 switch (op) {
459 case aco_opcode::p_reduce:
460 if (cluster_size == 1) break;
461
462 if (ctx->program->chip_class <= GFX7) {
463 reduction_needs_last_op = true;
464 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
465 if (cluster_size == 2) break;
466 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
467 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
468 if (cluster_size == 4) break;
469 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
470 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
471 if (cluster_size == 8) break;
472 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
473 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
474 if (cluster_size == 16) break;
475 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
476 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
477 if (cluster_size == 32) break;
478 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
479 for (unsigned i = 0; i < src.size(); i++)
480 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
481                // TODO: it would be more efficient to do the last reduction step on SALU
482 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
483 reduction_needs_last_op = false;
484 break;
485 }
486
487 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
488 if (cluster_size == 2) break;
489 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
490 if (cluster_size == 4) break;
491 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
492 if (cluster_size == 8) break;
493 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
494 if (cluster_size == 16) break;
495
496 if (ctx->program->chip_class >= GFX10) {
497 /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
498 for (unsigned i = 0; i < src.size(); i++)
499 bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
500
501 if (cluster_size == 32) {
502 reduction_needs_last_op = true;
503 break;
504 }
505
506 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
507 for (unsigned i = 0; i < src.size(); i++)
508 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
509                // TODO: it would be more efficient to do the last reduction step on SALU
510 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
511 break;
512 }
513
514 if (cluster_size == 32) {
515 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
516 reduction_needs_last_op = true;
517 break;
518 }
519 assert(cluster_size == 64);
520 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
521 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
522 break;
523 case aco_opcode::p_exclusive_scan:
524 if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
525 /* shift rows right */
526 emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
527
528 /* fill in the gaps in rows 1 and 3 */
529 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
530 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
531 for (unsigned i = 0; i < src.size(); i++) {
532 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
533 Definition(PhysReg{vtmp+i}, v1),
534 Operand(PhysReg{tmp+i}, v1),
535 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
536 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
537 }
538 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));
539
540 if (ctx->program->wave_size == 64) {
541 /* fill in the gap in row 2 */
542 for (unsigned i = 0; i < src.size(); i++) {
543 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
544 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
545 }
546 }
547 std::swap(tmp, vtmp);
548 } else if (ctx->program->chip_class >= GFX8) {
549 emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
550 } else {
551 // TODO: use LDS on CS with a single write and shifted read
552 /* wavefront shift_right by 1 on SI/CI */
553 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
554 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
555 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
556 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
557 for (unsigned i = 0; i < src.size(); i++)
558 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
559
560 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
561 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
562 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
563 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
564 for (unsigned i = 0; i < src.size(); i++)
565 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
566
567 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
568 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
569 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
570 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
571 for (unsigned i = 0; i < src.size(); i++)
572 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
573
574 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
575 for (unsigned i = 0; i < src.size(); i++) {
576 bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
577 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
578 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
579 identity[i] = Operand(0u); /* prevent further uses of identity */
580 }
581 std::swap(tmp, vtmp);
582 }
583
584 for (unsigned i = 0; i < src.size(); i++) {
585                if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
586 if (ctx->program->chip_class < GFX10)
587 assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
588 bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
589 }
590 }
591 /* fall through */
592 case aco_opcode::p_inclusive_scan:
593 assert(cluster_size == ctx->program->wave_size);
594 if (ctx->program->chip_class <= GFX7) {
595 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
596 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
597 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
598 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
599
600 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
601 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
602 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
603 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
604 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
605
606 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
607 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
608 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
609 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
610 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
611
612 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
613 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
614 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
615 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
616 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
617
618 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
619 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
620 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
621 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
622 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
623
624 for (unsigned i = 0; i < src.size(); i++)
625 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
626 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
627 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
628 break;
629 }
630
631 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
632 dpp_row_sr(1), 0xf, 0xf, false, identity);
633 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
634 dpp_row_sr(2), 0xf, 0xf, false, identity);
635 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
636 dpp_row_sr(4), 0xf, 0xf, false, identity);
637 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
638 dpp_row_sr(8), 0xf, 0xf, false, identity);
639 if (ctx->program->chip_class >= GFX10) {
640 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
641 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
642 for (unsigned i = 0; i < src.size(); i++) {
643 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
644 Definition(PhysReg{vtmp+i}, v1),
645 Operand(PhysReg{tmp+i}, v1),
646 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
647 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
648 }
649 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
650
651 if (ctx->program->wave_size == 64) {
652 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
653 for (unsigned i = 0; i < src.size(); i++)
654 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
655 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
656 }
657 } else {
658 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
659 dpp_row_bcast15, 0xa, 0xf, false, identity);
660 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
661 dpp_row_bcast31, 0xc, 0xf, false, identity);
662 }
663 break;
664 default:
665 unreachable("Invalid reduction mode");
666 }
667
668
669 if (op == aco_opcode::p_reduce) {
670 if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
671 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
672 emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
673 return;
674 }
675
676 if (reduction_needs_last_op)
677 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
678 }
679
680 /* restore exec */
681 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
682
683 if (dst.regClass().type() == RegType::sgpr) {
684 for (unsigned k = 0; k < src.size(); k++) {
685 bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
686 Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
687 }
688 } else if (dst.physReg() != tmp) {
689 for (unsigned k = 0; k < src.size(); k++) {
690 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
691 Operand(PhysReg{tmp + k}, v1));
692 }
693 }
694 }
695
696 struct copy_operation {
697 Operand op;
698 Definition def;
699 unsigned bytes;
700 union {
701 uint8_t uses[8];
702 uint64_t is_used = 0;
703 };
704 };
705
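/* Extracts the largest sub-copy of `src` that starts at `offset`: the size is
 * the biggest power of two (up to max_size, and at most 4 bytes for VGPRs or
 * 8 for SGPRs) that keeps both registers suitably aligned and, unless
 * ignore_uses is set, does not mix used and unused destination bytes. */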
706 void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
707 {
708 PhysReg def_reg = src.def.physReg();
709 PhysReg op_reg = src.op.physReg();
710 def_reg.reg_b += offset;
711 op_reg.reg_b += offset;
712
713 max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);
714
715 /* make sure the size is a power of two and reg % bytes == 0 */
716 unsigned bytes = 1;
717 for (; bytes <= max_size; bytes *= 2) {
718 unsigned next = bytes * 2u;
719 bool can_increase = def_reg.reg_b % next == 0 &&
720 offset + next <= src.bytes && next <= max_size;
721 if (!src.op.isConstant() && can_increase)
722 can_increase = op_reg.reg_b % next == 0;
723 for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
724 can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
725 if (!can_increase)
726 break;
727 }
728
729 RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
730 RegClass(src.def.regClass().type(), bytes).as_subdword();
731 *def = Definition(src.def.tempId(), def_reg, def_cls);
732 if (src.op.isConstant()) {
733 assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
734 if (src.op.bytes() == 8 && bytes == 4)
735 *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
736 else
737 *op = src.op;
738 } else {
739 RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
740 RegClass(src.op.regClass().type(), bytes).as_subdword();
741 *op = Operand(op_reg, op_cls);
742 op->setTemp(Temp(src.op.tempId(), op_cls));
743 }
744 }
745
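/* Returns a bit mask (one bit per byte of region a) of the bytes of
 * [a_start, a_start + a_size) that also lie in [b_start, b_start + b_size). */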
746 uint32_t get_intersection_mask(int a_start, int a_size,
747 int b_start, int b_size)
748 {
749 int intersection_start = MAX2(b_start - a_start, 0);
750 int intersection_end = MAX2(b_start + b_size - a_start, 0);
751 if (intersection_start >= a_size || intersection_end == 0)
752 return 0;
753
754 uint32_t mask = u_bit_consecutive(0, a_size);
755 return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
756 }
757
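/* Emits copies for every byte range of `copy` whose destination bytes are not
 * still needed as a source by another pending copy. Copies to scc are
 * materialized with s_cmp_lg_i32 and *preserve_scc is set so that later swaps
 * keep the result intact. Returns true if anything was emitted. */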
758 bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc)
759 {
760 bool did_copy = false;
761 for (unsigned offset = 0; offset < copy.bytes;) {
762 if (copy.uses[offset]) {
763 offset++;
764 continue;
765 }
766
767 Definition def;
768 Operand op;
769 split_copy(offset, &def, &op, copy, false, 8);
770
771 if (def.physReg() == scc) {
772 bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
773 *preserve_scc = true;
774 } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
775 bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
776 } else {
777 bld.copy(def, op);
778 }
779
780 ctx->program->statistics[statistic_copies]++;
781
782 did_copy = true;
783 offset += def.bytes();
784 }
785 return did_copy;
786 }
787
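/* Swaps the contents of the copy's operand and definition registers:
 * v_swap_b32 on GFX9+, a three-instruction xor swap for older VGPRs and for
 * sgprs, a scratch sgpr when scc is involved or must be preserved, and SDWA
 * or v_pk_add_u16 based sequences for sub-dword swaps. Afterwards the copy is
 * replayed in reverse to undo bytes that were swapped but shouldn't have been. */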
788 void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
789 {
790 unsigned offset = 0;
791
792 if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
793 (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
794 /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
795 PhysReg op = copy.op.physReg();
796 PhysReg def = copy.def.physReg();
797 op.reg_b &= ~0x3;
798 def.reg_b &= ~0x3;
799
800 copy_operation tmp;
801 tmp.op = Operand(op, v1);
802 tmp.def = Definition(def, v1);
803 tmp.bytes = 4;
804 memset(tmp.uses, 1, 4);
805 do_swap(ctx, bld, tmp, preserve_scc, pi);
806
807 op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
808 def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
809 tmp.op = Operand(op, v1b);
810 tmp.def = Definition(def, v1b);
811 tmp.bytes = 1;
812 tmp.uses[0] = 1;
813 do_swap(ctx, bld, tmp, preserve_scc, pi);
814
815 offset = copy.bytes;
816 }
817
818 for (; offset < copy.bytes;) {
819 Definition def;
820 Operand op;
821 split_copy(offset, &def, &op, copy, true, 8);
822
823 assert(op.regClass() == def.regClass());
824 Operand def_as_op = Operand(def.physReg(), def.regClass());
825 Definition op_as_def = Definition(op.physReg(), op.regClass());
826 if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
827 bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
828 ctx->program->statistics[statistic_copies]++;
829 } else if (def.regClass() == v1) {
830 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
831 bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
832 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
833 ctx->program->statistics[statistic_copies] += 3;
834 } else if (op.physReg() == scc || def.physReg() == scc) {
835 /* we need to swap scc and another sgpr */
836 assert(!preserve_scc);
837
838 PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();
839
840 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
841 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
842 bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
843 ctx->program->statistics[statistic_copies] += 3;
844 } else if (def.regClass() == s1) {
845 if (preserve_scc) {
846 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
847 bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
848 bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
849 } else {
850 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
851 bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
852 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
853 }
854 ctx->program->statistics[statistic_copies] += 3;
855 } else if (def.regClass() == s2) {
856 if (preserve_scc)
857 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
858 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
859 bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
860 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
861 if (preserve_scc)
862 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
863 ctx->program->statistics[statistic_copies] += 3;
864 } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
865 aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
866 vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
867 vop3p->operands[1] = Operand(0u);
868 vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
869 vop3p->opsel_lo = 0x1;
870 vop3p->opsel_hi = 0x2;
871 bld.insert(std::move(vop3p));
872 } else {
873 assert(def.regClass().is_subdword());
874 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
875 bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
876 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
877 ctx->program->statistics[statistic_copies] += 3;
878 }
879
880 offset += def.bytes();
881 }
882
883 /* fixup in case we swapped bytes we shouldn't have */
884 copy_operation tmp_copy = copy;
885 tmp_copy.op.setFixed(copy.def.physReg());
886 tmp_copy.def.setFixed(copy.op.physReg());
887 do_copy(ctx, bld, tmp_copy, &preserve_scc);
888 }
889
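/* Lowers a set of parallel copies. First, for every destination byte, the
 * number of pending copies that still read it is counted. Copies whose
 * destination bytes are no longer needed are then emitted directly (splitting
 * partially blocked copies and coalescing adjacent 32-bit sgpr copies where
 * possible). Whatever remains forms cycles, which are resolved with swaps. */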
890 void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
891 {
892 Builder bld(ctx->program, &ctx->instructions);
893 aco_ptr<Instruction> mov;
894 std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
895 std::map<PhysReg, copy_operation>::iterator target;
896 bool writes_scc = false;
897
898 /* count the number of uses for each dst reg */
899 while (it != copy_map.end()) {
900
901 if (it->second.def.physReg() == scc)
902 writes_scc = true;
903
904 assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
905
906 /* if src and dst reg are the same, remove operation */
907 if (it->first == it->second.op.physReg()) {
908 it = copy_map.erase(it);
909 continue;
910 }
911
912 /* split large copies */
913 if (it->second.bytes > 8) {
914 assert(!it->second.op.isConstant());
915 assert(!it->second.def.regClass().is_subdword());
916 RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
917 Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
918 rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
919 Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
920 copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
921 copy_map[hi_def.physReg()] = copy;
922 assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
923 it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
924 it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
925 it->second.bytes = 8;
926 }
927
928 /* check if the definition reg is used by another copy operation */
929 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
930 if (copy.second.op.isConstant())
931 continue;
932 for (uint16_t i = 0; i < it->second.bytes; i++) {
933 /* distance might underflow */
934 unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
935 if (distance < copy.second.bytes)
936 it->second.uses[i] += 1;
937 }
938 }
939
940 ++it;
941 }
942
943 /* first, handle paths in the location transfer graph */
944 bool preserve_scc = pi->tmp_in_scc && !writes_scc;
945 it = copy_map.begin();
946 while (it != copy_map.end()) {
947
948 /* try to coalesce 32-bit sgpr copies to 64-bit copies */
949 if (it->second.is_used == 0 &&
950 it->second.def.getTemp().type() == RegType::sgpr && it->second.bytes == 4 &&
951 !it->second.op.isConstant() && it->first % 2 == it->second.op.physReg() % 2) {
952
953 PhysReg other_def_reg = PhysReg{it->first % 2 ? it->first - 1 : it->first + 1};
954 PhysReg other_op_reg = PhysReg{it->first % 2 ? it->second.op.physReg() - 1 : it->second.op.physReg() + 1};
955 std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);
956
957 if (other != copy_map.end() && !other->second.is_used && other->second.bytes == 4 &&
958 other->second.op.physReg() == other_op_reg && !other->second.op.isConstant()) {
959 std::map<PhysReg, copy_operation>::iterator to_erase = it->first % 2 ? it : other;
960 it = it->first % 2 ? other : it;
961 copy_map.erase(to_erase);
962 it->second.bytes = 8;
963 }
964 }
965 // TODO: try to coalesce subdword copies
966
967 /* find portions where the target reg is not used as operand for any other copy */
968 if (it->second.is_used) {
969 if (it->second.op.isConstant()) {
970 /* we have to skip constants until is_used=0 */
971 ++it;
972 continue;
973 }
974
975 unsigned has_zero_use_bytes = 0;
976 for (unsigned i = 0; i < it->second.bytes; i++)
977 has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
978
979 if (has_zero_use_bytes) {
980                    /* Skipping the partial copy and doing a v_swap_b32 plus fixup
981                     * copies afterwards is usually beneficial for sub-dword copies,
982                     * but if a partial copy would unblock further copies, do the partial copy instead. */
983 bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
984 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
985 if (partial_copy)
986 break;
987 for (uint16_t i = 0; i < copy.second.bytes; i++) {
988 /* distance might underflow */
989 unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
990 if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
991 !it->second.uses[distance])
992 partial_copy = true;
993 }
994 }
995
996 if (!partial_copy) {
997 ++it;
998 continue;
999 }
1000 } else {
1001 /* full target reg is used: register swapping needed */
1002 ++it;
1003 continue;
1004 }
1005 }
1006
1007 bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc);
1008
1009 std::pair<PhysReg, copy_operation> copy = *it;
1010
1011 if (it->second.is_used == 0) {
1012 /* the target reg is not used as operand for any other copy, so we
1013 * copied to all of it */
1014 copy_map.erase(it);
1015 it = copy_map.begin();
1016 } else {
1017 /* we only performed some portions of this copy, so split it to only
1018 * leave the portions that still need to be done */
1019 copy_operation original = it->second; /* the map insertion below can overwrite this */
1020 copy_map.erase(it);
1021 for (unsigned offset = 0; offset < original.bytes;) {
1022 if (original.uses[offset] == 0) {
1023 offset++;
1024 continue;
1025 }
1026 Definition def;
1027 Operand op;
1028 split_copy(offset, &def, &op, original, false, 8);
1029
1030 copy_operation copy = {op, def, def.bytes()};
1031 for (unsigned i = 0; i < copy.bytes; i++)
1032 copy.uses[i] = original.uses[i + offset];
1033 copy_map[def.physReg()] = copy;
1034
1035 offset += def.bytes();
1036 }
1037
1038 it = copy_map.begin();
1039 }
1040
1041 /* Reduce the number of uses of the operand reg by one. Do this after
1042             * splitting the copy or removing it in case the copy writes to its own
1043 * operand (for example, v[7:8] = v[8:9]) */
1044 if (did_copy && !copy.second.op.isConstant()) {
1045 for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
1046 for (uint16_t i = 0; i < other.second.bytes; i++) {
1047 /* distance might underflow */
1048 unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
1049 if (distance < copy.second.bytes && !copy.second.uses[distance])
1050 other.second.uses[i] -= 1;
1051 }
1052 }
1053 }
1054 }
1055
1056 if (copy_map.empty())
1057 return;
1058
1059        /* all target regs are needed as an operand somewhere, which means all entries are part of a cycle */
1060 unsigned largest = 0;
1061 for (const std::pair<PhysReg, copy_operation>& op : copy_map)
1062 largest = MAX2(largest, op.second.bytes);
1063
1064 while (!copy_map.empty()) {
1065
1066 /* Perform larger swaps first, so that we don't have to split the uses of
1067             * registers we swap (we don't have to because of alignment restrictions), and
1068             * because larger swaps can make other swaps unnecessary. */
1069 auto it = copy_map.begin();
1070 for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
1071 if (it2->second.bytes > it->second.bytes) {
1072 it = it2;
1073 if (it->second.bytes == largest)
1074 break;
1075 }
1076 }
1077
1078 /* should already be done */
1079 assert(!it->second.op.isConstant());
1080
1081 assert(it->second.op.isFixed());
1082 assert(it->second.def.regClass() == it->second.op.regClass());
1083
1084 if (it->first == it->second.op.physReg()) {
1085 copy_map.erase(it);
1086 continue;
1087 }
1088
1089 if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
1090 assert(!(it->second.def.physReg() == pi->scratch_sgpr));
1091
1092 /* to resolve the cycle, we have to swap the src reg with the dst reg */
1093 copy_operation swap = it->second;
1094 do_swap(ctx, bld, swap, preserve_scc, pi);
1095
1096 /* remove from map */
1097 copy_map.erase(it);
1098
1099        /* redirect copies that read the swapped def so they read from the swap's operand instead (also handles partial overlaps) */
1100 target = copy_map.begin();
1101 uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
1102 for (; target != copy_map.end(); ++target) {
1103 if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
1104 target->second.op.setFixed(swap.op.physReg());
1105 break;
1106 }
1107
1108 uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
1109 target->second.op.physReg().reg_b, target->second.bytes);
1110
1111 if (!imask)
1112 continue;
1113
1114 assert(target->second.bytes < swap.bytes);
1115
1116 PhysReg new_reg = swap.op.physReg();
1117 new_reg.reg_b += target->second.op.physReg().reg_b - swap.def.physReg().reg_b;
1118 target->second.op.setFixed(new_reg);
1119
1120 bytes_left &= ~imask;
1121 if (!bytes_left)
1122 break;
1123 }
1124 }
1125 }
1126
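/* Entry point: walks all blocks and replaces the remaining pseudo instructions
 * (vector creation/extraction/splitting, parallel copies, spills/reloads,
 * branches, reductions, early exits) with real hardware instructions, emitting
 * s_setreg to switch float modes where adjacent blocks differ. */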
1127 void lower_to_hw_instr(Program* program)
1128 {
1129 Block *discard_block = NULL;
1130
1131 for (size_t i = 0; i < program->blocks.size(); i++)
1132 {
1133 Block *block = &program->blocks[i];
1134 lower_context ctx;
1135 ctx.program = program;
1136 Builder bld(program, &ctx.instructions);
1137
1138 bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
1139 for (unsigned pred : block->linear_preds) {
1140 if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
1141 set_mode = true;
1142 break;
1143 }
1144 }
1145 if (set_mode) {
1146 /* only allow changing modes at top-level blocks so this doesn't break
1147 * the "jump over empty blocks" optimization */
1148 assert(block->kind & block_kind_top_level);
1149 uint32_t mode = block->fp_mode.val;
1150 /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
1151 bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
1152 }
1153
1154 for (size_t j = 0; j < block->instructions.size(); j++) {
1155 aco_ptr<Instruction>& instr = block->instructions[j];
1156 aco_ptr<Instruction> mov;
1157 if (instr->format == Format::PSEUDO) {
1158 Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();
1159
1160 switch (instr->opcode)
1161 {
1162 case aco_opcode::p_extract_vector:
1163 {
1164 PhysReg reg = instr->operands[0].physReg();
1165 Definition& def = instr->definitions[0];
1166 reg.reg_b += instr->operands[1].constantValue() * def.bytes();
1167
1168 if (reg == def.physReg())
1169 break;
1170
1171 RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
1172 RegClass(instr->operands[0].getTemp().type(), def.size());
1173 std::map<PhysReg, copy_operation> copy_operations;
1174 copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
1175 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1176 break;
1177 }
1178 case aco_opcode::p_create_vector:
1179 {
1180 std::map<PhysReg, copy_operation> copy_operations;
1181 PhysReg reg = instr->definitions[0].physReg();
1182
1183 for (const Operand& op : instr->operands) {
1184 if (op.isConstant()) {
1185 const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
1186 copy_operations[reg] = {op, def, op.bytes()};
1187 reg.reg_b += op.bytes();
1188 continue;
1189 }
1190 if (op.isUndefined()) {
1191 // TODO: coalesce subdword copies if dst byte is 0
1192 reg.reg_b += op.bytes();
1193 continue;
1194 }
1195
1196 RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
1197 RegClass(instr->definitions[0].getTemp().type(), op.size());
1198 const Definition def = Definition(reg, rc_def);
1199 copy_operations[def.physReg()] = {op, def, op.bytes()};
1200 reg.reg_b += op.bytes();
1201 }
1202 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1203 break;
1204 }
1205 case aco_opcode::p_split_vector:
1206 {
1207 std::map<PhysReg, copy_operation> copy_operations;
1208 PhysReg reg = instr->operands[0].physReg();
1209
1210 for (const Definition& def : instr->definitions) {
1211 RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
1212 RegClass(instr->operands[0].getTemp().type(), def.size());
1213 const Operand op = Operand(reg, rc_op);
1214 copy_operations[def.physReg()] = {op, def, def.bytes()};
1215 reg.reg_b += def.bytes();
1216 }
1217 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1218 break;
1219 }
1220 case aco_opcode::p_parallelcopy:
1221 case aco_opcode::p_wqm:
1222 {
1223 std::map<PhysReg, copy_operation> copy_operations;
1224 for (unsigned i = 0; i < instr->operands.size(); i++) {
1225 assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
1226 copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
1227 }
1228 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1229 break;
1230 }
1231 case aco_opcode::p_exit_early_if:
1232 {
1233 /* don't bother with an early exit near the end of the program */
1234 if ((block->instructions.size() - 1 - j) <= 4 &&
1235 block->instructions.back()->opcode == aco_opcode::s_endpgm) {
1236 unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
1237 bool ignore_early_exit = true;
1238
1239 for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
1240 const aco_ptr<Instruction> &instr = block->instructions[k];
1241 if (instr->opcode == aco_opcode::s_endpgm ||
1242 instr->opcode == aco_opcode::p_logical_end)
1243 continue;
1244 else if (instr->opcode == aco_opcode::exp &&
1245 static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
1246 continue;
1247 else if (instr->opcode == aco_opcode::p_parallelcopy &&
1248 instr->definitions[0].isFixed() &&
1249 instr->definitions[0].physReg() == exec)
1250 continue;
1251
1252 ignore_early_exit = false;
1253 }
1254
1255 if (ignore_early_exit)
1256 break;
1257 }
1258
1259 if (!discard_block) {
1260 discard_block = program->create_and_insert_block();
1261 block = &program->blocks[i];
1262
1263 bld.reset(discard_block);
1264 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
1265 0, V_008DFC_SQ_EXP_NULL, false, true, true);
1266 if (program->wb_smem_l1_on_end)
1267 bld.smem(aco_opcode::s_dcache_wb);
1268 bld.sopp(aco_opcode::s_endpgm);
1269
1270 bld.reset(&ctx.instructions);
1271 }
1272
1273                // TODO: exec can be zero here with block_kind_discard
1274
1275 assert(instr->operands[0].physReg() == scc);
1276 bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);
1277
1278 discard_block->linear_preds.push_back(block->index);
1279 block->linear_succs.push_back(discard_block->index);
1280 break;
1281 }
1282 case aco_opcode::p_spill:
1283 {
1284 assert(instr->operands[0].regClass() == v1.as_linear());
1285 for (unsigned i = 0; i < instr->operands[2].size(); i++)
1286 bld.writelane(bld.def(v1, instr->operands[0].physReg()),
1287 Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
1288 Operand(instr->operands[1].constantValue() + i),
1289 instr->operands[0]);
1290 break;
1291 }
1292 case aco_opcode::p_reload:
1293 {
1294 assert(instr->operands[0].regClass() == v1.as_linear());
1295 for (unsigned i = 0; i < instr->definitions[0].size(); i++)
1296 bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1297 instr->operands[0],
1298 Operand(instr->operands[1].constantValue() + i));
1299 break;
1300 }
1301 case aco_opcode::p_as_uniform:
1302 {
1303 if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
1304 std::map<PhysReg, copy_operation> copy_operations;
1305 copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
1306 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1307 } else {
1308 assert(instr->operands[0].regClass().type() == RegType::vgpr);
1309 assert(instr->definitions[0].regClass().type() == RegType::sgpr);
1310 assert(instr->operands[0].size() == instr->definitions[0].size());
1311 for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
1312 bld.vop1(aco_opcode::v_readfirstlane_b32,
1313 bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1314 Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
1315 }
1316 }
1317 break;
1318 }
1319 default:
1320 break;
1321 }
1322 } else if (instr->format == Format::PSEUDO_BRANCH) {
1323 Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
1324 /* check if all blocks from current to target are empty */
1325 bool can_remove = block->index < branch->target[0];
1326 for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
1327 if (program->blocks[i].instructions.size())
1328 can_remove = false;
1329 }
1330 if (can_remove)
1331 continue;
1332
1333 switch (instr->opcode) {
1334 case aco_opcode::p_branch:
1335 assert(block->linear_succs[0] == branch->target[0]);
1336 bld.sopp(aco_opcode::s_branch, branch->target[0]);
1337 break;
1338 case aco_opcode::p_cbranch_nz:
1339 assert(block->linear_succs[1] == branch->target[0]);
1340 if (branch->operands[0].physReg() == exec)
1341 bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
1342 else if (branch->operands[0].physReg() == vcc)
1343 bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
1344 else {
1345 assert(branch->operands[0].physReg() == scc);
1346 bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
1347 }
1348 break;
1349 case aco_opcode::p_cbranch_z:
1350 assert(block->linear_succs[1] == branch->target[0]);
1351 if (branch->operands[0].physReg() == exec)
1352 bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
1353 else if (branch->operands[0].physReg() == vcc)
1354 bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
1355 else {
1356 assert(branch->operands[0].physReg() == scc);
1357 bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
1358 }
1359 break;
1360 default:
1361 unreachable("Unknown Pseudo branch instruction!");
1362 }
1363
1364 } else if (instr->format == Format::PSEUDO_REDUCTION) {
1365 Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
1366 if (reduce->reduce_op == gfx10_wave64_bpermute) {
1367 /* Only makes sense on GFX10 wave64 */
1368 assert(program->chip_class >= GFX10);
1369 assert(program->info->wave_size == 64);
1370 assert(instr->definitions[0].regClass() == v1); /* Destination */
1371 assert(instr->definitions[1].regClass() == s2); /* Temp EXEC */
1372 assert(instr->definitions[1].physReg() != vcc);
1373 assert(instr->definitions[2].physReg() == scc); /* SCC clobber */
1374 assert(instr->operands[0].physReg() == vcc); /* Compare */
1375 assert(instr->operands[1].regClass() == v2.as_linear()); /* Temp VGPR pair */
1376 assert(instr->operands[2].regClass() == v1); /* Indices x4 */
1377 assert(instr->operands[3].regClass() == v1); /* Input data */
1378
1379 PhysReg shared_vgpr_reg_lo = PhysReg(align(program->config->num_vgprs, 4) + 256);
1380 PhysReg shared_vgpr_reg_hi = PhysReg(shared_vgpr_reg_lo + 1);
1381 Operand compare = instr->operands[0];
1382 Operand tmp1(instr->operands[1].physReg(), v1);
1383 Operand tmp2(PhysReg(instr->operands[1].physReg() + 1), v1);
1384 Operand index_x4 = instr->operands[2];
1385 Operand input_data = instr->operands[3];
1386 Definition shared_vgpr_lo(shared_vgpr_reg_lo, v1);
1387 Definition shared_vgpr_hi(shared_vgpr_reg_hi, v1);
1388 Definition def_temp1(tmp1.physReg(), v1);
1389 Definition def_temp2(tmp2.physReg(), v1);
1390
1391 /* Save EXEC and set it for all lanes */
1392 bld.sop1(aco_opcode::s_or_saveexec_b64, instr->definitions[1], instr->definitions[2],
1393 Definition(exec, s2), Operand((uint64_t)-1), Operand(exec, s2));
1394
1395 /* HI: Copy data from high lanes 32-63 to shared vgpr */
1396 bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_hi, input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
1397
1398 /* LO: Copy data from low lanes 0-31 to shared vgpr */
1399 bld.vop1_dpp(aco_opcode::v_mov_b32, shared_vgpr_lo, input_data, dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
1400 /* LO: Copy shared vgpr (high lanes' data) to output vgpr */
1401 bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
1402
1403 /* HI: Copy shared vgpr (low lanes' data) to output vgpr */
1404 bld.vop1_dpp(aco_opcode::v_mov_b32, def_temp1, Operand(shared_vgpr_reg_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
1405
1406 /* Permute the original input */
1407 bld.ds(aco_opcode::ds_bpermute_b32, def_temp2, index_x4, input_data);
1408 /* Permute the swapped input */
1409 bld.ds(aco_opcode::ds_bpermute_b32, def_temp1, index_x4, tmp1);
1410
1411 /* Restore saved EXEC */
1412 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(instr->definitions[1].physReg(), s2));
1413 /* Choose whether to use the original or swapped */
1414 bld.vop2(aco_opcode::v_cndmask_b32, instr->definitions[0], tmp1, tmp2, compare);
1415 } else {
1416 emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
1417 reduce->operands[1].physReg(), // tmp
1418 reduce->definitions[1].physReg(), // stmp
1419 reduce->operands[2].physReg(), // vtmp
1420 reduce->definitions[2].physReg(), // sitmp
1421 reduce->operands[0], reduce->definitions[0]);
1422 }
1423 } else {
1424 ctx.instructions.emplace_back(std::move(instr));
1425 }
1426
1427 }
1428 block->instructions.swap(ctx.instructions);
1429 }
1430 }
1431
1432 }