aco: skip partial copies on first iteration when lowering to hw
[mesa.git] / src / amd / compiler / aco_lower_to_hw_instr.cpp
1 /*
2 * Copyright © 2018 Valve Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Schürmann (daniel.schuermann@campus.tu-berlin.de)
25 *
26 */
27
28 #include <map>
29
30 #include "aco_ir.h"
31 #include "aco_builder.h"
32 #include "util/u_math.h"
33 #include "sid.h"
34 #include "vulkan/radv_shader.h"
35
36
37 namespace aco {
38
39 struct lower_context {
40 Program *program;
41 std::vector<aco_ptr<Instruction>> instructions;
42 };
43
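/* Maps a ReduceOp to the VALU opcode used to combine two lanes' values.
 * Returns aco_opcode::num_opcodes for operations (the 64-bit integer ones)
 * that have no single hardware instruction and are expanded separately. */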
44 aco_opcode get_reduce_opcode(chip_class chip, ReduceOp op) {
45 /* Because some 16-bit instructions are already VOP3 on GFX10, we use the
46 * 32-bit opcodes (VOP2), which lets us avoid the temporary VGPR and use
47 * DPP with the arithmetic instructions. This requires sign-extending the inputs.
48 */
49 switch (op) {
50 case iadd8:
51 case iadd16:
52 if (chip >= GFX10) {
53 return aco_opcode::v_add_u32;
54 } else if (chip >= GFX8) {
55 return aco_opcode::v_add_u16;
56 } else {
57 return aco_opcode::v_add_co_u32;
58 }
59 break;
60 case imul8:
61 case imul16:
62 if (chip >= GFX10) {
63 return aco_opcode::v_mul_lo_u16_e64;
64 } else if (chip >= GFX8) {
65 return aco_opcode::v_mul_lo_u16;
66 } else {
67 return aco_opcode::v_mul_u32_u24;
68 }
69 break;
70 case fadd16: return aco_opcode::v_add_f16;
71 case fmul16: return aco_opcode::v_mul_f16;
72 case imax8:
73 case imax16:
74 if (chip >= GFX10) {
75 return aco_opcode::v_max_i32;
76 } else if (chip >= GFX8) {
77 return aco_opcode::v_max_i16;
78 } else {
79 return aco_opcode::v_max_i32;
80 }
81 break;
82 case imin8:
83 case imin16:
84 if (chip >= GFX10) {
85 return aco_opcode::v_min_i32;
86 } else if (chip >= GFX8) {
87 return aco_opcode::v_min_i16;
88 } else {
89 return aco_opcode::v_min_i32;
90 }
91 break;
92 case umin8:
93 case umin16:
94 if (chip >= GFX10) {
95 return aco_opcode::v_min_u32;
96 } else if (chip >= GFX8) {
97 return aco_opcode::v_min_u16;
98 } else {
99 return aco_opcode::v_min_u32;
100 }
101 break;
102 case umax8:
103 case umax16:
104 if (chip >= GFX10) {
105 return aco_opcode::v_max_u32;
106 } else if (chip >= GFX8) {
107 return aco_opcode::v_max_u16;
108 } else {
109 return aco_opcode::v_max_u32;
110 }
111 break;
112 case fmin16: return aco_opcode::v_min_f16;
113 case fmax16: return aco_opcode::v_max_f16;
114 case iadd32: return chip >= GFX9 ? aco_opcode::v_add_u32 : aco_opcode::v_add_co_u32;
115 case imul32: return aco_opcode::v_mul_lo_u32;
116 case fadd32: return aco_opcode::v_add_f32;
117 case fmul32: return aco_opcode::v_mul_f32;
118 case imax32: return aco_opcode::v_max_i32;
119 case imin32: return aco_opcode::v_min_i32;
120 case umin32: return aco_opcode::v_min_u32;
121 case umax32: return aco_opcode::v_max_u32;
122 case fmin32: return aco_opcode::v_min_f32;
123 case fmax32: return aco_opcode::v_max_f32;
124 case iand8:
125 case iand16:
126 case iand32: return aco_opcode::v_and_b32;
127 case ixor8:
128 case ixor16:
129 case ixor32: return aco_opcode::v_xor_b32;
130 case ior8:
131 case ior16:
132 case ior32: return aco_opcode::v_or_b32;
133 case iadd64: return aco_opcode::num_opcodes;
134 case imul64: return aco_opcode::num_opcodes;
135 case fadd64: return aco_opcode::v_add_f64;
136 case fmul64: return aco_opcode::v_mul_f64;
137 case imin64: return aco_opcode::num_opcodes;
138 case imax64: return aco_opcode::num_opcodes;
139 case umin64: return aco_opcode::num_opcodes;
140 case umax64: return aco_opcode::num_opcodes;
141 case fmin64: return aco_opcode::v_min_f64;
142 case fmax64: return aco_opcode::v_max_f64;
143 case iand64: return aco_opcode::num_opcodes;
144 case ior64: return aco_opcode::num_opcodes;
145 case ixor64: return aco_opcode::num_opcodes;
146 default: return aco_opcode::num_opcodes;
147 }
148 }
149
150 bool is_vop3_reduce_opcode(aco_opcode opcode)
151 {
152 /* 64-bit reductions are VOP3. */
153 if (opcode == aco_opcode::num_opcodes)
154 return true;
155
156 return instr_info.format[(int)opcode] == Format::VOP3;
157 }
158
159 void emit_vadd32(Builder& bld, Definition def, Operand src0, Operand src1)
160 {
161 Instruction *instr = bld.vadd32(def, src0, src1, false, Operand(s2), true);
162 if (instr->definitions.size() >= 2) {
163 assert(instr->definitions[1].regClass() == bld.lm);
164 instr->definitions[1].setFixed(vcc);
165 }
166 }
167
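/* Emits one 64-bit integer reduction step for ops that get_reduce_opcode()
 * cannot express as a single instruction: the first source is read through
 * DPP (via vtmp and the optional identity where the op itself can't encode
 * DPP) and combined with src1 into dst. */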
168 void emit_int64_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
169 PhysReg vtmp_reg, ReduceOp op,
170 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
171 Operand *identity=NULL)
172 {
173 Builder bld(ctx->program, &ctx->instructions);
174 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
175 Definition vtmp_def[] = {Definition(vtmp_reg, v1), Definition(PhysReg{vtmp_reg+1}, v1)};
176 Operand src0[] = {Operand(src0_reg, v1), Operand(PhysReg{src0_reg+1}, v1)};
177 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
178 Operand src1_64 = Operand(src1_reg, v2);
179 Operand vtmp_op[] = {Operand(vtmp_reg, v1), Operand(PhysReg{vtmp_reg+1}, v1)};
180 Operand vtmp_op64 = Operand(vtmp_reg, v2);
181 if (op == iadd64) {
182 if (ctx->program->chip_class >= GFX10) {
183 if (identity)
184 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
185 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
186 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
187 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), vtmp_op[0], src1[0]);
188 } else {
189 bld.vop2_dpp(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0],
190 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
191 }
192 bld.vop2_dpp(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm),
193 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
194 } else if (op == iand64) {
195 bld.vop2_dpp(aco_opcode::v_and_b32, dst[0], src0[0], src1[0],
196 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
197 bld.vop2_dpp(aco_opcode::v_and_b32, dst[1], src0[1], src1[1],
198 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
199 } else if (op == ior64) {
200 bld.vop2_dpp(aco_opcode::v_or_b32, dst[0], src0[0], src1[0],
201 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
202 bld.vop2_dpp(aco_opcode::v_or_b32, dst[1], src0[1], src1[1],
203 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
204 } else if (op == ixor64) {
205 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0],
206 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
207 bld.vop2_dpp(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1],
208 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
209 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
210 aco_opcode cmp = aco_opcode::num_opcodes;
211 switch (op) {
212 case umin64:
213 cmp = aco_opcode::v_cmp_gt_u64;
214 break;
215 case umax64:
216 cmp = aco_opcode::v_cmp_lt_u64;
217 break;
218 case imin64:
219 cmp = aco_opcode::v_cmp_gt_i64;
220 break;
221 case imax64:
222 cmp = aco_opcode::v_cmp_lt_i64;
223 break;
224 default:
225 break;
226 }
227
228 if (identity) {
229 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
230 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[1], identity[1]);
231 }
232 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
233 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
234 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[1], src0[1],
235 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
236
237 bld.vopc(cmp, bld.def(bld.lm, vcc), vtmp_op64, src1_64);
238 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], vtmp_op[0], src1[0], Operand(vcc, bld.lm));
239 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], vtmp_op[1], src1[1], Operand(vcc, bld.lm));
240 } else if (op == imul64) {
241 /* t4 = dpp(x_hi)
242 * t1 = umul_lo(t4, y_lo)
243 * t3 = dpp(x_lo)
244 * t0 = umul_lo(t3, y_hi)
245 * t2 = iadd(t0, t1)
246 * t5 = umul_hi(t3, y_lo)
247 * res_hi = iadd(t2, t5)
248 * res_lo = umul_lo(t3, y_lo)
249 * Requires that res_hi != src0[0] and res_hi != src1[0]
250 * and that vtmp[0] != res_hi.
251 */
252 if (identity)
253 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[1]);
254 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[1],
255 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
256 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[1], vtmp_op[0], src1[0]);
257 if (identity)
258 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
259 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
260 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
261 bld.vop3(aco_opcode::v_mul_lo_u32, vtmp_def[0], vtmp_op[0], src1[1]);
262 emit_vadd32(bld, vtmp_def[1], vtmp_op[0], vtmp_op[1]);
263 if (identity)
264 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
265 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
266 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
267 bld.vop3(aco_opcode::v_mul_hi_u32, vtmp_def[0], vtmp_op[0], src1[0]);
268 emit_vadd32(bld, dst[1], vtmp_op[1], vtmp_op[0]);
269 if (identity)
270 bld.vop1(aco_opcode::v_mov_b32, vtmp_def[0], identity[0]);
271 bld.vop1_dpp(aco_opcode::v_mov_b32, vtmp_def[0], src0[0],
272 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
273 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], vtmp_op[0], src1[0]);
274 }
275 }
276
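/* Non-DPP counterpart of emit_int64_dpp_op(): combines src0 (a VGPR or SGPR
 * pair) with src1 into dst, moving src0 into vtmp first where an SGPR source
 * isn't allowed. */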
277 void emit_int64_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg, PhysReg vtmp, ReduceOp op)
278 {
279 Builder bld(ctx->program, &ctx->instructions);
280 Definition dst[] = {Definition(dst_reg, v1), Definition(PhysReg{dst_reg+1}, v1)};
281 RegClass src0_rc = src0_reg.reg() >= 256 ? v1 : s1;
282 Operand src0[] = {Operand(src0_reg, src0_rc), Operand(PhysReg{src0_reg+1}, src0_rc)};
283 Operand src1[] = {Operand(src1_reg, v1), Operand(PhysReg{src1_reg+1}, v1)};
284 Operand src0_64 = Operand(src0_reg, src0_reg.reg() >= 256 ? v2 : s2);
285 Operand src1_64 = Operand(src1_reg, v2);
286
287 if (src0_rc == s1 &&
288 (op == imul64 || op == umin64 || op == umax64 || op == imin64 || op == imax64)) {
289 assert(vtmp.reg() != 0);
290 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), src0[0]);
291 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
292 src0_reg = vtmp;
293 src0[0] = Operand(vtmp, v1);
294 src0[1] = Operand(PhysReg{vtmp+1}, v1);
295 src0_64 = Operand(vtmp, v2);
296 } else if (src0_rc == s1 && op == iadd64) {
297 assert(vtmp.reg() != 0);
298 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), src0[1]);
299 src0[1] = Operand(PhysReg{vtmp+1}, v1);
300 }
301
302 if (op == iadd64) {
303 if (ctx->program->chip_class >= GFX10) {
304 bld.vop3(aco_opcode::v_add_co_u32_e64, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
305 } else {
306 bld.vop2(aco_opcode::v_add_co_u32, dst[0], bld.def(bld.lm, vcc), src0[0], src1[0]);
307 }
308 bld.vop2(aco_opcode::v_addc_co_u32, dst[1], bld.def(bld.lm, vcc), src0[1], src1[1], Operand(vcc, bld.lm));
309 } else if (op == iand64) {
310 bld.vop2(aco_opcode::v_and_b32, dst[0], src0[0], src1[0]);
311 bld.vop2(aco_opcode::v_and_b32, dst[1], src0[1], src1[1]);
312 } else if (op == ior64) {
313 bld.vop2(aco_opcode::v_or_b32, dst[0], src0[0], src1[0]);
314 bld.vop2(aco_opcode::v_or_b32, dst[1], src0[1], src1[1]);
315 } else if (op == ixor64) {
316 bld.vop2(aco_opcode::v_xor_b32, dst[0], src0[0], src1[0]);
317 bld.vop2(aco_opcode::v_xor_b32, dst[1], src0[1], src1[1]);
318 } else if (op == umin64 || op == umax64 || op == imin64 || op == imax64) {
319 aco_opcode cmp = aco_opcode::num_opcodes;
320 switch (op) {
321 case umin64:
322 cmp = aco_opcode::v_cmp_gt_u64;
323 break;
324 case umax64:
325 cmp = aco_opcode::v_cmp_lt_u64;
326 break;
327 case imin64:
328 cmp = aco_opcode::v_cmp_gt_i64;
329 break;
330 case imax64:
331 cmp = aco_opcode::v_cmp_lt_i64;
332 break;
333 default:
334 break;
335 }
336
337 bld.vopc(cmp, bld.def(bld.lm, vcc), src0_64, src1_64);
338 bld.vop2(aco_opcode::v_cndmask_b32, dst[0], src0[0], src1[0], Operand(vcc, bld.lm));
339 bld.vop2(aco_opcode::v_cndmask_b32, dst[1], src0[1], src1[1], Operand(vcc, bld.lm));
340 } else if (op == imul64) {
341 if (src1_reg == dst_reg) {
342 /* it's fine if src0==dst but not if src1==dst */
343 std::swap(src0_reg, src1_reg);
344 std::swap(src0[0], src1[0]);
345 std::swap(src0[1], src1[1]);
346 std::swap(src0_64, src1_64);
347 }
348 assert(!(src0_reg == src1_reg));
349 /* t1 = umul_lo(x_hi, y_lo)
350 * t0 = umul_lo(x_lo, y_hi)
351 * t2 = iadd(t0, t1)
352 * t5 = umul_hi(x_lo, y_lo)
353 * res_hi = iadd(t2, t5)
354 * res_lo = umul_lo(x_lo, y_lo)
355 * assumes that it's ok to modify x_hi/y_hi, since we might not have vtmp
356 */
357 Definition tmp0_def(PhysReg{src0_reg+1}, v1);
358 Definition tmp1_def(PhysReg{src1_reg+1}, v1);
359 Operand tmp0_op = src0[1];
360 Operand tmp1_op = src1[1];
361 bld.vop3(aco_opcode::v_mul_lo_u32, tmp0_def, src0[1], src1[0]);
362 bld.vop3(aco_opcode::v_mul_lo_u32, tmp1_def, src0[0], src1[1]);
363 emit_vadd32(bld, tmp0_def, tmp1_op, tmp0_op);
364 bld.vop3(aco_opcode::v_mul_hi_u32, tmp1_def, src0[0], src1[0]);
365 emit_vadd32(bld, dst[1], tmp0_op, tmp1_op);
366 bld.vop3(aco_opcode::v_mul_lo_u32, dst[0], src0[0], src1[0]);
367 }
368 }
369
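/* Emits a single reduction step dst = reduce_op(dpp(src0), src1). VOP2-capable
 * opcodes use DPP on the instruction itself; VOP3 opcodes first move src0 into
 * vtmp with v_mov_b32_dpp (preloading the identity for inactive lanes when
 * given), since they can't use DPP directly. */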
370 void emit_dpp_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
371 PhysReg vtmp, ReduceOp op, unsigned size,
372 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl,
373 Operand *identity=NULL) /* for VOP3 with sparse writes */
374 {
375 Builder bld(ctx->program, &ctx->instructions);
376 RegClass rc = RegClass(RegType::vgpr, size);
377 Definition dst(dst_reg, rc);
378 Operand src0(src0_reg, rc);
379 Operand src1(src1_reg, rc);
380
381 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
382 bool vop3 = is_vop3_reduce_opcode(opcode);
383
384 if (!vop3) {
385 if (opcode == aco_opcode::v_add_co_u32)
386 bld.vop2_dpp(opcode, dst, bld.def(bld.lm, vcc), src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
387 else
388 bld.vop2_dpp(opcode, dst, src0, src1, dpp_ctrl, row_mask, bank_mask, bound_ctrl);
389 return;
390 }
391
392 if (opcode == aco_opcode::num_opcodes) {
393 emit_int64_dpp_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op,
394 dpp_ctrl, row_mask, bank_mask, bound_ctrl, identity);
395 return;
396 }
397
398 if (identity)
399 bld.vop1(aco_opcode::v_mov_b32, Definition(vtmp, v1), identity[0]);
400 if (identity && size >= 2)
401 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+1}, v1), identity[1]);
402
403 for (unsigned i = 0; i < size; i++)
404 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{src0_reg+i}, v1),
405 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
406
407 bld.vop3(opcode, dst, Operand(vtmp, rc), src1);
408 }
409
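/* Like emit_dpp_op(), but without DPP: src0 is read directly and may be
 * located in SGPRs. */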
410 void emit_op(lower_context *ctx, PhysReg dst_reg, PhysReg src0_reg, PhysReg src1_reg,
411 PhysReg vtmp, ReduceOp op, unsigned size)
412 {
413 Builder bld(ctx->program, &ctx->instructions);
414 RegClass rc = RegClass(RegType::vgpr, size);
415 Definition dst(dst_reg, rc);
416 Operand src0(src0_reg, RegClass(src0_reg.reg() >= 256 ? RegType::vgpr : RegType::sgpr, size));
417 Operand src1(src1_reg, rc);
418
419 aco_opcode opcode = get_reduce_opcode(ctx->program->chip_class, op);
420 bool vop3 = is_vop3_reduce_opcode(opcode);
421
422 if (opcode == aco_opcode::num_opcodes) {
423 emit_int64_op(ctx, dst_reg, src0_reg, src1_reg, vtmp, op);
424 return;
425 }
426
427 if (vop3) {
428 bld.vop3(opcode, dst, src0, src1);
429 } else if (opcode == aco_opcode::v_add_co_u32) {
430 bld.vop2(opcode, dst, bld.def(bld.lm, vcc), src0, src1);
431 } else {
432 bld.vop2(opcode, dst, src0, src1);
433 }
434 }
435
436 void emit_dpp_mov(lower_context *ctx, PhysReg dst, PhysReg src0, unsigned size,
437 unsigned dpp_ctrl, unsigned row_mask, unsigned bank_mask, bool bound_ctrl)
438 {
439 Builder bld(ctx->program, &ctx->instructions);
440 for (unsigned i = 0; i < size; i++) {
441 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(PhysReg{dst+i}, v1), Operand(PhysReg{src0+i}, v1),
442 dpp_ctrl, row_mask, bank_mask, bound_ctrl);
443 }
444 }
445
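/* Returns the identity element of the reduction, one dword at a time:
 * idx selects the low (0) or high (1) half for 64-bit operations. */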
446 uint32_t get_reduction_identity(ReduceOp op, unsigned idx)
447 {
448 switch (op) {
449 case iadd8:
450 case iadd16:
451 case iadd32:
452 case iadd64:
453 case fadd16:
454 case fadd32:
455 case fadd64:
456 case ior8:
457 case ior16:
458 case ior32:
459 case ior64:
460 case ixor8:
461 case ixor16:
462 case ixor32:
463 case ixor64:
464 case umax8:
465 case umax16:
466 case umax32:
467 case umax64:
468 return 0;
469 case imul8:
470 case imul16:
471 case imul32:
472 case imul64:
473 return idx ? 0 : 1;
474 case fmul16:
475 return 0x3c00u; /* 1.0 */
476 case fmul32:
477 return 0x3f800000u; /* 1.0 */
478 case fmul64:
479 return idx ? 0x3ff00000u : 0u; /* 1.0 */
480 case imin8:
481 return INT8_MAX;
482 case imin16:
483 return INT16_MAX;
484 case imin32:
485 return INT32_MAX;
486 case imin64:
487 return idx ? 0x7fffffffu : 0xffffffffu;
488 case imax8:
489 return INT8_MIN;
490 case imax16:
491 return INT16_MIN;
492 case imax32:
493 return INT32_MIN;
494 case imax64:
495 return idx ? 0x80000000u : 0;
496 case umin8:
497 case umin16:
498 case iand8:
499 case iand16:
500 return 0xffffffffu;
501 case umin32:
502 case umin64:
503 case iand32:
504 case iand64:
505 return 0xffffffffu;
506 case fmin16:
507 return 0x7c00u; /* infinity */
508 case fmin32:
509 return 0x7f800000u; /* infinity */
510 case fmin64:
511 return idx ? 0x7ff00000u : 0u; /* infinity */
512 case fmax16:
513 return 0xfc00u; /* negative infinity */
514 case fmax32:
515 return 0xff800000u; /* negative infinity */
516 case fmax64:
517 return idx ? 0xfff00000u : 0u; /* negative infinity */
518 default:
519 unreachable("Invalid reduction operation");
520 break;
521 }
522 return 0;
523 }
524
525 void emit_ds_swizzle(Builder bld, PhysReg dst, PhysReg src, unsigned size, unsigned ds_pattern)
526 {
527 for (unsigned i = 0; i < size; i++) {
528 bld.ds(aco_opcode::ds_swizzle_b32, Definition(PhysReg{dst+i}, v1),
529 Operand(PhysReg{src+i}, v1), ds_pattern);
530 }
531 }
532
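/* Lowers p_reduce/p_inclusive_scan/p_exclusive_scan. tmp and vtmp are VGPR
 * scratch registers, stmp receives the saved exec mask and sitmp is SGPR
 * scratch for the readlane/writelane based steps. */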
533 void emit_reduction(lower_context *ctx, aco_opcode op, ReduceOp reduce_op, unsigned cluster_size, PhysReg tmp,
534 PhysReg stmp, PhysReg vtmp, PhysReg sitmp, Operand src, Definition dst)
535 {
536 assert(cluster_size == ctx->program->wave_size || op == aco_opcode::p_reduce);
537 assert(cluster_size <= ctx->program->wave_size);
538
539 Builder bld(ctx->program, &ctx->instructions);
540
541 Operand identity[2];
542 identity[0] = Operand(get_reduction_identity(reduce_op, 0));
543 identity[1] = Operand(get_reduction_identity(reduce_op, 1));
544 Operand vcndmask_identity[2] = {identity[0], identity[1]};
545
546 /* First, copy the source to tmp and set inactive lanes to the identity */
547 bld.sop1(Builder::s_or_saveexec, Definition(stmp, bld.lm), Definition(scc, s1), Definition(exec, bld.lm), Operand(UINT64_MAX), Operand(exec, bld.lm));
548
549 for (unsigned i = 0; i < src.size(); i++) {
550 /* p_exclusive_scan needs it to be an SGPR or inline constant for v_writelane_b32,
551 * except on GFX10, where v_writelane_b32 can take a literal. */
552 if (identity[i].isLiteral() && op == aco_opcode::p_exclusive_scan && ctx->program->chip_class < GFX10) {
553 bld.sop1(aco_opcode::s_mov_b32, Definition(PhysReg{sitmp+i}, s1), identity[i]);
554 identity[i] = Operand(PhysReg{sitmp+i}, s1);
555
556 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
557 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
558 } else if (identity[i].isLiteral()) {
559 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{tmp+i}, v1), identity[i]);
560 vcndmask_identity[i] = Operand(PhysReg{tmp+i}, v1);
561 }
562 }
563
564 for (unsigned i = 0; i < src.size(); i++) {
565 bld.vop2_e64(aco_opcode::v_cndmask_b32, Definition(PhysReg{tmp + i}, v1),
566 vcndmask_identity[i], Operand(PhysReg{src.physReg() + i}, v1),
567 Operand(stmp, bld.lm));
568 }
569
570 if (src.regClass() == v1b) {
571 if (ctx->program->chip_class >= GFX8) {
572 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
573 sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
574 sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
575 if (reduce_op == imin8 || reduce_op == imax8)
576 sdwa->sel[0] = sdwa_sbyte;
577 else
578 sdwa->sel[0] = sdwa_ubyte;
579 sdwa->dst_sel = sdwa_udword;
580 bld.insert(std::move(sdwa));
581 } else {
582 aco_opcode opcode;
583
584 if (reduce_op == imin8 || reduce_op == imax8)
585 opcode = aco_opcode::v_bfe_i32;
586 else
587 opcode = aco_opcode::v_bfe_u32;
588
589 bld.vop3(opcode, Definition(PhysReg{tmp}, v1),
590 Operand(PhysReg{tmp}, v1), Operand(0u), Operand(8u));
591 }
592 } else if (src.regClass() == v2b) {
593 if (ctx->program->chip_class >= GFX10 &&
594 (reduce_op == iadd16 || reduce_op == imax16 ||
595 reduce_op == imin16 || reduce_op == umin16 || reduce_op == umax16)) {
596 aco_ptr<SDWA_instruction> sdwa{create_instruction<SDWA_instruction>(aco_opcode::v_mov_b32, asSDWA(Format::VOP1), 1, 1)};
597 sdwa->operands[0] = Operand(PhysReg{tmp}, v1);
598 sdwa->definitions[0] = Definition(PhysReg{tmp}, v1);
599 if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
600 sdwa->sel[0] = sdwa_sword;
601 else
602 sdwa->sel[0] = sdwa_uword;
603 sdwa->dst_sel = sdwa_udword;
604 bld.insert(std::move(sdwa));
605 } else if (ctx->program->chip_class == GFX6 || ctx->program->chip_class == GFX7) {
606 aco_opcode opcode;
607
608 if (reduce_op == imin16 || reduce_op == imax16 || reduce_op == iadd16)
609 opcode = aco_opcode::v_bfe_i32;
610 else
611 opcode = aco_opcode::v_bfe_u32;
612
613 bld.vop3(opcode, Definition(PhysReg{tmp}, v1),
614 Operand(PhysReg{tmp}, v1), Operand(0u), Operand(16u));
615 }
616 }
617
618 bool reduction_needs_last_op = false;
619 switch (op) {
620 case aco_opcode::p_reduce:
621 if (cluster_size == 1) break;
622
623 if (ctx->program->chip_class <= GFX7) {
624 reduction_needs_last_op = true;
625 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(1, 0, 3, 2));
626 if (cluster_size == 2) break;
627 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
628 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(2, 3, 0, 1));
629 if (cluster_size == 4) break;
630 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
631 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x04));
632 if (cluster_size == 8) break;
633 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
634 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x08));
635 if (cluster_size == 16) break;
636 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
637 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
638 if (cluster_size == 32) break;
639 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
640 for (unsigned i = 0; i < src.size(); i++)
641 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp + i}, v1), Operand(0u));
642 // TODO: it would be more effective to do the last reduction step on SALU
643 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
644 reduction_needs_last_op = false;
645 break;
646 }
647
648 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(1, 0, 3, 2), 0xf, 0xf, false);
649 if (cluster_size == 2) break;
650 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_quad_perm(2, 3, 0, 1), 0xf, 0xf, false);
651 if (cluster_size == 4) break;
652 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_half_mirror, 0xf, 0xf, false);
653 if (cluster_size == 8) break;
654 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_mirror, 0xf, 0xf, false);
655 if (cluster_size == 16) break;
656
657 if (ctx->program->chip_class >= GFX10) {
658 /* GFX10+ doesn't support row_bcast15 and row_bcast31 */
659 for (unsigned i = 0; i < src.size(); i++)
660 bld.vop3(aco_opcode::v_permlanex16_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1), Operand(0u), Operand(0u));
661
662 if (cluster_size == 32) {
663 reduction_needs_last_op = true;
664 break;
665 }
666
667 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
668 for (unsigned i = 0; i < src.size(); i++)
669 bld.readlane(Definition(PhysReg{dst.physReg() + i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
670 // TODO: it would be more effective to do the last reduction step on SALU
671 emit_op(ctx, tmp, dst.physReg(), tmp, vtmp, reduce_op, src.size());
672 break;
673 }
674
675 if (cluster_size == 32) {
676 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1f, 0, 0x10));
677 reduction_needs_last_op = true;
678 break;
679 }
680 assert(cluster_size == 64);
681 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast15, 0xa, 0xf, false);
682 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(), dpp_row_bcast31, 0xc, 0xf, false);
683 break;
684 case aco_opcode::p_exclusive_scan:
685 if (ctx->program->chip_class >= GFX10) { /* gfx10 doesn't support wf_sr1, so emulate it */
686 /* shift rows right */
687 emit_dpp_mov(ctx, vtmp, tmp, src.size(), dpp_row_sr(1), 0xf, 0xf, true);
688
689 /* fill in the gaps in rows 1 and 3 */
690 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10000u));
691 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(0x10000u));
692 for (unsigned i = 0; i < src.size(); i++) {
693 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
694 Definition(PhysReg{vtmp+i}, v1),
695 Operand(PhysReg{tmp+i}, v1),
696 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
697 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
698 }
699 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(UINT64_MAX));
700
701 if (ctx->program->wave_size == 64) {
702 /* fill in the gap in row 2 */
703 for (unsigned i = 0; i < src.size(); i++) {
704 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
705 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
706 }
707 }
708 std::swap(tmp, vtmp);
709 } else if (ctx->program->chip_class >= GFX8) {
710 emit_dpp_mov(ctx, tmp, tmp, src.size(), dpp_wf_sr1, 0xf, 0xf, true);
711 } else {
712 // TODO: use LDS on CS with a single write and shifted read
713 /* wavefront shift_right by 1 on SI/CI */
714 emit_ds_swizzle(bld, vtmp, tmp, src.size(), (1 << 15) | dpp_quad_perm(0, 0, 1, 2));
715 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x07)); /* mirror(8) */
716 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x10101010u));
717 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
718 for (unsigned i = 0; i < src.size(); i++)
719 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
720
721 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
722 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x08)); /* swap(8) */
723 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0x01000100u));
724 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
725 for (unsigned i = 0; i < src.size(); i++)
726 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
727
728 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
729 emit_ds_swizzle(bld, tmp, tmp, src.size(), ds_pattern_bitmode(0x1F, 0x00, 0x10)); /* swap(16) */
730 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(1u), Operand(16u));
731 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(1u), Operand(16u));
732 for (unsigned i = 0; i < src.size(); i++)
733 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{tmp+i}, v1));
734
735 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
736 for (unsigned i = 0; i < src.size(); i++) {
737 bld.writelane(Definition(PhysReg{vtmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{vtmp+i}, v1));
738 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(0u));
739 bld.writelane(Definition(PhysReg{vtmp+i}, v1), Operand(PhysReg{sitmp+i}, s1), Operand(32u), Operand(PhysReg{vtmp+i}, v1));
740 identity[i] = Operand(0u); /* prevent further uses of identity */
741 }
742 std::swap(tmp, vtmp);
743 }
744
745 for (unsigned i = 0; i < src.size(); i++) {
746 if (!identity[i].isConstant() || identity[i].constantValue()) { /* bound_ctrl should take care of this otherwise */
747 if (ctx->program->chip_class < GFX10)
748 assert((identity[i].isConstant() && !identity[i].isLiteral()) || identity[i].physReg() == PhysReg{sitmp+i});
749 bld.writelane(Definition(PhysReg{tmp+i}, v1), identity[i], Operand(0u), Operand(PhysReg{tmp+i}, v1));
750 }
751 }
752 /* fall through */
753 case aco_opcode::p_inclusive_scan:
754 assert(cluster_size == ctx->program->wave_size);
755 if (ctx->program->chip_class <= GFX7) {
756 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1e, 0x00, 0x00));
757 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xAAAAAAAAu));
758 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
759 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
760
761 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
762 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x1c, 0x01, 0x00));
763 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xCCCCCCCCu));
764 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
765 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
766
767 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
768 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x18, 0x03, 0x00));
769 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xF0F0F0F0u));
770 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
771 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
772
773 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
774 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x10, 0x07, 0x00));
775 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_lo, s1), Operand(0xFF00FF00u));
776 bld.sop1(aco_opcode::s_mov_b32, Definition(exec_hi, s1), Operand(exec_lo, s1));
777 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
778
779 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(UINT64_MAX));
780 emit_ds_swizzle(bld, vtmp, tmp, src.size(), ds_pattern_bitmode(0x00, 0x0f, 0x00));
781 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
782 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
783 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
784
785 for (unsigned i = 0; i < src.size(); i++)
786 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
787 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
788 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
789 break;
790 }
791
792 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
793 dpp_row_sr(1), 0xf, 0xf, false, identity);
794 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
795 dpp_row_sr(2), 0xf, 0xf, false, identity);
796 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
797 dpp_row_sr(4), 0xf, 0xf, false, identity);
798 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
799 dpp_row_sr(8), 0xf, 0xf, false, identity);
800 if (ctx->program->chip_class >= GFX10) {
801 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_lo, s1), Operand(16u), Operand(16u));
802 bld.sop2(aco_opcode::s_bfm_b32, Definition(exec_hi, s1), Operand(16u), Operand(16u));
803 for (unsigned i = 0; i < src.size(); i++) {
804 Instruction *perm = bld.vop3(aco_opcode::v_permlanex16_b32,
805 Definition(PhysReg{vtmp+i}, v1),
806 Operand(PhysReg{tmp+i}, v1),
807 Operand(0xffffffffu), Operand(0xffffffffu)).instr;
808 static_cast<VOP3A_instruction*>(perm)->opsel = 1; /* FI (Fetch Inactive) */
809 }
810 emit_op(ctx, tmp, tmp, vtmp, PhysReg{0}, reduce_op, src.size());
811
812 if (ctx->program->wave_size == 64) {
813 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
814 for (unsigned i = 0; i < src.size(); i++)
815 bld.readlane(Definition(PhysReg{sitmp+i}, s1), Operand(PhysReg{tmp+i}, v1), Operand(31u));
816 emit_op(ctx, tmp, sitmp, tmp, vtmp, reduce_op, src.size());
817 }
818 } else {
819 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
820 dpp_row_bcast15, 0xa, 0xf, false, identity);
821 emit_dpp_op(ctx, tmp, tmp, tmp, vtmp, reduce_op, src.size(),
822 dpp_row_bcast31, 0xc, 0xf, false, identity);
823 }
824 break;
825 default:
826 unreachable("Invalid reduction mode");
827 }
828
829
830 if (op == aco_opcode::p_reduce) {
831 if (reduction_needs_last_op && dst.regClass().type() == RegType::vgpr) {
832 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
833 emit_op(ctx, dst.physReg(), tmp, vtmp, PhysReg{0}, reduce_op, src.size());
834 return;
835 }
836
837 if (reduction_needs_last_op)
838 emit_op(ctx, tmp, vtmp, tmp, PhysReg{0}, reduce_op, src.size());
839 }
840
841 /* restore exec */
842 bld.sop1(Builder::s_mov, Definition(exec, bld.lm), Operand(stmp, bld.lm));
843
844 if (dst.regClass().type() == RegType::sgpr) {
845 for (unsigned k = 0; k < src.size(); k++) {
846 bld.readlane(Definition(PhysReg{dst.physReg() + k}, s1),
847 Operand(PhysReg{tmp + k}, v1), Operand(ctx->program->wave_size - 1));
848 }
849 } else if (dst.physReg() != tmp) {
850 for (unsigned k = 0; k < src.size(); k++) {
851 bld.vop1(aco_opcode::v_mov_b32, Definition(PhysReg{dst.physReg() + k}, v1),
852 Operand(PhysReg{tmp + k}, v1));
853 }
854 }
855 }
856
857 void emit_gfx10_wave64_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
858 {
859 /* Emulates proper bpermute on GFX10 in wave64 mode.
860 *
861 * This is necessary because on GFX10 the bpermute instruction only works
862 * on half waves (you can think of it as having a cluster size of 32), so we
863 * manually swap the data between the two halves using two shared VGPRs.
864 */
865
866 assert(program->chip_class >= GFX10);
867 assert(program->info->wave_size == 64);
868
869 unsigned shared_vgpr_reg_0 = align(program->config->num_vgprs, 4) + 256;
870 Definition dst = instr->definitions[0];
871 Definition tmp_exec = instr->definitions[1];
872 Definition clobber_scc = instr->definitions[2];
873 Operand index_x4 = instr->operands[0];
874 Operand input_data = instr->operands[1];
875 Operand same_half = instr->operands[2];
876
877 assert(dst.regClass() == v1);
878 assert(tmp_exec.regClass() == bld.lm);
879 assert(clobber_scc.isFixed() && clobber_scc.physReg() == scc);
880 assert(same_half.regClass() == bld.lm);
881 assert(index_x4.regClass() == v1);
882 assert(input_data.regClass().type() == RegType::vgpr);
883 assert(input_data.bytes() <= 4);
884 assert(dst.physReg() != index_x4.physReg());
885 assert(dst.physReg() != input_data.physReg());
886 assert(tmp_exec.physReg() != same_half.physReg());
887
888 PhysReg shared_vgpr_lo(shared_vgpr_reg_0);
889 PhysReg shared_vgpr_hi(shared_vgpr_reg_0 + 1);
890
891 /* Permute the input within the same half-wave */
892 bld.ds(aco_opcode::ds_bpermute_b32, dst, index_x4, input_data);
893
894 /* HI: Copy data from high lanes 32-63 to shared vgpr */
895 bld.vop1_dpp(aco_opcode::v_mov_b32, Definition(shared_vgpr_hi, v1), input_data, dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
896 /* Save EXEC */
897 bld.sop1(aco_opcode::s_mov_b64, tmp_exec, Operand(exec, s2));
898 /* Set EXEC to enable LO lanes only */
899 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(0u));
900 /* LO: Copy data from low lanes 0-31 to shared vgpr */
901 bld.vop1(aco_opcode::v_mov_b32, Definition(shared_vgpr_lo, v1), input_data);
902 /* LO: bpermute shared vgpr (high lanes' data) */
903 bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_hi, v1), index_x4, Operand(shared_vgpr_hi, v1));
904 /* Set EXEC to enable HI lanes only */
905 bld.sop2(aco_opcode::s_bfm_b64, Definition(exec, s2), Operand(32u), Operand(32u));
906 /* HI: bpermute shared vgpr (low lanes' data) */
907 bld.ds(aco_opcode::ds_bpermute_b32, Definition(shared_vgpr_lo, v1), index_x4, Operand(shared_vgpr_lo, v1));
908
909 /* Only enable lanes which use the other half's data */
910 bld.sop2(aco_opcode::s_andn2_b64, Definition(exec, s2), clobber_scc, Operand(tmp_exec.physReg(), s2), same_half);
911 /* LO: Copy shared vgpr (high lanes' bpermuted data) to output vgpr */
912 bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_hi, v1), dpp_quad_perm(0, 1, 2, 3), 0x3, 0xf, false);
913 /* HI: Copy shared vgpr (low lanes' bpermuted data) to output vgpr */
914 bld.vop1_dpp(aco_opcode::v_mov_b32, dst, Operand(shared_vgpr_lo, v1), dpp_quad_perm(0, 1, 2, 3), 0xc, 0xf, false);
915
916 /* Restore saved EXEC */
917 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(tmp_exec.physReg(), s2));
918
919 /* RA assumes that the result is always in the low part of the register, so we have to shift if it's not there already */
920 if (input_data.physReg().byte()) {
921 unsigned right_shift = input_data.physReg().byte() * 8;
922 bld.vop2(aco_opcode::v_lshrrev_b32, dst, Operand(right_shift), Operand(dst.physReg(), v1));
923 }
924 }
925
926 void emit_gfx6_bpermute(Program *program, aco_ptr<Instruction> &instr, Builder &bld)
927 {
928 /* Emulates bpermute using readlane instructions */
929
930 Operand index = instr->operands[0];
931 Operand input = instr->operands[1];
932 Definition dst = instr->definitions[0];
933 Definition temp_exec = instr->definitions[1];
934 Definition clobber_vcc = instr->definitions[2];
935
936 assert(dst.regClass() == v1);
937 assert(temp_exec.regClass() == bld.lm);
938 assert(clobber_vcc.regClass() == bld.lm);
939 assert(clobber_vcc.physReg() == vcc);
940 assert(index.regClass() == v1);
941 assert(index.physReg() != dst.physReg());
942 assert(input.regClass().type() == RegType::vgpr);
943 assert(input.bytes() <= 4);
944 assert(input.physReg() != dst.physReg());
945
946 /* Save original EXEC */
947 bld.sop1(aco_opcode::s_mov_b64, temp_exec, Operand(exec, s2));
948
949 /* An "unrolled loop" that is executed per each lane.
950 * This takes only a few instructions per lane, as opposed to a "real" loop
951 * with branching, where the branch instruction alone would take 16+ cycles.
952 */
953 for (unsigned n = 0; n < program->wave_size; ++n) {
954 /* Activate the lane which has N for its source index */
955 bld.vopc(aco_opcode::v_cmpx_eq_u32, Definition(exec, bld.lm), clobber_vcc, Operand(n), index);
956 /* Read the data from lane N */
957 bld.readlane(Definition(vcc, s1), input, Operand(n));
958 /* On the active lane, move the data we read from lane N to the destination VGPR */
959 bld.vop1(aco_opcode::v_mov_b32, dst, Operand(vcc, s1));
960 /* Restore original EXEC */
961 bld.sop1(aco_opcode::s_mov_b64, Definition(exec, s2), Operand(temp_exec.physReg(), s2));
962 }
963 }
964
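/* A single pending copy of "bytes" bytes from op to def. uses[i] counts how
 * many other copies still read byte i of the destination register as part of
 * their source; is_used aliases all eight counters for a quick zero test. */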
965 struct copy_operation {
966 Operand op;
967 Definition def;
968 unsigned bytes;
969 union {
970 uint8_t uses[8];
971 uint64_t is_used = 0;
972 };
973 };
974
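/* Extracts the largest power-of-two sized, naturally aligned piece of "src"
 * starting at "offset" whose bytes share the same used/unused state (unless
 * ignore_uses is set) and returns it as a Definition/Operand pair. */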
975 void split_copy(unsigned offset, Definition *def, Operand *op, const copy_operation& src, bool ignore_uses, unsigned max_size)
976 {
977 PhysReg def_reg = src.def.physReg();
978 PhysReg op_reg = src.op.physReg();
979 def_reg.reg_b += offset;
980 op_reg.reg_b += offset;
981
982 max_size = MIN2(max_size, src.def.regClass().type() == RegType::vgpr ? 4 : 8);
983
984 /* make sure the size is a power of two and reg % bytes == 0 */
985 unsigned bytes = 1;
986 for (; bytes <= max_size; bytes *= 2) {
987 unsigned next = bytes * 2u;
988 bool can_increase = def_reg.reg_b % next == 0 &&
989 offset + next <= src.bytes && next <= max_size;
990 if (!src.op.isConstant() && can_increase)
991 can_increase = op_reg.reg_b % next == 0;
992 for (unsigned i = 0; !ignore_uses && can_increase && (i < bytes); i++)
993 can_increase = (src.uses[offset + bytes + i] == 0) == (src.uses[offset] == 0);
994 if (!can_increase)
995 break;
996 }
997
998 RegClass def_cls = bytes % 4 == 0 ? RegClass(src.def.regClass().type(), bytes / 4u) :
999 RegClass(src.def.regClass().type(), bytes).as_subdword();
1000 *def = Definition(src.def.tempId(), def_reg, def_cls);
1001 if (src.op.isConstant()) {
1002 assert(offset == 0 || (offset == 4 && src.op.bytes() == 8));
1003 if (src.op.bytes() == 8 && bytes == 4)
1004 *op = Operand(uint32_t(src.op.constantValue64() >> (offset * 8u)));
1005 else
1006 *op = src.op;
1007 } else {
1008 RegClass op_cls = bytes % 4 == 0 ? RegClass(src.op.regClass().type(), bytes / 4u) :
1009 RegClass(src.op.regClass().type(), bytes).as_subdword();
1010 *op = Operand(op_reg, op_cls);
1011 op->setTemp(Temp(src.op.tempId(), op_cls));
1012 }
1013 }
1014
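/* Returns a mask of the bytes of [a_start, a_start+a_size) that overlap
 * [b_start, b_start+b_size), relative to a_start. For example, a_start=0,
 * a_size=4, b_start=2, b_size=4 yields 0b1100. */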
1015 uint32_t get_intersection_mask(int a_start, int a_size,
1016 int b_start, int b_size)
1017 {
1018 int intersection_start = MAX2(b_start - a_start, 0);
1019 int intersection_end = MAX2(b_start + b_size - a_start, 0);
1020 if (intersection_start >= a_size || intersection_end == 0)
1021 return 0;
1022
1023 uint32_t mask = u_bit_consecutive(0, a_size);
1024 return u_bit_consecutive(intersection_start, intersection_end - intersection_start) & mask;
1025 }
1026
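/* Emits the parts of "copy" whose destination bytes are no longer needed as a
 * source by any other copy. Returns true if anything was written. */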
1027 bool do_copy(lower_context* ctx, Builder& bld, const copy_operation& copy, bool *preserve_scc, PhysReg scratch_sgpr)
1028 {
1029 bool did_copy = false;
1030 for (unsigned offset = 0; offset < copy.bytes;) {
1031 if (copy.uses[offset]) {
1032 offset++;
1033 continue;
1034 }
1035
1036 Definition def;
1037 Operand op;
1038 split_copy(offset, &def, &op, copy, false, 8);
1039
1040 if (def.physReg() == scc) {
1041 bld.sopc(aco_opcode::s_cmp_lg_i32, def, op, Operand(0u));
1042 *preserve_scc = true;
1043 } else if (def.bytes() == 8 && def.getTemp().type() == RegType::sgpr) {
1044 bld.sop1(aco_opcode::s_mov_b64, def, Operand(op.physReg(), s2));
1045 } else if (def.regClass().is_subdword() && ctx->program->chip_class < GFX8) {
1046 if (op.physReg().byte()) {
1047 assert(def.physReg().byte() == 0);
1048 bld.vop2(aco_opcode::v_lshrrev_b32, def, Operand(op.physReg().byte() * 8), op);
1049 } else if (def.physReg().byte() == 2) {
1050 assert(op.physReg().byte() == 0);
1051 /* preserve the target's lower half */
1052 def = Definition(def.physReg().advance(-2), v1);
1053 bld.vop2(aco_opcode::v_and_b32, Definition(op.physReg(), v1), Operand(0xFFFFu), op);
1054 if (def.physReg().reg() != op.physReg().reg())
1055 bld.vop2(aco_opcode::v_and_b32, def, Operand(0xFFFFu), Operand(def.physReg(), v2b));
1056 bld.vop2(aco_opcode::v_cvt_pk_u16_u32, def, Operand(def.physReg(), v2b), op);
1057 } else if (def.physReg().byte()) {
1058 unsigned bits = def.physReg().byte() * 8;
1059 assert(op.physReg().byte() == 0);
1060 def = Definition(def.physReg().advance(-def.physReg().byte()), v1);
1061 bld.vop2(aco_opcode::v_and_b32, def, Operand((1 << bits) - 1u), Operand(def.physReg(), op.regClass()));
1062 if (def.physReg().reg() == op.physReg().reg()) {
1063 if (bits < 24) {
1064 bld.vop2(aco_opcode::v_mul_u32_u24, def, Operand((1 << bits) + 1u), op);
1065 } else {
1066 bld.sop1(aco_opcode::s_mov_b32, Definition(scratch_sgpr, s1), Operand((1 << bits) + 1u));
1067 bld.vop3(aco_opcode::v_mul_lo_u32, def, Operand(scratch_sgpr, s1), op);
1068 }
1069 } else {
1070 bld.vop2(aco_opcode::v_lshlrev_b32, Definition(op.physReg(), def.regClass()), Operand(bits), op);
1071 bld.vop2(aco_opcode::v_or_b32, def, Operand(def.physReg(), op.regClass()), op);
1072 bld.vop2(aco_opcode::v_lshrrev_b32, Definition(op.physReg(), def.regClass()), Operand(bits), op);
1073 }
1074 } else {
1075 bld.vop1(aco_opcode::v_mov_b32, def, op);
1076 }
1077 } else {
1078 bld.copy(def, op);
1079 }
1080
1081 did_copy = true;
1082 offset += def.bytes();
1083 }
1084 return did_copy;
1085 }
1086
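/* Swaps the contents of copy.op and copy.def to break a cycle in the location
 * transfer graph; on GFX8+, a final partial copy back undoes bytes that were
 * swapped along but don't belong to this swap. */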
1087 void do_swap(lower_context *ctx, Builder& bld, const copy_operation& copy, bool preserve_scc, Pseudo_instruction *pi)
1088 {
1089 unsigned offset = 0;
1090
1091 if (copy.bytes == 3 && (copy.def.physReg().reg_b % 4 <= 1) &&
1092 (copy.def.physReg().reg_b % 4) == (copy.op.physReg().reg_b % 4)) {
1093 /* instead of doing a 2-byte and 1-byte swap, do a 4-byte swap and then fixup with a 1-byte swap */
1094 PhysReg op = copy.op.physReg();
1095 PhysReg def = copy.def.physReg();
1096 op.reg_b &= ~0x3;
1097 def.reg_b &= ~0x3;
1098
1099 copy_operation tmp;
1100 tmp.op = Operand(op, v1);
1101 tmp.def = Definition(def, v1);
1102 tmp.bytes = 4;
1103 memset(tmp.uses, 1, 4);
1104 do_swap(ctx, bld, tmp, preserve_scc, pi);
1105
1106 op.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1107 def.reg_b += copy.def.physReg().reg_b % 4 == 0 ? 3 : 0;
1108 tmp.op = Operand(op, v1b);
1109 tmp.def = Definition(def, v1b);
1110 tmp.bytes = 1;
1111 tmp.uses[0] = 1;
1112 do_swap(ctx, bld, tmp, preserve_scc, pi);
1113
1114 offset = copy.bytes;
1115 }
1116
1117 for (; offset < copy.bytes;) {
1118 Definition def;
1119 Operand op;
1120 split_copy(offset, &def, &op, copy, true, 8);
1121
1122 assert(op.regClass() == def.regClass());
1123 Operand def_as_op = Operand(def.physReg(), def.regClass());
1124 Definition op_as_def = Definition(op.physReg(), op.regClass());
1125 if (ctx->program->chip_class >= GFX9 && def.regClass() == v1) {
1126 bld.vop1(aco_opcode::v_swap_b32, def, op_as_def, op, def_as_op);
1127 } else if (def.regClass() == v1 || (def.regClass().is_subdword() && ctx->program->chip_class < GFX8)) {
1128 assert(def.physReg().byte() == 0 && op.physReg().byte() == 0);
1129 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1130 bld.vop2(aco_opcode::v_xor_b32, def, op, def_as_op);
1131 bld.vop2(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1132 } else if (op.physReg() == scc || def.physReg() == scc) {
1133 /* we need to swap scc and another sgpr */
1134 assert(!preserve_scc);
1135
1136 PhysReg other = op.physReg() == scc ? def.physReg() : op.physReg();
1137
1138 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1139 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(other, s1), Operand(0u));
1140 bld.sop1(aco_opcode::s_mov_b32, Definition(other, s1), Operand(pi->scratch_sgpr, s1));
1141 } else if (def.regClass() == s1) {
1142 if (preserve_scc) {
1143 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), op);
1144 bld.sop1(aco_opcode::s_mov_b32, op_as_def, def_as_op);
1145 bld.sop1(aco_opcode::s_mov_b32, def, Operand(pi->scratch_sgpr, s1));
1146 } else {
1147 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1148 bld.sop2(aco_opcode::s_xor_b32, def, Definition(scc, s1), op, def_as_op);
1149 bld.sop2(aco_opcode::s_xor_b32, op_as_def, Definition(scc, s1), op, def_as_op);
1150 }
1151 } else if (def.regClass() == s2) {
1152 if (preserve_scc)
1153 bld.sop1(aco_opcode::s_mov_b32, Definition(pi->scratch_sgpr, s1), Operand(scc, s1));
1154 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1155 bld.sop2(aco_opcode::s_xor_b64, def, Definition(scc, s1), op, def_as_op);
1156 bld.sop2(aco_opcode::s_xor_b64, op_as_def, Definition(scc, s1), op, def_as_op);
1157 if (preserve_scc)
1158 bld.sopc(aco_opcode::s_cmp_lg_i32, Definition(scc, s1), Operand(pi->scratch_sgpr, s1), Operand(0u));
1159 } else if (ctx->program->chip_class >= GFX9 && def.bytes() == 2 && def.physReg().reg() == op.physReg().reg()) {
1160 aco_ptr<VOP3P_instruction> vop3p{create_instruction<VOP3P_instruction>(aco_opcode::v_pk_add_u16, Format::VOP3P, 2, 1)};
1161 vop3p->operands[0] = Operand(PhysReg{op.physReg().reg()}, v1);
1162 vop3p->operands[1] = Operand(0u);
1163 vop3p->definitions[0] = Definition(PhysReg{op.physReg().reg()}, v1);
1164 vop3p->opsel_lo = 0x1;
1165 vop3p->opsel_hi = 0x2;
1166 bld.insert(std::move(vop3p));
1167 } else {
1168 assert(def.regClass().is_subdword());
1169 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1170 bld.vop2_sdwa(aco_opcode::v_xor_b32, def, op, def_as_op);
1171 bld.vop2_sdwa(aco_opcode::v_xor_b32, op_as_def, op, def_as_op);
1172 }
1173
1174 offset += def.bytes();
1175 }
1176
1177 if (ctx->program->chip_class <= GFX7)
1178 return;
1179
1180 /* fixup in case we swapped bytes we shouldn't have */
1181 copy_operation tmp_copy = copy;
1182 tmp_copy.op.setFixed(copy.def.physReg());
1183 tmp_copy.def.setFixed(copy.op.physReg());
1184 do_copy(ctx, bld, tmp_copy, &preserve_scc, pi->scratch_sgpr);
1185 }
1186
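/* Lowers a set of copies that must appear to happen in parallel: self-copies
 * are dropped, oversized copies split and adjacent ones coalesced, then the
 * paths of the location transfer graph are emitted as plain copies and any
 * remaining cycles are broken with swaps. */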
1187 void handle_operands(std::map<PhysReg, copy_operation>& copy_map, lower_context* ctx, chip_class chip_class, Pseudo_instruction *pi)
1188 {
1189 Builder bld(ctx->program, &ctx->instructions);
1190 unsigned num_instructions_before = ctx->instructions.size();
1191 aco_ptr<Instruction> mov;
1192 std::map<PhysReg, copy_operation>::iterator it = copy_map.begin();
1193 std::map<PhysReg, copy_operation>::iterator target;
1194 bool writes_scc = false;
1195
1196 /* count the number of uses for each dst reg */
1197 while (it != copy_map.end()) {
1198
1199 if (it->second.def.physReg() == scc)
1200 writes_scc = true;
1201
1202 assert(!pi->tmp_in_scc || !(it->second.def.physReg() == pi->scratch_sgpr));
1203
1204 /* if src and dst reg are the same, remove operation */
1205 if (it->first == it->second.op.physReg()) {
1206 it = copy_map.erase(it);
1207 continue;
1208 }
1209
1210 /* split large copies */
1211 if (it->second.bytes > 8) {
1212 assert(!it->second.op.isConstant());
1213 assert(!it->second.def.regClass().is_subdword());
1214 RegClass rc = RegClass(it->second.def.regClass().type(), it->second.def.size() - 2);
1215 Definition hi_def = Definition(PhysReg{it->first + 2}, rc);
1216 rc = RegClass(it->second.op.regClass().type(), it->second.op.size() - 2);
1217 Operand hi_op = Operand(PhysReg{it->second.op.physReg() + 2}, rc);
1218 copy_operation copy = {hi_op, hi_def, it->second.bytes - 8};
1219 copy_map[hi_def.physReg()] = copy;
1220 assert(it->second.op.physReg().byte() == 0 && it->second.def.physReg().byte() == 0);
1221 it->second.op = Operand(it->second.op.physReg(), it->second.op.regClass().type() == RegType::sgpr ? s2 : v2);
1222 it->second.def = Definition(it->second.def.physReg(), it->second.def.regClass().type() == RegType::sgpr ? s2 : v2);
1223 it->second.bytes = 8;
1224 }
1225
1226 /* try to coalesce copies */
1227 if (it->second.bytes < 8 && !it->second.op.isConstant() &&
1228 it->first.reg_b % util_next_power_of_two(it->second.bytes + 1) == 0 &&
1229 it->second.op.physReg().reg_b % util_next_power_of_two(it->second.bytes + 1) == 0) {
1230 // TODO try more relaxed alignment for subdword copies
1231 PhysReg other_def_reg = it->first;
1232 other_def_reg.reg_b += it->second.bytes;
1233 PhysReg other_op_reg = it->second.op.physReg();
1234 other_op_reg.reg_b += it->second.bytes;
1235 std::map<PhysReg, copy_operation>::iterator other = copy_map.find(other_def_reg);
1236 if (other != copy_map.end() &&
1237 other->second.op.physReg() == other_op_reg &&
1238 it->second.bytes + other->second.bytes <= 8) {
1239 it->second.bytes += other->second.bytes;
1240 it->second.def = Definition(it->first, RegClass::get(it->second.def.regClass().type(), it->second.bytes));
1241 it->second.op = Operand(it->second.op.physReg(), RegClass::get(it->second.op.regClass().type(), it->second.bytes));
1242 copy_map.erase(other);
1243 }
1244 }
1245
1246 /* check if the definition reg is used by another copy operation */
1247 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1248 if (copy.second.op.isConstant())
1249 continue;
1250 for (uint16_t i = 0; i < it->second.bytes; i++) {
1251 /* distance might underflow */
1252 unsigned distance = it->first.reg_b + i - copy.second.op.physReg().reg_b;
1253 if (distance < copy.second.bytes)
1254 it->second.uses[i] += 1;
1255 }
1256 }
1257
1258 ++it;
1259 }
1260
1261 /* first, handle paths in the location transfer graph */
1262 bool preserve_scc = pi->tmp_in_scc && !writes_scc;
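/* Copies that would only partially write their destination are postponed as
 * long as other copies make progress (skip_partial_copies starts as true and
 * is set to do_copy()'s result after every attempt), which keeps coalescing
 * opportunities alive. */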
1263 bool skip_partial_copies = true;
1264 it = copy_map.begin();
1265 while (true) {
1266 if (copy_map.empty()) {
1267 ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before;
1268 return;
1269 }
1270 if (it == copy_map.end()) {
1271 if (!skip_partial_copies)
1272 break;
1273 skip_partial_copies = false;
1274 it = copy_map.begin();
1275 }
1276
1277 /* on GFX6/7, we need some small workarounds as there is no
1278 * SDWA instruction to do partial register writes */
1279 if (ctx->program->chip_class < GFX8 && it->second.bytes < 4) {
1280 if (it->first.byte() == 0 && it->second.op.physReg().byte() == 0 &&
1281 !it->second.is_used && pi->opcode == aco_opcode::p_split_vector) {
1282 /* Other operations might overwrite the high bits, so change all users
1283 * of the high bits to the new target where they are still available.
1284 * This mechanism depends on also emitting dead definitions. */
1285 PhysReg reg_hi = it->second.op.physReg().advance(it->second.bytes);
1286 while (reg_hi != PhysReg(it->second.op.physReg().reg() + 1)) {
1287 std::map<PhysReg, copy_operation>::iterator other = copy_map.begin();
1288 for (other = copy_map.begin(); other != copy_map.end(); other++) {
1289 /* on GFX6/7, if the high bits are used as operand, they cannot be a target */
1290 if (other->second.op.physReg() == reg_hi) {
1291 other->second.op.setFixed(it->first.advance(reg_hi.byte()));
1292 break; /* break because an operand can only be used once */
1293 }
1294 }
1295 reg_hi = reg_hi.advance(it->second.bytes);
1296 }
1297 } else if (it->first.byte()) {
1298 assert(pi->opcode == aco_opcode::p_create_vector);
1299 /* on GFX6/7, if we target an upper half where the lower half hasn't yet been handled,
1300 * move to the target operand's high bits. This is safe to do as it cannot be an operand */
1301 PhysReg lo = PhysReg(it->first.reg());
1302 std::map<PhysReg, copy_operation>::iterator other = copy_map.find(lo);
1303 if (other != copy_map.end()) {
1304 assert(other->second.bytes == it->first.byte());
1305 PhysReg new_reg_hi = other->second.op.physReg().advance(it->first.byte());
1306 it->second.def = Definition(new_reg_hi, it->second.def.regClass());
1307 it->second.is_used = 0;
1308 other->second.bytes += it->second.bytes;
1309 other->second.def.setTemp(Temp(other->second.def.tempId(), RegClass::get(RegType::vgpr, other->second.bytes)));
1310 other->second.op.setTemp(Temp(other->second.op.tempId(), RegClass::get(RegType::vgpr, other->second.bytes)));
1311 /* if the new target's high bits are also a target, change uses */
1312 std::map<PhysReg, copy_operation>::iterator target = copy_map.find(new_reg_hi);
1313 if (target != copy_map.end()) {
1314 for (unsigned i = 0; i < it->second.bytes; i++)
1315 target->second.uses[i]++;
1316 }
1317 }
1318 }
1319 }
1320
1321 /* find portions where the target reg is not used as operand for any other copy */
1322 if (it->second.is_used) {
1323 if (it->second.op.isConstant() || skip_partial_copies) {
1324 /* we have to skip constants until is_used=0.
1325 * we also skip partial copies at the beginning to help coalescing */
1326 ++it;
1327 continue;
1328 }
1329
1330 unsigned has_zero_use_bytes = 0;
1331 for (unsigned i = 0; i < it->second.bytes; i++)
1332 has_zero_use_bytes |= (it->second.uses[i] == 0) << i;
1333
1334 if (has_zero_use_bytes) {
1335 /* Skipping partial copying and doing a v_swap_b32 and then fixup
1336 * copies is usually beneficial for sub-dword copies, but if doing
1337 * a partial copy allows further copies, it should be done instead. */
1338 bool partial_copy = (has_zero_use_bytes == 0xf) || (has_zero_use_bytes == 0xf0);
1339 for (std::pair<const PhysReg, copy_operation>& copy : copy_map) {
1340 if (partial_copy)
1341 break;
1342 for (uint16_t i = 0; i < copy.second.bytes; i++) {
1343 /* distance might underflow */
1344 unsigned distance = copy.first.reg_b + i - it->second.op.physReg().reg_b;
1345 if (distance < it->second.bytes && copy.second.uses[i] == 1 &&
1346 !it->second.uses[distance])
1347 partial_copy = true;
1348 }
1349 }
1350
1351 if (!partial_copy) {
1352 ++it;
1353 continue;
1354 }
1355 } else {
1356 /* full target reg is used: register swapping needed */
1357 ++it;
1358 continue;
1359 }
1360 }
1361
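/* do_copy may only perform the portions of this copy that are currently
 * possible; skip_partial_copies is re-armed on progress, presumably so that
 * whole-register copies are tried again before resorting to partial ones. */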
1362 bool did_copy = do_copy(ctx, bld, it->second, &preserve_scc, pi->scratch_sgpr);
1363 skip_partial_copies = did_copy;
1364 std::pair<PhysReg, copy_operation> copy = *it;
1365
1366 if (it->second.is_used == 0) {
1367 /* the target reg is not used as an operand for any other copy, so we
1368 * have copied all of it */
1369 copy_map.erase(it);
1370 it = copy_map.begin();
1371 } else {
1372 /* we only performed some portions of this copy, so split it to only
1373 * leave the portions that still need to be done */
1374 copy_operation original = it->second; /* the map insertion below can overwrite this */
1375 copy_map.erase(it);
1376 for (unsigned offset = 0; offset < original.bytes;) {
1377 if (original.uses[offset] == 0) {
1378 offset++;
1379 continue;
1380 }
1381 Definition def;
1382 Operand op;
1383 split_copy(offset, &def, &op, original, false, 8);
1384
1385 copy_operation copy = {op, def, def.bytes()};
1386 for (unsigned i = 0; i < copy.bytes; i++)
1387 copy.uses[i] = original.uses[i + offset];
1388 copy_map[def.physReg()] = copy;
1389
1390 offset += def.bytes();
1391 }
1392
1393 it = copy_map.begin();
1394 }
1395
1396 /* Reduce the number of uses of the operand reg by one. Do this after
1397 * splitting the copy or removing it in case the copy writes to its own
1398 * operand (for example, v[7:8] = v[8:9]) */
1399 if (did_copy && !copy.second.op.isConstant()) {
1400 for (std::pair<const PhysReg, copy_operation>& other : copy_map) {
1401 for (uint16_t i = 0; i < other.second.bytes; i++) {
1402 /* distance might underflow */
1403 unsigned distance = other.first.reg_b + i - copy.second.op.physReg().reg_b;
1404 if (distance < copy.second.bytes && !copy.second.uses[distance])
1405 other.second.uses[i] -= 1;
1406 }
1407 }
1408 }
1409 }
1410
1411 /* all remaining target regs are needed as an operand somewhere, which means all entries are part of a cycle */
1412 unsigned largest = 0;
1413 for (const std::pair<const PhysReg, copy_operation>& op : copy_map)
1414 largest = MAX2(largest, op.second.bytes);
1415
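/* Cycles are broken one entry at a time: swap one copy's source and
 * destination, then rewrite every remaining entry that read the swapped
 * destination so it reads the swapped source instead. */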
1416 while (!copy_map.empty()) {
1417
1418 /* Perform larger swaps first, because larger swaps can make other
1419 * swaps unnecessary. */
1420 auto it = copy_map.begin();
1421 for (auto it2 = copy_map.begin(); it2 != copy_map.end(); ++it2) {
1422 if (it2->second.bytes > it->second.bytes) {
1423 it = it2;
1424 if (it->second.bytes == largest)
1425 break;
1426 }
1427 }
1428
1429 /* constant copies should already have been performed in the loop above */
1430 assert(!it->second.op.isConstant());
1431
1432 assert(it->second.op.isFixed());
1433 assert(it->second.def.regClass() == it->second.op.regClass());
1434
1435 if (it->first == it->second.op.physReg()) {
1436 copy_map.erase(it);
1437 continue;
1438 }
1439
1440 if (preserve_scc && it->second.def.getTemp().type() == RegType::sgpr)
1441 assert(it->second.def.physReg() != pi->scratch_sgpr);
1442
1443 /* to resolve the cycle, we have to swap the src reg with the dst reg */
1444 copy_operation swap = it->second;
1445
1446 /* if this is self-intersecting, we have to split it because
1447 * self-intersecting swaps don't make sense */
1448 PhysReg lower = swap.def.physReg();
1449 PhysReg higher = swap.op.physReg();
1450 if (lower.reg_b > higher.reg_b)
1451 std::swap(lower, higher);
1452 if (higher.reg_b - lower.reg_b < (int)swap.bytes) {
1453 unsigned offset = higher.reg_b - lower.reg_b;
1454 RegType type = swap.def.regClass().type();
1455
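/* peel the overlap off into separate 'middle' and 'end' map entries so that
 * the swap emitted below only covers the non-overlapping leading bytes */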
1456 copy_operation middle;
1457 lower.reg_b += offset;
1458 higher.reg_b += offset;
1459 middle.bytes = swap.bytes - offset * 2;
1460 memcpy(middle.uses, swap.uses + offset, middle.bytes);
1461 middle.op = Operand(lower, RegClass::get(type, middle.bytes));
1462 middle.def = Definition(higher, RegClass::get(type, middle.bytes));
1463 copy_map[higher] = middle;
1464
1465 copy_operation end;
1466 lower.reg_b += middle.bytes;
1467 higher.reg_b += middle.bytes;
1468 end.bytes = swap.bytes - (offset + middle.bytes);
1469 memcpy(end.uses, swap.uses + offset + middle.bytes, end.bytes);
1470 end.op = Operand(lower, RegClass::get(type, end.bytes));
1471 end.def = Definition(higher, RegClass::get(type, end.bytes));
1472 copy_map[higher] = end;
1473
1474 memset(swap.uses + offset, 0, swap.bytes - offset);
1475 swap.bytes = offset;
1476 }
1477
1478 do_swap(ctx, bld, swap, preserve_scc, pi);
1479
1480 /* remove from map */
1481 copy_map.erase(it);
1482
1483 /* change the operand reg of the target's uses and split uses if needed */
1484 target = copy_map.begin();
1485 uint32_t bytes_left = u_bit_consecutive(0, swap.bytes);
1486 for (; target != copy_map.end(); ++target) {
1487 if (target->second.op.physReg() == swap.def.physReg() && swap.bytes == target->second.bytes) {
1488 target->second.op.setFixed(swap.op.physReg());
1489 break;
1490 }
1491
1492 uint32_t imask = get_intersection_mask(swap.def.physReg().reg_b, swap.bytes,
1493 target->second.op.physReg().reg_b, target->second.bytes);
1494
1495 if (!imask)
1496 continue;
1497
1498 assert(target->second.bytes < swap.bytes);
1499
1500 int offset = (int)target->second.op.physReg().reg_b - (int)swap.def.physReg().reg_b;
1501
1502 /* split and update the middle (the portion that reads the swap's
1503 * definition) to read the swap's operand instead */
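/* before_bytes: the part of the target's operand below the swap's definition,
 * middle_bytes: the part that overlaps it (redirected to the swap's operand),
 * after_bytes: the part above it; the latter two become new map entries */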
1504 int target_op_end = target->second.op.physReg().reg_b + target->second.bytes;
1505 int swap_def_end = swap.def.physReg().reg_b + swap.bytes;
1506 int before_bytes = MAX2(-offset, 0);
1507 int after_bytes = MAX2(target_op_end - swap_def_end, 0);
1508 int middle_bytes = target->second.bytes - before_bytes - after_bytes;
1509
1510 if (after_bytes) {
1511 unsigned after_offset = before_bytes + middle_bytes;
1512 assert(after_offset > 0);
1513 copy_operation copy;
1514 copy.bytes = after_bytes;
1515 memcpy(copy.uses, target->second.uses + after_offset, copy.bytes);
1516 RegClass rc = RegClass::get(target->second.op.regClass().type(), after_bytes);
1517 copy.op = Operand(target->second.op.physReg().advance(after_offset), rc);
1518 copy.def = Definition(target->second.def.physReg().advance(after_offset), rc);
1519 copy_map[copy.def.physReg()] = copy;
1520 }
1521
1522 if (middle_bytes) {
1523 copy_operation copy;
1524 copy.bytes = middle_bytes;
1525 memcpy(copy.uses, target->second.uses + before_bytes, copy.bytes);
1526 RegClass rc = RegClass::get(target->second.op.regClass().type(), middle_bytes);
1527 copy.op = Operand(swap.op.physReg().advance(MAX2(offset, 0)), rc);
1528 copy.def = Definition(target->second.def.physReg().advance(before_bytes), rc);
1529 copy_map[copy.def.physReg()] = copy;
1530 }
1531
1532 if (before_bytes) {
1533 copy_operation copy;
1534 target->second.bytes = before_bytes;
1535 RegClass rc = RegClass::get(target->second.op.regClass().type(), before_bytes);
1536 target->second.op = Operand(target->second.op.physReg(), rc);
1537 target->second.def = Definition(target->second.def.physReg(), rc);
1538 memset(target->second.uses + target->second.bytes, 0, 8 - target->second.bytes);
1539 }
1540
1541 /* break early since we know each byte of the swap's definition is used
1542 * at most once */
1543 bytes_left &= ~imask;
1544 if (!bytes_left)
1545 break;
1546 }
1547 }
1548 ctx->program->statistics[statistic_copies] += ctx->instructions.size() - num_instructions_before;
1549 }
1550
1551 void lower_to_hw_instr(Program* program)
1552 {
1553 Block *discard_block = NULL;
1554
1555 for (size_t i = 0; i < program->blocks.size(); i++)
1556 {
1557 Block *block = &program->blocks[i];
1558 lower_context ctx;
1559 ctx.program = program;
1560 Builder bld(program, &ctx.instructions);
1561
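/* the float mode has to be set whenever it differs from the config default
 * (first block) or from the fp_mode of any linear predecessor */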
1562 bool set_mode = i == 0 && block->fp_mode.val != program->config->float_mode;
1563 for (unsigned pred : block->linear_preds) {
1564 if (program->blocks[pred].fp_mode.val != block->fp_mode.val) {
1565 set_mode = true;
1566 break;
1567 }
1568 }
1569 if (set_mode) {
1570 /* only allow changing modes at top-level blocks so this doesn't break
1571 * the "jump over empty blocks" optimization */
1572 assert(block->kind & block_kind_top_level);
1573 uint32_t mode = block->fp_mode.val;
1574 /* "((size - 1) << 11) | register" (MODE is encoded as register 1) */
1575 bld.sopk(aco_opcode::s_setreg_imm32_b32, Operand(mode), (7 << 11) | 1);
1576 }
1577
1578 for (size_t j = 0; j < block->instructions.size(); j++) {
1579 aco_ptr<Instruction>& instr = block->instructions[j];
1580 aco_ptr<Instruction> mov;
1581 if (instr->format == Format::PSEUDO) {
1582 Pseudo_instruction *pi = (Pseudo_instruction*)instr.get();
1583
1584 switch (instr->opcode)
1585 {
1586 case aco_opcode::p_extract_vector:
1587 {
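/* a single element is copied out of the source vector: the source register is
 * advanced by index * element size, and nothing is emitted if the element is
 * already in place */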
1588 PhysReg reg = instr->operands[0].physReg();
1589 Definition& def = instr->definitions[0];
1590 reg.reg_b += instr->operands[1].constantValue() * def.bytes();
1591
1592 if (reg == def.physReg())
1593 break;
1594
1595 RegClass op_rc = def.regClass().is_subdword() ? def.regClass() :
1596 RegClass(instr->operands[0].getTemp().type(), def.size());
1597 std::map<PhysReg, copy_operation> copy_operations;
1598 copy_operations[def.physReg()] = {Operand(reg, op_rc), def, def.bytes()};
1599 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1600 break;
1601 }
1602 case aco_opcode::p_create_vector:
1603 {
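/* gather all operands into consecutive registers starting at the definition;
 * e.g. p_create_vector v[0:1] = v3, v7 presumably becomes the two dword copies
 * v0 = v3 and v1 = v7 (constants are copied too, undefined operands only
 * advance the destination cursor) */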
1604 std::map<PhysReg, copy_operation> copy_operations;
1605 PhysReg reg = instr->definitions[0].physReg();
1606
1607 for (const Operand& op : instr->operands) {
1608 if (op.isConstant()) {
1609 const Definition def = Definition(reg, RegClass(instr->definitions[0].getTemp().type(), op.size()));
1610 copy_operations[reg] = {op, def, op.bytes()};
1611 reg.reg_b += op.bytes();
1612 continue;
1613 }
1614 if (op.isUndefined()) {
1615 // TODO: coalesce subdword copies if dst byte is 0
1616 reg.reg_b += op.bytes();
1617 continue;
1618 }
1619
1620 RegClass rc_def = op.regClass().is_subdword() ? op.regClass() :
1621 RegClass(instr->definitions[0].getTemp().type(), op.size());
1622 const Definition def = Definition(reg, rc_def);
1623 copy_operations[def.physReg()] = {op, def, op.bytes()};
1624 reg.reg_b += op.bytes();
1625 }
1626 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1627 break;
1628 }
1629 case aco_opcode::p_split_vector:
1630 {
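/* the inverse of p_create_vector: each definition reads consecutive bytes of
 * the source operand, e.g. p_split_vector v0, v1 = v[2:3] would be lowered to
 * the copies v0 = v2 and v1 = v3 */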
1631 std::map<PhysReg, copy_operation> copy_operations;
1632 PhysReg reg = instr->operands[0].physReg();
1633
1634 for (const Definition& def : instr->definitions) {
1635 RegClass rc_op = def.regClass().is_subdword() ? def.regClass() :
1636 RegClass(instr->operands[0].getTemp().type(), def.size());
1637 const Operand op = Operand(reg, rc_op);
1638 copy_operations[def.physReg()] = {op, def, def.bytes()};
1639 reg.reg_b += def.bytes();
1640 }
1641 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1642 break;
1643 }
1644 case aco_opcode::p_parallelcopy:
1645 case aco_opcode::p_wqm:
1646 {
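/* both are lowered as a plain parallel copy: each operand is copied to the
 * definition with the same index */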
1647 std::map<PhysReg, copy_operation> copy_operations;
1648 for (unsigned i = 0; i < instr->operands.size(); i++) {
1649 assert(instr->definitions[i].bytes() == instr->operands[i].bytes());
1650 copy_operations[instr->definitions[i].physReg()] = {instr->operands[i], instr->definitions[i], instr->operands[i].bytes()};
1651 }
1652 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1653 break;
1654 }
1655 case aco_opcode::p_exit_early_if:
1656 {
1657 /* don't bother with an early exit near the end of the program */
1658 if ((block->instructions.size() - 1 - j) <= 4 &&
1659 block->instructions.back()->opcode == aco_opcode::s_endpgm) {
1660 unsigned null_exp_dest = (ctx.program->stage & hw_fs) ? 9 /* NULL */ : V_008DFC_SQ_EXP_POS;
1661 bool ignore_early_exit = true;
1662
1663 for (unsigned k = j + 1; k < block->instructions.size(); ++k) {
1664 const aco_ptr<Instruction> &instr = block->instructions[k];
1665 if (instr->opcode == aco_opcode::s_endpgm ||
1666 instr->opcode == aco_opcode::p_logical_end)
1667 continue;
1668 else if (instr->opcode == aco_opcode::exp &&
1669 static_cast<Export_instruction *>(instr.get())->dest == null_exp_dest)
1670 continue;
1671 else if (instr->opcode == aco_opcode::p_parallelcopy &&
1672 instr->definitions[0].isFixed() &&
1673 instr->definitions[0].physReg() == exec)
1674 continue;
1675
1676 ignore_early_exit = false;
1677 }
1678
1679 if (ignore_early_exit)
1680 break;
1681 }
1682
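/* lazily create a shared exit block: it performs a null export (marked done
 * and with the valid mask), optionally writes back the SMEM L1 cache and ends
 * the program */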
1683 if (!discard_block) {
1684 discard_block = program->create_and_insert_block();
1685 block = &program->blocks[i];
1686
1687 bld.reset(discard_block);
1688 bld.exp(aco_opcode::exp, Operand(v1), Operand(v1), Operand(v1), Operand(v1),
1689 0, V_008DFC_SQ_EXP_NULL, false, true, true);
1690 if (program->wb_smem_l1_on_end)
1691 bld.smem(aco_opcode::s_dcache_wb);
1692 bld.sopp(aco_opcode::s_endpgm);
1693
1694 bld.reset(&ctx.instructions);
1695 }
1696
1697 //TODO: exec can be zero here with block_kind_discard
1698
1699 assert(instr->operands[0].physReg() == scc);
1700 bld.sopp(aco_opcode::s_cbranch_scc0, instr->operands[0], discard_block->index);
1701
1702 discard_block->linear_preds.push_back(block->index);
1703 block->linear_succs.push_back(discard_block->index);
1704 break;
1705 }
1706 case aco_opcode::p_spill:
1707 {
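/* each dword of the spilled SGPR value is written into a lane of the linear
 * VGPR; the lane index is operand 1's constant plus the dword index */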
1708 assert(instr->operands[0].regClass() == v1.as_linear());
1709 for (unsigned i = 0; i < instr->operands[2].size(); i++)
1710 bld.writelane(bld.def(v1, instr->operands[0].physReg()),
1711 Operand(PhysReg{instr->operands[2].physReg() + i}, s1),
1712 Operand(instr->operands[1].constantValue() + i),
1713 instr->operands[0]);
1714 break;
1715 }
1716 case aco_opcode::p_reload:
1717 {
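/* the reverse of p_spill: each dword is read back from the corresponding lane
 * of the linear VGPR */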
1718 assert(instr->operands[0].regClass() == v1.as_linear());
1719 for (unsigned i = 0; i < instr->definitions[0].size(); i++)
1720 bld.readlane(bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1721 instr->operands[0],
1722 Operand(instr->operands[1].constantValue() + i));
1723 break;
1724 }
1725 case aco_opcode::p_as_uniform:
1726 {
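/* constant and SGPR sources are just copied; VGPR sources are made uniform by
 * reading each dword back with v_readfirstlane_b32 */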
1727 if (instr->operands[0].isConstant() || instr->operands[0].regClass().type() == RegType::sgpr) {
1728 std::map<PhysReg, copy_operation> copy_operations;
1729 copy_operations[instr->definitions[0].physReg()] = {instr->operands[0], instr->definitions[0], instr->definitions[0].bytes()};
1730 handle_operands(copy_operations, &ctx, program->chip_class, pi);
1731 } else {
1732 assert(instr->operands[0].regClass().type() == RegType::vgpr);
1733 assert(instr->definitions[0].regClass().type() == RegType::sgpr);
1734 assert(instr->operands[0].size() == instr->definitions[0].size());
1735 for (unsigned i = 0; i < instr->definitions[0].size(); i++) {
1736 bld.vop1(aco_opcode::v_readfirstlane_b32,
1737 bld.def(s1, PhysReg{instr->definitions[0].physReg() + i}),
1738 Operand(PhysReg{instr->operands[0].physReg() + i}, v1));
1739 }
1740 }
1741 break;
1742 }
1743 case aco_opcode::p_bpermute:
1744 {
1745 if (ctx.program->chip_class <= GFX7)
1746 emit_gfx6_bpermute(program, instr, bld);
1747 else if (ctx.program->chip_class == GFX10 && ctx.program->wave_size == 64)
1748 emit_gfx10_wave64_bpermute(program, instr, bld);
1749 else
1750 unreachable("Current hardware supports ds_bpermute, don't emit p_bpermute.");
1751 }
break;
1752 default:
1753 break;
1754 }
1755 } else if (instr->format == Format::PSEUDO_BRANCH) {
1756 Pseudo_branch_instruction* branch = static_cast<Pseudo_branch_instruction*>(instr.get());
1757 /* check if all blocks from current to target are empty */
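/* (only forward branches can be removed, and only if every block in between
 * is empty so that execution simply falls through to the target) */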
1758 bool can_remove = block->index < branch->target[0];
1759 for (unsigned i = block->index + 1; can_remove && i < branch->target[0]; i++) {
1760 if (program->blocks[i].instructions.size())
1761 can_remove = false;
1762 }
1763 if (can_remove)
1764 continue;
1765
1766 switch (instr->opcode) {
1767 case aco_opcode::p_branch:
1768 assert(block->linear_succs[0] == branch->target[0]);
1769 bld.sopp(aco_opcode::s_branch, branch->target[0]);
1770 break;
1771 case aco_opcode::p_cbranch_nz:
1772 assert(block->linear_succs[1] == branch->target[0]);
1773 if (branch->operands[0].physReg() == exec)
1774 bld.sopp(aco_opcode::s_cbranch_execnz, branch->target[0]);
1775 else if (branch->operands[0].physReg() == vcc)
1776 bld.sopp(aco_opcode::s_cbranch_vccnz, branch->target[0]);
1777 else {
1778 assert(branch->operands[0].physReg() == scc);
1779 bld.sopp(aco_opcode::s_cbranch_scc1, branch->target[0]);
1780 }
1781 break;
1782 case aco_opcode::p_cbranch_z:
1783 assert(block->linear_succs[1] == branch->target[0]);
1784 if (branch->operands[0].physReg() == exec)
1785 bld.sopp(aco_opcode::s_cbranch_execz, branch->target[0]);
1786 else if (branch->operands[0].physReg() == vcc)
1787 bld.sopp(aco_opcode::s_cbranch_vccz, branch->target[0]);
1788 else {
1789 assert(branch->operands[0].physReg() == scc);
1790 bld.sopp(aco_opcode::s_cbranch_scc0, branch->target[0]);
1791 }
1792 break;
1793 default:
1794 unreachable("Unknown Pseudo branch instruction!");
1795 }
1796
1797 } else if (instr->format == Format::PSEUDO_REDUCTION) {
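/* expand the pseudo reduction into the actual hardware instruction sequence,
 * using the temporary registers passed in as extra operands/definitions */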
1798 Pseudo_reduction_instruction* reduce = static_cast<Pseudo_reduction_instruction*>(instr.get());
1799 emit_reduction(&ctx, reduce->opcode, reduce->reduce_op, reduce->cluster_size,
1800 reduce->operands[1].physReg(), // tmp
1801 reduce->definitions[1].physReg(), // stmp
1802 reduce->operands[2].physReg(), // vtmp
1803 reduce->definitions[2].physReg(), // sitmp
1804 reduce->operands[0], reduce->definitions[0]);
1805 } else {
1806 ctx.instructions.emplace_back(std::move(instr));
1807 }
1808
1809 }
1810 block->instructions.swap(ctx.instructions);
1811 }
1812 }
1813
1814 }