/*
 * Copyright © 2018 Valve Corporation
 * Copyright © 2018 Google
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
27 #include "aco_builder.h"
/*
 * Insert p_linear_start instructions right before RA to correctly allocate
 * temporaries for reductions that have to disrespect EXEC by executing in
 * WWM.
 */
37 void setup_reduce_temp(Program
* program
)
39 unsigned last_top_level_block_idx
= 0;
42 std::vector
<bool> hasReductions(program
->blocks
.size());
43 for (Block
& block
: program
->blocks
) {
44 for (aco_ptr
<Instruction
>& instr
: block
.instructions
) {
45 if (instr
->format
!= Format::PSEUDO_REDUCTION
)
48 maxSize
= MAX2(maxSize
, instr
->operands
[0].size());
49 hasReductions
[block
.index
] = true;
56 assert(maxSize
== 1 || maxSize
== 2);
57 Temp
reduceTmp(0, RegClass(RegType::vgpr
, maxSize
).as_linear());
58 Temp
vtmp(0, RegClass(RegType::vgpr
, maxSize
).as_linear());
60 int vtmp_inserted_at
= -1;
61 bool reduceTmp_in_loop
= false;
62 bool vtmp_in_loop
= false;
64 for (Block
& block
: program
->blocks
) {
66 /* insert p_end_linear_vgpr after the outermost loop */
67 if (reduceTmp_in_loop
&& block
.loop_nest_depth
== 0) {
68 assert(inserted_at
== (int)last_top_level_block_idx
);
70 aco_ptr
<Instruction
> end
{create_instruction
<Instruction
>(aco_opcode::p_end_linear_vgpr
, Format::PSEUDO
, vtmp_in_loop
? 2 : 1, 0)};
71 end
->operands
[0] = Operand(reduceTmp
);
73 end
->operands
[1] = Operand(vtmp
);
74 /* insert after the phis of the loop exit block */
75 std::vector
<aco_ptr
<Instruction
>>::iterator it
= block
.instructions
.begin();
76 while ((*it
)->opcode
== aco_opcode::p_linear_phi
|| (*it
)->opcode
== aco_opcode::p_phi
)
78 block
.instructions
.insert(it
, std::move(end
));
79 reduceTmp_in_loop
= false;
82 if (block
.kind
& block_kind_top_level
)
83 last_top_level_block_idx
= block
.index
;
85 if (!hasReductions
[block
.index
])
88 std::vector
<aco_ptr
<Instruction
>>::iterator it
;
89 for (it
= block
.instructions
.begin(); it
!= block
.instructions
.end(); ++it
) {
90 Instruction
*instr
= (*it
).get();
91 if (instr
->format
!= Format::PSEUDO_REDUCTION
)
94 ReduceOp op
= static_cast<Pseudo_reduction_instruction
*>(instr
)->reduce_op
;
95 reduceTmp_in_loop
|= block
.loop_nest_depth
> 0;
97 if ((int)last_top_level_block_idx
!= inserted_at
) {
98 reduceTmp
= {program
->allocateId(), reduceTmp
.regClass()};
99 aco_ptr
<Pseudo_instruction
> create
{create_instruction
<Pseudo_instruction
>(aco_opcode::p_start_linear_vgpr
, Format::PSEUDO
, 0, 1)};
100 create
->definitions
[0] = Definition(reduceTmp
);
101 /* find the right place to insert this definition */
102 if (last_top_level_block_idx
== block
.index
) {
103 /* insert right before the current instruction */
104 it
= block
.instructions
.insert(it
, std::move(create
));
106 /* inserted_at is intentionally not updated here, so later blocks
107 * would insert at the end instead of using this one. */
109 assert(last_top_level_block_idx
< block
.index
);
110 /* insert before the branch at last top level block */
111 std::vector
<aco_ptr
<Instruction
>>& instructions
= program
->blocks
[last_top_level_block_idx
].instructions
;
112 instructions
.insert(std::next(instructions
.begin(), instructions
.size() - 1), std::move(create
));
113 inserted_at
= last_top_level_block_idx
;
117 /* same as before, except for the vector temporary instead of the reduce temporary */
118 unsigned cluster_size
= static_cast<Pseudo_reduction_instruction
*>(instr
)->cluster_size
;
119 bool need_vtmp
= op
== imul32
|| op
== fadd64
|| op
== fmul64
||
120 op
== fmin64
|| op
== fmax64
|| op
== umin64
||
121 op
== umax64
|| op
== imin64
|| op
== imax64
||
123 bool gfx10_need_vtmp
= op
== imul8
|| op
== imax8
|| op
== imin8
|| op
== umin8
||
124 op
== imul16
|| op
== imax16
|| op
== imin16
|| op
== umin16
||
127 if (program
->chip_class
>= GFX10
&& cluster_size
== 64)
129 if (program
->chip_class
>= GFX10
&& gfx10_need_vtmp
)
131 if (program
->chip_class
<= GFX7
)
134 need_vtmp
|= cluster_size
== 32;
136 vtmp_in_loop
|= need_vtmp
&& block
.loop_nest_depth
> 0;
137 if (need_vtmp
&& (int)last_top_level_block_idx
!= vtmp_inserted_at
) {
138 vtmp
= {program
->allocateId(), vtmp
.regClass()};
139 aco_ptr
<Pseudo_instruction
> create
{create_instruction
<Pseudo_instruction
>(aco_opcode::p_start_linear_vgpr
, Format::PSEUDO
, 0, 1)};
140 create
->definitions
[0] = Definition(vtmp
);
141 if (last_top_level_block_idx
== block
.index
) {
142 it
= block
.instructions
.insert(it
, std::move(create
));
145 assert(last_top_level_block_idx
< block
.index
);
146 std::vector
<aco_ptr
<Instruction
>>& instructions
= program
->blocks
[last_top_level_block_idx
].instructions
;
147 instructions
.insert(std::next(instructions
.begin(), instructions
.size() - 1), std::move(create
));
148 vtmp_inserted_at
= last_top_level_block_idx
;
152 instr
->operands
[1] = Operand(reduceTmp
);
154 instr
->operands
[2] = Operand(vtmp
);
156 /* scalar temporary */
157 Builder
bld(program
);
158 instr
->definitions
[1] = bld
.def(s2
);
160 /* scalar identity temporary */
161 bool need_sitmp
= (program
->chip_class
<= GFX7
|| program
->chip_class
>= GFX10
) && instr
->opcode
!= aco_opcode::p_reduce
;
162 if (instr
->opcode
== aco_opcode::p_exclusive_scan
) {
164 (op
== imin8
|| op
== imin16
|| op
== imin32
|| op
== imin64
||
165 op
== imax8
|| op
== imax16
|| op
== imax32
|| op
== imax64
||
166 op
== fmin16
|| op
== fmin32
|| op
== fmin64
||
167 op
== fmax16
|| op
== fmax32
|| op
== fmax64
||
168 op
== fmul16
|| op
== fmul64
);
171 instr
->definitions
[2] = bld
.def(RegClass(RegType::sgpr
, instr
->operands
[0].size()));
175 bool clobber_vcc
= false;
176 if ((op
== iadd32
|| op
== imul64
) && program
->chip_class
< GFX9
)
178 if (op
== iadd64
|| op
== umin64
|| op
== umax64
|| op
== imin64
|| op
== imax64
)
182 instr
->definitions
[4] = Definition(vcc
, bld
.lm
);