broadcom/vc5: Use a physical-reg-only register class for LDVPM.
src/broadcom/compiler/vir_register_allocate.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/ralloc.h"
#include "util/register_allocate.h"
#include "v3d_compiler.h"

#define QPU_R(i) { .magic = false, .index = i }

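/* The allocator uses a single flat register numbering: the ACC_COUNT
 * accumulators (r0-r4) come first, followed by the PHYS_COUNT entries of the
 * physical register file.
 */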
#define ACC_INDEX 0
#define ACC_COUNT 5
#define PHYS_INDEX (ACC_INDEX + ACC_COUNT)
#define PHYS_COUNT 64

bool
vir_init_reg_sets(struct v3d_compiler *compiler)
{
        compiler->regs = ra_alloc_reg_set(compiler, PHYS_INDEX + PHYS_COUNT,
                                          true);
        if (!compiler->regs)
                return false;

        /* Allocate 3 regfile classes, for the ways the physical register file
         * can be divided up for fragment shader threading.
         */
        for (int threads = 0; threads < 3; threads++) {
                compiler->reg_class_phys_or_acc[threads] =
                        ra_alloc_reg_class(compiler->regs);
                compiler->reg_class_phys[threads] =
                        ra_alloc_reg_class(compiler->regs);

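                /* Each threading level gets half as much of the physical
                 * register file as the previous one (PHYS_COUNT >> threads
                 * registers).
                 */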
                for (int i = PHYS_INDEX;
                     i < PHYS_INDEX + (PHYS_COUNT >> threads); i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys[threads], i);
                }

                for (int i = ACC_INDEX + 0; i < ACC_INDEX + ACC_COUNT; i++) {
                        ra_class_add_reg(compiler->regs,
                                         compiler->reg_class_phys_or_acc[threads], i);
                }
        }

        ra_set_finalize(compiler->regs, NULL);

        return true;
}

struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

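/* qsort comparator: orders map entries by ascending priority (live-range
 * length).
 */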
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        return a->priority - b->priority;
}

#define CLASS_BIT_PHYS   (1 << 0)
#define CLASS_BIT_R0_R2  (1 << 1)
#define CLASS_BIT_R3     (1 << 2)
#define CLASS_BIT_R4     (1 << 3)

/**
 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
 *
 * The return value should be freed by the caller.
 */
struct qpu_reg *
v3d_register_allocate(struct v3d_compile *c)
{
        struct node_to_temp_map map[c->num_temps];
        uint32_t temp_to_node[c->num_temps];
        uint8_t class_bits[c->num_temps];
        struct qpu_reg *temp_registers = calloc(c->num_temps,
                                                sizeof(*temp_registers));
        int acc_nodes[ACC_COUNT];

        struct ra_graph *g = ra_alloc_interference_graph(c->compiler->regs,
                                                         c->num_temps +
                                                         ARRAY_SIZE(acc_nodes));

        /* Make some fixed nodes for the accumulators, which we will need to
         * interfere with when ops have implied r3/r4 writes or for the thread
         * switches.  We could represent these as classes for the nodes to
         * live in, but the classes take up a lot of memory to set up, so we
         * don't want to make too many.
         */
        for (int i = 0; i < ARRAY_SIZE(acc_nodes); i++) {
                acc_nodes[i] = c->num_temps + i;
                ra_set_node_reg(g, acc_nodes[i], ACC_INDEX + i);
        }

        /* Compute the live ranges so we can figure out interference. */
        vir_calculate_live_intervals(c);

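        /* Build the temp <-> RA-node mapping: temps are numbered as nodes in
         * order of increasing live-range length.
         */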
        for (uint32_t i = 0; i < c->num_temps; i++) {
                map[i].temp = i;
                map[i].priority = c->temp_end[i] - c->temp_start[i];
        }
        qsort(map, c->num_temps, sizeof(map[0]), node_to_temp_priority);
        for (uint32_t i = 0; i < c->num_temps; i++) {
                temp_to_node[map[i].temp] = i;
        }

        /* Figure out our register classes and preallocated registers.  We
         * start with any temp being able to be in any file, then instructions
         * incrementally remove bits that the temp definitely can't be in.
         */
        memset(class_bits,
               CLASS_BIT_PHYS | CLASS_BIT_R0_R2 | CLASS_BIT_R3 | CLASS_BIT_R4,
               sizeof(class_bits));

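        /* Walk the program in order, tracking the instruction index (ip) so
         * we can tell which temps are live across a given instruction.
         */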
        int ip = 0;
        vir_for_each_inst_inorder(inst, c) {
                /* If the instruction writes r3/r4 (and optionally moves its
                 * result to a temp), nothing else can be stored in r3/r4
                 * across it.
                 */
                if (vir_writes_r3(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[3]);
                                }
                        }
                }
                if (vir_writes_r4(c->devinfo, inst)) {
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip &&
                                    c->temp_end[i] > ip) {
                                        ra_add_node_interference(g,
                                                                 temp_to_node[i],
                                                                 acc_nodes[4]);
                                }
                        }
                }

                if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        switch (inst->qpu.alu.add.op) {
                        case V3D_QPU_A_LDVPMV_IN:
                        case V3D_QPU_A_LDVPMV_OUT:
                        case V3D_QPU_A_LDVPMD_IN:
                        case V3D_QPU_A_LDVPMD_OUT:
                        case V3D_QPU_A_LDVPMP:
                        case V3D_QPU_A_LDVPMG_IN:
                        case V3D_QPU_A_LDVPMG_OUT:
                                /* LDVPMs only store to temps (the MA flag
                                 * decides whether the LDVPM is in or out)
                                 */
                                assert(inst->dst.file == QFILE_TEMP);
                                class_bits[inst->dst.index] &= CLASS_BIT_PHYS;
                                break;

                        default:
                                break;
                        }
                }

                if (inst->src[0].file == QFILE_REG) {
                        switch (inst->src[0].index) {
                        case 0:
                        case 1:
                        case 2:
                                /* Payload setup instructions: Force allocate
                                 * the dst to the given register (so the MOV
                                 * will disappear).
                                 */
                                assert(inst->qpu.alu.mul.op == V3D_QPU_M_MOV);
                                assert(inst->dst.file == QFILE_TEMP);
                                ra_set_node_reg(g,
                                                temp_to_node[inst->dst.index],
                                                PHYS_INDEX +
                                                inst->src[0].index);
                                break;
                        }
                }

#if 0
                switch (inst->op) {
                case QOP_THRSW:
                        /* All accumulators are invalidated across a thread
                         * switch.
                         */
                        for (int i = 0; i < c->num_temps; i++) {
                                if (c->temp_start[i] < ip && c->temp_end[i] > ip)
                                        class_bits[i] &= ~(CLASS_BIT_R0_R2 |
                                                           CLASS_BIT_R3 |
                                                           CLASS_BIT_R4);
                        }
                        break;

                default:
                        break;
                }
#endif

                ip++;
        }

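        /* Apply the accumulated class restrictions: each temp either ended up
         * restricted to the physical register file (because it's written by
         * an LDVPM) or can still live in any register.
         */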
        for (uint32_t i = 0; i < c->num_temps; i++) {
                if (class_bits[i] == CLASS_BIT_PHYS) {
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys[c->fs_threaded]);
                } else {
                        assert(class_bits[i] == (CLASS_BIT_PHYS |
                                                 CLASS_BIT_R0_R2 |
                                                 CLASS_BIT_R3 |
                                                 CLASS_BIT_R4));
                        ra_set_node_class(g, temp_to_node[i],
                                          c->compiler->reg_class_phys_or_acc[c->fs_threaded]);
                }
        }

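        /* Any two temps whose live ranges overlap interfere with each other. */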
        for (uint32_t i = 0; i < c->num_temps; i++) {
                for (uint32_t j = i + 1; j < c->num_temps; j++) {
                        if (!(c->temp_start[i] >= c->temp_end[j] ||
                              c->temp_start[j] >= c->temp_end[i])) {
                                ra_add_node_interference(g,
                                                         temp_to_node[i],
                                                         temp_to_node[j]);
                        }
                }
        }

        bool ok = ra_allocate(g);
        if (!ok) {
                if (!c->fs_threaded) {
                        fprintf(stderr, "Failed to register allocate:\n");
                        vir_dump(c);
                }

                c->failed = true;
                free(temp_registers);
                return NULL;
        }

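        /* Translate the allocation back into struct qpu_reg: low register
         * numbers are the accumulators (written through their magic write
         * addresses), the rest map into the physical register file.
         */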
        for (uint32_t i = 0; i < c->num_temps; i++) {
                int ra_reg = ra_get_node_reg(g, temp_to_node[i]);
                if (ra_reg < PHYS_INDEX) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = (V3D_QPU_WADDR_R0 +
                                                   ra_reg - ACC_INDEX);
                } else {
                        temp_registers[i].magic = false;
                        temp_registers[i].index = ra_reg - PHYS_INDEX;
                }

                /* If the value's never used, just write to the NOP register
                 * for clarity in debug output.
                 */
                if (c->temp_start[i] == c->temp_end[i]) {
                        temp_registers[i].magic = true;
                        temp_registers[i].index = V3D_QPU_WADDR_NOP;
                }
        }

        ralloc_free(g);

        return temp_registers;
}