2 * Copyright © 2014 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "util/ralloc.h"
25 #include "util/register_allocate.h"
26 #include "vc4_context.h"
/* Builds one qpu_reg table entry from a register file (mux) and index. */
30 #define QPU_R(file, index) { QPU_MUX_##file, index }
/* Table of allocatable QPU registers, indexed by allocator node number.
 * NOTE(review): the initializer body (original lines 33-103) is not
 * visible in this chunk; ordering matters because ACC_INDEX/AB_INDEX
 * below are offsets into this table.
 */
32 static const struct qpu_reg vc4_regs
[] = {
/* First of the 64 interleaved A/B physical-file entries (even offsets =
 * file A, odd = file B — see the i += 2 loop and the STATIC_ASSERT in
 * vc4_alloc_reg_set()).  NOTE(review): ACC_INDEX is defined outside
 * this view — presumably the first accumulator entry; confirm.
 */
104 #define AB_INDEX (ACC_INDEX + 5)
107 vc4_alloc_reg_set(struct vc4_context
*vc4
)
109 assert(vc4_regs
[AB_INDEX
].addr
== 0);
110 assert(vc4_regs
[AB_INDEX
+ 1].addr
== 0);
111 STATIC_ASSERT(ARRAY_SIZE(vc4_regs
) == AB_INDEX
+ 64);
116 vc4
->regs
= ra_alloc_reg_set(vc4
, ARRAY_SIZE(vc4_regs
));
118 vc4
->reg_class_any
= ra_alloc_reg_class(vc4
->regs
);
119 for (uint32_t i
= 0; i
< ARRAY_SIZE(vc4_regs
); i
++) {
120 /* Reserve rb31 for spilling fixup_raddr_conflict() in
123 if (vc4_regs
[i
].mux
== QPU_MUX_B
&& vc4_regs
[i
].addr
== 31)
126 /* R4 can't be written as a general purpose register. (it's
127 * TMU_NOSWAP as a write address).
129 if (vc4_regs
[i
].mux
== QPU_MUX_R4
)
132 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_any
, i
);
135 vc4
->reg_class_a
= ra_alloc_reg_class(vc4
->regs
);
136 for (uint32_t i
= AB_INDEX
; i
< AB_INDEX
+ 64; i
+= 2)
137 ra_class_add_reg(vc4
->regs
, vc4
->reg_class_a
, i
);
139 ra_set_finalize(vc4
->regs
, NULL
);
/* Pairs a QFILE_TEMP index with its allocation priority (the length of
 * its live range, use[] - def[]).  Entries are sorted by priority so
 * that longer-lived temps receive higher interference-graph node
 * numbers.
 */
struct node_to_temp_map {
        uint32_t temp;
        uint32_t priority;
};

/*
 * qsort() comparator ordering node_to_temp_map entries by ascending
 * priority.
 *
 * Uses an explicit three-way comparison rather than returning
 * a->priority - b->priority: the operands are uint32_t, so the
 * subtraction wraps to a huge unsigned value when b > a, and its
 * conversion to int gives an implementation-defined (often wrong-sign)
 * result whenever the true difference doesn't fit in int.
 */
static int
node_to_temp_priority(const void *in_a, const void *in_b)
{
        const struct node_to_temp_map *a = in_a;
        const struct node_to_temp_map *b = in_b;

        if (a->priority < b->priority)
                return -1;
        if (a->priority > b->priority)
                return 1;
        return 0;
}
157 * Returns a mapping from QFILE_TEMP indices to struct qpu_regs.
159 * The return value should be freed by the caller.
/* Entry point of register allocation for a compile: builds live ranges,
 * an interference graph, and colors it with the shared ra_* allocator.
 * NOTE(review): many original source lines are missing from this chunk
 * (switch labels, the `ip` instruction counter's declaration/increment,
 * graph cleanup, and handling of allocation failure) — the comments
 * below only describe what the visible lines establish.
 */
162 vc4_register_allocate(struct vc4_context
*vc4
, struct vc4_compile
*c
)
/* Per-temp scratch arrays (VLAs sized by c->num_temps).  def[]/use[]
 * record the instruction positions of each temp's write and last read.
 */
164 struct simple_node
*node
;
165 struct node_to_temp_map map
[c
->num_temps
];
166 uint32_t temp_to_node
[c
->num_temps
];
167 uint32_t def
[c
->num_temps
];
168 uint32_t use
[c
->num_temps
];
/* Heap-allocated result, one qpu_reg per temp; caller frees (see the
 * function comment above).
 */
169 struct qpu_reg
*temp_registers
= calloc(c
->num_temps
,
170 sizeof(*temp_registers
));
171 memset(def
, 0, sizeof(def
));
172 memset(use
, 0, sizeof(use
));
174 /* If things aren't ever written (undefined values), just read from
/* Default every temp to r0 so undefined reads have a harmless source. */
177 for (uint32_t i
= 0; i
< c
->num_temps
; i
++)
178 temp_registers
[i
] = qpu_rn(0);
/* Ensure the shared register set/classes exist on the context. */
180 vc4_alloc_reg_set(vc4
);
/* One graph node per temp.  NOTE(review): the node-count argument
 * (original line 183, presumably c->num_temps) is not visible here.
 */
182 struct ra_graph
*g
= ra_alloc_interference_graph(vc4
->regs
,
185 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
186 ra_set_node_class(g
, i
, vc4
->reg_class_any
);
189 /* Compute the live ranges so we can figure out interference.
/* First pass over the instruction list: record def/use positions.
 * NOTE(review): `ip` is presumably an instruction counter incremented
 * each iteration; its declaration/increment are outside this view —
 * confirm.
 */
192 foreach(node
, &c
->instructions
) {
193 struct qinst
*inst
= (struct qinst
*)node
;
195 if (inst
->dst
.file
== QFILE_TEMP
) {
196 def
[inst
->dst
.index
] = ip
;
197 use
[inst
->dst
.index
] = ip
;
/* Any temp source extends that temp's last-use point to here. */
200 for (int i
= 0; i
< qir_get_op_nsrc(inst
->op
); i
++) {
201 if (inst
->src
[i
].file
== QFILE_TEMP
)
202 use
[inst
->src
[i
].index
] = ip
;
208 /* The payload registers have values implicitly loaded
209 * at the start of the program.
/* So their live range is treated as starting at instruction 0. */
211 def
[inst
->dst
.index
] = 0;
/* Sort temps by live-range length; longer-lived temps get higher node
 * numbers via temp_to_node[].  NOTE(review): the map[i].temp = i
 * assignment (original line 221) is not visible in this chunk.
 */
220 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
222 map
[i
].priority
= use
[i
] - def
[i
];
224 qsort(map
, c
->num_temps
, sizeof(map
[0]), node_to_temp_priority
);
225 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
226 temp_to_node
[map
[i
].temp
] = i
;
229 /* Figure out our register classes and preallocated registers*/
/* Second pass: pin payload temps to their fixed hardware registers and
 * restrict pack/unpack temps to the A file.  NOTE(review): the switch
 * statement and several case labels (e.g. the FRAG Z/W payload ops)
 * fall in lines missing from this chunk.
 */
230 foreach(node
, &c
->instructions
) {
231 struct qinst
*inst
= (struct qinst
*)node
;
/* Pin to the B-file half of the FRAG_PAYLOAD_ZW pair (odd offset). */
235 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
236 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2 + 1);
/* Pin to the A-file half of the FRAG_PAYLOAD_ZW pair (even offset). */
240 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
241 AB_INDEX
+ QPU_R_FRAG_PAYLOAD_ZW
* 2);
/* TLB color reads land in r4; pin the dst there (the pinned register
 * index, original lines 248-249, is not visible in this chunk).
 */
245 case QOP_TLB_COLOR_READ
:
246 assert(vc4_regs
[ACC_INDEX
+ 4].mux
== QPU_MUX_R4
);
247 ra_set_node_reg(g
, temp_to_node
[inst
->dst
.index
],
251 case QOP_PACK_SCALED
:
252 /* The pack flags require an A-file dst register. */
253 ra_set_node_class(g
, temp_to_node
[inst
->dst
.index
],
261 /* The unpack flags require an A-file src register. */
262 ra_set_node_class(g
, temp_to_node
[inst
->src
[0].index
],
/* Pairwise interference: two temps interfere unless one is fully dead
 * (its def at or after the other's last use) before the other starts.
 * O(num_temps^2), acceptable for shader-sized temp counts.
 */
271 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
272 for (uint32_t j
= i
+ 1; j
< c
->num_temps
; j
++) {
273 if (!(def
[i
] >= use
[j
] || def
[j
] >= use
[i
])) {
274 ra_add_node_interference(g
,
/* Color the graph.  NOTE(review): the handling of ok == false
 * (original lines 282-283) is not visible in this chunk — confirm the
 * failure path before relying on `ok`.
 */
281 bool ok
= ra_allocate(g
);
/* Translate each temp's graph node back to its physical register. */
284 for (uint32_t i
= 0; i
< c
->num_temps
; i
++) {
285 temp_registers
[i
] = vc4_regs
[ra_get_node_reg(g
, temp_to_node
[i
])];
287 /* If the value's never used, just write to the NOP register
288 * for clarity in debug output.
290 if (def
[i
] == use
[i
])
291 temp_registers
[i
] = qpu_ra(QPU_W_NOP
);
/* Ownership of temp_registers transfers to the caller. */
296 return temp_registers
;