vc4: Convert vc4_opt_dead_code to work in the presence of control flow.
[mesa.git] src/gallium/drivers/vc4/vc4_qpu.c
/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <stdbool.h>
#include "util/ralloc.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"

#define QPU_MUX(mux, muxfield)                                          \
        QPU_SET_FIELD(mux != QPU_MUX_SMALL_IMM ? mux : QPU_MUX_B, muxfield)

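/*
 * Encodes the read of a source register into the instruction's raddr
 * fields: regfile A reads go in RADDR_A, regfile B reads in RADDR_B, and
 * small immediates take over RADDR_B along with the SMALL_IMM signal.
 * Asserts that any raddr already claimed by another operand agrees.
 */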
static uint64_t
set_src_raddr(uint64_t inst, struct qpu_reg src)
{
        if (src.mux == QPU_MUX_A) {
                assert(QPU_GET_FIELD(inst, QPU_RADDR_A) == QPU_R_NOP ||
                       QPU_GET_FIELD(inst, QPU_RADDR_A) == src.addr);
                return QPU_UPDATE_FIELD(inst, src.addr, QPU_RADDR_A);
        }

        if (src.mux == QPU_MUX_B) {
                assert((QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_NOP ||
                        QPU_GET_FIELD(inst, QPU_RADDR_B) == src.addr) &&
                       QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM);
                return QPU_UPDATE_FIELD(inst, src.addr, QPU_RADDR_B);
        }

        if (src.mux == QPU_MUX_SMALL_IMM) {
                if (QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_SMALL_IMM) {
                        assert(QPU_GET_FIELD(inst, QPU_RADDR_B) == src.addr);
                } else {
                        inst = qpu_set_sig(inst, QPU_SIG_SMALL_IMM);
                        assert(QPU_GET_FIELD(inst, QPU_RADDR_B) == QPU_R_NOP);
                }
                return ((inst & ~QPU_RADDR_B_MASK) |
                        QPU_SET_FIELD(src.addr, QPU_RADDR_B));
        }

        return inst;
}

uint64_t
qpu_NOP()
{
        uint64_t inst = 0;

        inst |= QPU_SET_FIELD(QPU_A_NOP, QPU_OP_ADD);
        inst |= QPU_SET_FIELD(QPU_M_NOP, QPU_OP_MUL);

        /* Note: These field values are actually non-zero */
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_ADD);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_MUL);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B);
        inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);

        return inst;
}

static uint64_t
qpu_a_dst(struct qpu_reg dst)
{
        uint64_t inst = 0;

        if (dst.mux <= QPU_MUX_R5) {
                /* Translate the mux to the ACCn values. */
                inst |= QPU_SET_FIELD(32 + dst.mux, QPU_WADDR_ADD);
        } else {
                inst |= QPU_SET_FIELD(dst.addr, QPU_WADDR_ADD);
                if (dst.mux == QPU_MUX_B)
                        inst |= QPU_WS;
        }

        return inst;
}

static uint64_t
qpu_m_dst(struct qpu_reg dst)
{
        uint64_t inst = 0;

        if (dst.mux <= QPU_MUX_R5) {
                /* Translate the mux to the ACCn values. */
                inst |= QPU_SET_FIELD(32 + dst.mux, QPU_WADDR_MUL);
        } else {
                inst |= QPU_SET_FIELD(dst.addr, QPU_WADDR_MUL);
                if (dst.mux == QPU_MUX_A)
                        inst |= QPU_WS;
        }

        return inst;
}

uint64_t
qpu_a_MOV(struct qpu_reg dst, struct qpu_reg src)
{
        uint64_t inst = 0;

        inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);
        inst |= QPU_SET_FIELD(QPU_A_OR, QPU_OP_ADD);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B);
        inst |= qpu_a_dst(dst);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_ADD);
        inst |= QPU_MUX(src.mux, QPU_ADD_A);
        inst |= QPU_MUX(src.mux, QPU_ADD_B);
        inst = set_src_raddr(inst, src);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_MUL);

        return inst;
}

uint64_t
qpu_m_MOV(struct qpu_reg dst, struct qpu_reg src)
{
        uint64_t inst = 0;

        inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);
        inst |= QPU_SET_FIELD(QPU_M_V8MIN, QPU_OP_MUL);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B);
        inst |= qpu_m_dst(dst);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_MUL);
        inst |= QPU_MUX(src.mux, QPU_MUL_A);
        inst |= QPU_MUX(src.mux, QPU_MUL_B);
        inst = set_src_raddr(inst, src);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_ADD);

        return inst;
}

uint64_t
qpu_load_imm_ui(struct qpu_reg dst, uint32_t val)
{
        uint64_t inst = 0;

        inst |= qpu_a_dst(dst);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_MUL);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_ADD);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_MUL);
        inst |= QPU_SET_FIELD(QPU_SIG_LOAD_IMM, QPU_SIG);
        inst |= val;

        return inst;
}

uint64_t
qpu_branch(uint32_t cond, uint32_t target)
{
        uint64_t inst = 0;

        inst |= qpu_a_dst(qpu_ra(QPU_W_NOP));
        inst |= qpu_m_dst(qpu_rb(QPU_W_NOP));
        inst |= QPU_SET_FIELD(cond, QPU_BRANCH_COND);
        inst |= QPU_SET_FIELD(QPU_SIG_BRANCH, QPU_SIG);
        inst |= QPU_SET_FIELD(target, QPU_BRANCH_TARGET);

        return inst;
}

uint64_t
qpu_a_alu2(enum qpu_op_add op,
           struct qpu_reg dst, struct qpu_reg src0, struct qpu_reg src1)
{
        uint64_t inst = 0;

        inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);
        inst |= QPU_SET_FIELD(op, QPU_OP_ADD);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B);
        inst |= qpu_a_dst(dst);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_ADD);
        inst |= QPU_MUX(src0.mux, QPU_ADD_A);
        inst = set_src_raddr(inst, src0);
        inst |= QPU_MUX(src1.mux, QPU_ADD_B);
        inst = set_src_raddr(inst, src1);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_MUL);

        return inst;
}

uint64_t
qpu_m_alu2(enum qpu_op_mul op,
           struct qpu_reg dst, struct qpu_reg src0, struct qpu_reg src1)
{
        uint64_t inst = 0;

        inst |= QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG);
        inst |= QPU_SET_FIELD(op, QPU_OP_MUL);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        inst |= QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B);
        inst |= qpu_m_dst(dst);
        inst |= QPU_SET_FIELD(QPU_COND_ALWAYS, QPU_COND_MUL);
        inst |= QPU_MUX(src0.mux, QPU_MUL_A);
        inst = set_src_raddr(inst, src0);
        inst |= QPU_MUX(src1.mux, QPU_MUL_B);
        inst = set_src_raddr(inst, src1);
        inst |= QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_ADD);

        return inst;
}

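/*
 * Merges one field of two instructions into *merge.  A field equal to
 * "ignore" in either instruction is treated as unset, so the other
 * instruction's value wins; otherwise the two values have to match
 * exactly for the merge to succeed.
 */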
static bool
merge_fields(uint64_t *merge,
             uint64_t a, uint64_t b,
             uint64_t mask, uint64_t ignore)
{
        if ((a & mask) == ignore) {
                *merge = (*merge & ~mask) | (b & mask);
        } else if ((b & mask) == ignore) {
                *merge = (*merge & ~mask) | (a & mask);
        } else {
                if ((a & mask) != (b & mask))
                        return false;
        }

        return true;
}

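/*
 * Counts this instruction's accesses to the TLB, TMU, SFU, and hardware
 * mutex.  qpu_merge_inst() uses this to refuse pairing two instructions
 * that each perform such an access.
 */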
int
qpu_num_sf_accesses(uint64_t inst)
{
        int accesses = 0;
        static const uint32_t specials[] = {
                QPU_W_TLB_COLOR_MS,
                QPU_W_TLB_COLOR_ALL,
                QPU_W_TLB_Z,
                QPU_W_TMU0_S,
                QPU_W_TMU0_T,
                QPU_W_TMU0_R,
                QPU_W_TMU0_B,
                QPU_W_TMU1_S,
                QPU_W_TMU1_T,
                QPU_W_TMU1_R,
                QPU_W_TMU1_B,
                QPU_W_SFU_RECIP,
                QPU_W_SFU_RECIPSQRT,
                QPU_W_SFU_EXP,
                QPU_W_SFU_LOG,
        };
        uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
        uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
        uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
        uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);

        for (int j = 0; j < ARRAY_SIZE(specials); j++) {
                if (waddr_add == specials[j])
                        accesses++;
                if (waddr_mul == specials[j])
                        accesses++;
        }

        if (raddr_a == QPU_R_MUTEX_ACQUIRE)
                accesses++;
        if (raddr_b == QPU_R_MUTEX_ACQUIRE &&
            QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_SMALL_IMM)
                accesses++;

        /* XXX: semaphore, combined color read/write? */
        switch (QPU_GET_FIELD(inst, QPU_SIG)) {
        case QPU_SIG_COLOR_LOAD:
        case QPU_SIG_COLOR_LOAD_END:
        case QPU_SIG_LOAD_TMU0:
        case QPU_SIG_LOAD_TMU1:
                accesses++;
        }

        return accesses;
}

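/*
 * Returns whether writes to this waddr land in the same place regardless
 * of the WS (write swap) bit, so two merged instructions are allowed to
 * disagree on WS when all of their destinations are of this kind.
 */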
static bool
qpu_waddr_ignores_ws(uint32_t waddr)
{
        switch (waddr) {
        case QPU_W_ACC0:
        case QPU_W_ACC1:
        case QPU_W_ACC2:
        case QPU_W_ACC3:
        case QPU_W_TLB_Z:
        case QPU_W_TLB_COLOR_MS:
        case QPU_W_TLB_COLOR_ALL:
        case QPU_W_TLB_ALPHA_MASK:
        case QPU_W_VPM:
        case QPU_W_SFU_RECIP:
        case QPU_W_SFU_RECIPSQRT:
        case QPU_W_SFU_EXP:
        case QPU_W_SFU_LOG:
        case QPU_W_TMU0_S:
        case QPU_W_TMU0_T:
        case QPU_W_TMU0_R:
        case QPU_W_TMU0_B:
        case QPU_W_TMU1_S:
        case QPU_W_TMU1_T:
        case QPU_W_TMU1_R:
        case QPU_W_TMU1_B:
                return true;
        }

        return false;
}

static void
swap_ra_file_mux_helper(uint64_t *merge, uint64_t *a, uint32_t mux_shift)
{
        uint64_t mux_mask = (uint64_t)0x7 << mux_shift;
        uint64_t mux_a_val = (uint64_t)QPU_MUX_A << mux_shift;
        uint64_t mux_b_val = (uint64_t)QPU_MUX_B << mux_shift;

        if ((*a & mux_mask) == mux_a_val) {
                *a = (*a & ~mux_mask) | mux_b_val;
                *merge = (*merge & ~mux_mask) | mux_b_val;
        }
}

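/*
 * Tries to resolve a RADDR_A conflict between two instructions being
 * merged by moving instruction a's regfile A read (a uniform or varying)
 * over to regfile B, updating the mux fields in both a and the merged
 * instruction to match.
 */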
static bool
try_swap_ra_file(uint64_t *merge, uint64_t *a, uint64_t *b)
{
        uint32_t raddr_a_a = QPU_GET_FIELD(*a, QPU_RADDR_A);
        uint32_t raddr_a_b = QPU_GET_FIELD(*a, QPU_RADDR_B);
        uint32_t raddr_b_a = QPU_GET_FIELD(*b, QPU_RADDR_A);
        uint32_t raddr_b_b = QPU_GET_FIELD(*b, QPU_RADDR_B);

        if (raddr_a_b != QPU_R_NOP)
                return false;

        switch (raddr_a_a) {
        case QPU_R_UNIF:
        case QPU_R_VARY:
                break;
        default:
                return false;
        }

        if (!(*merge & QPU_PM) &&
            QPU_GET_FIELD(*merge, QPU_UNPACK) != QPU_UNPACK_NOP) {
                return false;
        }

        if (raddr_b_b != QPU_R_NOP &&
            raddr_b_b != raddr_a_a)
                return false;

        /* Move raddr A to B in instruction a. */
        *a = (*a & ~QPU_RADDR_A_MASK) | QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A);
        *a = (*a & ~QPU_RADDR_B_MASK) | QPU_SET_FIELD(raddr_a_a, QPU_RADDR_B);
        *merge = QPU_UPDATE_FIELD(*merge, raddr_b_a, QPU_RADDR_A);
        *merge = QPU_UPDATE_FIELD(*merge, raddr_a_a, QPU_RADDR_B);
        swap_ra_file_mux_helper(merge, a, QPU_ADD_A_SHIFT);
        swap_ra_file_mux_helper(merge, a, QPU_ADD_B_SHIFT);
        swap_ra_file_mux_helper(merge, a, QPU_MUL_A_SHIFT);
        swap_ra_file_mux_helper(merge, a, QPU_MUL_B_SHIFT);

        return true;
}

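/*
 * Rewrites an add-ALU MOV (encoded as OR with identical A and B muxes)
 * into the equivalent mul-ALU MOV (V8MIN of a value with itself), freeing
 * the add ALU so the instruction can be merged with another add operation.
 */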
static bool
convert_mov(uint64_t *inst)
{
        uint32_t add_a = QPU_GET_FIELD(*inst, QPU_ADD_A);
        uint32_t waddr_add = QPU_GET_FIELD(*inst, QPU_WADDR_ADD);
        uint32_t cond_add = QPU_GET_FIELD(*inst, QPU_COND_ADD);

        /* Is it a MOV? */
        if (QPU_GET_FIELD(*inst, QPU_OP_ADD) != QPU_A_OR ||
            (add_a != QPU_GET_FIELD(*inst, QPU_ADD_B))) {
                return false;
        }

        if (QPU_GET_FIELD(*inst, QPU_SIG) != QPU_SIG_NONE)
                return false;

        /* We could maybe support this in the .8888 and .8a-.8d cases. */
        if (*inst & QPU_PM)
                return false;

        *inst = QPU_UPDATE_FIELD(*inst, QPU_A_NOP, QPU_OP_ADD);
        *inst = QPU_UPDATE_FIELD(*inst, QPU_M_V8MIN, QPU_OP_MUL);

        *inst = QPU_UPDATE_FIELD(*inst, add_a, QPU_MUL_A);
        *inst = QPU_UPDATE_FIELD(*inst, add_a, QPU_MUL_B);
        *inst = QPU_UPDATE_FIELD(*inst, QPU_MUX_R0, QPU_ADD_A);
        *inst = QPU_UPDATE_FIELD(*inst, QPU_MUX_R0, QPU_ADD_B);

        *inst = QPU_UPDATE_FIELD(*inst, waddr_add, QPU_WADDR_MUL);
        *inst = QPU_UPDATE_FIELD(*inst, QPU_W_NOP, QPU_WADDR_ADD);

        *inst = QPU_UPDATE_FIELD(*inst, cond_add, QPU_COND_MUL);
        *inst = QPU_UPDATE_FIELD(*inst, QPU_COND_NEVER, QPU_COND_ADD);

        if (!qpu_waddr_ignores_ws(waddr_add))
                *inst ^= QPU_WS;

        return true;
}

static bool
writes_a_file(uint64_t inst)
{
        if (!(inst & QPU_WS))
                return QPU_GET_FIELD(inst, QPU_WADDR_ADD) < 32;
        else
                return QPU_GET_FIELD(inst, QPU_WADDR_MUL) < 32;
}

static bool
reads_r4(uint64_t inst)
{
        return (QPU_GET_FIELD(inst, QPU_ADD_A) == QPU_MUX_R4 ||
                QPU_GET_FIELD(inst, QPU_ADD_B) == QPU_MUX_R4 ||
                QPU_GET_FIELD(inst, QPU_MUL_A) == QPU_MUX_R4 ||
                QPU_GET_FIELD(inst, QPU_MUL_B) == QPU_MUX_R4);
}

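/*
 * Tries to pack two QPU instructions into one, so that the add and mul
 * ALUs (and their signals, raddrs, waddrs, and pack/unpack modes) are
 * shared without conflicting.  Returns the merged instruction on success,
 * or 0 if the two can't coexist.
 *
 * A caller such as the instruction scheduler would use it roughly like
 * this (sketch only; emit_merged() is a hypothetical helper):
 *
 *     uint64_t merged = qpu_merge_inst(add_inst, mul_inst);
 *     if (merged)
 *             emit_merged(merged);
 *     else
 *             ... emit the two instructions separately ...
 */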
uint64_t
qpu_merge_inst(uint64_t a, uint64_t b)
{
        uint64_t merge = a | b;
        bool ok = true;
        uint32_t a_sig = QPU_GET_FIELD(a, QPU_SIG);
        uint32_t b_sig = QPU_GET_FIELD(b, QPU_SIG);

        if (QPU_GET_FIELD(a, QPU_OP_ADD) != QPU_A_NOP &&
            QPU_GET_FIELD(b, QPU_OP_ADD) != QPU_A_NOP) {
                if (QPU_GET_FIELD(a, QPU_OP_MUL) != QPU_M_NOP ||
                    QPU_GET_FIELD(b, QPU_OP_MUL) != QPU_M_NOP ||
                    !(convert_mov(&a) || convert_mov(&b))) {
                        return 0;
                } else {
                        merge = a | b;
                }
        }

        if (QPU_GET_FIELD(a, QPU_OP_MUL) != QPU_M_NOP &&
            QPU_GET_FIELD(b, QPU_OP_MUL) != QPU_M_NOP)
                return 0;

        if (qpu_num_sf_accesses(a) && qpu_num_sf_accesses(b))
                return 0;

        if (a_sig == QPU_SIG_LOAD_IMM ||
            b_sig == QPU_SIG_LOAD_IMM ||
            a_sig == QPU_SIG_SMALL_IMM ||
            b_sig == QPU_SIG_SMALL_IMM ||
            a_sig == QPU_SIG_BRANCH ||
            b_sig == QPU_SIG_BRANCH) {
                return 0;
        }

        ok = ok && merge_fields(&merge, a, b, QPU_SIG_MASK,
                                QPU_SET_FIELD(QPU_SIG_NONE, QPU_SIG));

        /* Misc fields that have to match exactly. */
        ok = ok && merge_fields(&merge, a, b, QPU_SF, ~0);

        if (!merge_fields(&merge, a, b, QPU_RADDR_A_MASK,
                          QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_A))) {
                /* Since we tend to use regfile A by default both for
                 * register allocation and for our special values (uniforms
                 * and varyings), try swapping uniforms and varyings to
                 * regfile B to resolve raddr A conflicts.
                 */
                if (!try_swap_ra_file(&merge, &a, &b) &&
                    !try_swap_ra_file(&merge, &b, &a)) {
                        return 0;
                }
        }

        ok = ok && merge_fields(&merge, a, b, QPU_RADDR_B_MASK,
                                QPU_SET_FIELD(QPU_R_NOP, QPU_RADDR_B));

        ok = ok && merge_fields(&merge, a, b, QPU_WADDR_ADD_MASK,
                                QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_ADD));
        ok = ok && merge_fields(&merge, a, b, QPU_WADDR_MUL_MASK,
                                QPU_SET_FIELD(QPU_W_NOP, QPU_WADDR_MUL));

        /* Allow disagreement on WS (swapping A vs B physical reg file as
         * the destination for ADD/MUL) if one of the original instructions
         * ignores it (probably because it's just writing to accumulators).
         */
        if (qpu_waddr_ignores_ws(QPU_GET_FIELD(a, QPU_WADDR_ADD)) &&
            qpu_waddr_ignores_ws(QPU_GET_FIELD(a, QPU_WADDR_MUL))) {
                merge = (merge & ~QPU_WS) | (b & QPU_WS);
        } else if (qpu_waddr_ignores_ws(QPU_GET_FIELD(b, QPU_WADDR_ADD)) &&
                   qpu_waddr_ignores_ws(QPU_GET_FIELD(b, QPU_WADDR_MUL))) {
                merge = (merge & ~QPU_WS) | (a & QPU_WS);
        } else {
                if ((a & QPU_WS) != (b & QPU_WS))
                        return 0;
        }

        if (!merge_fields(&merge, a, b, QPU_PM, ~0)) {
                /* If one instruction has the PM bit set and the other
                 * doesn't, the one without PM shouldn't do any
                 * packing/unpacking, and we have to make sure the non-NOP
                 * packing/unpacking from the PM instruction isn't added to
                 * it.
                 */
                uint64_t temp;

                /* Let a be the one with the PM bit. */
                if (!(a & QPU_PM)) {
                        temp = a;
                        a = b;
                        b = temp;
                }

                if ((b & (QPU_PACK_MASK | QPU_UNPACK_MASK)) != 0)
                        return 0;

                if ((a & QPU_PACK_MASK) != 0 &&
                    QPU_GET_FIELD(b, QPU_OP_MUL) != QPU_M_NOP)
                        return 0;

                if ((a & QPU_UNPACK_MASK) != 0 && reads_r4(b))
                        return 0;
        } else {
                /* Packing: make sure that non-NOP packs agree, then deal
                 * with the special-case failure of adding a non-NOP pack
                 * to something with a NOP pack.
                 */
                if (!merge_fields(&merge, a, b, QPU_PACK_MASK, 0))
                        return 0;
                bool new_a_pack = (QPU_GET_FIELD(a, QPU_PACK) !=
                                   QPU_GET_FIELD(merge, QPU_PACK));
                bool new_b_pack = (QPU_GET_FIELD(b, QPU_PACK) !=
                                   QPU_GET_FIELD(merge, QPU_PACK));
                if (!(merge & QPU_PM)) {
                        /* Make sure we're not going to be putting a new
                         * a-file packing on either half.
                         */
                        if (new_a_pack && writes_a_file(a))
                                return 0;

                        if (new_b_pack && writes_a_file(b))
                                return 0;
                } else {
                        /* Make sure we're not going to be putting new MUL
                         * packing on either half.
                         */
                        if (new_a_pack &&
                            QPU_GET_FIELD(a, QPU_OP_MUL) != QPU_M_NOP)
                                return 0;

                        if (new_b_pack &&
                            QPU_GET_FIELD(b, QPU_OP_MUL) != QPU_M_NOP)
                                return 0;
                }

                /* Unpacking: make sure that non-NOP unpacks agree, then
                 * deal with the special-case failure of adding a non-NOP
                 * unpack to something with a NOP unpack.
                 */
                if (!merge_fields(&merge, a, b, QPU_UNPACK_MASK, 0))
                        return 0;
                bool new_a_unpack = (QPU_GET_FIELD(a, QPU_UNPACK) !=
                                     QPU_GET_FIELD(merge, QPU_UNPACK));
                bool new_b_unpack = (QPU_GET_FIELD(b, QPU_UNPACK) !=
                                     QPU_GET_FIELD(merge, QPU_UNPACK));
                if (!(merge & QPU_PM)) {
                        /* Make sure we're not going to be putting a new
                         * a-file unpacking on either half.
                         */
                        if (new_a_unpack &&
                            QPU_GET_FIELD(a, QPU_RADDR_A) != QPU_R_NOP)
                                return 0;

                        if (new_b_unpack &&
                            QPU_GET_FIELD(b, QPU_RADDR_A) != QPU_R_NOP)
                                return 0;
                } else {
                        /* Make sure we're not going to be putting a new r4
                         * unpack on either half.
                         */
                        if (new_a_unpack && reads_r4(a))
                                return 0;

                        if (new_b_unpack && reads_r4(b))
                                return 0;
                }
        }

        if (ok)
                return merge;
        else
                return 0;
}

uint64_t
qpu_set_sig(uint64_t inst, uint32_t sig)
{
        assert(QPU_GET_FIELD(inst, QPU_SIG) == QPU_SIG_NONE);
        return QPU_UPDATE_FIELD(inst, sig, QPU_SIG);
}

uint64_t
qpu_set_cond_add(uint64_t inst, uint32_t cond)
{
        assert(QPU_GET_FIELD(inst, QPU_COND_ADD) == QPU_COND_ALWAYS);
        return QPU_UPDATE_FIELD(inst, cond, QPU_COND_ADD);
}

uint64_t
qpu_set_cond_mul(uint64_t inst, uint32_t cond)
{
        assert(QPU_GET_FIELD(inst, QPU_COND_MUL) == QPU_COND_ALWAYS);
        return QPU_UPDATE_FIELD(inst, cond, QPU_COND_MUL);
}

bool
qpu_waddr_is_tlb(uint32_t waddr)
{
        switch (waddr) {
        case QPU_W_TLB_COLOR_ALL:
        case QPU_W_TLB_COLOR_MS:
        case QPU_W_TLB_Z:
                return true;
        default:
                return false;
        }
}

bool
qpu_inst_is_tlb(uint64_t inst)
{
        uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);

        return (qpu_waddr_is_tlb(QPU_GET_FIELD(inst, QPU_WADDR_ADD)) ||
                qpu_waddr_is_tlb(QPU_GET_FIELD(inst, QPU_WADDR_MUL)) ||
                sig == QPU_SIG_COLOR_LOAD ||
                sig == QPU_SIG_WAIT_FOR_SCOREBOARD);
}

/**
 * Returns the small immediate value to be encoded into the raddr B field
 * if the argument can be represented as one, or ~0 otherwise.
 */
uint32_t
qpu_encode_small_immediate(uint32_t i)
{
        if (i <= 15)
                return i;
        if ((int)i < 0 && (int)i >= -16)
                return i + 32;

        /* The remaining encodings are the bit patterns of the float
         * powers of two 1.0, 2.0, ... 128.0, followed by 1.0/256.0 up to
         * 1.0/2.0.
         */
        switch (i) {
        case 0x3f800000:
                return 32;
        case 0x40000000:
                return 33;
        case 0x40800000:
                return 34;
        case 0x41000000:
                return 35;
        case 0x41800000:
                return 36;
        case 0x42000000:
                return 37;
        case 0x42800000:
                return 38;
        case 0x43000000:
                return 39;
        case 0x3b800000:
                return 40;
        case 0x3c000000:
                return 41;
        case 0x3c800000:
                return 42;
        case 0x3d000000:
                return 43;
        case 0x3d800000:
                return 44;
        case 0x3e000000:
                return 45;
        case 0x3e800000:
                return 46;
        case 0x3f000000:
                return 47;
        }

        return ~0;
}

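/*
 * Appends a single 64-bit QPU instruction to the shader's instruction
 * array, growing the ralloc'd buffer as needed.
 */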
void
qpu_serialize_one_inst(struct vc4_compile *c, uint64_t inst)
{
        if (c->qpu_inst_count >= c->qpu_inst_size) {
                c->qpu_inst_size = MAX2(16, c->qpu_inst_size * 2);
                c->qpu_insts = reralloc(c, c->qpu_insts,
                                        uint64_t, c->qpu_inst_size);
        }
        c->qpu_insts[c->qpu_inst_count++] = inst;
}