/*
 * Copyright © 2016 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "compiler/v3d_compiler.h"
#include "qpu/qpu_instr.h"
#include "qpu/qpu_disasm.h"
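
/*
 * Translation of post-register-allocation VIR into the packed QPU
 * instruction fields: physical registers and accumulators are written into
 * the raddr/waddr/mux fields, uniform and VPM reads get their signal bits,
 * and register-to-itself MOVs left over after register assignment are
 * dropped.
 */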

static inline struct qpu_reg
qpu_reg(int index)
{
        struct qpu_reg reg = {
                .magic = false,
                .index = index,
        };

        return reg;
}

static inline struct qpu_reg
qpu_magic(enum v3d_qpu_waddr waddr)
{
        struct qpu_reg reg = {
                .magic = true,
                .index = waddr,
        };

        return reg;
}
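
/* The accumulators r0-r5 live in the magic-register space, so an
 * accumulator is addressed as V3D_QPU_WADDR_R0 plus the accumulator number
 * (e.g. qpu_acc(3) names r3).
 */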
static inline struct qpu_reg
qpu_acc(int acc)
{
        return qpu_magic(V3D_QPU_WADDR_R0 + acc);
}

struct v3d_qpu_instr
v3d_qpu_nop(void)
{
        struct v3d_qpu_instr instr = {
                .type = V3D_QPU_INSTR_TYPE_ALU,
                .alu = {
                        .add = {
                                .op = V3D_QPU_A_NOP,
                                .waddr = V3D_QPU_WADDR_NOP,
                                .magic_write = true,
                        },
                        .mul = {
                                .op = V3D_QPU_M_NOP,
                                .waddr = V3D_QPU_WADDR_NOP,
                                .magic_write = true,
                        },
                },
        };

        return instr;
}
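
/* A VIR-level NOP, used below as a carrier instruction for signal bits
 * such as ldunif and ldvpm that have to be emitted ahead of the
 * instruction consuming the loaded value.
 */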
static struct qinst *
vir_nop(void)
{
        struct qreg undef = vir_nop_reg();
        struct qinst *qinst = vir_add_inst(V3D_QPU_A_NOP, undef, undef, undef);

        return qinst;
}

static struct qinst *
new_qpu_nop_before(struct qinst *inst)
{
        struct qinst *q = vir_nop();

        list_addtail(&q->link, &inst->link);

        return q;
}
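
/* Emits a NOP carrying the ldunif signal ahead of @inst, so the uniform
 * stream value is loaded (into r5 by the QPU) before @inst reads its
 * QFILE_UNIF source.
 */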
static void
new_ldunif_instr(struct qinst *inst, int i)
{
        struct qinst *ldunif = new_qpu_nop_before(inst);

        ldunif->qpu.sig.ldunif = true;
        assert(inst->src[i].file == QFILE_UNIF);
        ldunif->uniform = inst->src[i].index;
}
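
/*
 * QPU ALU operands are selected by mux values: V3D_QPU_MUX_R0..R5 read the
 * accumulators directly, while V3D_QPU_MUX_A and V3D_QPU_MUX_B read the
 * register-file locations named by raddr_a and raddr_b.  There are only two
 * register-file read ports per instruction, so set_src() below has to share
 * raddr_a/raddr_b across all four ALU operands; for example (illustrative
 * only), an add whose two sources are rf3 and rf7 ends up with raddr_a = 3
 * read through MUX_A and raddr_b = 7 read through MUX_B.
 */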
/**
 * Allocates the src register (accumulator or register file) into the RADDR
 * fields of the instruction.
 */
static void
set_src(struct v3d_qpu_instr *instr, enum v3d_qpu_mux *mux, struct qpu_reg src)
{
        if (src.smimm) {
                assert(instr->sig.small_imm);
                *mux = V3D_QPU_MUX_B;
                return;
        }

        if (src.magic) {
                assert(src.index >= V3D_QPU_WADDR_R0 &&
                       src.index <= V3D_QPU_WADDR_R5);
                *mux = src.index - V3D_QPU_WADDR_R0 + V3D_QPU_MUX_R0;
                return;
        }

        if (instr->alu.add.a != V3D_QPU_MUX_A &&
            instr->alu.add.b != V3D_QPU_MUX_A &&
            instr->alu.mul.a != V3D_QPU_MUX_A &&
            instr->alu.mul.b != V3D_QPU_MUX_A) {
                instr->raddr_a = src.index;
                *mux = V3D_QPU_MUX_A;
        } else {
                if (instr->raddr_a == src.index) {
                        *mux = V3D_QPU_MUX_A;
                } else {
                        /* If both read ports are already in use, the new
                         * source has to match what raddr_b is reading.
                         */
                        assert(!(instr->alu.add.a == V3D_QPU_MUX_B &&
                                 instr->alu.add.b == V3D_QPU_MUX_B &&
                                 instr->alu.mul.a == V3D_QPU_MUX_B &&
                                 instr->alu.mul.b == V3D_QPU_MUX_B) ||
                               src.index == instr->raddr_b);

                        instr->raddr_b = src.index;
                        *mux = V3D_QPU_MUX_B;
                }
        }
}

static bool
is_no_op_mov(struct qinst *qinst)
{
        static const struct v3d_qpu_sig no_sig = {0};

        /* Make sure it's just a lone MOV. */
        if (qinst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            qinst->qpu.alu.mul.op != V3D_QPU_M_MOV ||
            qinst->qpu.alu.add.op != V3D_QPU_A_NOP ||
            memcmp(&qinst->qpu.sig, &no_sig, sizeof(no_sig)) != 0) {
                return false;
        }

        /* Check if it's a MOV from a register to itself. */
        enum v3d_qpu_waddr waddr = qinst->qpu.alu.mul.waddr;
        if (qinst->qpu.alu.mul.magic_write) {
                if (waddr < V3D_QPU_WADDR_R0 || waddr > V3D_QPU_WADDR_R4)
                        return false;

                if (qinst->qpu.alu.mul.a !=
                    V3D_QPU_MUX_R0 + (waddr - V3D_QPU_WADDR_R0)) {
                        return false;
                }
        } else {
                int raddr;

                switch (qinst->qpu.alu.mul.a) {
                case V3D_QPU_MUX_A:
                        raddr = qinst->qpu.raddr_a;
                        break;
                case V3D_QPU_MUX_B:
                        raddr = qinst->qpu.raddr_b;
                        break;
                default:
                        return false;
                }

                if (raddr != waddr)
                        return false;
        }

        /* No packing or flags updates, or we need to execute the
         * instruction.
         */
        if (qinst->qpu.alu.mul.a_unpack != V3D_QPU_UNPACK_NONE ||
            qinst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE ||
            qinst->qpu.flags.mc != V3D_QPU_COND_NONE ||
            qinst->qpu.flags.mpf != V3D_QPU_PF_NONE ||
            qinst->qpu.flags.muf != V3D_QPU_UF_NONE) {
                return false;
        }

        return true;
}
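
/*
 * Translates one block of VIR instructions in place into their QPU
 * encodings, using the register allocator's temp_registers map for
 * QFILE_TEMP sources and destinations.
 */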
static void
v3d_generate_code_block(struct v3d_compile *c,
                        struct qblock *block,
                        struct qpu_reg *temp_registers)
{
        int last_vpm_read_index = -1;

        vir_for_each_inst_safe(qinst, block) {
                if (false) {
                        fprintf(stderr, "translating qinst to qpu: ");
                        vir_dump_inst(c, qinst);
                        fprintf(stderr, "\n");
                }

                struct qinst *temp;

                if (vir_has_implicit_uniform(qinst)) {
                        int src = vir_get_implicit_uniform_src(qinst);
                        assert(qinst->src[src].file == QFILE_UNIF);
                        qinst->uniform = qinst->src[src].index;
                        c->num_uniforms++;
                }

                int nsrc = vir_get_non_sideband_nsrc(qinst);
                struct qpu_reg src[ARRAY_SIZE(qinst->src)];
                bool emitted_ldunif = false;
                for (int i = 0; i < nsrc; i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_REG:
                                src[i] = qpu_reg(qinst->src[i].index);
                                break;
                        case QFILE_MAGIC:
                                src[i] = qpu_magic(qinst->src[i].index);
                                break;
                        case QFILE_NULL:
                        case QFILE_LOAD_IMM:
                                src[i] = qpu_acc(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        case QFILE_UNIF:
                                /* XXX perf: If the last ldunif we emitted was
                                 * the same uniform value, skip it.  Common
                                 * for multop/umul24 sequences.
                                 */
                                if (!emitted_ldunif) {
                                        new_ldunif_instr(qinst, i);
                                        c->num_uniforms++;
                                        emitted_ldunif = true;
                                }

                                src[i] = qpu_acc(5);
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].smimm = true;
                                break;

                        case QFILE_VPM:
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;

                                temp = new_qpu_nop_before(qinst);
                                temp->qpu.sig.ldvpm = true;

                                src[i] = qpu_acc(3);
                                break;

                        case QFILE_TLB:
                        case QFILE_TLBU:
                                unreachable("bad vir src file");
                        }
                }
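
                /* Translate the VIR destination into either a register-file
                 * index or a magic write address.
                 */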
                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_magic(V3D_QPU_WADDR_NOP);
                        break;

                case QFILE_REG:
                        dst = qpu_reg(qinst->dst.index);
                        break;

                case QFILE_MAGIC:
                        dst = qpu_magic(qinst->dst.index);
                        break;

                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;

                case QFILE_VPM:
                        dst = qpu_magic(V3D_QPU_WADDR_VPM);
                        break;

                case QFILE_TLB:
                        dst = qpu_magic(V3D_QPU_WADDR_TLB);
                        break;

                case QFILE_TLBU:
                        dst = qpu_magic(V3D_QPU_WADDR_TLBU);
                        break;

                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                case QFILE_LOAD_IMM:
                        assert(!"not reached");
                        break;
                }
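
                /* Each QPU instruction has an add-ALU and a mul-ALU slot; a
                 * VIR ALU instruction occupies exactly one of them, unless
                 * the write comes from a signal (e.g. ldtmu), in which case
                 * both slots stay NOP and the destination goes in
                 * sig_addr/sig_magic.
                 */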
                if (qinst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                        if (v3d_qpu_sig_writes_address(c->devinfo,
                                                       &qinst->qpu.sig)) {
                                assert(qinst->qpu.alu.add.op == V3D_QPU_A_NOP);
                                assert(qinst->qpu.alu.mul.op == V3D_QPU_M_NOP);

                                qinst->qpu.sig_addr = dst.index;
                                qinst->qpu.sig_magic = dst.magic;
                        } else if (qinst->qpu.alu.add.op != V3D_QPU_A_NOP) {
                                assert(qinst->qpu.alu.mul.op == V3D_QPU_M_NOP);
                                if (nsrc >= 1) {
                                        set_src(&qinst->qpu,
                                                &qinst->qpu.alu.add.a, src[0]);
                                }
                                if (nsrc >= 2) {
                                        set_src(&qinst->qpu,
                                                &qinst->qpu.alu.add.b, src[1]);
                                }

                                qinst->qpu.alu.add.waddr = dst.index;
                                qinst->qpu.alu.add.magic_write = dst.magic;
                        } else {
                                if (nsrc >= 1) {
                                        set_src(&qinst->qpu,
                                                &qinst->qpu.alu.mul.a, src[0]);
                                }
                                if (nsrc >= 2) {
                                        set_src(&qinst->qpu,
                                                &qinst->qpu.alu.mul.b, src[1]);
                                }

                                qinst->qpu.alu.mul.waddr = dst.index;
                                qinst->qpu.alu.mul.magic_write = dst.magic;

                                if (is_no_op_mov(qinst)) {
                                        vir_remove_instruction(c, qinst);
                                        continue;
                                }
                        }
                } else {
                        assert(qinst->qpu.type == V3D_QPU_INSTR_TYPE_BRANCH);
                }
        }
}
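
/*
 * Used by the QPU dump below to decide whether an instruction consumes an
 * entry from the uniform stream, so the disassembly can be annotated with
 * the uniform's value.
 */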
static bool
reads_uniform(const struct v3d_device_info *devinfo, uint64_t instruction)
{
        struct v3d_qpu_instr qpu;
        MAYBE_UNUSED bool ok = v3d_qpu_instr_unpack(devinfo, instruction, &qpu);
        assert(ok);

        if (qpu.sig.ldunif ||
            qpu.sig.ldtlbu ||
            qpu.sig.wrtmuc) {
                return true;
        }

        if (qpu.type == V3D_QPU_INSTR_TYPE_BRANCH)
                return true;

        if (qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                if (qpu.alu.add.magic_write &&
                    v3d_qpu_magic_waddr_loads_unif(qpu.alu.add.waddr)) {
                        return true;
                }

                if (qpu.alu.mul.magic_write &&
                    v3d_qpu_magic_waddr_loads_unif(qpu.alu.mul.waddr)) {
                        return true;
                }
        }

        return false;
}

static void
v3d_dump_qpu(struct v3d_compile *c)
{
        fprintf(stderr, "%s prog %d/%d QPU:\n",
                vir_get_stage_name(c),
                c->program_id, c->variant_id);

        int next_uniform = 0;
        for (int i = 0; i < c->qpu_inst_count; i++) {
                const char *str = v3d_qpu_disasm(c->devinfo, c->qpu_insts[i]);
                fprintf(stderr, "0x%016"PRIx64" %s", c->qpu_insts[i], str);

                /* We can only do this on 4.x, because we're not tracking TMU
                 * implicit uniforms here on 3.x.
                 */
                if (c->devinfo->ver >= 40 &&
                    reads_uniform(c->devinfo, c->qpu_insts[i])) {
                        fprintf(stderr, " (");
                        vir_dump_uniform(c->uniform_contents[next_uniform],
                                         c->uniform_data[next_uniform]);
                        fprintf(stderr, ")");
                        next_uniform++;
                }
                fprintf(stderr, "\n");
                ralloc_free((void *)str);
        }

        /* Make sure our dumping lined up. */
        if (c->devinfo->ver >= 40)
                assert(next_uniform == c->num_uniforms);

        fprintf(stderr, "\n");
}
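
/*
 * Top-level entry point of the pass: lowers every block's VIR to QPU form,
 * schedules the result, and packs each instruction into its 64-bit encoding.
 */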
void
v3d_vir_to_qpu(struct v3d_compile *c, struct qpu_reg *temp_registers)
{
        /* Reset the uniform count to how many will be actually loaded by the
         * generated QPU code.
         */
        c->num_uniforms = 0;

        vir_for_each_block(block, c)
                v3d_generate_code_block(c, block, temp_registers);

        uint32_t cycles = v3d_qpu_schedule_instructions(c);

        c->qpu_insts = rzalloc_array(c, uint64_t, c->qpu_inst_count);
        int i = 0;
        vir_for_each_inst_inorder(inst, c) {
                bool ok = v3d_qpu_instr_pack(c->devinfo, &inst->qpu,
                                             &c->qpu_insts[i++]);
                if (!ok) {
                        fprintf(stderr, "Failed to pack instruction:\n");
                        vir_dump_inst(c, inst);
                        fprintf(stderr, "\n");
                        c->failed = true;
                        return;
                }
        }
        assert(i == c->qpu_inst_count);

        if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        vir_get_stage_name(c),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);

                /* The QPU cycle estimates are pretty broken (see waddr_latency()), so
                 * don't report them for now.
                 */
                if (false) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d estimated cycles\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                cycles);
                }
        }

        if (V3D_DEBUG & (V3D_DEBUG_QPU |
                         v3d_debug_flag_for_shader_stage(c->s->info.stage))) {
                v3d_dump_qpu(c);
        }

        qpu_validate(c);

        free(temp_registers);
}