/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

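/* Returns the number of QPU sources consumed by the instruction's add or mul
 * ALU op (0 for a branch), not counting the texture/TLB sideband uniform.
 * See vir_get_nsrc() below for the count that includes the sideband.
 */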
int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

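/* Returns whether the instruction reads an extra, implicit uniform from the
 * sideband: branches always do, and ALU instructions do when they write the
 * TLB config (QFILE_TLBU) or have been flagged with has_implicit_uniform.
 */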
bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                default:
                        return inst->has_implicit_uniform;
                }
        }

        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;
        return vir_get_nsrc(inst) - 1;
}

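/* Illustrative caller pattern (a sketch, not something defined in this file):
 * the sideband uniform always occupies the last source slot, so it can be
 * fetched as
 *
 *     if (vir_has_implicit_uniform(inst))
 *             unif = inst->src[vir_get_implicit_uniform_src(inst)];
 */
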
/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
                break;
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

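/* Returns whether the instruction interprets its sources as floats.  Only a
 * subset of opcodes is covered so far (see the XXX below); presumably this is
 * consulted by passes that need to know whether float input unpacks apply.
 */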
bool
vir_is_float_input(struct qinst *inst)
{
        /* XXX: More instrs */
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return false;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_FADD:
                case V3D_QPU_A_FSUB:
                case V3D_QPU_A_FMIN:
                case V3D_QPU_A_FMAX:
                case V3D_QPU_A_FTOIN:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_FMOV:
                case V3D_QPU_M_VFMUL:
                        return true;
                default:
                        break;
                }
                break;
        }

        return false;
}

bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

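/* Returns whether the instruction is a texture operation: either a write to
 * one of the TMU magic register addresses or a TMU write-fence (TMUWT).
 */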
bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

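/* Sets the input unpack mode (such as absolute value or half-float selects)
 * on source 0 or 1 of whichever ALU op (add or mul) the instruction uses.
 */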
void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

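/* A typical use of vir_set_pf() above (an illustrative sketch; V3D_QPU_PF_PUSHZ
 * and vir_MOV_dest() are assumed from the rest of the compiler, not defined
 * here) is to update the condition flags from a value:
 *
 *     vir_set_pf(vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src),
 *                V3D_QPU_PF_PUSHZ);
 */
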
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }

        unreachable("Bad pack field");
}

struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;

        return inst;
}

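/* Note that vir_branch_inst() defaults both bdi and bdu to relative
 * destinations with ".ub" set, and vir_has_implicit_uniform() above reports
 * branches as carrying a sideband uniform, which is presumably where the
 * branch offset comes from.
 */
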
static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

struct qinst *
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);

        return inst;
}

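/* vir_emit_def() above allocates a fresh temporary for the instruction's
 * destination and records it in c->defs[] so later passes can find the single
 * writer of a temp; vir_emit_nondef() is for instructions whose destination
 * was chosen by the caller, and it clears the def entry since that temp no
 * longer has a unique defining instruction.
 */
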
struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

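/* Each block tracks at most two successors; vir_link_blocks() asserts that a
 * third edge is never added.
 */
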
const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;

        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_rect = false, /* XXX: Use this on V3D 3.x */
                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
}

static void
v3d_lower_nir_late(struct v3d_compile *c)
{
        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that QIR dead code elimination of an array access still leaves that
 * array alive, though)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d:%d spills:fills",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           c->spills,
                           c->fills);
        if (ret >= 0) {
                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        vir_compile_destroy(c);

        return qpu_insts;
}

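/* Compiles a vertex shader.  The returned buffer of QPU instructions is
 * malloc()ed by v3d_return_qpu_insts() and owned by the caller, and the
 * v3d_compile context is destroyed before returning.  v3d_compile_fs() below
 * follows the same pattern.
 */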
uint64_t *v3d_compile_vs(const struct v3d_compiler *compiler,
                         struct v3d_vs_key *key,
                         struct v3d_vs_prog_data *prog_data,
                         nir_shader *s,
                         void (*debug_output)(const char *msg,
                                              void *debug_output_data),
                         void *debug_output_data,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        c->vs_key = key;

        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);

        v3d_lower_nir(c);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->base.ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, key->base.ucp_enables,
                           false);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);

        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment size are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        /* Set us up for shared input/output segments.  This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size.  We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space.  We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM).  The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
        assert(c->devinfo->vpm_size);
        int sector_size = 16 * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);

        return v3d_return_qpu_insts(c, final_assembly_size);
}

static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

uint64_t *v3d_compile_fs(const struct v3d_compiler *compiler,
                         struct v3d_fs_key *key,
                         struct v3d_fs_prog_data *prog_data,
                         nir_shader *s,
                         void (*debug_output)(const char *msg,
                                              void *debug_output_data),
                         void *debug_output_data,
                         int program_id, int variant_id,
                         uint32_t *final_assembly_size)
{
        struct v3d_compile *c = vir_compile_init(compiler, &key->base, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        c->fs_key = key;

        if (key->int_color_rb || key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        v3d_lower_nir(c);

        if (key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test, key->alpha_test_func,
                           false);
        }

        if (key->base.ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, key->base.ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);

        v3d_lower_nir_late(c);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, &prog_data->base);
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = (c->s->info.outputs_written &
                               (1 << FRAG_RESULT_DEPTH));
        prog_data->discard = (c->s->info.fs.uses_discard ||
                              c->fs_key->sample_alpha_to_coverage);
        prog_data->uses_center_w = c->uses_center_w;

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX
        while (reg.file == QFILE_TEMP &&
               c->defs[reg.index] &&
               (c->defs[reg.index]->op == QOP_MOV ||
                c->defs[reg.index]->op == QOP_FMOV) &&
               !c->defs[reg.index]->dst.pack &&
               !c->defs[reg.index]->src[0].pack) {
                reg = c->defs[reg.index]->src[0];
        }
        */

        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

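/* vir_uniform() deduplicates identical (contents, data) pairs, so repeated
 * requests for the same uniform return the same QFILE_UNIF register.  For
 * example (illustrative; QUNIFORM_CONSTANT is assumed from the uniform
 * contents enum, not defined here):
 *
 *     struct qreg one = vir_uniform(c, QUNIFORM_CONSTANT, 0x3f800000);
 */
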
static bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.add.op == V3D_QPU_A_NOP &&
             inst->qpu.alu.mul.op == V3D_QPU_M_NOP)) {
                return false;
        }

        return true;
}

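/* Sets the given condition-flag update on the instruction that wrote src, if
 * the cursor is still pointing right after that instruction and it is allowed
 * to set flags; otherwise emits a MOV of src to the NULL register just to
 * update the flags.
 */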
void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

                /* Can't stuff the PF into the last last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

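/* OPTPASS expands inside vir_optimize() below and relies on the locals
 * declared there (c, progress, pass, print_opt_debug).
 */
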
void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}