/*
 * Copyright © 2016-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "broadcom/common/v3d_device_info.h"
#include "v3d_compiler.h"

int
vir_get_non_sideband_nsrc(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return 0;
        case V3D_QPU_INSTR_TYPE_ALU:
                if (inst->qpu.alu.add.op != V3D_QPU_A_NOP)
                        return v3d_qpu_add_op_num_src(inst->qpu.alu.add.op);
                else
                        return v3d_qpu_mul_op_num_src(inst->qpu.alu.mul.op);
        }

        return 0;
}

int
vir_get_nsrc(struct qinst *inst)
{
        int nsrc = vir_get_non_sideband_nsrc(inst);

        if (vir_has_implicit_uniform(inst))
                nsrc++;

        return nsrc;
}

bool
vir_has_implicit_uniform(struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->dst.file) {
                case QFILE_TLBU:
                        return true;
                case QFILE_MAGIC:
                        switch (inst->dst.index) {
                        case V3D_QPU_WADDR_TLBU:
                        case V3D_QPU_WADDR_TMUAU:
                        case V3D_QPU_WADDR_SYNCU:
                                return true;
                        default:
                                break;
                        }
                        break;
                default:
                        return inst->has_implicit_uniform;
                }
        }

        return false;
}

/* The sideband uniform for textures gets stored after the normal ALU
 * arguments.
 */
int
vir_get_implicit_uniform_src(struct qinst *inst)
{
        if (!vir_has_implicit_uniform(inst))
                return -1;

        return vir_get_nsrc(inst) - 1;
}

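/* Illustrative usage (a sketch, not taken from the original file): callers
 * that need the sideband operand index it through the helper above, roughly
 *
 *     if (vir_has_implicit_uniform(inst))
 *             unif = inst->src[vir_get_implicit_uniform_src(inst)];
 *
 * i.e. the implicit uniform always occupies the last source slot counted by
 * vir_get_nsrc().
 */
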
/**
 * Returns whether the instruction has any side effects that must be
 * preserved.
 */
bool
vir_has_side_effects(struct v3d_compile *c, struct qinst *inst)
{
        switch (inst->qpu.type) {
        case V3D_QPU_INSTR_TYPE_BRANCH:
                return true;
        case V3D_QPU_INSTR_TYPE_ALU:
                switch (inst->qpu.alu.add.op) {
                case V3D_QPU_A_SETREVF:
                case V3D_QPU_A_SETMSF:
                case V3D_QPU_A_VPMSETUP:
                case V3D_QPU_A_STVPMV:
                case V3D_QPU_A_STVPMD:
                case V3D_QPU_A_STVPMP:
                case V3D_QPU_A_VPMWT:
                case V3D_QPU_A_TMUWT:
                        return true;
                default:
                        break;
                }

                switch (inst->qpu.alu.mul.op) {
                case V3D_QPU_M_MULTOP:
                        return true;
                default:
                        break;
                }
        }

        if (inst->qpu.sig.ldtmu ||
            inst->qpu.sig.ldvary ||
            inst->qpu.sig.wrtmuc ||
            inst->qpu.sig.thrsw) {
                return true;
        }

        return false;
}

bool
vir_is_raw_mov(struct qinst *inst)
{
        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.mul.op != V3D_QPU_M_FMOV &&
             inst->qpu.alu.mul.op != V3D_QPU_M_MOV)) {
                return false;
        }

        if (inst->qpu.alu.add.output_pack != V3D_QPU_PACK_NONE ||
            inst->qpu.alu.mul.output_pack != V3D_QPU_PACK_NONE) {
                return false;
        }

        if (inst->qpu.flags.ac != V3D_QPU_COND_NONE ||
            inst->qpu.flags.mc != V3D_QPU_COND_NONE)
                return false;

        return true;
}

bool
vir_is_add(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.add.op != V3D_QPU_A_NOP);
}

bool
vir_is_mul(struct qinst *inst)
{
        return (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
                inst->qpu.alu.mul.op != V3D_QPU_M_NOP);
}

bool
vir_is_tex(struct qinst *inst)
{
        if (inst->dst.file == QFILE_MAGIC)
                return v3d_qpu_magic_waddr_is_tmu(inst->dst.index);

        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU &&
            inst->qpu.alu.add.op == V3D_QPU_A_TMUWT) {
                return true;
        }

        return false;
}

bool
vir_writes_r3(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        for (int i = 0; i < vir_get_nsrc(inst); i++) {
                switch (inst->src[i].file) {
                case QFILE_VPM:
                        return true;
                default:
                        break;
                }
        }

        if (devinfo->ver < 41 && (inst->qpu.sig.ldvary ||
                                  inst->qpu.sig.ldtlb ||
                                  inst->qpu.sig.ldtlbu ||
                                  inst->qpu.sig.ldvpm)) {
                return true;
        }

        return false;
}

bool
vir_writes_r4(const struct v3d_device_info *devinfo, struct qinst *inst)
{
        switch (inst->dst.file) {
        case QFILE_MAGIC:
                switch (inst->dst.index) {
                case V3D_QPU_WADDR_RECIP:
                case V3D_QPU_WADDR_RSQRT:
                case V3D_QPU_WADDR_EXP:
                case V3D_QPU_WADDR_LOG:
                case V3D_QPU_WADDR_SIN:
                        return true;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        if (devinfo->ver < 41 && inst->qpu.sig.ldtmu)
                return true;

        return false;
}

void
vir_set_unpack(struct qinst *inst, int src,
               enum v3d_qpu_input_unpack unpack)
{
        assert(src == 0 || src == 1);

        if (vir_is_add(inst)) {
                if (src == 0)
                        inst->qpu.alu.add.a_unpack = unpack;
                else
                        inst->qpu.alu.add.b_unpack = unpack;
        } else {
                assert(vir_is_mul(inst));
                if (src == 0)
                        inst->qpu.alu.mul.a_unpack = unpack;
                else
                        inst->qpu.alu.mul.b_unpack = unpack;
        }
}

void
vir_set_cond(struct qinst *inst, enum v3d_qpu_cond cond)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.ac = cond;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mc = cond;
        }
}

void
vir_set_pf(struct qinst *inst, enum v3d_qpu_pf pf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.apf = pf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.mpf = pf;
        }
}

void
vir_set_uf(struct qinst *inst, enum v3d_qpu_uf uf)
{
        if (vir_is_add(inst)) {
                inst->qpu.flags.auf = uf;
        } else {
                assert(vir_is_mul(inst));
                inst->qpu.flags.muf = uf;
        }
}

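/* Illustrative usage (a sketch, not from the original file): predication is
 * set up by making the flag-producing instruction push a condition and the
 * consumer execute on it, e.g.
 *
 *     vir_set_pf(cmp, V3D_QPU_PF_PUSHZ);
 *     vir_set_cond(mov, V3D_QPU_COND_IFA);
 *
 * The add/mul distinction is handled internally: each setter routes the value
 * to the add-ALU or mul-ALU flag field depending on which half the
 * instruction occupies.
 */
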
uint8_t
vir_channels_written(struct qinst *inst)
{
        if (vir_is_mul(inst)) {
                switch (inst->dst.pack) {
                case QPU_PACK_MUL_NOP:
                case QPU_PACK_MUL_8888:
                        return 0xf;
                case QPU_PACK_MUL_8A:
                        return 0x1;
                case QPU_PACK_MUL_8B:
                        return 0x2;
                case QPU_PACK_MUL_8C:
                        return 0x4;
                case QPU_PACK_MUL_8D:
                        return 0x8;
                }
        } else {
                switch (inst->dst.pack) {
                case QPU_PACK_A_NOP:
                case QPU_PACK_A_8888:
                case QPU_PACK_A_8888_SAT:
                case QPU_PACK_A_32_SAT:
                        return 0xf;
                case QPU_PACK_A_8A:
                case QPU_PACK_A_8A_SAT:
                        return 0x1;
                case QPU_PACK_A_8B:
                case QPU_PACK_A_8B_SAT:
                        return 0x2;
                case QPU_PACK_A_8C:
                case QPU_PACK_A_8C_SAT:
                        return 0x4;
                case QPU_PACK_A_8D:
                case QPU_PACK_A_8D_SAT:
                        return 0x8;
                case QPU_PACK_A_16A:
                case QPU_PACK_A_16A_SAT:
                        return 0x3;
                case QPU_PACK_A_16B:
                case QPU_PACK_A_16B_SAT:
                        return 0xc;
                }
        }
        unreachable("Bad pack field");
}

struct qreg
vir_get_temp(struct v3d_compile *c)
{
        struct qreg reg;

        reg.file = QFILE_TEMP;
        reg.index = c->num_temps++;

        if (c->num_temps > c->defs_array_size) {
                uint32_t old_size = c->defs_array_size;
                c->defs_array_size = MAX2(old_size * 2, 16);

                c->defs = reralloc(c, c->defs, struct qinst *,
                                   c->defs_array_size);
                memset(&c->defs[old_size], 0,
                       sizeof(c->defs[0]) * (c->defs_array_size - old_size));

                c->spillable = reralloc(c, c->spillable,
                                        BITSET_WORD,
                                        BITSET_WORDS(c->defs_array_size));
                for (int i = old_size; i < c->defs_array_size; i++)
                        BITSET_SET(c->spillable, i);
        }

        return reg;
}

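/* Worked example of the growth policy above (illustrative numbers): the defs
 * array jumps from 0 to 16 entries on the first temp, then doubles (32, 64,
 * ...), so allocating N temps costs O(N) amortized reralloc copies.  Every
 * fresh temp starts out marked spillable; later passes are expected to clear
 * the bit for temps that must stay in registers.
 */
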
struct qinst *
vir_add_inst(enum v3d_qpu_add_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.add.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;

        return inst;
}

struct qinst *
vir_mul_inst(enum v3d_qpu_mul_op op, struct qreg dst, struct qreg src0, struct qreg src1)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.alu.mul.op = op;

        inst->dst = dst;
        inst->src[0] = src0;
        inst->src[1] = src1;

        return inst;
}

struct qinst *
vir_branch_inst(enum v3d_qpu_branch_cond cond, struct qreg src)
{
        struct qinst *inst = calloc(1, sizeof(*inst));

        inst->qpu = v3d_qpu_nop();
        inst->qpu.type = V3D_QPU_INSTR_TYPE_BRANCH;
        inst->qpu.branch.cond = cond;
        inst->qpu.branch.msfign = V3D_QPU_MSFIGN_NONE;
        inst->qpu.branch.bdi = V3D_QPU_BRANCH_DEST_REL;
        inst->qpu.branch.ub = true;
        inst->qpu.branch.bdu = V3D_QPU_BRANCH_DEST_REL;

        inst->dst = vir_reg(QFILE_NULL, 0);
        inst->src[0] = src;

        return inst;
}

static void
vir_emit(struct v3d_compile *c, struct qinst *inst)
{
        switch (c->cursor.mode) {
        case vir_cursor_add:
                list_add(&inst->link, c->cursor.link);
                break;
        case vir_cursor_addtail:
                list_addtail(&inst->link, c->cursor.link);
                break;
        }

        c->cursor = vir_after_inst(inst);
        c->live_intervals_valid = false;
}

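/* A note on the cursor modes above (illustrative, based on util/list.h
 * semantics): vir_cursor_add inserts the new instruction right after the
 * list node the cursor references, while vir_cursor_addtail inserts it right
 * before that node.  Since the cursor is then repositioned after the freshly
 * emitted instruction, consecutive emits land in program order.
 */
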
/* Updates inst to write to a new temporary, emits it, and notes the def. */
struct qreg
vir_emit_def(struct v3d_compile *c, struct qinst *inst)
{
        assert(inst->dst.file == QFILE_NULL);

        /* If we're emitting an instruction that's a def, it had better be
         * writing a register.
         */
        if (inst->qpu.type == V3D_QPU_INSTR_TYPE_ALU) {
                assert(inst->qpu.alu.add.op == V3D_QPU_A_NOP ||
                       v3d_qpu_add_op_has_dst(inst->qpu.alu.add.op));
                assert(inst->qpu.alu.mul.op == V3D_QPU_M_NOP ||
                       v3d_qpu_mul_op_has_dst(inst->qpu.alu.mul.op));
        }

        inst->dst = vir_get_temp(c);

        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = inst;

        vir_emit(c, inst);

        return inst->dst;
}

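/* Typical use (an illustrative sketch; the real emission helpers live in the
 * compiler headers): an unallocated instruction is paired with vir_emit_def()
 * to get an SSA-style temporary back, roughly
 *
 *     struct qreg sum = vir_emit_def(c,
 *             vir_add_inst(V3D_QPU_A_FADD, c->undef, a, b));
 *
 * while instructions that write fixed or magic registers go through
 * vir_emit_nondef() below instead.
 */
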
void
vir_emit_nondef(struct v3d_compile *c, struct qinst *inst)
{
        if (inst->dst.file == QFILE_TEMP)
                c->defs[inst->dst.index] = NULL;

        vir_emit(c, inst);
}

struct qblock *
vir_new_block(struct v3d_compile *c)
{
        struct qblock *block = rzalloc(c, struct qblock);

        list_inithead(&block->instructions);

        block->predecessors = _mesa_set_create(block,
                                               _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);

        block->index = c->next_block_index++;

        return block;
}

void
vir_set_emit_block(struct v3d_compile *c, struct qblock *block)
{
        c->cur_block = block;
        c->cursor = vir_after_block(block);
        list_addtail(&block->link, &c->blocks);
}

struct qblock *
vir_entry_block(struct v3d_compile *c)
{
        return list_first_entry(&c->blocks, struct qblock, link);
}

struct qblock *
vir_exit_block(struct v3d_compile *c)
{
        return list_last_entry(&c->blocks, struct qblock, link);
}

void
vir_link_blocks(struct qblock *predecessor, struct qblock *successor)
{
        _mesa_set_add(successor->predecessors, predecessor);
        if (predecessor->successors[0]) {
                assert(!predecessor->successors[1]);
                predecessor->successors[1] = successor;
        } else {
                predecessor->successors[0] = successor;
        }
}

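/* A block can have at most two successors (the fallthrough path and one
 * branch target), which is why the second slot asserts it is still empty.
 * Sketch of wiring up an if/else diamond (illustrative block names):
 *
 *     vir_link_blocks(cond_block, then_block);
 *     vir_link_blocks(cond_block, else_block);
 *     vir_link_blocks(then_block, after_block);
 *     vir_link_blocks(else_block, after_block);
 */
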
const struct v3d_compiler *
v3d_compiler_init(const struct v3d_device_info *devinfo)
{
        struct v3d_compiler *compiler = rzalloc(NULL, struct v3d_compiler);
        if (!compiler)
                return NULL;

        compiler->devinfo = devinfo;

        if (!vir_init_reg_sets(compiler)) {
                ralloc_free(compiler);
                return NULL;
        }

        return compiler;
}

void
v3d_compiler_free(const struct v3d_compiler *compiler)
{
        ralloc_free((void *)compiler);
}

static struct v3d_compile *
vir_compile_init(const struct v3d_compiler *compiler,
                 struct v3d_key *key,
                 nir_shader *s,
                 void (*debug_output)(const char *msg,
                                      void *debug_output_data),
                 void *debug_output_data,
                 int program_id, int variant_id)
{
        struct v3d_compile *c = rzalloc(NULL, struct v3d_compile);

        c->compiler = compiler;
        c->devinfo = compiler->devinfo;
        c->key = key;
        c->program_id = program_id;
        c->variant_id = variant_id;

        c->debug_output = debug_output;
        c->debug_output_data = debug_output_data;

        s = nir_shader_clone(c, s);
        c->s = s;

        list_inithead(&c->blocks);
        vir_set_emit_block(c, vir_new_block(c));

        c->output_position_index = -1;
        c->output_point_size_index = -1;
        c->output_sample_mask_index = -1;

        c->def_ht = _mesa_hash_table_create(c, _mesa_hash_pointer,
                                            _mesa_key_pointer_equal);

        return c;
}

static int
type_size_vec4(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static void
v3d_lower_nir(struct v3d_compile *c)
{
        struct nir_lower_tex_options tex_options = {
                .lower_tg4_broadcom_swizzle = true,

                .lower_rect = false, /* XXX: Use this on V3D 3.x */

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and (for 32-bit returns)
         * ARB_texture_swizzle-style swizzle.
         */
        for (int i = 0; i < ARRAY_SIZE(c->key->tex); i++) {
                for (int j = 0; j < 4; j++)
                        tex_options.swizzles[i][j] = c->key->tex[i].swizzle[j];

                if (c->key->tex[i].clamp_s)
                        tex_options.saturate_s |= 1 << i;
                if (c->key->tex[i].clamp_t)
                        tex_options.saturate_t |= 1 << i;
                if (c->key->tex[i].clamp_r)
                        tex_options.saturate_r |= 1 << i;
                if (c->key->tex[i].return_size == 16) {
                        tex_options.lower_tex_packing[i] =
                                nir_lower_tex_packing_16;
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
        NIR_PASS_V(c->s, nir_lower_system_values);
}

static void
v3d_set_prog_data_uniforms(struct v3d_compile *c,
                           struct v3d_prog_data *prog_data)
{
        int count = c->num_uniforms;
        struct v3d_uniform_list *ulist = &prog_data->uniforms;

        ulist->count = count;
        ulist->data = ralloc_array(prog_data, uint32_t, count);
        memcpy(ulist->data, c->uniform_data,
               count * sizeof(*ulist->data));
        ulist->contents = ralloc_array(prog_data, enum quniform_contents, count);
        memcpy(ulist->contents, c->uniform_contents,
               count * sizeof(*ulist->contents));
}

/* Copy the compiler UBO range state to the compiled shader, dropping out
 * arrays that were never referenced by an indirect load.
 *
 * (Note that QIR dead code elimination of an array access still leaves that
 * array alive, though)
 */
static void
v3d_set_prog_data_ubo(struct v3d_compile *c,
                      struct v3d_prog_data *prog_data)
{
        if (!c->num_ubo_ranges)
                return;

        prog_data->num_ubo_ranges = 0;
        prog_data->ubo_ranges = ralloc_array(prog_data, struct v3d_ubo_range,
                                             c->num_ubo_ranges);
        for (int i = 0; i < c->num_ubo_ranges; i++) {
                if (!c->ubo_range_used[i])
                        continue;

                struct v3d_ubo_range *range = &c->ubo_ranges[i];
                prog_data->ubo_ranges[prog_data->num_ubo_ranges++] = *range;
                prog_data->ubo_size += range->size;
        }

        if (prog_data->ubo_size) {
                if (V3D_DEBUG & V3D_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                vir_get_stage_name(c),
                                c->program_id, c->variant_id,
                                prog_data->ubo_size / 4);
                }
        }
}

static void
v3d_vs_set_prog_data(struct v3d_compile *c,
                     struct v3d_vs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;

        /* The vertex data gets format converted by the VPM so that
         * each attribute channel takes up a VPM column.  Precompute
         * the sizes for the shader record.
         */
        for (int i = 0; i < ARRAY_SIZE(prog_data->vattr_sizes); i++) {
                prog_data->vattr_sizes[i] = c->vattr_sizes[i];
                prog_data->vpm_input_size += c->vattr_sizes[i];
        }

        prog_data->uses_vid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_VERTEX_ID));
        prog_data->uses_iid = (c->s->info.system_values_read &
                               (1ull << SYSTEM_VALUE_INSTANCE_ID));

        if (prog_data->uses_vid)
                prog_data->vpm_input_size++;
        if (prog_data->uses_iid)
                prog_data->vpm_input_size++;

        /* Input/output segment size are in sectors (8 rows of 32 bits per
         * channel).
         */
        prog_data->vpm_input_size = align(prog_data->vpm_input_size, 8) / 8;
        prog_data->vpm_output_size = align(c->num_vpm_writes, 8) / 8;

        /* Set us up for shared input/output segments.  This is apparently
         * necessary for our VCM setup to avoid varying corruption.
         */
        prog_data->separate_segments = false;
        prog_data->vpm_output_size = MAX2(prog_data->vpm_output_size,
                                          prog_data->vpm_input_size);
        prog_data->vpm_input_size = 0;

        /* Compute VCM cache size.  We set up our program to take up less than
         * half of the VPM, so that any set of bin and render programs won't
         * run out of space.  We need space for at least one input segment,
         * and then allocate the rest to output segments (one for the current
         * program, the rest to VCM).  The valid range of the VCM cache size
         * field is 1-4 16-vertex batches, but GFXH-1744 limits us to 2-4
         * batches.
         */
        assert(c->devinfo->vpm_size);
        int sector_size = 16 * sizeof(uint32_t) * 8;
        int vpm_size_in_sectors = c->devinfo->vpm_size / sector_size;
        int half_vpm = vpm_size_in_sectors / 2;
        int vpm_output_sectors = half_vpm - prog_data->vpm_input_size;
        int vpm_output_batches = vpm_output_sectors / prog_data->vpm_output_size;
        assert(vpm_output_batches >= 2);
        prog_data->vcm_cache_size = CLAMP(vpm_output_batches - 1, 2, 4);
}

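/* Worked example of the arithmetic above (illustrative numbers only): with a
 * 16 KB VPM, sector_size is 16 * 4 * 8 = 512 bytes, so vpm_size_in_sectors is
 * 32 and half_vpm is 16.  With shared segments vpm_input_size has been forced
 * to 0, so a shader with a 2-sector output segment gets vpm_output_batches =
 * 16 / 2 = 8, and vcm_cache_size clamps 8 - 1 = 7 down to 4 batches.
 */
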
static void
v3d_set_fs_prog_data_inputs(struct v3d_compile *c,
                            struct v3d_fs_prog_data *prog_data)
{
        prog_data->base.num_inputs = c->num_inputs;
        memcpy(prog_data->input_slots, c->input_slots,
               c->num_inputs * sizeof(*c->input_slots));

        STATIC_ASSERT(ARRAY_SIZE(prog_data->flat_shade_flags) >
                      (V3D_MAX_FS_INPUTS - 1) / 24);
        for (int i = 0; i < V3D_MAX_FS_INPUTS; i++) {
                if (BITSET_TEST(c->flat_shade_flags, i))
                        prog_data->flat_shade_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->noperspective_flags, i))
                        prog_data->noperspective_flags[i / 24] |= 1 << (i % 24);

                if (BITSET_TEST(c->centroid_flags, i))
                        prog_data->centroid_flags[i / 24] |= 1 << (i % 24);
        }
}

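/* The flags are grouped in sets of 24 because the hardware's shader state
 * record packs them into 24-bit fields; hence the i / 24 and i % 24 split
 * above.  Illustrative example: flat-shaded input 30 sets bit 30 % 24 = 6 in
 * flat_shade_flags[30 / 24] = flat_shade_flags[1].
 */
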
static void
v3d_fs_set_prog_data(struct v3d_compile *c,
                     struct v3d_fs_prog_data *prog_data)
{
        v3d_set_fs_prog_data_inputs(c, prog_data);
        prog_data->writes_z = c->writes_z;
        prog_data->disable_ez = !c->s->info.fs.early_fragment_tests;
        prog_data->uses_center_w = c->uses_center_w;
}

static void
v3d_set_prog_data(struct v3d_compile *c,
                  struct v3d_prog_data *prog_data)
{
        prog_data->threads = c->threads;
        prog_data->single_seg = !c->last_thrsw;
        prog_data->spill_size = c->spill_size;

        v3d_set_prog_data_uniforms(c, prog_data);
        v3d_set_prog_data_ubo(c, prog_data);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_vs_set_prog_data(c, (struct v3d_vs_prog_data *)prog_data);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_fs_set_prog_data(c, (struct v3d_fs_prog_data *)prog_data);
        }
}

static uint64_t *
v3d_return_qpu_insts(struct v3d_compile *c, uint32_t *final_assembly_size)
{
        *final_assembly_size = c->qpu_inst_count * sizeof(uint64_t);

        uint64_t *qpu_insts = malloc(*final_assembly_size);
        if (!qpu_insts)
                return NULL;

        memcpy(qpu_insts, c->qpu_insts, *final_assembly_size);

        vir_compile_destroy(c);

        return qpu_insts;
}

static void
v3d_nir_lower_vs_early(struct v3d_compile *c)
{
        /* Split our I/O vars and dead code eliminate the unused
         * components.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar_early,
                   nir_var_shader_in | nir_var_shader_out);
        uint64_t used_outputs[4] = {0};
        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                int slot = v3d_slot_get_slot(c->vs_key->fs_inputs[i]);
                int comp = v3d_slot_get_component(c->vs_key->fs_inputs[i]);
                used_outputs[comp] |= 1ull << slot;
        }
        NIR_PASS_V(c->s, nir_remove_unused_io_vars,
                   &c->s->outputs, used_outputs, NULL); /* demotes to globals */
        NIR_PASS_V(c->s, nir_lower_global_vars_to_local);
        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_shader_in);
        NIR_PASS_V(c->s, nir_lower_io, nir_var_shader_in | nir_var_shader_out,
                   type_size_vec4,
                   (nir_lower_io_options)0);
}

static void
v3d_fixup_fs_output_types(struct v3d_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                uint32_t mask = 0;

                switch (var->data.location) {
                case FRAG_RESULT_COLOR:
                        mask = ~0;
                        break;
                case FRAG_RESULT_DATA0:
                case FRAG_RESULT_DATA1:
                case FRAG_RESULT_DATA2:
                case FRAG_RESULT_DATA3:
                        mask = 1 << (var->data.location - FRAG_RESULT_DATA0);
                        break;
                }

                if (c->fs_key->int_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_INT,
                                                 glsl_get_components(var->type));
                } else if (c->fs_key->uint_color_rb & mask) {
                        var->type =
                                glsl_vector_type(GLSL_TYPE_UINT,
                                                 glsl_get_components(var->type));
                }
        }
}

static void
v3d_nir_lower_fs_early(struct v3d_compile *c)
{
        if (c->fs_key->int_color_rb || c->fs_key->uint_color_rb)
                v3d_fixup_fs_output_types(c);

        /* If the shader has no non-TLB side effects, we can promote it to
         * enabling early_fragment_tests even if the user didn't.
         */
        if (!(c->s->info.num_images ||
              c->s->info.num_ssbos ||
              c->s->info.num_abos)) {
                c->s->info.fs.early_fragment_tests = true;
        }
}

static void
v3d_nir_lower_vs_late(struct v3d_compile *c)
{
        if (c->vs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->key->ucp_enables) {
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables,
                           false);
                NIR_PASS_V(c->s, nir_lower_io_to_scalar,
                           nir_var_shader_out);
        }

        /* Note: VS output scalarizing must happen after nir_lower_clip_vs. */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
}

static void
v3d_nir_lower_fs_late(struct v3d_compile *c)
{
        if (c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (c->fs_key->clamp_color)
                NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);

        if (c->fs_key->alpha_test) {
                NIR_PASS_V(c->s, nir_lower_alpha_test,
                           c->fs_key->alpha_test_func,
                           false);
        }

        if (c->key->ucp_enables)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);

        /* Note: FS input scalarizing must happen after
         * nir_lower_two_sided_color, which only handles a vec4 at a time.
         */
        NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
}

uint64_t *v3d_compile(const struct v3d_compiler *compiler,
                      struct v3d_key *key,
                      struct v3d_prog_data **out_prog_data,
                      nir_shader *s,
                      void (*debug_output)(const char *msg,
                                           void *debug_output_data),
                      void *debug_output_data,
                      int program_id, int variant_id,
                      uint32_t *final_assembly_size)
{
        struct v3d_prog_data *prog_data;
        struct v3d_compile *c = vir_compile_init(compiler, key, s,
                                                 debug_output, debug_output_data,
                                                 program_id, variant_id);

        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                c->vs_key = (struct v3d_vs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_vs_prog_data));
                break;
        case MESA_SHADER_FRAGMENT:
                c->fs_key = (struct v3d_fs_key *)key;
                prog_data = rzalloc_size(NULL, sizeof(struct v3d_fs_prog_data));
                break;
        default:
                unreachable("unsupported shader stage");
        }

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_early(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_early(c);
        }

        v3d_lower_nir(c);

        if (c->s->info.stage == MESA_SHADER_VERTEX) {
                v3d_nir_lower_vs_late(c);
        } else {
                assert(c->s->info.stage == MESA_SHADER_FRAGMENT);
                v3d_nir_lower_fs_late(c);
        }

        NIR_PASS_V(c->s, v3d_nir_lower_io, c);
        NIR_PASS_V(c->s, v3d_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, v3d_nir_lower_image_load_store);
        NIR_PASS_V(c->s, nir_lower_idiv);

        v3d_optimize_nir(c->s);
        NIR_PASS_V(c->s, nir_lower_bool_to_int32);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        v3d_nir_to_vir(c);

        v3d_set_prog_data(c, prog_data);

        *out_prog_data = prog_data;

        char *shaderdb;
        int ret = asprintf(&shaderdb,
                           "%s shader: %d inst, %d threads, %d loops, "
                           "%d uniforms, %d:%d spills:fills",
                           vir_get_stage_name(c),
                           c->qpu_inst_count,
                           c->threads,
                           c->loops,
                           c->num_uniforms,
                           c->spills,
                           c->fills);
        if (ret >= 0) {
                c->debug_output(shaderdb, c->debug_output_data);
                free(shaderdb);
        }

        return v3d_return_qpu_insts(c, final_assembly_size);
}

void
vir_remove_instruction(struct v3d_compile *c, struct qinst *qinst)
{
        if (qinst->dst.file == QFILE_TEMP)
                c->defs[qinst->dst.index] = NULL;

        assert(&qinst->link != c->cursor.link);

        list_del(&qinst->link);
        free(qinst);

        c->live_intervals_valid = false;
}

struct qreg
vir_follow_movs(struct v3d_compile *c, struct qreg reg)
{
        /* XXX: disabled for now, this is still written against the vc4-era
         * QIR opcodes rather than VIR:
         *
         * while (reg.file == QFILE_TEMP &&
         *        c->defs[reg.index] &&
         *        (c->defs[reg.index]->op == QOP_MOV ||
         *         c->defs[reg.index]->op == QOP_FMOV) &&
         *        !c->defs[reg.index]->dst.pack &&
         *        !c->defs[reg.index]->src[0].pack) {
         *         reg = c->defs[reg.index]->src[0];
         * }
         */

        return reg;
}

void
vir_compile_destroy(struct v3d_compile *c)
{
        /* Defuse the assert that we aren't removing the cursor's instruction.
         */
        c->cursor.link = NULL;

        vir_for_each_block(block, c) {
                while (!list_empty(&block->instructions)) {
                        struct qinst *qinst =
                                list_first_entry(&block->instructions,
                                                 struct qinst, link);
                        vir_remove_instruction(c, qinst);
                }
        }

        ralloc_free(c);
}

struct qreg
vir_uniform(struct v3d_compile *c,
            enum quniform_contents contents,
            uint32_t data)
{
        for (int i = 0; i < c->num_uniforms; i++) {
                if (c->uniform_contents[i] == contents &&
                    c->uniform_data[i] == data) {
                        return vir_reg(QFILE_UNIF, i);
                }
        }

        uint32_t uniform = c->num_uniforms++;

        if (uniform >= c->uniform_array_size) {
                c->uniform_array_size = MAX2(MAX2(16, uniform + 1),
                                             c->uniform_array_size * 2);

                c->uniform_data = reralloc(c, c->uniform_data,
                                           uint32_t,
                                           c->uniform_array_size);
                c->uniform_contents = reralloc(c, c->uniform_contents,
                                               enum quniform_contents,
                                               c->uniform_array_size);
        }

        c->uniform_contents[uniform] = contents;
        c->uniform_data[uniform] = data;

        return vir_reg(QFILE_UNIF, uniform);
}

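/* Illustrative usage (a sketch, not quoted from the driver): a small constant
 * is requested as
 *
 *     struct qreg five = vir_uniform(c, QUNIFORM_CONSTANT, 5);
 *
 * Because of the linear scan above, asking for the same (contents, data) pair
 * twice hands back the same QFILE_UNIF slot instead of growing the uniform
 * stream.
 */
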
bool
vir_can_set_flags(struct v3d_compile *c, struct qinst *inst)
{
        if (c->devinfo->ver >= 40 && (v3d_qpu_reads_vpm(&inst->qpu) ||
                                      v3d_qpu_uses_sfu(&inst->qpu))) {
                return false;
        }

        if (inst->qpu.type != V3D_QPU_INSTR_TYPE_ALU ||
            (inst->qpu.alu.add.op == V3D_QPU_A_NOP &&
             inst->qpu.alu.mul.op == V3D_QPU_M_NOP)) {
                return false;
        }

        return true;
}

void
vir_PF(struct v3d_compile *c, struct qreg src, enum v3d_qpu_pf pf)
{
        struct qinst *last_inst = NULL;

        if (!list_empty(&c->cur_block->instructions)) {
                last_inst = (struct qinst *)c->cur_block->instructions.prev;

                /* Can't stuff the PF into the last last inst if our cursor
                 * isn't pointing after it.
                 */
                struct vir_cursor after_inst = vir_after_inst(last_inst);
                if (c->cursor.mode != after_inst.mode ||
                    c->cursor.link != after_inst.link)
                        last_inst = NULL;
        }

        if (src.file != QFILE_TEMP ||
            !c->defs[src.index] ||
            last_inst != c->defs[src.index] ||
            !vir_can_set_flags(c, last_inst)) {
                /* XXX: Make the MOV be the appropriate type */
                last_inst = vir_MOV_dest(c, vir_reg(QFILE_NULL, 0), src);
        }

        vir_set_pf(last_inst, pf);
}

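/* Typical call (sketch): vir_PF(c, cond, V3D_QPU_PF_PUSHZ) pushes the Z flag
 * from whatever instruction wrote cond.  If that writer cannot carry flags,
 * or the cursor has since moved elsewhere, a MOV of cond to a null
 * destination is emitted purely to regenerate the flags.
 */
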
#define OPTPASS(func)                                                   \
        do {                                                            \
                bool stage_progress = func(c);                          \
                if (stage_progress) {                                   \
                        progress = true;                                \
                        if (print_opt_debug) {                          \
                                fprintf(stderr,                         \
                                        "VIR opt pass %2d: %s progress\n", \
                                        pass, #func);                   \
                        }                                               \
                        /*XXX vir_validate(c);*/                        \
                }                                                       \
        } while (0)

void
vir_optimize(struct v3d_compile *c)
{
        bool print_opt_debug = false;
        int pass = 1;

        while (true) {
                bool progress = false;

                OPTPASS(vir_opt_copy_propagate);
                OPTPASS(vir_opt_dead_code);
                OPTPASS(vir_opt_small_immediates);

                if (!progress)
                        break;

                pass++;
        }
}

const char *
vir_get_stage_name(struct v3d_compile *c)
{
        if (c->vs_key && c->vs_key->is_coord)
                return "MESA_SHADER_COORD";
        else
                return gl_shader_stage_name(c->s->info.stage);
}