/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "tgsi/tgsi_info.h"
26 #include "glsl/nir/nir_builder.h"
/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs and outputs into scalars, and drops any
 * non-position outputs in coordinate shaders.
 */
37 vc4_nir_lower_input(struct vc4_compile
*c
, nir_builder
*b
,
38 nir_intrinsic_instr
*intr
)
40 /* All TGSI-to-NIR inputs are vec4. */
41 assert(intr
->num_components
== 4);
43 nir_builder_insert_before_instr(b
, &intr
->instr
);
45 /* Generate scalar loads equivalent to the original VEC4. */
46 nir_ssa_def
*dests
[4];
47 for (unsigned i
= 0; i
< intr
->num_components
; i
++) {
48 nir_intrinsic_instr
*intr_comp
=
49 nir_intrinsic_instr_create(c
->s
, nir_intrinsic_load_input
);
50 intr_comp
->num_components
= 1;
51 intr_comp
->const_index
[0] = intr
->const_index
[0] * 4 + i
;
52 nir_ssa_dest_init(&intr_comp
->instr
, &intr_comp
->dest
, 1, NULL
);
53 nir_builder_instr_insert(b
, &intr_comp
->instr
);
55 dests
[i
] = &intr_comp
->dest
.ssa
;
58 /* Batch things back together into a vec4. This will get split by the
59 * later ALU scalarization pass.
61 nir_ssa_def
*vec_instr
= nir_vec4(b
, dests
[0], dests
[1],
64 /* Replace the old intrinsic with a reference to our reconstructed
67 nir_ssa_def_rewrite_uses(&intr
->dest
.ssa
, nir_src_for_ssa(vec_instr
),
68 ralloc_parent(b
->impl
));
69 nir_instr_remove(&intr
->instr
);
73 vc4_nir_lower_output(struct vc4_compile
*c
, nir_builder
*b
,
74 nir_intrinsic_instr
*intr
)
76 nir_variable
*output_var
= NULL
;
77 foreach_list_typed(nir_variable
, var
, node
, &c
->s
->outputs
) {
78 if (var
->data
.driver_location
== intr
->const_index
[0]) {
84 unsigned semantic_name
= output_var
->data
.location
;
86 if (c
->stage
== QSTAGE_COORD
&&
87 (semantic_name
!= TGSI_SEMANTIC_POSITION
&&
88 semantic_name
!= TGSI_SEMANTIC_PSIZE
)) {
89 nir_instr_remove(&intr
->instr
);
93 /* All TGSI-to-NIR outputs are VEC4. */
94 assert(intr
->num_components
== 4);
96 nir_builder_insert_before_instr(b
, &intr
->instr
);
98 for (unsigned i
= 0; i
< intr
->num_components
; i
++) {
99 nir_intrinsic_instr
*intr_comp
=
100 nir_intrinsic_instr_create(c
->s
, nir_intrinsic_store_output
);
101 intr_comp
->num_components
= 1;
102 intr_comp
->const_index
[0] = intr
->const_index
[0] * 4 + i
;
104 assert(intr
->src
[0].is_ssa
);
105 intr_comp
->src
[0] = nir_src_for_ssa(nir_swizzle(b
,
108 nir_builder_instr_insert(b
, &intr_comp
->instr
);
111 nir_instr_remove(&intr
->instr
);
115 vc4_nir_lower_io_instr(struct vc4_compile
*c
, nir_builder
*b
,
116 struct nir_instr
*instr
)
118 if (instr
->type
!= nir_instr_type_intrinsic
)
120 nir_intrinsic_instr
*intr
= nir_instr_as_intrinsic(instr
);
122 switch (intr
->intrinsic
) {
123 case nir_intrinsic_load_input
:
124 vc4_nir_lower_input(c
, b
, intr
);
127 case nir_intrinsic_store_output
:
128 vc4_nir_lower_output(c
, b
, intr
);
137 vc4_nir_lower_io_block(nir_block
*block
, void *arg
)
139 struct vc4_compile
*c
= arg
;
140 nir_function_impl
*impl
=
141 nir_cf_node_get_function(&block
->cf_node
);
144 nir_builder_init(&b
, impl
);
146 nir_foreach_instr_safe(block
, instr
)
147 vc4_nir_lower_io_instr(c
, &b
, instr
);
153 vc4_nir_lower_io_impl(struct vc4_compile
*c
, nir_function_impl
*impl
)
155 nir_foreach_block(impl
, vc4_nir_lower_io_block
, c
);
157 nir_metadata_preserve(impl
, nir_metadata_block_index
|
158 nir_metadata_dominance
);
164 vc4_nir_lower_io(struct vc4_compile
*c
)
166 nir_foreach_overload(c
->s
, overload
) {
168 vc4_nir_lower_io_impl(c
, overload
->impl
);