[mesa.git] src/gallium/drivers/vc4/vc4_nir_lower_io.c
/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "tgsi/tgsi_info.h"
#include "glsl/nir/nir_builder.h"
/**
 * Walks the NIR generated by TGSI-to-NIR to lower its I/O intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits inputs and outputs into scalars, and drops any
 * non-position outputs in coordinate shaders.
 */

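/*
 * Illustrative sketch only (the NIR print syntax and SSA numbers here are
 * assumptions, not generated output): a vec4 input load such as
 *
 *     vec4 ssa_1 = intrinsic load_input () () (0)
 *
 * is rewritten into four scalar loads at driver locations 0..3, plus a vec4
 * that regathers the components for the original users:
 *
 *     vec1 ssa_2 = intrinsic load_input () () (0)
 *     vec1 ssa_3 = intrinsic load_input () () (1)
 *     vec1 ssa_4 = intrinsic load_input () () (2)
 *     vec1 ssa_5 = intrinsic load_input () () (3)
 *     vec4 ssa_6 = vec4 ssa_2, ssa_3, ssa_4, ssa_5
 */
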
static void
vc4_nir_lower_input(struct vc4_compile *c, nir_builder *b,
                    nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        /* Generate scalar loads equivalent to the original VEC4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                dests[i] = &intr_comp->dest.ssa;
        }

        /* Batch things back together into a vec4. This will get split by the
         * later ALU scalarization pass.
         */
        nir_ssa_def *vec_instr = nir_vec4(b, dests[0], dests[1],
                                          dests[2], dests[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec_instr),
                                 ralloc_parent(b->impl));
        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        /* Find the output variable this store refers to, so we can look at
         * its TGSI semantic.
         */
        nir_variable *output_var = NULL;
        foreach_list_typed(nir_variable, var, node, &c->s->outputs) {
                if (var->data.driver_location == intr->const_index[0]) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);
        unsigned semantic_name = output_var->data.location;

        /* Coordinate shaders only need the position (and point size), so
         * drop stores to any other output.
         */
        if (c->stage == QSTAGE_COORD &&
            (semantic_name != TGSI_SEMANTIC_POSITION &&
             semantic_name != TGSI_SEMANTIC_PSIZE)) {
                nir_instr_remove(&intr->instr);
                return;
        }

        /* All TGSI-to-NIR outputs are VEC4. */
        assert(intr->num_components == 4);

        nir_builder_insert_before_instr(b, &intr->instr);

        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, nir_intrinsic_store_output);
                intr_comp->num_components = 1;
                intr_comp->const_index[0] = intr->const_index[0] * 4 + i;

                assert(intr->src[0].is_ssa);
                intr_comp->src[0] = nir_src_for_ssa(nir_swizzle(b,
                                                                intr->src[0].ssa,
                                                                &i, 1, false));
                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        nir_instr_remove(&intr->instr);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                vc4_nir_lower_input(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        default:
                break;
        }
}

static bool
vc4_nir_lower_io_block(nir_block *block, void *arg)
{
        struct vc4_compile *c = arg;
        nir_function_impl *impl =
                nir_cf_node_get_function(&block->cf_node);

        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_instr_safe(block, instr)
                vc4_nir_lower_io_instr(c, &b, instr);

        return true;
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_foreach_block(impl, vc4_nir_lower_io_block, c);

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(struct vc4_compile *c)
{
        nir_foreach_overload(c->s, overload) {
                if (overload->impl)
                        vc4_nir_lower_io_impl(c, overload->impl);
        }
}
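
/*
 * Usage sketch (an assumption about the surrounding driver code, not part of
 * this file): the pass is expected to run on c->s right after TGSI-to-NIR,
 * with a later ALU scalarization pass splitting the vec4s it rebuilds.  The
 * function name example_lower_shader() below is hypothetical.
 */
#if 0
static void
example_lower_shader(struct vc4_compile *c)
{
        /* c->s is assumed to already hold the nir_shader produced by
         * TGSI-to-NIR.
         */
        vc4_nir_lower_io(c);

        /* The vec4s reconstructed by the pass above are split again by ALU
         * scalarization (hypothetical ordering).
         */
        nir_lower_alu_to_scalar(c->s);
}
#endif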