/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vc4_qir.h"
#include "compiler/nir/nir_builder.h"
#include "util/u_format.h"

/**
 * Walks the NIR generated by TGSI-to-NIR to lower its io intrinsics into
 * something amenable to the VC4 architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */
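
/* For illustration only: with the rules in vc4_nir_lower_uniform() below, a
 * vec4 load_uniform at TGSI vec4 index 2 becomes four scalar loads at byte
 * offsets 2*16 + {0, 4, 8, 12} = {32, 36, 40, 44}, re-packed into a vec4 for
 * the original users.
 */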

static void
replace_intrinsic_with_vec4(nir_builder *b, nir_intrinsic_instr *intr,
                            nir_ssa_def **comps)
{
        /* Batch things back together into a vec4.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec4(b, comps[0], comps[1], comps[2], comps[3]);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vec4.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}
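
/* The unpack helpers below extract individual 8- and 16-bit lanes from a
 * 32-bit VPM read: one dword holds four bytes or two halfwords, and "chan"
 * selects the lane.
 */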

static nir_ssa_def *
vc4_nir_unpack_8i(nir_builder *b, nir_ssa_def *src,
                  unsigned chan)
{
        return nir_ubitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 8 * chan),
                                     nir_imm_int(b, 8));
}

/** Returns the 16 bit field as a sign-extended 32-bit value. */
static nir_ssa_def *
vc4_nir_unpack_16i(nir_builder *b, nir_ssa_def *src,
                   unsigned chan)
{
        return nir_ibitfield_extract(b,
                                     src,
                                     nir_imm_int(b, 16 * chan),
                                     nir_imm_int(b, 16));
}

/** Returns the 16 bit field as an unsigned 32 bit value. */
static nir_ssa_def *
vc4_nir_unpack_16u(nir_builder *b, nir_ssa_def *src,
                   unsigned chan)
{
        if (chan == 0) {
                return nir_iand(b, src, nir_imm_int(b, 0xffff));
        } else {
                return nir_ushr(b, src, nir_imm_int(b, 16));
        }
}

static nir_ssa_def *
vc4_nir_unpack_8f(nir_builder *b, nir_ssa_def *src, unsigned chan)
{
        return nir_channel(b, nir_unpack_unorm_4x8(b, src), chan);
}
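
/* A note on the signed-8-bit path below: XORing with 0x80808080 flips each
 * byte's sign bit, biasing the signed bytes into unsigned range so the unorm
 * byte unpack can be reused.  For normalized formats, the fmul/fsub then maps
 * the [0, 1] unorm result onto [-1, 1]; for non-normalized ones, the fadd of
 * -128.0 removes the bias again.
 */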

static nir_ssa_def *
vc4_nir_get_vattr_channel_vpm(struct vc4_compile *c,
                              nir_builder *b,
                              nir_ssa_def **vpm_reads,
                              uint8_t swiz,
                              const struct util_format_description *desc)
{
        const struct util_format_channel_description *chan =
                &desc->channel[swiz];
        nir_ssa_def *temp;

        if (swiz > PIPE_SWIZZLE_W) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_FLOAT) {
                return vc4_nir_get_swizzled_channel(b, vpm_reads, swiz);
        } else if (chan->size == 32 && chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                if (chan->normalized) {
                        return nir_fmul(b,
                                        nir_i2f(b, vpm_reads[swiz]),
                                        nir_imm_float(b, 1.0 / 0x7fffffff));
                } else {
                        return nir_i2f(b, vpm_reads[swiz]);
                }
        } else if (chan->size == 8 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[0];
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_ixor(b, vpm, nir_imm_int(b, 0x80808080));
                        if (chan->normalized) {
                                return nir_fsub(b, nir_fmul(b,
                                                            vc4_nir_unpack_8f(b, temp, swiz),
                                                            nir_imm_float(b, 2.0)),
                                                nir_imm_float(b, 1.0));
                        } else {
                                return nir_fadd(b,
                                                nir_i2f(b, vc4_nir_unpack_8i(b, temp, swiz)),
                                                nir_imm_float(b, -128.0));
                        }
                } else {
                        if (chan->normalized) {
                                return vc4_nir_unpack_8f(b, vpm, swiz);
                        } else {
                                return nir_i2f(b, vc4_nir_unpack_8i(b, vpm, swiz));
                        }
                }
        } else if (chan->size == 16 &&
                   (chan->type == UTIL_FORMAT_TYPE_UNSIGNED ||
                    chan->type == UTIL_FORMAT_TYPE_SIGNED)) {
                nir_ssa_def *vpm = vpm_reads[swiz / 2];

                /* Note that UNPACK_16F eats a half float, not ints, so we use
                 * UNPACK_16_I for all of these.
                 */
                if (chan->type == UTIL_FORMAT_TYPE_SIGNED) {
                        temp = nir_i2f(b, vc4_nir_unpack_16i(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 32768.0f));
                        } else {
                                return temp;
                        }
                } else {
                        temp = nir_i2f(b, vc4_nir_unpack_16u(b, vpm, swiz & 1));
                        if (chan->normalized) {
                                return nir_fmul(b, temp,
                                                nir_imm_float(b, 1 / 65535.0));
                        } else {
                                return temp;
                        }
                }
        } else {
                return NULL;
        }
}
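
/* A sizing example for the read loop below: a 4-byte R8G8B8A8 attribute needs
 * align(4, 4) / 4 = 1 VPM dword read, while a 16-byte R32G32B32A32_FLOAT
 * attribute reads all four dwords.
 */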

static void
vc4_nir_lower_vertex_attr(struct vc4_compile *c, nir_builder *b,
                          nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        int attr = nir_intrinsic_base(intr);
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        /* All TGSI-to-NIR inputs are vec4. */
        assert(intr->num_components == 4);

        /* We only accept direct inputs, and TGSI only ever gives them to us
         * with an offset value of 0.
         */
        assert(nir_src_as_const_value(intr->src[0]) &&
               nir_src_as_const_value(intr->src[0])->u32[0] == 0);

        /* Generate dword loads for the VPM values (since these intrinsics
         * may be reordered, the actual reads will be generated at the top of
         * the shader by ntq_setup_inputs()).
         */
        nir_ssa_def *vpm_reads[4];
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s,
                                                   nir_intrinsic_load_input);
                intr_comp->num_components = 1;
                nir_intrinsic_set_base(intr_comp, nir_intrinsic_base(intr));
                nir_intrinsic_set_component(intr_comp, i);
                intr_comp->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32,
                                  NULL);
                nir_builder_instr_insert(b, &intr_comp->instr);

                vpm_reads[i] = &intr_comp->dest.ssa;
        }

        bool format_warned = false;
        const struct util_format_description *desc =
                util_format_description(format);

        nir_ssa_def *dests[4];
        for (int i = 0; i < 4; i++) {
                uint8_t swiz = desc->swizzle[i];
                dests[i] = vc4_nir_get_vattr_channel_vpm(c, b, vpm_reads, swiz,
                                                         desc);
                if (!dests[i]) {
                        if (!format_warned) {
                                fprintf(stderr,
                                        "vtx element %d unsupported type: %s\n",
                                        attr, util_format_name(format));
                                format_warned = true;
                        }
                        dests[i] = nir_imm_float(b, 0.0);
                }
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static bool
is_point_sprite(struct vc4_compile *c, nir_variable *var)
{
        if (var->data.location < VARYING_SLOT_VAR0 ||
            var->data.location > VARYING_SLOT_VAR31)
                return false;

        return (c->fs_key->point_sprite_mask &
                (1 << (var->data.location - VARYING_SLOT_VAR0)));
}
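
/* In the lowering below, point sprite varyings get their Z and W components
 * pinned to 0.0 and 1.0, and a flipped (upper-left origin) point coordinate
 * has its T component rewritten as 1.0 - t.
 */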

static void
vc4_nir_lower_fs_input(struct vc4_compile *c, nir_builder *b,
                       nir_intrinsic_instr *intr)
{
        b->cursor = nir_after_instr(&intr->instr);

        if (nir_intrinsic_base(intr) >= VC4_NIR_TLB_COLOR_READ_INPUT &&
            nir_intrinsic_base(intr) < (VC4_NIR_TLB_COLOR_READ_INPUT +
                                        VC4_MAX_SAMPLES)) {
                /* This doesn't need any lowering. */
                return;
        }

        nir_variable *input_var = NULL;
        nir_foreach_variable(var, &c->s->inputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        input_var = var;
                        break;
                }
        }
        assert(input_var);

        int comp = nir_intrinsic_component(intr);

        /* Lower away point coordinates, and fix up PNTC. */
        if (is_point_sprite(c, input_var) ||
            input_var->data.location == VARYING_SLOT_PNTC) {
                assert(intr->num_components == 1);

                nir_ssa_def *result = &intr->dest.ssa;

                switch (comp) {
                case 0:
                case 1:
                        /* If we're not rendering points, we need to set a
                         * defined value for the input that would come from
                         * PNTC.
                         */
                        if (!c->fs_key->is_points)
                                result = nir_imm_float(b, 0.0);
                        break;
                case 2:
                        result = nir_imm_float(b, 0.0);
                        break;
                case 3:
                        result = nir_imm_float(b, 1.0);
                        break;
                }

                if (c->fs_key->point_coord_upper_left && comp == 1)
                        result = nir_fsub(b, nir_imm_float(b, 1.0), result);

                if (result != &intr->dest.ssa) {
                        nir_ssa_def_rewrite_uses_after(&intr->dest.ssa,
                                                       nir_src_for_ssa(result),
                                                       result->parent_instr);
                }
        }
}

static void
vc4_nir_lower_output(struct vc4_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        if (c->stage == QSTAGE_COORD &&
            output_var->data.location != VARYING_SLOT_POS &&
            output_var->data.location != VARYING_SLOT_PSIZ) {
                nir_instr_remove(&intr->instr);
                return;
        }
}
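
/* Offset math example for the pass below: a vec4 uniform at TGSI index 3
 * starts at byte 3 * 16 = 48, so its components load from bytes 48, 52, 56,
 * and 60; an indirect vec4 index in src[0] is scaled to bytes by the
 * left-shift of 4 (a multiply by 16).
 */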

static void
vc4_nir_lower_uniform(struct vc4_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        /* All TGSI-to-NIR uniform loads are vec4, but we need byte offsets
         * in the backend.
         */
        if (intr->num_components == 1)
                return;
        assert(intr->num_components == 4);

        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vec4. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32,
                                  NULL);

                /* Convert the uniform offset to bytes.  If it happens
                 * to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec4(b, intr, dests);
}

static void
vc4_nir_lower_io_instr(struct vc4_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                if (c->stage == QSTAGE_FRAG)
                        vc4_nir_lower_fs_input(c, b, intr);
                else
                        vc4_nir_lower_vertex_attr(c, b, intr);
                break;

        case nir_intrinsic_store_output:
                vc4_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                vc4_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}

static bool
vc4_nir_lower_io_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        vc4_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}

void
vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        vc4_nir_lower_io_impl(c, function->impl);
        }
}