v3d/compiler: handle compact varyings
[mesa.git] / src / broadcom / compiler / v3d_nir_lower_io.c
1 /*
2 * Copyright © 2015 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "compiler/v3d_compiler.h"
25 #include "compiler/nir/nir_builder.h"
26
27 /**
28 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its io
29 * intrinsics into something amenable to the V3D architecture.
30 *
31 * Most of the work is turning the VS's store_output intrinsics from working
32 * on a base representing the gallium-level vec4 driver_location to an offset
33 * within the VPM, and emitting the header that's read by the fixed function
34 * hardware between the VS and FS.
35 *
36 * We also adjust the offsets on uniform loads to be in bytes, since that's
37 * what we need for indirect addressing with general TMU access.
38 */
39
/* Per-shader state for this lowering pass: the VPM layout computed up front
 * plus bookkeeping accumulated while walking the shader.
 *
 * All *_vpm_offset fields are offsets in 32-bit VPM slots; -1 means that
 * output is not present in the layout for this shader.
 */
struct v3d_nir_lower_io_state {
        int pos_vpm_offset;        /* Raw clip-space position (4 slots) */
        int vp_vpm_offset;         /* Screen-space viewport X/Y (2 slots) */
        int zs_vpm_offset;         /* Viewport-transformed Z (1 slot) */
        int rcp_wc_vpm_offset;     /* 1/Wc (1 slot) */
        int psiz_vpm_offset;       /* Point size (1 slot) */
        int varyings_vpm_offset;   /* Start of the generic varyings */

        /* Geometry shader state */
        struct {
                /* VPM offset for the current vertex data output */
                nir_variable *output_offset_var;
                /* VPM offset for the current vertex header */
                nir_variable *header_offset_var;
                /* VPM header for the current vertex */
                nir_variable *header_var;

                /* Size of the complete VPM output header */
                uint32_t output_header_size;
                /* Size of the output data for a single vertex */
                uint32_t output_vertex_data_size;
        } gs;

        /* One bit per varying slot actually written by the shader, so we can
         * zero-fill FS-consumed varyings the shader never stored.
         */
        BITSET_WORD varyings_stored[BITSET_WORDS(V3D_MAX_ANY_STAGE_INPUTS)];

        /* The four components of gl_Position, captured when the shader
         * stores them, for building the fixed-function outputs later.
         */
        nir_ssa_def *pos[4];
};
67
/* Forward declaration: also called from v3d_nir_lower_emit_vertex() to flush
 * the fixed-function outputs for each GS vertex.
 */
static void
v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
                            struct v3d_nir_lower_io_state *state);
71
/* Emits a single-component store_output of @chan at VPM slot @base (+ the
 * runtime @offset, for GS where the slot depends on the vertex index).
 *
 * NOTE(review): store_output has no destination, and the dest-init here reads
 * intr->dest.ssa.bit_size from the just-created (zero-filled) instruction —
 * looks like dead/leftover code; confirm against nir_intrinsic_instr_create
 * before removing.
 */
static void
v3d_nir_store_output(nir_builder *b, int base, nir_ssa_def *offset,
                     nir_ssa_def *chan)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_store_output);
        nir_ssa_dest_init(&intr->instr, &intr->dest,
                          1, intr->dest.ssa.bit_size, NULL);
        intr->num_components = 1;

        intr->src[0] = nir_src_for_ssa(chan);
        if (offset) {
                /* When generating the VIR instruction, the base and the offset
                 * are just going to get added together with an ADD instruction
                 * so we might as well do the add here at the NIR level instead
                 * and let the constant folding do its magic.
                 */
                intr->src[1] = nir_src_for_ssa(nir_iadd_imm(b, offset, base));
                base = 0;
        } else {
                intr->src[1] = nir_src_for_ssa(nir_imm_int(b, 0));
        }

        nir_intrinsic_set_base(intr, base);
        nir_intrinsic_set_write_mask(intr, 0x1);
        nir_intrinsic_set_component(intr, 0);

        nir_builder_instr_insert(b, &intr->instr);
}
102
103 /* Convert the uniform offset to bytes. If it happens to be a constant,
104 * constant-folding will clean up the shift for us.
105 */
106 static void
107 v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
108 nir_intrinsic_instr *intr)
109 {
110 b->cursor = nir_before_instr(&intr->instr);
111
112 nir_intrinsic_set_base(intr, nir_intrinsic_base(intr) * 16);
113
114 nir_instr_rewrite_src(&intr->instr,
115 &intr->src[0],
116 nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
117 nir_imm_int(b, 4))));
118 }
119
120 static int
121 v3d_varying_slot_vpm_offset(struct v3d_compile *c, nir_variable *var, int chan)
122 {
123 int component = var->data.location_frac + chan;
124
125 uint32_t num_used_outputs = 0;
126 struct v3d_varying_slot *used_outputs = NULL;
127 switch (c->s->info.stage) {
128 case MESA_SHADER_VERTEX:
129 num_used_outputs = c->vs_key->num_used_outputs;
130 used_outputs = c->vs_key->used_outputs;
131 break;
132 case MESA_SHADER_GEOMETRY:
133 num_used_outputs = c->gs_key->num_used_outputs;
134 used_outputs = c->gs_key->used_outputs;
135 break;
136 default:
137 unreachable("Unsupported shader stage");
138 }
139
140 for (int i = 0; i < num_used_outputs; i++) {
141 struct v3d_varying_slot slot = used_outputs[i];
142
143 if (v3d_slot_get_slot(slot) == var->data.location &&
144 v3d_slot_get_component(slot) == component) {
145 return i;
146 }
147 }
148
149 return -1;
150 }
151
/* Lowers a store_output(gallium driver location) to a series of store_outputs
 * with a driver_location equal to the offset in the VPM.
 *
 * For geometry shaders we need to emit multiple vertices so the VPM offsets
 * need to be computed in the shader code based on the current vertex index.
 */
static void
v3d_nir_lower_vpm_output(struct v3d_compile *c, nir_builder *b,
                         nir_intrinsic_instr *intr,
                         struct v3d_nir_lower_io_state *state)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* If this is a geometry shader we need to emit our outputs
         * to the current vertex offset in the VPM.
         */
        nir_ssa_def *offset_reg =
                c->s->info.stage == MESA_SHADER_GEOMETRY ?
                        nir_load_var(b, state->gs.output_offset_var) : NULL;

        int start_comp = nir_intrinsic_component(intr);
        nir_ssa_def *src = nir_ssa_for_src(b, intr->src[0],
                                           intr->num_components);
        /* Find the output variable this store belongs to by matching its
         * driver_location and the component range it covers.  Compact
         * varyings (e.g. clip distances) size by array length instead of
         * vector components.
         */
        nir_variable *var = NULL;
        nir_foreach_variable(scan_var, &c->s->outputs) {
                int components = scan_var->data.compact ?
                        glsl_get_length(scan_var->type) :
                        glsl_get_components(scan_var->type);
                if (scan_var->data.driver_location != nir_intrinsic_base(intr) ||
                    start_comp < scan_var->data.location_frac ||
                    start_comp >= scan_var->data.location_frac + components) {
                        continue;
                }
                var = scan_var;
        }
        assert(var);

        /* Save off the components of the position for the setup of VPM inputs
         * read by fixed function HW.
         */
        if (var->data.location == VARYING_SLOT_POS) {
                for (int i = 0; i < intr->num_components; i++) {
                        state->pos[start_comp + i] = nir_channel(b, src, i);
                }
        }

        /* Write point size straight to its slot in the FF header, if the
         * layout allocated one.
         */
        if (var->data.location == VARYING_SLOT_PSIZ &&
            state->psiz_vpm_offset != -1) {
                v3d_nir_store_output(b, state->psiz_vpm_offset, offset_reg, src);
        }

        /* gl_Layer lives in bits 16-23 of the GS per-vertex VPM header. */
        if (var->data.location == VARYING_SLOT_LAYER) {
                assert(c->s->info.stage == MESA_SHADER_GEOMETRY);
                nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
                header = nir_iand(b, header, nir_imm_int(b, 0xff00ffff));

                /* From the GLES 3.2 spec:
                 *
                 *    "When fragments are written to a layered framebuffer, the
                 *     fragment’s layer number selects an image from the array
                 *     of images at each attachment (...). If the fragment’s
                 *     layer number is negative, or greater than or equal to
                 *     the minimum number of layers of any attachment, the
                 *     effects of the fragment on the framebuffer contents are
                 *     undefined."
                 *
                 * This suggests we can just ignore that situation, however,
                 * for V3D an out-of-bounds layer index means that the binner
                 * might do out-of-bounds writes access to the tile state. The
                 * simulator has an assert to catch this, so we play safe here
                 * and we make sure that doesn't happen by setting gl_Layer
                 * to 0 in that case (we always allocate tile state for at
                 * least one layer).
                 */
                nir_intrinsic_instr *load =
                        nir_intrinsic_instr_create(b->shader,
                                                   nir_intrinsic_load_fb_layers_v3d);
                nir_ssa_dest_init(&load->instr, &load->dest, 1, 32, NULL);
                nir_builder_instr_insert(b, &load->instr);
                nir_ssa_def *fb_layers = &load->dest.ssa;

                nir_ssa_def *cond = nir_ige(b, src, fb_layers);
                nir_ssa_def *layer_id =
                        nir_bcsel(b, cond,
                                  nir_imm_int(b, 0),
                                  nir_ishl(b, src, nir_imm_int(b, 16)));
                header = nir_ior(b, header, layer_id);
                nir_store_var(b, state->gs.header_var, header, 0x1);
        }

        /* Scalarize outputs if it hasn't happened already, since we want to
         * schedule each VPM write individually. We can skip any output
         * components not read by the FS.
         */
        for (int i = 0; i < intr->num_components; i++) {
                int vpm_offset =
                        v3d_varying_slot_vpm_offset(c, var,
                                                    i +
                                                    start_comp -
                                                    var->data.location_frac);

                if (vpm_offset == -1)
                        continue;

                /* Compact varyings: src[1] holds the constant array index,
                 * each array element covering 4 components.
                 */
                if (var->data.compact)
                        vpm_offset += nir_src_as_uint(intr->src[1]) * 4;

                BITSET_SET(state->varyings_stored, vpm_offset);

                v3d_nir_store_output(b, state->varyings_vpm_offset + vpm_offset,
                                     offset_reg, nir_channel(b, src, i));
        }

        nir_instr_remove(&intr->instr);
}
268
269 static inline void
270 reset_gs_header(nir_builder *b, struct v3d_nir_lower_io_state *state)
271 {
272 const uint8_t NEW_PRIMITIVE_OFFSET = 0;
273 const uint8_t VERTEX_DATA_LENGTH_OFFSET = 8;
274
275 uint32_t vertex_data_size = state->gs.output_vertex_data_size;
276 assert((vertex_data_size & 0xffffff00) == 0);
277
278 uint32_t header;
279 header = 1 << NEW_PRIMITIVE_OFFSET;
280 header |= vertex_data_size << VERTEX_DATA_LENGTH_OFFSET;
281 nir_store_var(b, state->gs.header_var, nir_imm_int(b, header), 0x1);
282 }
283
/* Lowers emit_vertex: flushes the fixed-function outputs and the per-vertex
 * header for the current vertex, then advances the GS VPM offsets for the
 * next one.
 */
static void
v3d_nir_lower_emit_vertex(struct v3d_compile *c, nir_builder *b,
                          nir_intrinsic_instr *instr,
                          struct v3d_nir_lower_io_state *state)
{
        b->cursor = nir_before_instr(&instr->instr);

        nir_ssa_def *header = nir_load_var(b, state->gs.header_var);
        nir_ssa_def *header_offset = nir_load_var(b, state->gs.header_offset_var);
        nir_ssa_def *output_offset = nir_load_var(b, state->gs.output_offset_var);

        /* Emit fixed function outputs */
        v3d_nir_emit_ff_vpm_outputs(c, b, state);

        /* Emit vertex header */
        v3d_nir_store_output(b, 0, header_offset, header);

        /* Update VPM offset for next vertex output data and header */
        output_offset =
                nir_iadd(b, output_offset,
                         nir_imm_int(b, state->gs.output_vertex_data_size));

        header_offset = nir_iadd(b, header_offset, nir_imm_int(b, 1));

        /* Reset the New Primitive bit: only the first vertex after an
         * end_primitive (or the prolog) starts a new primitive.
         */
        header = nir_iand(b, header, nir_imm_int(b, 0xfffffffe));

        nir_store_var(b, state->gs.output_offset_var, output_offset, 0x1);
        nir_store_var(b, state->gs.header_offset_var, header_offset, 0x1);
        nir_store_var(b, state->gs.header_var, header, 0x1);

        nir_instr_remove(&instr->instr);
}
317
/* Lowers end_primitive: re-arm the per-vertex header so the next emitted
 * vertex carries the New Primitive bit again.
 */
static void
v3d_nir_lower_end_primitive(struct v3d_compile *c, nir_builder *b,
                            nir_intrinsic_instr *instr,
                            struct v3d_nir_lower_io_state *state)
{
        assert(state->gs.header_var);
        b->cursor = nir_before_instr(&instr->instr);
        reset_gs_header(b, state);

        nir_instr_remove(&instr->instr);
}
329
330 static void
331 v3d_nir_lower_io_instr(struct v3d_compile *c, nir_builder *b,
332 struct nir_instr *instr,
333 struct v3d_nir_lower_io_state *state)
334 {
335 if (instr->type != nir_instr_type_intrinsic)
336 return;
337 nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
338
339 switch (intr->intrinsic) {
340 case nir_intrinsic_load_uniform:
341 v3d_nir_lower_uniform(c, b, intr);
342 break;
343
344 case nir_intrinsic_store_output:
345 if (c->s->info.stage == MESA_SHADER_VERTEX ||
346 c->s->info.stage == MESA_SHADER_GEOMETRY) {
347 v3d_nir_lower_vpm_output(c, b, intr, state);
348 }
349 break;
350
351 case nir_intrinsic_emit_vertex:
352 v3d_nir_lower_emit_vertex(c, b, intr, state);
353 break;
354
355 case nir_intrinsic_end_primitive:
356 v3d_nir_lower_end_primitive(c, b, intr, state);
357 break;
358
359 default:
360 break;
361 }
362 }
363
364 /* Remap the output var's .driver_location. This is purely for
365 * nir_print_shader() so that store_output can map back to a variable name.
366 */
367 static void
368 v3d_nir_lower_io_update_output_var_base(struct v3d_compile *c,
369 struct v3d_nir_lower_io_state *state)
370 {
371 nir_foreach_variable_safe(var, &c->s->outputs) {
372 if (var->data.location == VARYING_SLOT_POS &&
373 state->pos_vpm_offset != -1) {
374 var->data.driver_location = state->pos_vpm_offset;
375 continue;
376 }
377
378 if (var->data.location == VARYING_SLOT_PSIZ &&
379 state->psiz_vpm_offset != -1) {
380 var->data.driver_location = state->psiz_vpm_offset;
381 continue;
382 }
383
384 int vpm_offset = v3d_varying_slot_vpm_offset(c, var, 0);
385 if (vpm_offset != -1) {
386 var->data.driver_location =
387 state->varyings_vpm_offset + vpm_offset;
388 } else {
389 /* If we couldn't find a mapping for the var, delete
390 * it so that its old .driver_location doesn't confuse
391 * nir_print_shader().
392 */
393 exec_node_remove(&var->node);
394 }
395 }
396 }
397
398 static void
399 v3d_nir_setup_vpm_layout_vs(struct v3d_compile *c,
400 struct v3d_nir_lower_io_state *state)
401 {
402 uint32_t vpm_offset = 0;
403
404 state->pos_vpm_offset = -1;
405 state->vp_vpm_offset = -1;
406 state->zs_vpm_offset = -1;
407 state->rcp_wc_vpm_offset = -1;
408 state->psiz_vpm_offset = -1;
409
410 bool needs_ff_outputs = c->vs_key->base.is_last_geometry_stage;
411 if (needs_ff_outputs) {
412 if (c->vs_key->is_coord) {
413 state->pos_vpm_offset = vpm_offset;
414 vpm_offset += 4;
415 }
416
417 state->vp_vpm_offset = vpm_offset;
418 vpm_offset += 2;
419
420 if (!c->vs_key->is_coord) {
421 state->zs_vpm_offset = vpm_offset++;
422 state->rcp_wc_vpm_offset = vpm_offset++;
423 }
424
425 if (c->vs_key->per_vertex_point_size)
426 state->psiz_vpm_offset = vpm_offset++;
427 }
428
429 state->varyings_vpm_offset = vpm_offset;
430
431 c->vpm_output_size = MAX2(1, vpm_offset + c->vs_key->num_used_outputs);
432 }
433
/* Computes the GS VPM output layout: a global header, one header slot per
 * output vertex, and then the per-vertex data layout (fixed-function outputs
 * followed by the FS-consumed varyings).
 */
static void
v3d_nir_setup_vpm_layout_gs(struct v3d_compile *c,
                            struct v3d_nir_lower_io_state *state)
{
        /* 1 header slot for number of output vertices */
        uint32_t vpm_offset = 1;

        /* 1 header slot per output vertex */
        const uint32_t num_vertices = c->s->info.gs.vertices_out;
        vpm_offset += num_vertices;

        state->gs.output_header_size = vpm_offset;

        /* Vertex data: here we only compute offsets into a generic vertex data
         * elements. When it is time to actually write a particular vertex to
         * the VPM, we will add the offset for that vertex into the VPM output
         * to these offsets.
         *
         * If geometry shaders are present, they are always the last shader
         * stage before rasterization, so we always emit fixed function outputs.
         */
        vpm_offset = 0;
        if (c->gs_key->is_coord) {
                state->pos_vpm_offset = vpm_offset;
                vpm_offset += 4;
        } else {
                state->pos_vpm_offset = -1;
        }

        state->vp_vpm_offset = vpm_offset;
        vpm_offset += 2;

        if (!c->gs_key->is_coord) {
                state->zs_vpm_offset = vpm_offset++;
                state->rcp_wc_vpm_offset = vpm_offset++;
        } else {
                state->zs_vpm_offset = -1;
                state->rcp_wc_vpm_offset = -1;
        }

        /* Mesa enables OES_geometry_shader_point_size automatically with
         * OES_geometry_shader so we always need to handle point size
         * writes if present.
         */
        if (c->gs_key->per_vertex_point_size)
                state->psiz_vpm_offset = vpm_offset++;

        state->varyings_vpm_offset = vpm_offset;

        state->gs.output_vertex_data_size =
                state->varyings_vpm_offset + c->gs_key->num_used_outputs;

        c->vpm_output_size =
                state->gs.output_header_size +
                state->gs.output_vertex_data_size * num_vertices;
}
490
/* Emits the fixed-function VPM outputs (raw position, viewport X/Y, Z, 1/Wc,
 * and zero-fill for unwritten varyings) from the position components captured
 * in state->pos.
 */
static void
v3d_nir_emit_ff_vpm_outputs(struct v3d_compile *c, nir_builder *b,
                            struct v3d_nir_lower_io_state *state)
{
        /* If this is a geometry shader we need to emit our fixed function
         * outputs to the current vertex offset in the VPM.
         */
        nir_ssa_def *offset_reg =
                c->s->info.stage == MESA_SHADER_GEOMETRY ?
                        nir_load_var(b, state->gs.output_offset_var) : NULL;

        /* Position components the shader never wrote stay undefined. */
        for (int i = 0; i < 4; i++) {
                if (!state->pos[i])
                        state->pos[i] = nir_ssa_undef(b, 1, 32);
        }

        nir_ssa_def *rcp_wc = nir_frcp(b, state->pos[3]);

        if (state->pos_vpm_offset != -1) {
                for (int i = 0; i < 4; i++) {
                        v3d_nir_store_output(b, state->pos_vpm_offset + i,
                                             offset_reg, state->pos[i]);
                }
        }

        /* Screen-space X/Y: scale by the viewport, perspective-divide, and
         * round to the integer format the hardware expects.
         */
        if (state->vp_vpm_offset != -1) {
                for (int i = 0; i < 2; i++) {
                        nir_ssa_def *pos;
                        nir_ssa_def *scale;
                        pos = state->pos[i];
                        if (i == 0)
                                scale = nir_load_viewport_x_scale(b);
                        else
                                scale = nir_load_viewport_y_scale(b);
                        pos = nir_fmul(b, pos, scale);
                        pos = nir_fmul(b, pos, rcp_wc);
                        pos = nir_f2i32(b, nir_fround_even(b, pos));
                        v3d_nir_store_output(b, state->vp_vpm_offset + i,
                                             offset_reg, pos);
                }
        }

        /* Viewport-transformed Z: z * scale / Wc + offset. */
        if (state->zs_vpm_offset != -1) {
                nir_ssa_def *z = state->pos[2];
                z = nir_fmul(b, z, nir_load_viewport_z_scale(b));
                z = nir_fmul(b, z, rcp_wc);
                z = nir_fadd(b, z, nir_load_viewport_z_offset(b));
                v3d_nir_store_output(b, state->zs_vpm_offset, offset_reg, z);
        }

        if (state->rcp_wc_vpm_offset != -1) {
                v3d_nir_store_output(b, state->rcp_wc_vpm_offset,
                                     offset_reg, rcp_wc);
        }

        /* Store 0 to varyings requested by the FS but not stored by the
         * previous stage. This should be undefined behavior, but
         * glsl-routing seems to rely on it.
         */
        uint32_t num_used_outputs;
        switch (c->s->info.stage) {
        case MESA_SHADER_VERTEX:
                num_used_outputs = c->vs_key->num_used_outputs;
                break;
        case MESA_SHADER_GEOMETRY:
                num_used_outputs = c->gs_key->num_used_outputs;
                break;
        default:
                unreachable("Unsupported shader stage");
        }

        for (int i = 0; i < num_used_outputs; i++) {
                if (!BITSET_TEST(state->varyings_stored, i)) {
                        v3d_nir_store_output(b, state->varyings_vpm_offset + i,
                                             offset_reg, nir_imm_int(b, 0));
                }
        }
}
569
570 static void
571 emit_gs_prolog(struct v3d_compile *c, nir_builder *b,
572 nir_function_impl *impl,
573 struct v3d_nir_lower_io_state *state)
574 {
575 nir_block *first = nir_start_block(impl);
576 b->cursor = nir_before_block(first);
577
578 const struct glsl_type *uint_type = glsl_uint_type();
579
580 assert(!state->gs.output_offset_var);
581 state->gs.output_offset_var =
582 nir_local_variable_create(impl, uint_type, "output_offset");
583 nir_store_var(b, state->gs.output_offset_var,
584 nir_imm_int(b, state->gs.output_header_size), 0x1);
585
586 assert(!state->gs.header_offset_var);
587 state->gs.header_offset_var =
588 nir_local_variable_create(impl, uint_type, "header_offset");
589 nir_store_var(b, state->gs.header_offset_var, nir_imm_int(b, 1), 0x1);
590
591 assert(!state->gs.header_var);
592 state->gs.header_var =
593 nir_local_variable_create(impl, uint_type, "header");
594 reset_gs_header(b, state);
595 }
596
/* Writes the global VPM output header word (slot 0): the header size plus the
 * final emitted-vertex count.  Emitted at the end of the shader, once the
 * count is known.
 */
static void
emit_gs_vpm_output_header_prolog(struct v3d_compile *c, nir_builder *b,
                                 struct v3d_nir_lower_io_state *state)
{
        const uint8_t VERTEX_COUNT_OFFSET = 16;

        /* Our GS header has 1 generic header slot (at VPM offset 0) and then
         * one slot per output vertex after it. This means we don't need to
         * have a variable just to keep track of the number of vertices we
         * emitted and instead we can just compute it here from the header
         * offset variable by removing the one generic header slot that always
         * goes at the beginning of our header.
         */
        nir_ssa_def *header_offset =
                nir_load_var(b, state->gs.header_offset_var);
        nir_ssa_def *vertex_count =
                nir_isub(b, header_offset, nir_imm_int(b, 1));
        nir_ssa_def *header =
                nir_ior(b, nir_imm_int(b, state->gs.output_header_size),
                        nir_ishl(b, vertex_count,
                                 nir_imm_int(b, VERTEX_COUNT_OFFSET)));

        v3d_nir_store_output(b, 0, NULL, header);
}
621
/* Pass entry point: sets up the stage's VPM layout, lowers all I/O
 * intrinsics, appends the fixed-function / GS header epilogue, and remaps
 * output variable locations for debug printing.
 */
void
v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c)
{
        struct v3d_nir_lower_io_state state = { 0 };

        /* Set up the layout of the VPM outputs. */
        switch (s->info.stage) {
        case MESA_SHADER_VERTEX:
                v3d_nir_setup_vpm_layout_vs(c, &state);
                break;
        case MESA_SHADER_GEOMETRY:
                v3d_nir_setup_vpm_layout_gs(c, &state);
                break;
        case MESA_SHADER_FRAGMENT:
        case MESA_SHADER_COMPUTE:
                /* Only the uniform lowering applies; no VPM layout needed. */
                break;
        default:
                unreachable("Unsupported shader stage");
        }

        nir_foreach_function(function, s) {
                if (function->impl) {
                        nir_builder b;
                        nir_builder_init(&b, function->impl);

                        if (c->s->info.stage == MESA_SHADER_GEOMETRY)
                                emit_gs_prolog(c, &b, function->impl, &state);

                        nir_foreach_block(block, function->impl) {
                                nir_foreach_instr_safe(instr, block)
                                        v3d_nir_lower_io_instr(c, &b, instr,
                                                               &state);
                        }

                        /* Epilogue at the end of the last block: the VS
                         * flushes its FF outputs here; the GS writes its
                         * global VPM header (slot 0) now that the final
                         * vertex count is known.
                         */
                        nir_block *last = nir_impl_last_block(function->impl);
                        b.cursor = nir_after_block(last);
                        if (s->info.stage == MESA_SHADER_VERTEX) {
                                v3d_nir_emit_ff_vpm_outputs(c, &b, &state);
                        } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
                                emit_gs_vpm_output_header_prolog(c, &b, &state);
                        }

                        nir_metadata_preserve(function->impl,
                                              nir_metadata_block_index |
                                              nir_metadata_dominance);
                }
        }

        if (s->info.stage == MESA_SHADER_VERTEX ||
            s->info.stage == MESA_SHADER_GEOMETRY) {
                v3d_nir_lower_io_update_output_var_base(c, &state);
        }
}