vk/image: Remove unneeded data from anv_buffer_view
[mesa.git] / src/vulkan/compiler.cpp
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>

#include "private.h"

#include <brw_context.h>
#include <brw_wm.h> /* brw_new_shader_program is here */
#include <brw_nir.h>

#include <brw_vs.h>
#include <brw_gs.h>
#include <brw_cs.h>

#include <mesa/main/shaderobj.h>
#include <mesa/main/fbobject.h>
#include <mesa/main/context.h>
#include <mesa/program/program.h>
#include <glsl/program.h>

/* XXX: We need this to keep symbols in nir.h from conflicting with the
 * generated GEN command packing headers. We need to fix *both* to not
 * define something as generic as LOAD.
 */
#undef LOAD

#include <glsl/nir/nir_spirv.h>

#define SPIR_V_MAGIC_NUMBER 0x07230203

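/* printf-style fatal-error helper: if `cond` is true, print the formatted
 * message to stderr and exit. The compiler below uses this to turn compile
 * and link failures into hard process failures.
 */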
static void
fail_if(int cond, const char *format, ...)
{
   va_list args;

   if (!cond)
      return;

   va_start(args, format);
   vfprintf(stderr, format, args);
   va_end(args);

   exit(1);
}

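/* Build the flat binding-table index map for one shader stage: walk every
 * descriptor set in the pipeline layout and hand each of the stage's
 * surfaces the next consecutive binding-table slot. Fragment shaders bias
 * the slots by MAX_RTS so that render targets can occupy the first entries.
 */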
static VkResult
set_binding_table_layout(struct brw_stage_prog_data *prog_data,
                         struct anv_pipeline *pipeline, uint32_t stage)
{
   uint32_t bias, count, k, *map;
   struct anv_pipeline_layout *layout = pipeline->layout;

   /* No layout is valid for shaders that don't bind any resources. */
   if (pipeline->layout == NULL)
      return VK_SUCCESS;

   if (stage == VK_SHADER_STAGE_FRAGMENT)
      bias = MAX_RTS;
   else
      bias = 0;

   count = layout->stage[stage].surface_count;
   prog_data->map_entries =
      (uint32_t *) malloc(count * sizeof(prog_data->map_entries[0]));
   if (prog_data->map_entries == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   k = bias;
   map = prog_data->map_entries;
   for (uint32_t i = 0; i < layout->num_sets; i++) {
      prog_data->bind_map[i].index = map;
      for (uint32_t j = 0; j < layout->set[i].layout->stage[stage].surface_count; j++)
         *map++ = k++;

      prog_data->bind_map[i].index_count =
         layout->set[i].layout->stage[stage].surface_count;
   }

   return VK_SUCCESS;
}

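/* Fill out the VS program key. This appears adapted from the GL driver's
 * brw_vs_populate_key(), but reads from the mostly-default context state
 * that anv_compiler_create() sets up rather than from live GL state.
 */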
static void
brw_vs_populate_key(struct brw_context *brw,
                    struct brw_vertex_program *vp,
                    struct brw_vs_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_VERTEX_PROGRAM */
   struct gl_program *prog = (struct gl_program *) vp;

   memset(key, 0, sizeof(*key));

   /* Just upload the program verbatim for now. Always send it all
    * the inputs it asks for, whether they are varying or not.
    */
   key->base.program_string_id = vp->id;
   brw_setup_vue_key_clip_info(brw, &key->base,
                               vp->program.Base.UsesClipDistanceOut);

   /* _NEW_POLYGON */
   if (brw->gen < 6) {
      key->copy_edgeflag = (ctx->Polygon.FrontMode != GL_FILL ||
                            ctx->Polygon.BackMode != GL_FILL);
   }

   if (prog->OutputsWritten & (VARYING_BIT_COL0 | VARYING_BIT_COL1 |
                               VARYING_BIT_BFC0 | VARYING_BIT_BFC1)) {
      /* _NEW_LIGHT | _NEW_BUFFERS */
      key->clamp_vertex_color = ctx->Light._ClampVertexColor;
   }

   /* _NEW_POINT */
   if (brw->gen < 6 && ctx->Point.PointSprite) {
      for (int i = 0; i < 8; i++) {
         if (ctx->Point.CoordReplace[i])
            key->point_coord_replace |= (1 << i);
      }
   }

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->vs.base.sampler_count,
                                      &key->base.tex);
}

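/* Compile the vertex stage. This follows the GL driver's do_vs_prog(), but
 * stores the compiled kernel in the pipeline's program stream instead of
 * the GL program cache.
 */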
static bool
really_do_vs_prog(struct brw_context *brw,
                  struct gl_shader_program *prog,
                  struct brw_vertex_program *vp,
                  struct brw_vs_prog_key *key, struct anv_pipeline *pipeline)
{
   GLuint program_size;
   const GLuint *program;
   struct brw_vs_compile c;
   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
   struct brw_stage_prog_data *stage_prog_data = &prog_data->base.base;
   void *mem_ctx;
   struct gl_shader *vs = NULL;

   if (prog)
      vs = prog->_LinkedShaders[MESA_SHADER_VERTEX];

   memset(&c, 0, sizeof(c));
   memcpy(&c.key, key, sizeof(*key));
   memset(prog_data, 0, sizeof(*prog_data));

   mem_ctx = ralloc_context(NULL);

   c.vp = vp;

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (vs) {
      /* We add padding around uniform values below vec4 size, with the worst
       * case being a float value that gets blown up to a vec4, so be
       * conservative here.
       */
      param_count = vs->num_uniform_components * 4;
   } else {
      param_count = vp->program.Base.Parameters->NumParameters * 4;
   }
   /* vec4_visitor::setup_uniform_clipplane_values() also uploads user clip
    * planes as uniforms.
    */
   param_count += c.key.base.nr_userclip_plane_consts * 4;

   /* Setting nr_params here NOT to the size of the param and pull_param
    * arrays, but to the number of uniform components vec4_visitor
    * needs. vec4_visitor::setup_uniforms() will set it back to a proper value.
    */
   stage_prog_data->nr_params = ALIGN(param_count, 4) / 4;
   if (vs) {
      stage_prog_data->nr_params += vs->num_samplers;
   }

   GLbitfield64 outputs_written = vp->program.Base.OutputsWritten;
   prog_data->inputs_read = vp->program.Base.InputsRead;

   if (c.key.copy_edgeflag) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_EDGE);
      prog_data->inputs_read |= VERT_BIT_EDGEFLAG;
   }

   if (brw->gen < 6) {
      /* Put dummy slots into the VUE for the SF to put the replaced
       * point sprite coords in. We shouldn't need these dummy slots,
       * which take up precious URB space, but it would mean that the SF
       * doesn't get nice aligned pairs of input coords into output
       * coords, which would be a pain to handle.
       */
      for (int i = 0; i < 8; i++) {
         if (c.key.point_coord_replace & (1 << i))
            outputs_written |= BITFIELD64_BIT(VARYING_SLOT_TEX0 + i);
      }

      /* If back colors are written, allocate slots for front colors too. */
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC0))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL0);
      if (outputs_written & BITFIELD64_BIT(VARYING_SLOT_BFC1))
         outputs_written |= BITFIELD64_BIT(VARYING_SLOT_COL1);
   }

   /* In order for legacy clipping to work, we need to populate the clip
    * distance varying slots whenever clipping is enabled, even if the vertex
    * shader doesn't write to gl_ClipDistance.
    */
   if (c.key.base.userclip_active) {
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0);
      outputs_written |= BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1);
   }

   brw_compute_vue_map(brw->intelScreen->devinfo,
                       &prog_data->base.vue_map, outputs_written);

   set_binding_table_layout(&prog_data->base.base, pipeline,
                            VK_SHADER_STAGE_VERTEX);

   /* Emit GEN4 code. */
   program = brw_vs_emit(brw, prog, &c, prog_data, mem_ctx, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   struct anv_state vs_state = anv_state_stream_alloc(&pipeline->program_stream,
                                                      program_size, 64);
   memcpy(vs_state.map, program, program_size);

   pipeline->vs_simd8 = vs_state.offset;

   ralloc_free(mem_ctx);

   return true;
}

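/* Fill out the WM program key. There is no real GL framebuffer here, so a
 * dummy 400x400, single-sampled, user-created framebuffer is temporarily
 * installed as ctx->DrawBuffer to satisfy the GL-derived key logic below;
 * it is detached again at the end of the function.
 */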
void brw_wm_populate_key(struct brw_context *brw,
                         struct brw_fragment_program *fp,
                         struct brw_wm_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_program *prog = (struct gl_program *) brw->fragment_program;
   GLuint lookup = 0;
   GLuint line_aa;
   bool program_uses_dfdy = fp->program.UsesDFdy;
   struct gl_framebuffer draw_buffer;
   bool multisample_fbo;

   memset(key, 0, sizeof(*key));

   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. */
      key->tex.swizzles[i] = SWIZZLE_XYZW;
   }

   /* A non-zero framebuffer name indicates that the framebuffer was created
    * by the user rather than the window system.
    */
   draw_buffer.Name = 1;
   draw_buffer.Visual.samples = 1;
   draw_buffer._NumColorDrawBuffers = 1;
   draw_buffer.Width = 400;
   draw_buffer.Height = 400;
   ctx->DrawBuffer = &draw_buffer;

   multisample_fbo = ctx->DrawBuffer->Visual.samples > 1;

   /* Build the index for table lookup. */
   if (brw->gen < 6) {
      /* _NEW_COLOR */
      if (fp->program.UsesKill || ctx->Color.AlphaEnabled)
         lookup |= IZ_PS_KILL_ALPHATEST_BIT;

      if (fp->program.Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
         lookup |= IZ_PS_COMPUTES_DEPTH_BIT;

      /* _NEW_DEPTH */
      if (ctx->Depth.Test)
         lookup |= IZ_DEPTH_TEST_ENABLE_BIT;

      if (ctx->Depth.Test && ctx->Depth.Mask) /* ?? */
         lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;

      /* _NEW_STENCIL | _NEW_BUFFERS */
      if (ctx->Stencil._Enabled) {
         lookup |= IZ_STENCIL_TEST_ENABLE_BIT;

         if (ctx->Stencil.WriteMask[0] ||
             ctx->Stencil.WriteMask[ctx->Stencil._BackFace])
            lookup |= IZ_STENCIL_WRITE_ENABLE_BIT;
      }
      key->iz_lookup = lookup;
   }

   line_aa = AA_NEVER;

   /* _NEW_LINE, _NEW_POLYGON, BRW_NEW_REDUCED_PRIMITIVE */
   if (ctx->Line.SmoothFlag) {
      if (brw->reduced_primitive == GL_LINES) {
         line_aa = AA_ALWAYS;
      } else if (brw->reduced_primitive == GL_TRIANGLES) {
         if (ctx->Polygon.FrontMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if (ctx->Polygon.BackMode == GL_LINE ||
                (ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_BACK))
               line_aa = AA_ALWAYS;
         } else if (ctx->Polygon.BackMode == GL_LINE) {
            line_aa = AA_SOMETIMES;

            if ((ctx->Polygon.CullFlag &&
                 ctx->Polygon.CullFaceMode == GL_FRONT))
               line_aa = AA_ALWAYS;
         }
      }
   }

   key->line_aa = line_aa;

   /* _NEW_HINT */
   key->high_quality_derivatives =
      ctx->Hint.FragmentShaderDerivative == GL_NICEST;

   if (brw->gen < 6)
      key->stats_wm = brw->stats_wm;

   /* _NEW_LIGHT */
   key->flat_shade = (ctx->Light.ShadeModel == GL_FLAT);

   /* _NEW_FRAG_CLAMP | _NEW_BUFFERS */
   key->clamp_fragment_color = ctx->Color._ClampFragmentColor;

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, brw->wm.base.sampler_count,
                                      &key->tex);

   /* _NEW_BUFFERS */
   /*
    * Include the draw buffer origin and height so that we can calculate
    * fragment position values relative to the bottom left of the drawable,
    * from the incoming screen origin relative position we get as part of our
    * payload.
    *
    * This is only needed for the WM_WPOSXY opcode when the fragment program
    * uses the gl_FragCoord input.
    *
    * We could avoid recompiling by including this as a constant referenced by
    * our program, but if we were to do that it would also be nice to handle
    * getting that constant updated at batchbuffer submit time (when we
    * hold the lock and know where the buffer really is) rather than at emit
    * time when we don't hold the lock and are just guessing. We could also
    * just avoid using this as key data if the program doesn't use
    * fragment.position.
    *
    * For DRI2 the origin_x/y will always be (0,0) but we still need the
    * drawable height in order to invert the Y axis.
    */
   if (fp->program.Base.InputsRead & VARYING_BIT_POS) {
      key->drawable_height = ctx->DrawBuffer->Height;
   }

   if ((fp->program.Base.InputsRead & VARYING_BIT_POS) || program_uses_dfdy) {
      key->render_to_fbo = _mesa_is_user_fbo(ctx->DrawBuffer);
   }

   /* _NEW_BUFFERS */
   key->nr_color_regions = ctx->DrawBuffer->_NumColorDrawBuffers;

   /* _NEW_MULTISAMPLE, _NEW_COLOR, _NEW_BUFFERS */
   key->replicate_alpha = ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
      (ctx->Multisample.SampleAlphaToCoverage || ctx->Color.AlphaEnabled);

   /* _NEW_BUFFERS _NEW_MULTISAMPLE */
   /* Ignore sample qualifier while computing this flag. */
   key->persample_shading =
      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, true) > 1;
   if (key->persample_shading)
      key->persample_2x = ctx->DrawBuffer->Visual.samples == 2;

   key->compute_pos_offset =
      _mesa_get_min_invocations_per_fragment(ctx, &fp->program, false) > 1 &&
      fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_POS;

   key->compute_sample_id =
      multisample_fbo &&
      ctx->Multisample.Enabled &&
      (fp->program.Base.SystemValuesRead & SYSTEM_BIT_SAMPLE_ID);

   /* BRW_NEW_VUE_MAP_GEOM_OUT */
   if (brw->gen < 6 || _mesa_bitcount_64(fp->program.Base.InputsRead &
                                         BRW_FS_VARYING_INPUT_MASK) > 16)
      key->input_slots_valid = brw->vue_map_geom_out.slots_valid;

   /* _NEW_COLOR | _NEW_BUFFERS */
   /* Pre-gen6, the hardware alpha test always used each render
    * target's alpha to do alpha test, as opposed to render target 0's alpha
    * like GL requires. Fix that by building the alpha test into the
    * shader, and we'll skip enabling the fixed function alpha test.
    */
   if (brw->gen < 6 && ctx->DrawBuffer->_NumColorDrawBuffers > 1 &&
       ctx->Color.AlphaEnabled) {
      key->alpha_test_func = ctx->Color.AlphaFunc;
      key->alpha_test_ref = ctx->Color.AlphaRef;
   }

   /* The unique fragment program ID */
   key->program_string_id = fp->id;

   ctx->DrawBuffer = NULL;
}

static uint8_t
computed_depth_mode(struct gl_fragment_program *fp)
{
   if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
      switch (fp->FragDepthLayout) {
      case FRAG_DEPTH_LAYOUT_NONE:
      case FRAG_DEPTH_LAYOUT_ANY:
         return BRW_PSCDEPTH_ON;
      case FRAG_DEPTH_LAYOUT_GREATER:
         return BRW_PSCDEPTH_ON_GE;
      case FRAG_DEPTH_LAYOUT_LESS:
         return BRW_PSCDEPTH_ON_LE;
      case FRAG_DEPTH_LAYOUT_UNCHANGED:
         return BRW_PSCDEPTH_OFF;
      }
   }
   return BRW_PSCDEPTH_OFF;
}

static bool
really_do_wm_prog(struct brw_context *brw,
                  struct gl_shader_program *prog,
                  struct brw_fragment_program *fp,
                  struct brw_wm_prog_key *key, struct anv_pipeline *pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
   struct gl_shader *fs = NULL;
   unsigned int program_size;
   const uint32_t *program;

   if (prog)
      fs = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];

   memset(prog_data, 0, sizeof(*prog_data));

   /* key->alpha_test_func means simulating alpha testing via discards,
    * so the shader definitely kills pixels.
    */
   prog_data->uses_kill = fp->program.UsesKill || key->alpha_test_func;

   prog_data->computed_depth_mode = computed_depth_mode(&fp->program);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count;
   if (fs) {
      param_count = fs->num_uniform_components;
   } else {
      param_count = fp->program.Base.Parameters->NumParameters * 4;
   }
   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
   prog_data->base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data->base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data->base.nr_params = param_count;

   prog_data->barycentric_interp_modes =
      brw_compute_barycentric_interp_modes(brw, key->flat_shade,
                                           key->persample_shading,
                                           &fp->program);

   set_binding_table_layout(&prog_data->base, pipeline,
                            VK_SHADER_STAGE_FRAGMENT);
   /* This needs to come after shader time and pull constant entries, but we
    * don't have those set up now, so just put it after the layout entries.
    */
   prog_data->binding_table.render_target_start = 0;

   program = brw_wm_fs_emit(brw, mem_ctx, key, prog_data,
                            &fp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   struct anv_state ps_state = anv_state_stream_alloc(&pipeline->program_stream,
                                                      program_size, 64);
   memcpy(ps_state.map, program, program_size);

   if (prog_data->no_8)
      pipeline->ps_simd8 = NO_KERNEL;
   else
      pipeline->ps_simd8 = ps_state.offset;

   if (prog_data->no_8 || prog_data->prog_offset_16) {
      pipeline->ps_simd16 = ps_state.offset + prog_data->prog_offset_16;
   } else {
      pipeline->ps_simd16 = NO_KERNEL;
   }

   ralloc_free(mem_ctx);

   return true;
}

static void
brw_gs_populate_key(struct brw_context *brw,
                    struct anv_pipeline *pipeline,
                    struct brw_geometry_program *gp,
                    struct brw_gs_prog_key *key)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_stage_state *stage_state = &brw->gs.base;
   struct gl_program *prog = &gp->program.Base;

   memset(key, 0, sizeof(*key));

   key->base.program_string_id = gp->id;
   brw_setup_vue_key_clip_info(brw, &key->base,
                               gp->program.Base.UsesClipDistanceOut);

   /* _NEW_TEXTURE */
   brw_populate_sampler_prog_key_data(ctx, prog, stage_state->sampler_count,
                                      &key->base.tex);

   struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;

   /* BRW_NEW_VUE_MAP_VS */
   key->input_varyings = prog_data->base.vue_map.slots_valid;
}

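/* Compile the geometry stage via the shared brw_compile_gs_prog() helper
 * and upload the resulting kernel to the pipeline's program stream.
 */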
static bool
really_do_gs_prog(struct brw_context *brw,
                  struct gl_shader_program *prog,
                  struct brw_geometry_program *gp,
                  struct brw_gs_prog_key *key, struct anv_pipeline *pipeline)
{
   struct brw_gs_compile_output output;

   /* FIXME: We pass the bind map to the compile in the output struct. Need
    * something better. */
   set_binding_table_layout(&output.prog_data.base.base,
                            pipeline, VK_SHADER_STAGE_GEOMETRY);

   brw_compile_gs_prog(brw, prog, gp, key, &output);

   struct anv_state gs_state = anv_state_stream_alloc(&pipeline->program_stream,
                                                      output.program_size, 64);
   memcpy(gs_state.map, output.program, output.program_size);

   pipeline->gs_vec4 = gs_state.offset;
   pipeline->gs_vertex_count = gp->program.VerticesIn;

   ralloc_free(output.mem_ctx);

   return true;
}

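/* Compile the compute stage. Unlike the other stages, this requires a
 * linked GLSL compute shader to be present (see the assert below).
 */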
static bool
brw_codegen_cs_prog(struct brw_context *brw,
                    struct gl_shader_program *prog,
                    struct brw_compute_program *cp,
                    struct brw_cs_prog_key *key, struct anv_pipeline *pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   const GLuint *program;
   void *mem_ctx = ralloc_context(NULL);
   GLuint program_size;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;

   struct gl_shader *cs = prog->_LinkedShaders[MESA_SHADER_COMPUTE];
   assert(cs);

   memset(prog_data, 0, sizeof(*prog_data));

   set_binding_table_layout(&prog_data->base, pipeline, VK_SHADER_STAGE_COMPUTE);

   /* Allocate the references to the uniforms that will end up in the
    * prog_data associated with the compiled program, and which will be freed
    * by the state cache.
    */
   int param_count = cs->num_uniform_components;

   /* The backend also sometimes adds params for texture size. */
   param_count += 2 * ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
   prog_data->base.param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data->base.pull_param =
      rzalloc_array(NULL, const gl_constant_value *, param_count);
   prog_data->base.nr_params = param_count;

   program = brw_cs_emit(brw, mem_ctx, key, prog_data,
                         &cp->program, prog, &program_size);
   if (program == NULL) {
      ralloc_free(mem_ctx);
      return false;
   }

   if (unlikely(INTEL_DEBUG & DEBUG_CS))
      fprintf(stderr, "\n");

   struct anv_state cs_state = anv_state_stream_alloc(&pipeline->program_stream,
                                                      program_size, 64);
   memcpy(cs_state.map, program, program_size);

   pipeline->cs_simd = cs_state.offset;

   ralloc_free(mem_ctx);

   return true;
}

static void
brw_cs_populate_key(struct brw_context *brw,
                    struct brw_compute_program *bcp, struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   /* The unique compute program ID */
   key->program_string_id = bcp->id;
}

static void
fail_on_compile_error(int status, const char *msg)
{
   int source, line, column;
   char error[256];

   if (status)
      return;

   if (sscanf(msg, "%d:%d(%d): error: %255[^\n]",
              &source, &line, &column, error) == 4)
      fail_if(!status, "%d:%s\n", line, error);
   else
      fail_if(!status, "%s\n", msg);
}

struct anv_compiler {
   struct anv_device *device;
   struct intel_screen *screen;
   struct brw_context *brw;
   struct gl_pipeline_object pipeline;
};

extern "C" {

struct anv_compiler *
anv_compiler_create(struct anv_device *device)
{
   const struct brw_device_info *devinfo = &device->info;
   struct anv_compiler *compiler;
   struct gl_context *ctx;

   compiler = rzalloc(NULL, struct anv_compiler);
   if (compiler == NULL)
      return NULL;

   compiler->screen = rzalloc(compiler, struct intel_screen);
   if (compiler->screen == NULL)
      goto fail;

   compiler->brw = rzalloc(compiler, struct brw_context);
   if (compiler->brw == NULL)
      goto fail;

   compiler->device = device;

   compiler->brw->optionCache.info = NULL;
   compiler->brw->bufmgr = NULL;
   compiler->brw->gen = devinfo->gen;
   compiler->brw->is_g4x = devinfo->is_g4x;
   compiler->brw->is_baytrail = devinfo->is_baytrail;
   compiler->brw->is_haswell = devinfo->is_haswell;
   compiler->brw->is_cherryview = devinfo->is_cherryview;

   /* We need this at least for CS, which will check brw->max_cs_threads
    * against the work group size. */
   compiler->brw->max_vs_threads = devinfo->max_vs_threads;
   compiler->brw->max_hs_threads = devinfo->max_hs_threads;
   compiler->brw->max_ds_threads = devinfo->max_ds_threads;
   compiler->brw->max_gs_threads = devinfo->max_gs_threads;
   compiler->brw->max_wm_threads = devinfo->max_wm_threads;
   compiler->brw->max_cs_threads = devinfo->max_cs_threads;
   compiler->brw->urb.size = devinfo->urb.size;
   compiler->brw->urb.min_vs_entries = devinfo->urb.min_vs_entries;
   compiler->brw->urb.max_vs_entries = devinfo->urb.max_vs_entries;
   compiler->brw->urb.max_hs_entries = devinfo->urb.max_hs_entries;
   compiler->brw->urb.max_ds_entries = devinfo->urb.max_ds_entries;
   compiler->brw->urb.max_gs_entries = devinfo->urb.max_gs_entries;

   compiler->brw->intelScreen = compiler->screen;
   compiler->screen->devinfo = &device->info;

   brw_process_intel_debug_variable(compiler->screen);

   compiler->screen->compiler = brw_compiler_create(compiler, &device->info);

   ctx = &compiler->brw->ctx;
   _mesa_init_shader_object_functions(&ctx->Driver);

   _mesa_init_constants(&ctx->Const, API_OPENGL_CORE);

   brw_initialize_context_constants(compiler->brw);

   intelInitExtensions(ctx);

   /* Set dd::NewShader */
   brwInitFragProgFuncs(&ctx->Driver);

   ctx->_Shader = &compiler->pipeline;

   compiler->brw->precompile = false;

   return compiler;

 fail:
   ralloc_free(compiler);
   return NULL;
}

void
anv_compiler_destroy(struct anv_compiler *compiler)
{
   _mesa_free_errors_data(&compiler->brw->ctx);
   ralloc_free(compiler);
}

/* From gen7_urb.c */

/* FIXME: Add to struct intel_device_info */
static const int gen8_push_size = 32 * 1024;

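/* Split the URB between push constants, VS URB entries, and GS URB entries,
 * largely following gen7_urb.c. Rough worked example with assumed numbers:
 * a 192 KB URB is 24 chunks of 8 KB; push constants reserve 32 KB = 4
 * chunks, each stage then gets its minimum allocation, and any chunks left
 * over are dealt out in proportion to how much extra space each stage
 * "wants".
 */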
static void
gen7_compute_urb_partition(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;
   bool vs_present = pipeline->vs_simd8 != NO_KERNEL;
   unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
   unsigned vs_entry_size_bytes = vs_size * 64;
   bool gs_present = pipeline->gs_vec4 != NO_KERNEL;
   unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks. */
   unsigned urb_chunks = devinfo->urb.size * 1024 / chunk_size_bytes;

   /* Reserve space for push constants. */
   unsigned push_constant_bytes = gen8_push_size;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries. */
   unsigned vs_chunks =
      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes;
   unsigned vs_wants =
      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes - vs_chunks;

   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate less than nr_gs_entries_granularity.
       */
      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                        chunk_size_bytes) / chunk_size_bytes;
      gs_wants =
         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         round(vs_wants * (((double) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      gs_chunks += remaining_space;
   }

   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);

   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);

   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - GS
    */
   pipeline->urb.vs_start = push_constant_chunks;
   pipeline->urb.vs_size = vs_size;
   pipeline->urb.nr_vs_entries = nr_vs_entries;

   pipeline->urb.gs_start = push_constant_chunks + vs_chunks;
   pipeline->urb.gs_size = gs_size;
   pipeline->urb.nr_gs_entries = nr_gs_entries;
}

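/* Table mapping VK_SHADER_STAGE_* indices (used to index this array) to the
 * GL shader type tokens and mesa stage enums the GLSL compiler expects.
 * The tessellation stages exist in the Vulkan enum but are not wired up,
 * hence the (gl_shader_stage)-1 placeholders.
 */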
static const struct {
   uint32_t token;
   gl_shader_stage stage;
   const char *name;
} stage_info[] = {
   { GL_VERTEX_SHADER, MESA_SHADER_VERTEX, "vertex" },
   { GL_TESS_CONTROL_SHADER, (gl_shader_stage)-1, "tess control" },
   { GL_TESS_EVALUATION_SHADER, (gl_shader_stage)-1, "tess evaluation" },
   { GL_GEOMETRY_SHADER, MESA_SHADER_GEOMETRY, "geometry" },
   { GL_FRAGMENT_SHADER, MESA_SHADER_FRAGMENT, "fragment" },
   { GL_COMPUTE_SHADER, MESA_SHADER_COMPUTE, "compute" },
};

struct spirv_header {
   uint32_t magic;
   uint32_t version;
   uint32_t gen_magic;
};

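/* Distinguish GLSL source from SPIR-V. Genuine SPIR-V starts with
 * SPIR_V_MAGIC_NUMBER; the LunarG back door wraps raw GLSL text in a
 * 12-byte SPIR-V-style header with version == 0. Returns a pointer to the
 * GLSL source, or NULL if the module is real SPIR-V.
 */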
static const char *
src_as_glsl(const char *data)
{
   const struct spirv_header *as_spirv = (const struct spirv_header *)data;

   /* Check alignment. */
   if ((intptr_t)data & 0x3) {
      return data;
   }

   if (as_spirv->magic == SPIR_V_MAGIC_NUMBER) {
      /* LunarG back-door */
      if (as_spirv->version == 0)
         return data + 12;
      else
         return NULL;
   } else {
      return data;
   }
}

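/* Compile one stage from GLSL source (the LunarG back door) by feeding it
 * straight through the mesa GLSL front end.
 */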
static void
anv_compile_shader_glsl(struct anv_compiler *compiler,
                        struct gl_shader_program *program,
                        struct anv_pipeline *pipeline, uint32_t stage)
{
   struct brw_context *brw = compiler->brw;
   struct gl_shader *shader;
   int name = 0;

   shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
   fail_if(shader == NULL, "failed to create %s shader\n", stage_info[stage].name);

   shader->Source = strdup(src_as_glsl(pipeline->shaders[stage]->module->data));
   _mesa_glsl_compile_shader(&brw->ctx, shader, false, false);
   fail_on_compile_error(shader->CompileStatus, shader->InfoLog);

   program->Shaders[program->NumShaders] = shader;
   program->NumShaders++;
}

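/* Mirror the NIR shader's input/output locations into the gl_program
 * bitfields that the brw_*_populate_key() functions read.
 */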
static void
setup_nir_io(struct gl_program *prog,
             nir_shader *shader)
{
   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      prog->InputsRead |= BITFIELD64_BIT(var->data.location);
   }

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      prog->OutputsWritten |= BITFIELD64_BIT(var->data.location);
   }
}

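/* Compile one stage from real SPIR-V: translate it to NIR, run the
 * gen-specific NIR passes, and wrap the result in a gl_shader so the rest
 * of the GL-based backend can consume it.
 */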
static void
anv_compile_shader_spirv(struct anv_compiler *compiler,
                         struct gl_shader_program *program,
                         struct anv_pipeline *pipeline, uint32_t stage)
{
   struct brw_context *brw = compiler->brw;
   struct anv_shader *shader = pipeline->shaders[stage];
   struct gl_shader *mesa_shader;
   int name = 0;

   mesa_shader = brw_new_shader(&brw->ctx, name, stage_info[stage].token);
   fail_if(mesa_shader == NULL,
           "failed to create %s shader\n", stage_info[stage].name);

   switch (stage) {
   case VK_SHADER_STAGE_VERTEX:
      mesa_shader->Program = &rzalloc(mesa_shader, struct brw_vertex_program)->program.Base;
      break;
   case VK_SHADER_STAGE_GEOMETRY:
      mesa_shader->Program = &rzalloc(mesa_shader, struct brw_geometry_program)->program.Base;
      break;
   case VK_SHADER_STAGE_FRAGMENT:
      mesa_shader->Program = &rzalloc(mesa_shader, struct brw_fragment_program)->program.Base;
      break;
   case VK_SHADER_STAGE_COMPUTE:
      mesa_shader->Program = &rzalloc(mesa_shader, struct brw_compute_program)->program.Base;
      break;
   }

   mesa_shader->Program->Parameters =
      rzalloc(mesa_shader, struct gl_program_parameter_list);

   mesa_shader->Type = stage_info[stage].token;
   mesa_shader->Stage = stage_info[stage].stage;

   assert(shader->module->size % 4 == 0);

   struct gl_shader_compiler_options *glsl_options =
      &compiler->screen->compiler->glsl_compiler_options[stage_info[stage].stage];

   mesa_shader->Program->nir =
      spirv_to_nir((uint32_t *)shader->module->data, shader->module->size / 4,
                   glsl_options->NirOptions);
   fail_if(mesa_shader->Program->nir == NULL,
           "failed to translate SPIR-V to NIR\n");
   nir_validate_shader(mesa_shader->Program->nir);

   brw_process_nir(mesa_shader->Program->nir,
                   compiler->screen->devinfo,
                   NULL, mesa_shader->Stage);

   setup_nir_io(mesa_shader->Program, mesa_shader->Program->nir);

   program->Shaders[program->NumShaders] = mesa_shader;
   program->NumShaders++;
}

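/* Record a compiled stage in the pipeline and reserve scratch space for it.
 * Scratch is carved out of a single linear allocation: each stage's slice
 * is its per-thread scratch size times the per-stage maximum thread count.
 */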
static void
add_compiled_stage(struct anv_pipeline *pipeline, uint32_t stage,
                   struct brw_stage_prog_data *prog_data)
{
   struct brw_device_info *devinfo = &pipeline->device->info;
   uint32_t max_threads[] = {
      [VK_SHADER_STAGE_VERTEX]          = devinfo->max_vs_threads,
      [VK_SHADER_STAGE_TESS_CONTROL]    = 0,
      [VK_SHADER_STAGE_TESS_EVALUATION] = 0,
      [VK_SHADER_STAGE_GEOMETRY]        = devinfo->max_gs_threads,
      [VK_SHADER_STAGE_FRAGMENT]        = devinfo->max_wm_threads,
      [VK_SHADER_STAGE_COMPUTE]         = devinfo->max_cs_threads,
   };

   pipeline->prog_data[stage] = prog_data;
   pipeline->active_stages |= 1 << stage;
   pipeline->scratch_start[stage] = pipeline->total_scratch;
   pipeline->total_scratch =
      align_u32(pipeline->total_scratch, 1024) +
      prog_data->total_scratch * max_threads[stage];
}

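/* Main entry point: compiles every shader module attached to the pipeline
 * (all-SPIR-V modules are linked by hand, GLSL goes through the regular
 * linker), runs the per-stage backend compilers above, and finally sizes
 * the scratch pool and URB partition to match the result.
 */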
int
anv_compiler_run(struct anv_compiler *compiler, struct anv_pipeline *pipeline)
{
   struct gl_shader_program *program;
   int name = 0;
   struct brw_context *brw = compiler->brw;

   pipeline->writes_point_size = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers. Make them NULL by default.
    */
   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));

   brw->use_rep_send = pipeline->use_repclear;
   brw->no_simd8 = pipeline->use_repclear;

   program = brw->ctx.Driver.NewShaderProgram(name);
   fail_if(program == NULL, "failed to create program\n");

   program->Shaders = (struct gl_shader **)
      calloc(VK_SHADER_STAGE_NUM, sizeof(struct gl_shader *));
   fail_if(program->Shaders == NULL, "failed to create program\n");

   bool all_spirv = true;
   for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
      if (pipeline->shaders[i] == NULL)
         continue;

      /* You need at least this much for "void main() { }" anyway. */
      assert(pipeline->shaders[i]->module->size >= 12);

      if (src_as_glsl(pipeline->shaders[i]->module->data)) {
         all_spirv = false;
         break;
      }

      assert(pipeline->shaders[i]->module->size % 4 == 0);
   }

   if (all_spirv) {
      for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
         if (pipeline->shaders[i])
            anv_compile_shader_spirv(compiler, program, pipeline, i);
      }

      for (unsigned i = 0; i < program->NumShaders; i++) {
         struct gl_shader *shader = program->Shaders[i];
         program->_LinkedShaders[shader->Stage] = shader;
      }
   } else {
      for (unsigned i = 0; i < VK_SHADER_STAGE_NUM; i++) {
         if (pipeline->shaders[i])
            anv_compile_shader_glsl(compiler, program, pipeline, i);
      }

      _mesa_glsl_link_shader(&brw->ctx, program);
      fail_on_compile_error(program->LinkStatus,
                            program->InfoLog);
   }

   bool success;
   pipeline->active_stages = 0;
   pipeline->total_scratch = 0;

   if (pipeline->shaders[VK_SHADER_STAGE_VERTEX]) {
      struct brw_vs_prog_key vs_key;
      struct gl_vertex_program *vp = (struct gl_vertex_program *)
         program->_LinkedShaders[MESA_SHADER_VERTEX]->Program;
      struct brw_vertex_program *bvp = brw_vertex_program(vp);

      brw_vs_populate_key(brw, bvp, &vs_key);

      success = really_do_vs_prog(brw, program, bvp, &vs_key, pipeline);
      fail_if(!success, "do_vs_prog failed\n");
      add_compiled_stage(pipeline, VK_SHADER_STAGE_VERTEX,
                         &pipeline->vs_prog_data.base.base);

      if (vp->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_PSIZ))
         pipeline->writes_point_size = true;
   } else {
      memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
      pipeline->vs_simd8 = NO_KERNEL;
   }

   if (pipeline->shaders[VK_SHADER_STAGE_GEOMETRY]) {
      struct brw_gs_prog_key gs_key;
      struct gl_geometry_program *gp = (struct gl_geometry_program *)
         program->_LinkedShaders[MESA_SHADER_GEOMETRY]->Program;
      struct brw_geometry_program *bgp = brw_geometry_program(gp);

      brw_gs_populate_key(brw, pipeline, bgp, &gs_key);

      success = really_do_gs_prog(brw, program, bgp, &gs_key, pipeline);
      fail_if(!success, "do_gs_prog failed\n");
      add_compiled_stage(pipeline, VK_SHADER_STAGE_GEOMETRY,
                         &pipeline->gs_prog_data.base.base);

      if (gp->Base.OutputsWritten & BITFIELD64_BIT(VARYING_SLOT_PSIZ))
         pipeline->writes_point_size = true;
   } else {
      pipeline->gs_vec4 = NO_KERNEL;
   }

   if (pipeline->shaders[VK_SHADER_STAGE_FRAGMENT]) {
      struct brw_wm_prog_key wm_key;
      struct gl_fragment_program *fp = (struct gl_fragment_program *)
         program->_LinkedShaders[MESA_SHADER_FRAGMENT]->Program;
      struct brw_fragment_program *bfp = brw_fragment_program(fp);

      brw_wm_populate_key(brw, bfp, &wm_key);

      success = really_do_wm_prog(brw, program, bfp, &wm_key, pipeline);
      fail_if(!success, "do_wm_prog failed\n");
      add_compiled_stage(pipeline, VK_SHADER_STAGE_FRAGMENT,
                         &pipeline->wm_prog_data.base);
   }

   if (pipeline->shaders[VK_SHADER_STAGE_COMPUTE]) {
      struct brw_cs_prog_key cs_key;
      struct gl_compute_program *cp = (struct gl_compute_program *)
         program->_LinkedShaders[MESA_SHADER_COMPUTE]->Program;
      struct brw_compute_program *bcp = brw_compute_program(cp);

      brw_cs_populate_key(brw, bcp, &cs_key);

      success = brw_codegen_cs_prog(brw, program, bcp, &cs_key, pipeline);
      fail_if(!success, "brw_codegen_cs_prog failed\n");
      add_compiled_stage(pipeline, VK_SHADER_STAGE_COMPUTE,
                         &pipeline->cs_prog_data.base);
   }

   /* XXX: Deleting the shader is broken with our current SPIR-V hacks. We
    * need to fix this ASAP.
    */
   if (!all_spirv)
      brw->ctx.Driver.DeleteShaderProgram(&brw->ctx, program);

   struct anv_device *device = compiler->device;
   while (device->scratch_block_pool.bo.size < pipeline->total_scratch)
      anv_block_pool_alloc(&device->scratch_block_pool);

   gen7_compute_urb_partition(pipeline);

   return 0;
}

/* This badly named function frees the struct anv_pipeline data that the
 * compiler allocates. Currently just the prog_data structs.
 */
void
anv_compiler_free(struct anv_pipeline *pipeline)
{
   for (uint32_t stage = 0; stage < VK_SHADER_STAGE_NUM; stage++) {
      if (pipeline->prog_data[stage]) {
         free(pipeline->prog_data[stage]->map_entries);
         ralloc_free(pipeline->prog_data[stage]->param);
         ralloc_free(pipeline->prog_data[stage]->pull_param);
      }
   }
}

}