/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "nir/tgsi_to_nir.h"
35 #include "compiler/v3d_compiler.h"
36 #include "v3d_context.h"
37 #include "broadcom/cle/v3d_packet_v33_pack.h"
38 #include "mesa/state_tracker/st_glsl_types.h"
40 static gl_varying_slot
41 vc5_get_slot_for_driver_location(nir_shader
*s
, uint32_t driver_location
)
43 nir_foreach_variable(var
, &s
->outputs
) {
44 if (var
->data
.driver_location
== driver_location
) {
45 return var
->data
.location
;
53 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
55 * A shader can have 16 of these specs, and each one of them can write up to
56 * 16 dwords. Since we allow a total of 64 transform feedback output
57 * components (not 16 vectors), we have to group the writes of multiple
58 * varyings together in a single data spec.
61 vc5_set_transform_feedback_outputs(struct vc5_uncompiled_shader
*so
,
62 const struct pipe_stream_output_info
*stream_output
)
64 if (!stream_output
->num_outputs
)
67 struct v3d_varying_slot slots
[PIPE_MAX_SO_OUTPUTS
* 4];
70 for (int buffer
= 0; buffer
< PIPE_MAX_SO_BUFFERS
; buffer
++) {
71 uint32_t buffer_offset
= 0;
72 uint32_t vpm_start
= slot_count
;
74 for (int i
= 0; i
< stream_output
->num_outputs
; i
++) {
75 const struct pipe_stream_output
*output
=
76 &stream_output
->output
[i
];
78 if (output
->output_buffer
!= buffer
)
81 /* We assume that the SO outputs appear in increasing
82 * order in the buffer.
84 assert(output
->dst_offset
>= buffer_offset
);
86 /* Pad any undefined slots in the output */
87 for (int j
= buffer_offset
; j
< output
->dst_offset
; j
++) {
89 v3d_slot_from_slot_and_component(VARYING_SLOT_POS
, 0);
94 /* Set the coordinate shader up to output the
95 * components of this varying.
97 for (int j
= 0; j
< output
->num_components
; j
++) {
98 gl_varying_slot slot
=
99 vc5_get_slot_for_driver_location(so
->base
.ir
.nir
, output
->register_index
);
102 v3d_slot_from_slot_and_component(slot
,
103 output
->start_component
+ j
);
109 uint32_t vpm_size
= slot_count
- vpm_start
;
113 uint32_t vpm_start_offset
= vpm_start
+ 6;
116 uint32_t write_size
= MIN2(vpm_size
, 1 << 4);
118 struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked
= {
119 /* We need the offset from the coordinate shader's VPM
120 * output block, which has the [X, Y, Z, W, Xs, Ys]
121 * values at the start.
123 .first_shaded_vertex_value_to_output
= vpm_start_offset
,
124 .number_of_consecutive_vertex_values_to_output_as_32_bit_values_minus_1
= write_size
- 1,
125 .output_buffer_to_write_to
= buffer
,
129 assert(unpacked
.first_shaded_vertex_value_to_output
!= 8 ||
130 so
->num_tf_specs
!= 0);
132 assert(so
->num_tf_specs
!= ARRAY_SIZE(so
->tf_specs
));
133 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
134 (void *)&so
->tf_specs
[so
->num_tf_specs
],
137 /* If point size is being written by the shader, then
138 * all the VPM start offsets are shifted up by one.
139 * We won't know that until the variant is compiled,
142 unpacked
.first_shaded_vertex_value_to_output
++;
145 assert(unpacked
.first_shaded_vertex_value_to_output
!= 8 ||
146 so
->num_tf_specs
!= 0);
148 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
149 (void *)&so
->tf_specs_psiz
[so
->num_tf_specs
],
152 vpm_start_offset
+= write_size
;
153 vpm_size
-= write_size
;
157 so
->num_tf_outputs
= slot_count
;
158 so
->tf_outputs
= ralloc_array(so
->base
.ir
.nir
, struct v3d_varying_slot
,
160 memcpy(so
->tf_outputs
, slots
, sizeof(*slots
) * slot_count
);
164 type_size(const struct glsl_type
*type
)
166 return glsl_count_attribute_slots(type
, false);
170 uniforms_type_size(const struct glsl_type
*type
)
172 return st_glsl_storage_type_size(type
, false);
176 vc5_shader_state_create(struct pipe_context
*pctx
,
177 const struct pipe_shader_state
*cso
)
179 struct vc5_context
*vc5
= vc5_context(pctx
);
180 struct vc5_uncompiled_shader
*so
= CALLOC_STRUCT(vc5_uncompiled_shader
);
184 so
->program_id
= vc5
->next_uncompiled_program_id
++;
188 if (cso
->type
== PIPE_SHADER_IR_NIR
) {
189 /* The backend takes ownership of the NIR shader on state
194 NIR_PASS_V(s
, nir_lower_io
, nir_var_all
& ~nir_var_uniform
,
196 (nir_lower_io_options
)0);
197 NIR_PASS_V(s
, nir_lower_io
, nir_var_uniform
,
199 (nir_lower_io_options
)0);
201 assert(cso
->type
== PIPE_SHADER_IR_TGSI
);
203 if (V3D_DEBUG
& V3D_DEBUG_TGSI
) {
204 fprintf(stderr
, "prog %d TGSI:\n",
206 tgsi_dump(cso
->tokens
, 0);
207 fprintf(stderr
, "\n");
209 s
= tgsi_to_nir(cso
->tokens
, &v3d_nir_options
);
214 NIR_PASS_V(s
, nir_opt_global_to_local
);
215 NIR_PASS_V(s
, nir_lower_regs_to_ssa
);
216 NIR_PASS_V(s
, nir_normalize_cubemap_coords
);
218 NIR_PASS_V(s
, nir_lower_load_const_to_scalar
);
222 NIR_PASS_V(s
, nir_remove_dead_variables
, nir_var_local
);
224 /* Garbage collect dead instructions */
227 so
->base
.type
= PIPE_SHADER_IR_NIR
;
230 vc5_set_transform_feedback_outputs(so
, &cso
->stream_output
);
232 if (V3D_DEBUG
& (V3D_DEBUG_NIR
|
233 v3d_debug_flag_for_shader_stage(s
->info
.stage
))) {
234 fprintf(stderr
, "%s prog %d NIR:\n",
235 gl_shader_stage_name(s
->info
.stage
),
237 nir_print_shader(s
, stderr
);
238 fprintf(stderr
, "\n");
244 static struct vc5_compiled_shader
*
245 vc5_get_compiled_shader(struct vc5_context
*vc5
, struct v3d_key
*key
)
247 struct vc5_uncompiled_shader
*shader_state
= key
->shader_state
;
248 nir_shader
*s
= shader_state
->base
.ir
.nir
;
250 struct hash_table
*ht
;
252 if (s
->info
.stage
== MESA_SHADER_FRAGMENT
) {
254 key_size
= sizeof(struct v3d_fs_key
);
257 key_size
= sizeof(struct v3d_vs_key
);
260 struct hash_entry
*entry
= _mesa_hash_table_search(ht
, key
);
264 struct vc5_compiled_shader
*shader
=
265 rzalloc(NULL
, struct vc5_compiled_shader
);
267 int program_id
= shader_state
->program_id
;
269 p_atomic_inc_return(&shader_state
->compiled_variant_count
);
271 uint32_t shader_size
;
273 switch (s
->info
.stage
) {
274 case MESA_SHADER_VERTEX
:
275 shader
->prog_data
.vs
= rzalloc(shader
, struct v3d_vs_prog_data
);
277 qpu_insts
= v3d_compile_vs(vc5
->screen
->compiler
,
278 (struct v3d_vs_key
*)key
,
279 shader
->prog_data
.vs
, s
,
280 program_id
, variant_id
,
283 case MESA_SHADER_FRAGMENT
:
284 shader
->prog_data
.fs
= rzalloc(shader
, struct v3d_fs_prog_data
);
286 qpu_insts
= v3d_compile_fs(vc5
->screen
->compiler
,
287 (struct v3d_fs_key
*)key
,
288 shader
->prog_data
.fs
, s
,
289 program_id
, variant_id
,
293 unreachable("bad stage");
296 vc5_set_shader_uniform_dirty_flags(shader
);
298 shader
->bo
= vc5_bo_alloc(vc5
->screen
, shader_size
, "shader");
299 vc5_bo_map(shader
->bo
);
300 memcpy(shader
->bo
->map
, qpu_insts
, shader_size
);
304 struct vc5_key
*dup_key
;
305 dup_key
= ralloc_size(shader
, key_size
);
306 memcpy(dup_key
, key
, key_size
);
307 _mesa_hash_table_insert(ht
, dup_key
, shader
);
309 if (shader
->prog_data
.base
->spill_size
>
310 vc5
->prog
.spill_size_per_thread
) {
311 /* Max 4 QPUs per slice, 3 slices per core. We only do single
312 * core so far. This overallocates memory on smaller cores.
314 int total_spill_size
=
315 4 * 3 * shader
->prog_data
.base
->spill_size
;
317 vc5_bo_unreference(&vc5
->prog
.spill_bo
);
318 vc5
->prog
.spill_bo
= vc5_bo_alloc(vc5
->screen
,
319 total_spill_size
, "spill");
320 vc5
->prog
.spill_size_per_thread
=
321 shader
->prog_data
.base
->spill_size
;
328 vc5_setup_shared_key(struct vc5_context
*vc5
, struct v3d_key
*key
,
329 struct vc5_texture_stateobj
*texstate
)
331 const struct v3d_device_info
*devinfo
= &vc5
->screen
->devinfo
;
333 for (int i
= 0; i
< texstate
->num_textures
; i
++) {
334 struct pipe_sampler_view
*sampler
= texstate
->textures
[i
];
335 struct vc5_sampler_view
*vc5_sampler
= vc5_sampler_view(sampler
);
336 struct pipe_sampler_state
*sampler_state
=
337 texstate
->samplers
[i
];
342 key
->tex
[i
].return_size
=
343 vc5_get_tex_return_size(devinfo
,
345 sampler_state
->compare_mode
);
347 /* For 16-bit, we set up the sampler to always return 2
348 * channels (meaning no recompiles for most statechanges),
349 * while for 32 we actually scale the returns with channels.
351 if (key
->tex
[i
].return_size
== 16) {
352 key
->tex
[i
].return_channels
= 2;
353 } else if (devinfo
->ver
> 40) {
354 key
->tex
[i
].return_channels
= 4;
356 key
->tex
[i
].return_channels
=
357 vc5_get_tex_return_channels(devinfo
,
361 if (key
->tex
[i
].return_size
== 32 && devinfo
->ver
< 40) {
362 memcpy(key
->tex
[i
].swizzle
,
363 vc5_sampler
->swizzle
,
364 sizeof(vc5_sampler
->swizzle
));
366 /* For 16-bit returns, we let the sampler state handle
369 key
->tex
[i
].swizzle
[0] = PIPE_SWIZZLE_X
;
370 key
->tex
[i
].swizzle
[1] = PIPE_SWIZZLE_Y
;
371 key
->tex
[i
].swizzle
[2] = PIPE_SWIZZLE_Z
;
372 key
->tex
[i
].swizzle
[3] = PIPE_SWIZZLE_W
;
376 key
->tex
[i
].compare_mode
= sampler_state
->compare_mode
;
377 key
->tex
[i
].compare_func
= sampler_state
->compare_func
;
378 key
->tex
[i
].clamp_s
=
379 sampler_state
->wrap_s
== PIPE_TEX_WRAP_CLAMP
;
380 key
->tex
[i
].clamp_t
=
381 sampler_state
->wrap_t
== PIPE_TEX_WRAP_CLAMP
;
382 key
->tex
[i
].clamp_r
=
383 sampler_state
->wrap_r
== PIPE_TEX_WRAP_CLAMP
;
387 key
->ucp_enables
= vc5
->rasterizer
->base
.clip_plane_enable
;
391 vc5_update_compiled_fs(struct vc5_context
*vc5
, uint8_t prim_mode
)
393 struct vc5_job
*job
= vc5
->job
;
394 struct v3d_fs_key local_key
;
395 struct v3d_fs_key
*key
= &local_key
;
397 if (!(vc5
->dirty
& (VC5_DIRTY_PRIM_MODE
|
399 VC5_DIRTY_FRAMEBUFFER
|
401 VC5_DIRTY_RASTERIZER
|
402 VC5_DIRTY_SAMPLE_MASK
|
404 VC5_DIRTY_UNCOMPILED_FS
))) {
408 memset(key
, 0, sizeof(*key
));
409 vc5_setup_shared_key(vc5
, &key
->base
, &vc5
->fragtex
);
410 key
->base
.shader_state
= vc5
->prog
.bind_fs
;
411 key
->is_points
= (prim_mode
== PIPE_PRIM_POINTS
);
412 key
->is_lines
= (prim_mode
>= PIPE_PRIM_LINES
&&
413 prim_mode
<= PIPE_PRIM_LINE_STRIP
);
414 key
->clamp_color
= vc5
->rasterizer
->base
.clamp_fragment_color
;
415 if (vc5
->blend
->logicop_enable
) {
416 key
->logicop_func
= vc5
->blend
->logicop_func
;
418 key
->logicop_func
= PIPE_LOGICOP_COPY
;
421 key
->msaa
= vc5
->rasterizer
->base
.multisample
;
422 key
->sample_coverage
= (vc5
->rasterizer
->base
.multisample
&&
423 vc5
->sample_mask
!= (1 << VC5_MAX_SAMPLES
) - 1);
424 key
->sample_alpha_to_coverage
= vc5
->blend
->alpha_to_coverage
;
425 key
->sample_alpha_to_one
= vc5
->blend
->alpha_to_one
;
428 key
->depth_enabled
= (vc5
->zsa
->base
.depth
.enabled
||
429 vc5
->zsa
->base
.stencil
[0].enabled
);
430 if (vc5
->zsa
->base
.alpha
.enabled
) {
431 key
->alpha_test
= true;
432 key
->alpha_test_func
= vc5
->zsa
->base
.alpha
.func
;
435 /* gl_FragColor's propagation to however many bound color buffers
436 * there are means that the buffer count needs to be in the key.
438 key
->nr_cbufs
= vc5
->framebuffer
.nr_cbufs
;
439 key
->swap_color_rb
= vc5
->swap_color_rb
;
441 for (int i
= 0; i
< key
->nr_cbufs
; i
++) {
442 struct pipe_surface
*cbuf
= vc5
->framebuffer
.cbufs
[i
];
446 const struct util_format_description
*desc
=
447 util_format_description(cbuf
->format
);
449 if (desc
->channel
[0].type
== UTIL_FORMAT_TYPE_FLOAT
&&
450 desc
->channel
[0].size
== 32) {
451 key
->f32_color_rb
|= 1 << i
;
454 if (vc5
->prog
.bind_fs
->was_tgsi
) {
455 if (util_format_is_pure_uint(cbuf
->format
))
456 key
->uint_color_rb
|= 1 << i
;
457 else if (util_format_is_pure_sint(cbuf
->format
))
458 key
->int_color_rb
|= 1 << i
;
462 if (key
->is_points
) {
463 key
->point_sprite_mask
=
464 vc5
->rasterizer
->base
.sprite_coord_enable
;
465 key
->point_coord_upper_left
=
466 (vc5
->rasterizer
->base
.sprite_coord_mode
==
467 PIPE_SPRITE_COORD_UPPER_LEFT
);
470 key
->light_twoside
= vc5
->rasterizer
->base
.light_twoside
;
471 key
->shade_model_flat
= vc5
->rasterizer
->base
.flatshade
;
473 struct vc5_compiled_shader
*old_fs
= vc5
->prog
.fs
;
474 vc5
->prog
.fs
= vc5_get_compiled_shader(vc5
, &key
->base
);
475 if (vc5
->prog
.fs
== old_fs
)
478 vc5
->dirty
|= VC5_DIRTY_COMPILED_FS
;
481 if (vc5
->prog
.fs
->prog_data
.fs
->flat_shade_flags
!=
482 old_fs
->prog_data
.fs
->flat_shade_flags
) {
483 vc5
->dirty
|= VC5_DIRTY_FLAT_SHADE_FLAGS
;
486 if (vc5
->prog
.fs
->prog_data
.fs
->centroid_flags
!=
487 old_fs
->prog_data
.fs
->centroid_flags
) {
488 vc5
->dirty
|= VC5_DIRTY_CENTROID_FLAGS
;
492 if (old_fs
&& memcmp(vc5
->prog
.fs
->prog_data
.fs
->input_slots
,
493 old_fs
->prog_data
.fs
->input_slots
,
494 sizeof(vc5
->prog
.fs
->prog_data
.fs
->input_slots
))) {
495 vc5
->dirty
|= VC5_DIRTY_FS_INPUTS
;
500 vc5_update_compiled_vs(struct vc5_context
*vc5
, uint8_t prim_mode
)
502 struct v3d_vs_key local_key
;
503 struct v3d_vs_key
*key
= &local_key
;
505 if (!(vc5
->dirty
& (VC5_DIRTY_PRIM_MODE
|
506 VC5_DIRTY_RASTERIZER
|
509 VC5_DIRTY_UNCOMPILED_VS
|
510 VC5_DIRTY_FS_INPUTS
))) {
514 memset(key
, 0, sizeof(*key
));
515 vc5_setup_shared_key(vc5
, &key
->base
, &vc5
->verttex
);
516 key
->base
.shader_state
= vc5
->prog
.bind_vs
;
517 key
->num_fs_inputs
= vc5
->prog
.fs
->prog_data
.fs
->base
.num_inputs
;
518 STATIC_ASSERT(sizeof(key
->fs_inputs
) ==
519 sizeof(vc5
->prog
.fs
->prog_data
.fs
->input_slots
));
520 memcpy(key
->fs_inputs
, vc5
->prog
.fs
->prog_data
.fs
->input_slots
,
521 sizeof(key
->fs_inputs
));
522 key
->clamp_color
= vc5
->rasterizer
->base
.clamp_vertex_color
;
524 key
->per_vertex_point_size
=
525 (prim_mode
== PIPE_PRIM_POINTS
&&
526 vc5
->rasterizer
->base
.point_size_per_vertex
);
528 struct vc5_compiled_shader
*vs
=
529 vc5_get_compiled_shader(vc5
, &key
->base
);
530 if (vs
!= vc5
->prog
.vs
) {
532 vc5
->dirty
|= VC5_DIRTY_COMPILED_VS
;
535 key
->is_coord
= true;
536 /* Coord shaders only output varyings used by transform feedback. */
537 struct vc5_uncompiled_shader
*shader_state
= key
->base
.shader_state
;
538 memcpy(key
->fs_inputs
, shader_state
->tf_outputs
,
539 sizeof(*key
->fs_inputs
) * shader_state
->num_tf_outputs
);
540 if (shader_state
->num_tf_outputs
< key
->num_fs_inputs
) {
541 memset(&key
->fs_inputs
[shader_state
->num_tf_outputs
],
543 sizeof(*key
->fs_inputs
) * (key
->num_fs_inputs
-
544 shader_state
->num_tf_outputs
));
546 key
->num_fs_inputs
= shader_state
->num_tf_outputs
;
548 struct vc5_compiled_shader
*cs
=
549 vc5_get_compiled_shader(vc5
, &key
->base
);
550 if (cs
!= vc5
->prog
.cs
) {
552 vc5
->dirty
|= VC5_DIRTY_COMPILED_CS
;
557 vc5_update_compiled_shaders(struct vc5_context
*vc5
, uint8_t prim_mode
)
559 vc5_update_compiled_fs(vc5
, prim_mode
);
560 vc5_update_compiled_vs(vc5
, prim_mode
);
564 fs_cache_hash(const void *key
)
566 return _mesa_hash_data(key
, sizeof(struct v3d_fs_key
));
570 vs_cache_hash(const void *key
)
572 return _mesa_hash_data(key
, sizeof(struct v3d_vs_key
));
576 fs_cache_compare(const void *key1
, const void *key2
)
578 return memcmp(key1
, key2
, sizeof(struct v3d_fs_key
)) == 0;
582 vs_cache_compare(const void *key1
, const void *key2
)
584 return memcmp(key1
, key2
, sizeof(struct v3d_vs_key
)) == 0;
588 delete_from_cache_if_matches(struct hash_table
*ht
,
589 struct vc5_compiled_shader
**last_compile
,
590 struct hash_entry
*entry
,
591 struct vc5_uncompiled_shader
*so
)
593 const struct v3d_key
*key
= entry
->key
;
595 if (key
->shader_state
== so
) {
596 struct vc5_compiled_shader
*shader
= entry
->data
;
597 _mesa_hash_table_remove(ht
, entry
);
598 vc5_bo_unreference(&shader
->bo
);
600 if (shader
== *last_compile
)
601 *last_compile
= NULL
;
608 vc5_shader_state_delete(struct pipe_context
*pctx
, void *hwcso
)
610 struct vc5_context
*vc5
= vc5_context(pctx
);
611 struct vc5_uncompiled_shader
*so
= hwcso
;
613 struct hash_entry
*entry
;
614 hash_table_foreach(vc5
->fs_cache
, entry
) {
615 delete_from_cache_if_matches(vc5
->fs_cache
, &vc5
->prog
.fs
,
618 hash_table_foreach(vc5
->vs_cache
, entry
) {
619 delete_from_cache_if_matches(vc5
->vs_cache
, &vc5
->prog
.vs
,
623 ralloc_free(so
->base
.ir
.nir
);
628 vc5_fp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
630 struct vc5_context
*vc5
= vc5_context(pctx
);
631 vc5
->prog
.bind_fs
= hwcso
;
632 vc5
->dirty
|= VC5_DIRTY_UNCOMPILED_FS
;
636 vc5_vp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
638 struct vc5_context
*vc5
= vc5_context(pctx
);
639 vc5
->prog
.bind_vs
= hwcso
;
640 vc5
->dirty
|= VC5_DIRTY_UNCOMPILED_VS
;
644 vc5_program_init(struct pipe_context
*pctx
)
646 struct vc5_context
*vc5
= vc5_context(pctx
);
648 pctx
->create_vs_state
= vc5_shader_state_create
;
649 pctx
->delete_vs_state
= vc5_shader_state_delete
;
651 pctx
->create_fs_state
= vc5_shader_state_create
;
652 pctx
->delete_fs_state
= vc5_shader_state_delete
;
654 pctx
->bind_fs_state
= vc5_fp_state_bind
;
655 pctx
->bind_vs_state
= vc5_vp_state_bind
;
657 vc5
->fs_cache
= _mesa_hash_table_create(pctx
, fs_cache_hash
,
659 vc5
->vs_cache
= _mesa_hash_table_create(pctx
, vs_cache_hash
,
664 vc5_program_fini(struct pipe_context
*pctx
)
666 struct vc5_context
*vc5
= vc5_context(pctx
);
668 struct hash_entry
*entry
;
669 hash_table_foreach(vc5
->fs_cache
, entry
) {
670 struct vc5_compiled_shader
*shader
= entry
->data
;
671 vc5_bo_unreference(&shader
->bo
);
673 _mesa_hash_table_remove(vc5
->fs_cache
, entry
);
676 hash_table_foreach(vc5
->vs_cache
, entry
) {
677 struct vc5_compiled_shader
*shader
= entry
->data
;
678 vc5_bo_unreference(&shader
->bo
);
680 _mesa_hash_table_remove(vc5
->vs_cache
, entry
);