2 * Copyright © 2014-2017 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "nir/tgsi_to_nir.h"
35 #include "compiler/v3d_compiler.h"
36 #include "vc5_context.h"
37 #include "broadcom/cle/v3d_packet_v33_pack.h"
38 #include "mesa/state_tracker/st_glsl_types.h"
40 static gl_varying_slot
41 vc5_get_slot_for_driver_location(nir_shader
*s
, uint32_t driver_location
)
43 nir_foreach_variable(var
, &s
->outputs
) {
44 if (var
->data
.driver_location
== driver_location
) {
45 return var
->data
.location
;
53 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
55 * A shader can have 16 of these specs, and each one of them can write up to
56 * 16 dwords. Since we allow a total of 64 transform feedback output
57 * components (not 16 vectors), we have to group the writes of multiple
58 * varyings together in a single data spec.
61 vc5_set_transform_feedback_outputs(struct vc5_uncompiled_shader
*so
,
62 const struct pipe_stream_output_info
*stream_output
)
64 if (!stream_output
->num_outputs
)
67 struct v3d_varying_slot slots
[PIPE_MAX_SO_OUTPUTS
* 4];
70 for (int buffer
= 0; buffer
< PIPE_MAX_SO_BUFFERS
; buffer
++) {
71 uint32_t buffer_offset
= 0;
72 uint32_t vpm_start
= slot_count
;
74 for (int i
= 0; i
< stream_output
->num_outputs
; i
++) {
75 const struct pipe_stream_output
*output
=
76 &stream_output
->output
[i
];
78 if (output
->output_buffer
!= buffer
)
81 /* We assume that the SO outputs appear in increasing
82 * order in the buffer.
84 assert(output
->dst_offset
>= buffer_offset
);
86 /* Pad any undefined slots in the output */
87 for (int j
= buffer_offset
; j
< output
->dst_offset
; j
++) {
89 v3d_slot_from_slot_and_component(VARYING_SLOT_POS
, 0);
94 /* Set the coordinate shader up to output the
95 * components of this varying.
97 for (int j
= 0; j
< output
->num_components
; j
++) {
98 gl_varying_slot slot
=
99 vc5_get_slot_for_driver_location(so
->base
.ir
.nir
, output
->register_index
);
102 v3d_slot_from_slot_and_component(slot
,
103 output
->start_component
+ j
);
109 uint32_t vpm_size
= slot_count
- vpm_start
;
113 uint32_t vpm_start_offset
= vpm_start
+ 6;
116 uint32_t write_size
= MIN2(vpm_size
, 1 << 4);
118 struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked
= {
119 /* We need the offset from the coordinate shader's VPM
120 * output block, which has the [X, Y, Z, W, Xs, Ys]
121 * values at the start.
123 .first_shaded_vertex_value_to_output
= vpm_start_offset
,
124 .number_of_consecutive_vertex_values_to_output_as_32_bit_values_minus_1
= write_size
- 1,
125 .output_buffer_to_write_to
= buffer
,
128 assert(so
->num_tf_specs
!= ARRAY_SIZE(so
->tf_specs
));
129 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
130 (void *)&so
->tf_specs
[so
->num_tf_specs
],
133 /* If point size is being written by the shader, then
134 * all the VPM start offsets are shifted up by one.
135 * We won't know that until the variant is compiled,
138 unpacked
.first_shaded_vertex_value_to_output
++;
139 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
140 (void *)&so
->tf_specs_psiz
[so
->num_tf_specs
],
143 vpm_start_offset
+= write_size
;
144 vpm_size
-= write_size
;
148 so
->num_tf_outputs
= slot_count
;
149 so
->tf_outputs
= ralloc_array(so
->base
.ir
.nir
, struct v3d_varying_slot
,
151 memcpy(so
->tf_outputs
, slots
, sizeof(*slots
) * slot_count
);
155 type_size(const struct glsl_type
*type
)
157 return glsl_count_attribute_slots(type
, false);
161 uniforms_type_size(const struct glsl_type
*type
)
163 return st_glsl_storage_type_size(type
, false);
167 vc5_shader_state_create(struct pipe_context
*pctx
,
168 const struct pipe_shader_state
*cso
)
170 struct vc5_context
*vc5
= vc5_context(pctx
);
171 struct vc5_uncompiled_shader
*so
= CALLOC_STRUCT(vc5_uncompiled_shader
);
175 so
->program_id
= vc5
->next_uncompiled_program_id
++;
179 if (cso
->type
== PIPE_SHADER_IR_NIR
) {
180 /* The backend takes ownership of the NIR shader on state
185 NIR_PASS_V(s
, nir_lower_io
, nir_var_all
& ~nir_var_uniform
,
187 (nir_lower_io_options
)0);
188 NIR_PASS_V(s
, nir_lower_io
, nir_var_uniform
,
190 (nir_lower_io_options
)0);
192 assert(cso
->type
== PIPE_SHADER_IR_TGSI
);
194 if (V3D_DEBUG
& V3D_DEBUG_TGSI
) {
195 fprintf(stderr
, "prog %d TGSI:\n",
197 tgsi_dump(cso
->tokens
, 0);
198 fprintf(stderr
, "\n");
200 s
= tgsi_to_nir(cso
->tokens
, &v3d_nir_options
);
205 NIR_PASS_V(s
, nir_opt_global_to_local
);
206 NIR_PASS_V(s
, nir_lower_regs_to_ssa
);
207 NIR_PASS_V(s
, nir_normalize_cubemap_coords
);
209 NIR_PASS_V(s
, nir_lower_load_const_to_scalar
);
213 NIR_PASS_V(s
, nir_remove_dead_variables
, nir_var_local
);
215 /* Garbage collect dead instructions */
218 so
->base
.type
= PIPE_SHADER_IR_NIR
;
221 vc5_set_transform_feedback_outputs(so
, &cso
->stream_output
);
223 if (V3D_DEBUG
& (V3D_DEBUG_NIR
|
224 v3d_debug_flag_for_shader_stage(s
->info
.stage
))) {
225 fprintf(stderr
, "%s prog %d NIR:\n",
226 gl_shader_stage_name(s
->info
.stage
),
228 nir_print_shader(s
, stderr
);
229 fprintf(stderr
, "\n");
235 static struct vc5_compiled_shader
*
236 vc5_get_compiled_shader(struct vc5_context
*vc5
, struct v3d_key
*key
)
238 struct vc5_uncompiled_shader
*shader_state
= key
->shader_state
;
239 nir_shader
*s
= shader_state
->base
.ir
.nir
;
241 struct hash_table
*ht
;
243 if (s
->info
.stage
== MESA_SHADER_FRAGMENT
) {
245 key_size
= sizeof(struct v3d_fs_key
);
248 key_size
= sizeof(struct v3d_vs_key
);
251 struct hash_entry
*entry
= _mesa_hash_table_search(ht
, key
);
255 struct vc5_compiled_shader
*shader
=
256 rzalloc(NULL
, struct vc5_compiled_shader
);
258 int program_id
= shader_state
->program_id
;
260 p_atomic_inc_return(&shader_state
->compiled_variant_count
);
262 uint32_t shader_size
;
264 switch (s
->info
.stage
) {
265 case MESA_SHADER_VERTEX
:
266 shader
->prog_data
.vs
= rzalloc(shader
, struct v3d_vs_prog_data
);
268 qpu_insts
= v3d_compile_vs(vc5
->screen
->compiler
,
269 (struct v3d_vs_key
*)key
,
270 shader
->prog_data
.vs
, s
,
271 program_id
, variant_id
,
274 case MESA_SHADER_FRAGMENT
:
275 shader
->prog_data
.fs
= rzalloc(shader
, struct v3d_fs_prog_data
);
277 qpu_insts
= v3d_compile_fs(vc5
->screen
->compiler
,
278 (struct v3d_fs_key
*)key
,
279 shader
->prog_data
.fs
, s
,
280 program_id
, variant_id
,
284 unreachable("bad stage");
287 vc5_set_shader_uniform_dirty_flags(shader
);
289 shader
->bo
= vc5_bo_alloc(vc5
->screen
, shader_size
, "shader");
290 vc5_bo_map(shader
->bo
);
291 memcpy(shader
->bo
->map
, qpu_insts
, shader_size
);
295 struct vc5_key
*dup_key
;
296 dup_key
= ralloc_size(shader
, key_size
);
297 memcpy(dup_key
, key
, key_size
);
298 _mesa_hash_table_insert(ht
, dup_key
, shader
);
300 if (shader
->prog_data
.base
->spill_size
>
301 vc5
->prog
.spill_size_per_thread
) {
302 /* Max 4 QPUs per slice, 3 slices per core. We only do single
303 * core so far. This overallocates memory on smaller cores.
305 int total_spill_size
=
306 4 * 3 * shader
->prog_data
.base
->spill_size
;
308 vc5_bo_unreference(&vc5
->prog
.spill_bo
);
309 vc5
->prog
.spill_bo
= vc5_bo_alloc(vc5
->screen
,
310 total_spill_size
, "spill");
311 vc5
->prog
.spill_size_per_thread
=
312 shader
->prog_data
.base
->spill_size
;
319 vc5_setup_shared_key(struct vc5_context
*vc5
, struct v3d_key
*key
,
320 struct vc5_texture_stateobj
*texstate
)
322 const struct v3d_device_info
*devinfo
= &vc5
->screen
->devinfo
;
324 for (int i
= 0; i
< texstate
->num_textures
; i
++) {
325 struct pipe_sampler_view
*sampler
= texstate
->textures
[i
];
326 struct vc5_sampler_view
*vc5_sampler
= vc5_sampler_view(sampler
);
327 struct pipe_sampler_state
*sampler_state
=
328 texstate
->samplers
[i
];
333 key
->tex
[i
].return_size
=
334 vc5_get_tex_return_size(devinfo
,
336 sampler_state
->compare_mode
);
338 /* For 16-bit, we set up the sampler to always return 2
339 * channels (meaning no recompiles for most statechanges),
340 * while for 32 we actually scale the returns with channels.
342 if (key
->tex
[i
].return_size
== 16) {
343 key
->tex
[i
].return_channels
= 2;
344 } else if (devinfo
->ver
> 40) {
345 key
->tex
[i
].return_channels
= 4;
347 key
->tex
[i
].return_channels
=
348 vc5_get_tex_return_channels(devinfo
,
352 if (key
->tex
[i
].return_size
== 32 && devinfo
->ver
< 40) {
353 memcpy(key
->tex
[i
].swizzle
,
354 vc5_sampler
->swizzle
,
355 sizeof(vc5_sampler
->swizzle
));
357 /* For 16-bit returns, we let the sampler state handle
360 key
->tex
[i
].swizzle
[0] = PIPE_SWIZZLE_X
;
361 key
->tex
[i
].swizzle
[1] = PIPE_SWIZZLE_Y
;
362 key
->tex
[i
].swizzle
[2] = PIPE_SWIZZLE_Z
;
363 key
->tex
[i
].swizzle
[3] = PIPE_SWIZZLE_W
;
367 key
->tex
[i
].compare_mode
= sampler_state
->compare_mode
;
368 key
->tex
[i
].compare_func
= sampler_state
->compare_func
;
369 key
->tex
[i
].clamp_s
=
370 sampler_state
->wrap_s
== PIPE_TEX_WRAP_CLAMP
;
371 key
->tex
[i
].clamp_t
=
372 sampler_state
->wrap_t
== PIPE_TEX_WRAP_CLAMP
;
373 key
->tex
[i
].clamp_r
=
374 sampler_state
->wrap_r
== PIPE_TEX_WRAP_CLAMP
;
378 key
->ucp_enables
= vc5
->rasterizer
->base
.clip_plane_enable
;
382 vc5_update_compiled_fs(struct vc5_context
*vc5
, uint8_t prim_mode
)
384 struct vc5_job
*job
= vc5
->job
;
385 struct v3d_fs_key local_key
;
386 struct v3d_fs_key
*key
= &local_key
;
388 if (!(vc5
->dirty
& (VC5_DIRTY_PRIM_MODE
|
390 VC5_DIRTY_FRAMEBUFFER
|
392 VC5_DIRTY_RASTERIZER
|
393 VC5_DIRTY_SAMPLE_MASK
|
395 VC5_DIRTY_UNCOMPILED_FS
))) {
399 memset(key
, 0, sizeof(*key
));
400 vc5_setup_shared_key(vc5
, &key
->base
, &vc5
->fragtex
);
401 key
->base
.shader_state
= vc5
->prog
.bind_fs
;
402 key
->is_points
= (prim_mode
== PIPE_PRIM_POINTS
);
403 key
->is_lines
= (prim_mode
>= PIPE_PRIM_LINES
&&
404 prim_mode
<= PIPE_PRIM_LINE_STRIP
);
405 key
->clamp_color
= vc5
->rasterizer
->base
.clamp_fragment_color
;
406 if (vc5
->blend
->logicop_enable
) {
407 key
->logicop_func
= vc5
->blend
->logicop_func
;
409 key
->logicop_func
= PIPE_LOGICOP_COPY
;
412 key
->msaa
= vc5
->rasterizer
->base
.multisample
;
413 key
->sample_coverage
= (vc5
->rasterizer
->base
.multisample
&&
414 vc5
->sample_mask
!= (1 << VC5_MAX_SAMPLES
) - 1);
415 key
->sample_alpha_to_coverage
= vc5
->blend
->alpha_to_coverage
;
416 key
->sample_alpha_to_one
= vc5
->blend
->alpha_to_one
;
419 key
->depth_enabled
= (vc5
->zsa
->base
.depth
.enabled
||
420 vc5
->zsa
->base
.stencil
[0].enabled
);
421 if (vc5
->zsa
->base
.alpha
.enabled
) {
422 key
->alpha_test
= true;
423 key
->alpha_test_func
= vc5
->zsa
->base
.alpha
.func
;
426 /* gl_FragColor's propagation to however many bound color buffers
427 * there are means that the buffer count needs to be in the key.
429 key
->nr_cbufs
= vc5
->framebuffer
.nr_cbufs
;
430 key
->swap_color_rb
= vc5
->swap_color_rb
;
432 for (int i
= 0; i
< key
->nr_cbufs
; i
++) {
433 struct pipe_surface
*cbuf
= vc5
->framebuffer
.cbufs
[i
];
437 const struct util_format_description
*desc
=
438 util_format_description(cbuf
->format
);
440 if (desc
->channel
[0].type
== UTIL_FORMAT_TYPE_FLOAT
&&
441 desc
->channel
[0].size
== 32) {
442 key
->f32_color_rb
|= 1 << i
;
445 if (vc5
->prog
.bind_fs
->was_tgsi
) {
446 if (util_format_is_pure_uint(cbuf
->format
))
447 key
->uint_color_rb
|= 1 << i
;
448 else if (util_format_is_pure_sint(cbuf
->format
))
449 key
->int_color_rb
|= 1 << i
;
453 if (key
->is_points
) {
454 key
->point_sprite_mask
=
455 vc5
->rasterizer
->base
.sprite_coord_enable
;
456 key
->point_coord_upper_left
=
457 (vc5
->rasterizer
->base
.sprite_coord_mode
==
458 PIPE_SPRITE_COORD_UPPER_LEFT
);
461 key
->light_twoside
= vc5
->rasterizer
->base
.light_twoside
;
462 key
->shade_model_flat
= vc5
->rasterizer
->base
.flatshade
;
464 struct vc5_compiled_shader
*old_fs
= vc5
->prog
.fs
;
465 vc5
->prog
.fs
= vc5_get_compiled_shader(vc5
, &key
->base
);
466 if (vc5
->prog
.fs
== old_fs
)
469 vc5
->dirty
|= VC5_DIRTY_COMPILED_FS
;
472 vc5
->prog
.fs
->prog_data
.fs
->flat_shade_flags
!=
473 old_fs
->prog_data
.fs
->flat_shade_flags
) {
474 vc5
->dirty
|= VC5_DIRTY_FLAT_SHADE_FLAGS
;
477 if (old_fs
&& memcmp(vc5
->prog
.fs
->prog_data
.fs
->input_slots
,
478 old_fs
->prog_data
.fs
->input_slots
,
479 sizeof(vc5
->prog
.fs
->prog_data
.fs
->input_slots
))) {
480 vc5
->dirty
|= VC5_DIRTY_FS_INPUTS
;
485 vc5_update_compiled_vs(struct vc5_context
*vc5
, uint8_t prim_mode
)
487 struct v3d_vs_key local_key
;
488 struct v3d_vs_key
*key
= &local_key
;
490 if (!(vc5
->dirty
& (VC5_DIRTY_PRIM_MODE
|
491 VC5_DIRTY_RASTERIZER
|
494 VC5_DIRTY_UNCOMPILED_VS
|
495 VC5_DIRTY_FS_INPUTS
))) {
499 memset(key
, 0, sizeof(*key
));
500 vc5_setup_shared_key(vc5
, &key
->base
, &vc5
->verttex
);
501 key
->base
.shader_state
= vc5
->prog
.bind_vs
;
502 key
->num_fs_inputs
= vc5
->prog
.fs
->prog_data
.fs
->base
.num_inputs
;
503 STATIC_ASSERT(sizeof(key
->fs_inputs
) ==
504 sizeof(vc5
->prog
.fs
->prog_data
.fs
->input_slots
));
505 memcpy(key
->fs_inputs
, vc5
->prog
.fs
->prog_data
.fs
->input_slots
,
506 sizeof(key
->fs_inputs
));
507 key
->clamp_color
= vc5
->rasterizer
->base
.clamp_vertex_color
;
509 key
->per_vertex_point_size
=
510 (prim_mode
== PIPE_PRIM_POINTS
&&
511 vc5
->rasterizer
->base
.point_size_per_vertex
);
513 struct vc5_compiled_shader
*vs
=
514 vc5_get_compiled_shader(vc5
, &key
->base
);
515 if (vs
!= vc5
->prog
.vs
) {
517 vc5
->dirty
|= VC5_DIRTY_COMPILED_VS
;
520 key
->is_coord
= true;
521 /* Coord shaders only output varyings used by transform feedback. */
522 struct vc5_uncompiled_shader
*shader_state
= key
->base
.shader_state
;
523 memcpy(key
->fs_inputs
, shader_state
->tf_outputs
,
524 sizeof(*key
->fs_inputs
) * shader_state
->num_tf_outputs
);
525 if (shader_state
->num_tf_outputs
< key
->num_fs_inputs
) {
526 memset(&key
->fs_inputs
[shader_state
->num_tf_outputs
],
528 sizeof(*key
->fs_inputs
) * (key
->num_fs_inputs
-
529 shader_state
->num_tf_outputs
));
531 key
->num_fs_inputs
= shader_state
->num_tf_outputs
;
533 struct vc5_compiled_shader
*cs
=
534 vc5_get_compiled_shader(vc5
, &key
->base
);
535 if (cs
!= vc5
->prog
.cs
) {
537 vc5
->dirty
|= VC5_DIRTY_COMPILED_CS
;
/* Validates both compiled shader stages before a draw (FS first, since the
 * VS key depends on the FS input slots).
 */
void
vc5_update_compiled_shaders(struct vc5_context *vc5, uint8_t prim_mode)
{
        vc5_update_compiled_fs(vc5, prim_mode);
        vc5_update_compiled_vs(vc5, prim_mode);
}
549 fs_cache_hash(const void *key
)
551 return _mesa_hash_data(key
, sizeof(struct v3d_fs_key
));
555 vs_cache_hash(const void *key
)
557 return _mesa_hash_data(key
, sizeof(struct v3d_vs_key
));
561 fs_cache_compare(const void *key1
, const void *key2
)
563 return memcmp(key1
, key2
, sizeof(struct v3d_fs_key
)) == 0;
567 vs_cache_compare(const void *key1
, const void *key2
)
569 return memcmp(key1
, key2
, sizeof(struct v3d_vs_key
)) == 0;
573 delete_from_cache_if_matches(struct hash_table
*ht
,
574 struct vc5_compiled_shader
**last_compile
,
575 struct hash_entry
*entry
,
576 struct vc5_uncompiled_shader
*so
)
578 const struct v3d_key
*key
= entry
->key
;
580 if (key
->shader_state
== so
) {
581 struct vc5_compiled_shader
*shader
= entry
->data
;
582 _mesa_hash_table_remove(ht
, entry
);
583 vc5_bo_unreference(&shader
->bo
);
585 if (shader
== *last_compile
)
586 *last_compile
= NULL
;
593 vc5_shader_state_delete(struct pipe_context
*pctx
, void *hwcso
)
595 struct vc5_context
*vc5
= vc5_context(pctx
);
596 struct vc5_uncompiled_shader
*so
= hwcso
;
598 struct hash_entry
*entry
;
599 hash_table_foreach(vc5
->fs_cache
, entry
) {
600 delete_from_cache_if_matches(vc5
->fs_cache
, &vc5
->prog
.fs
,
603 hash_table_foreach(vc5
->vs_cache
, entry
) {
604 delete_from_cache_if_matches(vc5
->vs_cache
, &vc5
->prog
.vs
,
608 ralloc_free(so
->base
.ir
.nir
);
613 vc5_fp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
615 struct vc5_context
*vc5
= vc5_context(pctx
);
616 vc5
->prog
.bind_fs
= hwcso
;
617 vc5
->dirty
|= VC5_DIRTY_UNCOMPILED_FS
;
621 vc5_vp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
623 struct vc5_context
*vc5
= vc5_context(pctx
);
624 vc5
->prog
.bind_vs
= hwcso
;
625 vc5
->dirty
|= VC5_DIRTY_UNCOMPILED_VS
;
629 vc5_program_init(struct pipe_context
*pctx
)
631 struct vc5_context
*vc5
= vc5_context(pctx
);
633 pctx
->create_vs_state
= vc5_shader_state_create
;
634 pctx
->delete_vs_state
= vc5_shader_state_delete
;
636 pctx
->create_fs_state
= vc5_shader_state_create
;
637 pctx
->delete_fs_state
= vc5_shader_state_delete
;
639 pctx
->bind_fs_state
= vc5_fp_state_bind
;
640 pctx
->bind_vs_state
= vc5_vp_state_bind
;
642 vc5
->fs_cache
= _mesa_hash_table_create(pctx
, fs_cache_hash
,
644 vc5
->vs_cache
= _mesa_hash_table_create(pctx
, vs_cache_hash
,
649 vc5_program_fini(struct pipe_context
*pctx
)
651 struct vc5_context
*vc5
= vc5_context(pctx
);
653 struct hash_entry
*entry
;
654 hash_table_foreach(vc5
->fs_cache
, entry
) {
655 struct vc5_compiled_shader
*shader
= entry
->data
;
656 vc5_bo_unreference(&shader
->bo
);
658 _mesa_hash_table_remove(vc5
->fs_cache
, entry
);
661 hash_table_foreach(vc5
->vs_cache
, entry
) {
662 struct vc5_compiled_shader
*shader
= entry
->data
;
663 vc5_bo_unreference(&shader
->bo
);
665 _mesa_hash_table_remove(vc5
->vs_cache
, entry
);