2 * Copyright © 2014-2017 Broadcom
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "nir/tgsi_to_nir.h"
35 #include "compiler/v3d_compiler.h"
36 #include "v3d_context.h"
37 #include "broadcom/cle/v3d_packet_v33_pack.h"
38 #include "mesa/state_tracker/st_glsl_types.h"
/* Maps a NIR output's driver_location index back to its gl_varying_slot by
 * scanning the shader's output variable list for a matching
 * data.driver_location.
 * NOTE(review): this text is a garbled extraction; the braces and the
 * behavior when no output matches are missing from this view — confirm
 * against the upstream file.
 */
40 static gl_varying_slot
41 v3d_get_slot_for_driver_location(nir_shader
*s
, uint32_t driver_location
)
43 nir_foreach_variable(var
, &s
->outputs
) {
44 if (var
->data
.driver_location
== driver_location
) {
45 return var
->data
.location
;
/* Precomputes the packed V3D33 TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC records
 * (both the normal and the point-size-shifted variants) plus the list of
 * coord-shader varying slots needed for transform feedback, storing them on
 * the uncompiled shader so draw time only has to copy them out.
 * NOTE(review): garbled extraction — the slot_count declaration, the
 * slots[] writes, and loop continue/termination lines are missing from
 * this view.
 */
53 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
55 * A shader can have 16 of these specs, and each one of them can write up to
56 * 16 dwords. Since we allow a total of 64 transform feedback output
57 * components (not 16 vectors), we have to group the writes of multiple
58 * varyings together in a single data spec.
61 v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader
*so
,
62 const struct pipe_stream_output_info
*stream_output
)
64 if (!stream_output
->num_outputs
)
67 struct v3d_varying_slot slots
[PIPE_MAX_SO_OUTPUTS
* 4];
/* Walk each SO buffer, collecting the slots written to it in order so
 * consecutive components can be grouped into one spec.
 */
70 for (int buffer
= 0; buffer
< PIPE_MAX_SO_BUFFERS
; buffer
++) {
71 uint32_t buffer_offset
= 0;
72 uint32_t vpm_start
= slot_count
;
74 for (int i
= 0; i
< stream_output
->num_outputs
; i
++) {
75 const struct pipe_stream_output
*output
=
76 &stream_output
->output
[i
];
78 if (output
->output_buffer
!= buffer
)
81 /* We assume that the SO outputs appear in increasing
82 * order in the buffer.
84 assert(output
->dst_offset
>= buffer_offset
);
86 /* Pad any undefined slots in the output */
87 for (int j
= buffer_offset
; j
< output
->dst_offset
; j
++) {
89 v3d_slot_from_slot_and_component(VARYING_SLOT_POS
, 0);
94 /* Set the coordinate shader up to output the
95 * components of this varying.
97 for (int j
= 0; j
< output
->num_components
; j
++) {
98 gl_varying_slot slot
=
99 v3d_get_slot_for_driver_location(so
->base
.ir
.nir
, output
->register_index
);
102 v3d_slot_from_slot_and_component(slot
,
103 output
->start_component
+ j
);
109 uint32_t vpm_size
= slot_count
- vpm_start
;
113 uint32_t vpm_start_offset
= vpm_start
+ 6;
/* Each spec can emit at most 16 consecutive 32-bit values. */
116 uint32_t write_size
= MIN2(vpm_size
, 1 << 4);
118 struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked
= {
119 /* We need the offset from the coordinate shader's VPM
120 * output block, which has the [X, Y, Z, W, Xs, Ys]
121 * values at the start.
123 .first_shaded_vertex_value_to_output
= vpm_start_offset
,
124 .number_of_consecutive_vertex_values_to_output_as_32_bit_values
= write_size
,
125 .output_buffer_to_write_to
= buffer
,
129 assert(unpacked
.first_shaded_vertex_value_to_output
!= 8 ||
130 so
->num_tf_specs
!= 0);
132 assert(so
->num_tf_specs
!= ARRAY_SIZE(so
->tf_specs
));
133 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
134 (void *)&so
->tf_specs
[so
->num_tf_specs
],
137 /* If point size is being written by the shader, then
138 * all the VPM start offsets are shifted up by one.
139 * We won't know that until the variant is compiled,
142 unpacked
.first_shaded_vertex_value_to_output
++;
145 assert(unpacked
.first_shaded_vertex_value_to_output
!= 8 ||
146 so
->num_tf_specs
!= 0);
148 V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL
,
149 (void *)&so
->tf_specs_psiz
[so
->num_tf_specs
],
152 vpm_start_offset
+= write_size
;
153 vpm_size
-= write_size
;
155 so
->base
.stream_output
.stride
[buffer
] =
156 stream_output
->stride
[buffer
];
/* Stash the coord-shader varying list; it is consumed when building the
 * coord-shader key in v3d_update_compiled_vs().
 */
159 so
->num_tf_outputs
= slot_count
;
160 so
->tf_outputs
= ralloc_array(so
->base
.ir
.nir
, struct v3d_varying_slot
,
162 memcpy(so
->tf_outputs
, slots
, sizeof(*slots
) * slot_count
);
166 type_size(const struct glsl_type
*type
)
168 return glsl_count_attribute_slots(type
, false);
172 uniforms_type_size(const struct glsl_type
*type
)
174 return st_glsl_storage_type_size(type
, false);
/* pipe_context::create_{vs,fs}_state hook: wraps the incoming CSO in a
 * v3d_uncompiled_shader.  NIR input is taken over directly; TGSI input is
 * optionally dumped (V3D_DEBUG_TGSI) and translated via tgsi_to_nir().
 * The NIR is then lowered (I/O with separate uniform sizing, regs-to-SSA,
 * cubemap normalization, scalar load_const, dead-variable removal),
 * transform-feedback specs are precomputed, and the final NIR may be
 * printed under V3D_DEBUG_NIR.
 * NOTE(review): garbled extraction — allocation-failure checks, the `s`
 * declaration, several NIR_PASS_V lines and the return are missing from
 * this view.
 */
178 v3d_shader_state_create(struct pipe_context
*pctx
,
179 const struct pipe_shader_state
*cso
)
181 struct v3d_context
*v3d
= v3d_context(pctx
);
182 struct v3d_uncompiled_shader
*so
= CALLOC_STRUCT(v3d_uncompiled_shader
);
/* Uncompiled-program id used for debug naming of compiled variants. */
186 so
->program_id
= v3d
->next_uncompiled_program_id
++;
190 if (cso
->type
== PIPE_SHADER_IR_NIR
) {
191 /* The backend takes ownership of the NIR shader on state
196 NIR_PASS_V(s
, nir_lower_io
, nir_var_all
& ~nir_var_uniform
,
198 (nir_lower_io_options
)0);
/* Uniforms are sized with the storage-slot callback, unlike other I/O. */
199 NIR_PASS_V(s
, nir_lower_io
, nir_var_uniform
,
201 (nir_lower_io_options
)0);
203 assert(cso
->type
== PIPE_SHADER_IR_TGSI
);
205 if (V3D_DEBUG
& V3D_DEBUG_TGSI
) {
206 fprintf(stderr
, "prog %d TGSI:\n",
208 tgsi_dump(cso
->tokens
, 0);
209 fprintf(stderr
, "\n");
211 s
= tgsi_to_nir(cso
->tokens
, &v3d_nir_options
);
216 NIR_PASS_V(s
, nir_opt_global_to_local
);
217 NIR_PASS_V(s
, nir_lower_regs_to_ssa
);
218 NIR_PASS_V(s
, nir_normalize_cubemap_coords
);
220 NIR_PASS_V(s
, nir_lower_load_const_to_scalar
);
224 NIR_PASS_V(s
, nir_remove_dead_variables
, nir_var_local
);
226 /* Garbage collect dead instructions */
229 so
->base
.type
= PIPE_SHADER_IR_NIR
;
232 v3d_set_transform_feedback_outputs(so
, &cso
->stream_output
);
234 if (V3D_DEBUG
& (V3D_DEBUG_NIR
|
235 v3d_debug_flag_for_shader_stage(s
->info
.stage
))) {
236 fprintf(stderr
, "%s prog %d NIR:\n",
237 gl_shader_stage_name(s
->info
.stage
),
239 nir_print_shader(s
, stderr
);
240 fprintf(stderr
, "\n");
/* Returns the compiled variant for `key`, compiling and caching it on miss.
 * The per-stage cache (fs_cache/vs_cache) is keyed by the raw key bytes;
 * on miss the NIR is compiled to QPU code, uploaded into a BO, and the
 * entry inserted under a ralloc'd copy of the key.  If this variant spills
 * more than the currently reserved per-thread scratch, the shared spill BO
 * is reallocated for the whole core (4 QPUs/slice * 3 slices).
 * NOTE(review): garbled extraction — the `ht` selection assignments, the
 * cache-hit early return, variant_id setup, NIR cloning and the final
 * return are among the missing lines.
 */
246 static struct v3d_compiled_shader
*
247 v3d_get_compiled_shader(struct v3d_context
*v3d
, struct v3d_key
*key
)
249 struct v3d_uncompiled_shader
*shader_state
= key
->shader_state
;
250 nir_shader
*s
= shader_state
->base
.ir
.nir
;
252 struct hash_table
*ht
;
254 if (s
->info
.stage
== MESA_SHADER_FRAGMENT
) {
256 key_size
= sizeof(struct v3d_fs_key
);
259 key_size
= sizeof(struct v3d_vs_key
);
262 struct hash_entry
*entry
= _mesa_hash_table_search(ht
, key
);
266 struct v3d_compiled_shader
*shader
=
267 rzalloc(NULL
, struct v3d_compiled_shader
);
269 int program_id
= shader_state
->program_id
;
271 p_atomic_inc_return(&shader_state
->compiled_variant_count
);
273 uint32_t shader_size
;
/* Stage-specific compile producing QPU instructions and prog_data. */
275 switch (s
->info
.stage
) {
276 case MESA_SHADER_VERTEX
:
277 shader
->prog_data
.vs
= rzalloc(shader
, struct v3d_vs_prog_data
);
279 qpu_insts
= v3d_compile_vs(v3d
->screen
->compiler
,
280 (struct v3d_vs_key
*)key
,
281 shader
->prog_data
.vs
, s
,
282 program_id
, variant_id
,
285 case MESA_SHADER_FRAGMENT
:
286 shader
->prog_data
.fs
= rzalloc(shader
, struct v3d_fs_prog_data
);
288 qpu_insts
= v3d_compile_fs(v3d
->screen
->compiler
,
289 (struct v3d_fs_key
*)key
,
290 shader
->prog_data
.fs
, s
,
291 program_id
, variant_id
,
295 unreachable("bad stage");
298 v3d_set_shader_uniform_dirty_flags(shader
);
/* Upload the QPU instruction stream into a GPU buffer object. */
300 shader
->bo
= v3d_bo_alloc(v3d
->screen
, shader_size
, "shader");
301 v3d_bo_map(shader
->bo
);
302 memcpy(shader
->bo
->map
, qpu_insts
, shader_size
);
/* The cache owns a copy of the key; the caller's key is stack-local. */
306 struct v3d_key
*dup_key
;
307 dup_key
= ralloc_size(shader
, key_size
);
308 memcpy(dup_key
, key
, key_size
);
309 _mesa_hash_table_insert(ht
, dup_key
, shader
);
311 if (shader
->prog_data
.base
->spill_size
>
312 v3d
->prog
.spill_size_per_thread
) {
313 /* Max 4 QPUs per slice, 3 slices per core. We only do single
314 * core so far. This overallocates memory on smaller cores.
316 int total_spill_size
=
317 4 * 3 * shader
->prog_data
.base
->spill_size
;
319 v3d_bo_unreference(&v3d
->prog
.spill_bo
);
320 v3d
->prog
.spill_bo
= v3d_bo_alloc(v3d
->screen
,
321 total_spill_size
, "spill");
322 v3d
->prog
.spill_size_per_thread
=
323 shader
->prog_data
.base
->spill_size
;
/* Fills the stage-independent portion of a shader key from the bound
 * sampler views/states: per-texture return size and channel count,
 * swizzles (only baked into the key for 32-bit returns on pre-4.x HW),
 * shadow-compare mode/func, CLAMP wrap flags, and the rasterizer's
 * user-clip-plane enable mask.
 * NOTE(review): garbled extraction — trailing arguments of the
 * v3d_get_tex_return_size/channels calls and some braces are missing
 * from this view.
 */
330 v3d_setup_shared_key(struct v3d_context
*v3d
, struct v3d_key
*key
,
331 struct v3d_texture_stateobj
*texstate
)
333 const struct v3d_device_info
*devinfo
= &v3d
->screen
->devinfo
;
335 for (int i
= 0; i
< texstate
->num_textures
; i
++) {
336 struct pipe_sampler_view
*sampler
= texstate
->textures
[i
];
337 struct v3d_sampler_view
*v3d_sampler
= v3d_sampler_view(sampler
);
338 struct pipe_sampler_state
*sampler_state
=
339 texstate
->samplers
[i
];
344 key
->tex
[i
].return_size
=
345 v3d_get_tex_return_size(devinfo
,
347 sampler_state
->compare_mode
);
349 /* For 16-bit, we set up the sampler to always return 2
350 * channels (meaning no recompiles for most statechanges),
351 * while for 32 we actually scale the returns with channels.
353 if (key
->tex
[i
].return_size
== 16) {
354 key
->tex
[i
].return_channels
= 2;
355 } else if (devinfo
->ver
> 40) {
356 key
->tex
[i
].return_channels
= 4;
358 key
->tex
[i
].return_channels
=
359 v3d_get_tex_return_channels(devinfo
,
363 if (key
->tex
[i
].return_size
== 32 && devinfo
->ver
< 40) {
364 memcpy(key
->tex
[i
].swizzle
,
365 v3d_sampler
->swizzle
,
366 sizeof(v3d_sampler
->swizzle
));
368 /* For 16-bit returns, we let the sampler state handle
371 key
->tex
[i
].swizzle
[0] = PIPE_SWIZZLE_X
;
372 key
->tex
[i
].swizzle
[1] = PIPE_SWIZZLE_Y
;
373 key
->tex
[i
].swizzle
[2] = PIPE_SWIZZLE_Z
;
374 key
->tex
[i
].swizzle
[3] = PIPE_SWIZZLE_W
;
378 key
->tex
[i
].compare_mode
= sampler_state
->compare_mode
;
379 key
->tex
[i
].compare_func
= sampler_state
->compare_func
;
/* PIPE_TEX_WRAP_CLAMP needs shader assistance, so it is in the key. */
380 key
->tex
[i
].clamp_s
=
381 sampler_state
->wrap_s
== PIPE_TEX_WRAP_CLAMP
;
382 key
->tex
[i
].clamp_t
=
383 sampler_state
->wrap_t
== PIPE_TEX_WRAP_CLAMP
;
384 key
->tex
[i
].clamp_r
=
385 sampler_state
->wrap_r
== PIPE_TEX_WRAP_CLAMP
;
389 key
->ucp_enables
= v3d
->rasterizer
->base
.clip_plane_enable
;
/* Builds a v3d_fs_key from current draw state (primitive mode, blend,
 * zsa, rasterizer, sample state, framebuffer formats, point-sprite
 * state), fetches or compiles the matching FS variant, and raises the
 * fine-grained dirty flags for every prog_data field that changed
 * relative to the previous compiled FS.
 * NOTE(review): garbled extraction — the early-out return inside the
 * dirty check, some dirty bits in the mask, per-cbuf NULL checks and
 * closing braces are missing from this view.
 */
393 v3d_update_compiled_fs(struct v3d_context
*v3d
, uint8_t prim_mode
)
395 struct v3d_job
*job
= v3d
->job
;
396 struct v3d_fs_key local_key
;
397 struct v3d_fs_key
*key
= &local_key
;
/* Skip the (expensive) key build when no FS-relevant state changed. */
399 if (!(v3d
->dirty
& (VC5_DIRTY_PRIM_MODE
|
401 VC5_DIRTY_FRAMEBUFFER
|
403 VC5_DIRTY_RASTERIZER
|
404 VC5_DIRTY_SAMPLE_STATE
|
406 VC5_DIRTY_UNCOMPILED_FS
))) {
410 memset(key
, 0, sizeof(*key
));
411 v3d_setup_shared_key(v3d
, &key
->base
, &v3d
->fragtex
);
412 key
->base
.shader_state
= v3d
->prog
.bind_fs
;
413 key
->is_points
= (prim_mode
== PIPE_PRIM_POINTS
);
414 key
->is_lines
= (prim_mode
>= PIPE_PRIM_LINES
&&
415 prim_mode
<= PIPE_PRIM_LINE_STRIP
);
416 key
->clamp_color
= v3d
->rasterizer
->base
.clamp_fragment_color
;
417 if (v3d
->blend
->base
.logicop_enable
) {
418 key
->logicop_func
= v3d
->blend
->base
.logicop_func
;
420 key
->logicop_func
= PIPE_LOGICOP_COPY
;
423 key
->msaa
= v3d
->rasterizer
->base
.multisample
;
424 key
->sample_coverage
= (v3d
->rasterizer
->base
.multisample
&&
425 v3d
->sample_mask
!= (1 << VC5_MAX_SAMPLES
) - 1);
426 key
->sample_alpha_to_coverage
= v3d
->blend
->base
.alpha_to_coverage
;
427 key
->sample_alpha_to_one
= v3d
->blend
->base
.alpha_to_one
;
430 key
->depth_enabled
= (v3d
->zsa
->base
.depth
.enabled
||
431 v3d
->zsa
->base
.stencil
[0].enabled
);
432 if (v3d
->zsa
->base
.alpha
.enabled
) {
433 key
->alpha_test
= true;
434 key
->alpha_test_func
= v3d
->zsa
->base
.alpha
.func
;
437 /* gl_FragColor's propagation to however many bound color buffers
438 * there are means that the buffer count needs to be in the key.
440 key
->nr_cbufs
= v3d
->framebuffer
.nr_cbufs
;
441 key
->swap_color_rb
= v3d
->swap_color_rb
;
443 for (int i
= 0; i
< key
->nr_cbufs
; i
++) {
444 struct pipe_surface
*cbuf
= v3d
->framebuffer
.cbufs
[i
];
448 const struct util_format_description
*desc
=
449 util_format_description(cbuf
->format
);
/* 32-bit float render targets need shader-side handling. */
451 if (desc
->channel
[0].type
== UTIL_FORMAT_TYPE_FLOAT
&&
452 desc
->channel
[0].size
== 32) {
453 key
->f32_color_rb
|= 1 << i
;
/* TGSI-sourced shaders lack NIR's integer-output info, so record
 * pure int/uint render targets in the key.
 */
456 if (v3d
->prog
.bind_fs
->was_tgsi
) {
457 if (util_format_is_pure_uint(cbuf
->format
))
458 key
->uint_color_rb
|= 1 << i
;
459 else if (util_format_is_pure_sint(cbuf
->format
))
460 key
->int_color_rb
|= 1 << i
;
464 if (key
->is_points
) {
465 key
->point_sprite_mask
=
466 v3d
->rasterizer
->base
.sprite_coord_enable
;
467 key
->point_coord_upper_left
=
468 (v3d
->rasterizer
->base
.sprite_coord_mode
==
469 PIPE_SPRITE_COORD_UPPER_LEFT
);
472 key
->light_twoside
= v3d
->rasterizer
->base
.light_twoside
;
473 key
->shade_model_flat
= v3d
->rasterizer
->base
.flatshade
;
475 struct v3d_compiled_shader
*old_fs
= v3d
->prog
.fs
;
476 v3d
->prog
.fs
= v3d_get_compiled_shader(v3d
, &key
->base
);
477 if (v3d
->prog
.fs
== old_fs
)
480 v3d
->dirty
|= VC5_DIRTY_COMPILED_FS
;
/* Only raise the narrower dirty bits for prog_data that truly changed,
 * to avoid re-emitting unrelated state.
 */
483 if (v3d
->prog
.fs
->prog_data
.fs
->flat_shade_flags
!=
484 old_fs
->prog_data
.fs
->flat_shade_flags
) {
485 v3d
->dirty
|= VC5_DIRTY_FLAT_SHADE_FLAGS
;
488 if (v3d
->prog
.fs
->prog_data
.fs
->noperspective_flags
!=
489 old_fs
->prog_data
.fs
->noperspective_flags
) {
490 v3d
->dirty
|= VC5_DIRTY_NOPERSPECTIVE_FLAGS
;
493 if (v3d
->prog
.fs
->prog_data
.fs
->centroid_flags
!=
494 old_fs
->prog_data
.fs
->centroid_flags
) {
495 v3d
->dirty
|= VC5_DIRTY_CENTROID_FLAGS
;
499 if (old_fs
&& memcmp(v3d
->prog
.fs
->prog_data
.fs
->input_slots
,
500 old_fs
->prog_data
.fs
->input_slots
,
501 sizeof(v3d
->prog
.fs
->prog_data
.fs
->input_slots
))) {
502 v3d
->dirty
|= VC5_DIRTY_FS_INPUTS
;
/* Builds a v3d_vs_key (whose fs_inputs must mirror the compiled FS's
 * input_slots — see the STATIC_ASSERT) and fetches the VS variant; then
 * rebuilds the key with is_coord = true and only the transform-feedback
 * outputs as inputs to fetch the coordinate-shader variant.  Raises
 * COMPILED_VS / COMPILED_CS dirty bits when either variant changed.
 * NOTE(review): garbled extraction — the early-out return, the memset
 * fill value for the padded fs_inputs tail, and the vs/cs assignments
 * into v3d->prog are missing from this view.
 */
507 v3d_update_compiled_vs(struct v3d_context
*v3d
, uint8_t prim_mode
)
509 struct v3d_vs_key local_key
;
510 struct v3d_vs_key
*key
= &local_key
;
512 if (!(v3d
->dirty
& (VC5_DIRTY_PRIM_MODE
|
513 VC5_DIRTY_RASTERIZER
|
516 VC5_DIRTY_UNCOMPILED_VS
|
517 VC5_DIRTY_FS_INPUTS
))) {
521 memset(key
, 0, sizeof(*key
));
522 v3d_setup_shared_key(v3d
, &key
->base
, &v3d
->verttex
);
523 key
->base
.shader_state
= v3d
->prog
.bind_vs
;
524 key
->num_fs_inputs
= v3d
->prog
.fs
->prog_data
.fs
->base
.num_inputs
;
525 STATIC_ASSERT(sizeof(key
->fs_inputs
) ==
526 sizeof(v3d
->prog
.fs
->prog_data
.fs
->input_slots
));
527 memcpy(key
->fs_inputs
, v3d
->prog
.fs
->prog_data
.fs
->input_slots
,
528 sizeof(key
->fs_inputs
));
529 key
->clamp_color
= v3d
->rasterizer
->base
.clamp_vertex_color
;
531 key
->per_vertex_point_size
=
532 (prim_mode
== PIPE_PRIM_POINTS
&&
533 v3d
->rasterizer
->base
.point_size_per_vertex
);
535 struct v3d_compiled_shader
*vs
=
536 v3d_get_compiled_shader(v3d
, &key
->base
);
537 if (vs
!= v3d
->prog
.vs
) {
539 v3d
->dirty
|= VC5_DIRTY_COMPILED_VS
;
/* Second compile: the coordinate (binning) shader variant. */
542 key
->is_coord
= true;
543 /* Coord shaders only output varyings used by transform feedback. */
544 struct v3d_uncompiled_shader
*shader_state
= key
->base
.shader_state
;
545 memcpy(key
->fs_inputs
, shader_state
->tf_outputs
,
546 sizeof(*key
->fs_inputs
) * shader_state
->num_tf_outputs
);
547 if (shader_state
->num_tf_outputs
< key
->num_fs_inputs
) {
548 memset(&key
->fs_inputs
[shader_state
->num_tf_outputs
],
550 sizeof(*key
->fs_inputs
) * (key
->num_fs_inputs
-
551 shader_state
->num_tf_outputs
));
553 key
->num_fs_inputs
= shader_state
->num_tf_outputs
;
555 struct v3d_compiled_shader
*cs
=
556 v3d_get_compiled_shader(v3d
, &key
->base
);
557 if (cs
!= v3d
->prog
.cs
) {
559 v3d
->dirty
|= VC5_DIRTY_COMPILED_CS
;
/* Refreshes both compiled shader stages for the draw's primitive mode.
 * The FS must be updated first: v3d_update_compiled_vs() reads the
 * compiled FS's input_slots/num_inputs when building the VS key.
 */
void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}
571 fs_cache_hash(const void *key
)
573 return _mesa_hash_data(key
, sizeof(struct v3d_fs_key
));
577 vs_cache_hash(const void *key
)
579 return _mesa_hash_data(key
, sizeof(struct v3d_vs_key
));
583 fs_cache_compare(const void *key1
, const void *key2
)
585 return memcmp(key1
, key2
, sizeof(struct v3d_fs_key
)) == 0;
589 vs_cache_compare(const void *key1
, const void *key2
)
591 return memcmp(key1
, key2
, sizeof(struct v3d_vs_key
)) == 0;
/* Removes a variant-cache entry whose key references the uncompiled shader
 * being deleted: drops the entry from the table, unreferences the
 * variant's BO, and clears *last_compile if it pointed at the removed
 * variant so stale pointers aren't dereferenced later.
 * NOTE(review): garbled extraction — the free of the compiled shader
 * struct itself is not visible here.
 */
595 delete_from_cache_if_matches(struct hash_table
*ht
,
596 struct v3d_compiled_shader
**last_compile
,
597 struct hash_entry
*entry
,
598 struct v3d_uncompiled_shader
*so
)
600 const struct v3d_key
*key
= entry
->key
;
602 if (key
->shader_state
== so
) {
603 struct v3d_compiled_shader
*shader
= entry
->data
;
604 _mesa_hash_table_remove(ht
, entry
);
605 v3d_bo_unreference(&shader
->bo
);
607 if (shader
== *last_compile
)
608 *last_compile
= NULL
;
/* pipe_context::delete_{vs,fs}_state hook: purges both variant caches of
 * every entry compiled from this CSO (also clearing the context's cached
 * fs/vs pointers if they matched), then frees the NIR shader owned by the
 * uncompiled-shader wrapper.
 * NOTE(review): garbled extraction — the trailing arguments to
 * delete_from_cache_if_matches() and the free of `so` itself are not
 * visible here.
 */
615 v3d_shader_state_delete(struct pipe_context
*pctx
, void *hwcso
)
617 struct v3d_context
*v3d
= v3d_context(pctx
);
618 struct v3d_uncompiled_shader
*so
= hwcso
;
620 struct hash_entry
*entry
;
621 hash_table_foreach(v3d
->fs_cache
, entry
) {
622 delete_from_cache_if_matches(v3d
->fs_cache
, &v3d
->prog
.fs
,
625 hash_table_foreach(v3d
->vs_cache
, entry
) {
626 delete_from_cache_if_matches(v3d
->vs_cache
, &v3d
->prog
.vs
,
630 ralloc_free(so
->base
.ir
.nir
);
635 v3d_fp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
637 struct v3d_context
*v3d
= v3d_context(pctx
);
638 v3d
->prog
.bind_fs
= hwcso
;
639 v3d
->dirty
|= VC5_DIRTY_UNCOMPILED_FS
;
643 v3d_vp_state_bind(struct pipe_context
*pctx
, void *hwcso
)
645 struct v3d_context
*v3d
= v3d_context(pctx
);
646 v3d
->prog
.bind_vs
= hwcso
;
647 v3d
->dirty
|= VC5_DIRTY_UNCOMPILED_VS
;
/* Wires the shader CSO create/delete/bind hooks into the pipe_context and
 * allocates the per-context fs/vs variant caches (ralloc'd off pctx).
 * NOTE(review): garbled extraction — the compare-callback arguments to
 * _mesa_hash_table_create() (fs_cache_compare / vs_cache_compare) are on
 * lines missing from this view.
 */
651 v3d_program_init(struct pipe_context
*pctx
)
653 struct v3d_context
*v3d
= v3d_context(pctx
);
655 pctx
->create_vs_state
= v3d_shader_state_create
;
656 pctx
->delete_vs_state
= v3d_shader_state_delete
;
658 pctx
->create_fs_state
= v3d_shader_state_create
;
659 pctx
->delete_fs_state
= v3d_shader_state_delete
;
661 pctx
->bind_fs_state
= v3d_fp_state_bind
;
662 pctx
->bind_vs_state
= v3d_vp_state_bind
;
664 v3d
->fs_cache
= _mesa_hash_table_create(pctx
, fs_cache_hash
,
666 v3d
->vs_cache
= _mesa_hash_table_create(pctx
, vs_cache_hash
,
/* Context teardown: walks both variant caches, unreferencing each
 * variant's BO and removing the entry, then drops the shared spill BO
 * reference.
 * NOTE(review): garbled extraction — the free of each compiled-shader
 * struct inside the loops is not visible here.
 */
671 v3d_program_fini(struct pipe_context
*pctx
)
673 struct v3d_context
*v3d
= v3d_context(pctx
);
675 struct hash_entry
*entry
;
676 hash_table_foreach(v3d
->fs_cache
, entry
) {
677 struct v3d_compiled_shader
*shader
= entry
->data
;
678 v3d_bo_unreference(&shader
->bo
);
680 _mesa_hash_table_remove(v3d
->fs_cache
, entry
);
683 hash_table_foreach(v3d
->vs_cache
, entry
) {
684 struct v3d_compiled_shader
*shader
= entry
->data
;
685 v3d_bo_unreference(&shader
->bo
);
687 _mesa_hash_table_remove(v3d
->vs_cache
, entry
);
690 v3d_bo_unreference(&v3d
->prog
.spill_bo
);