v3d: Make an array for frag/vert texture state in the context.
src/gallium/drivers/v3d/v3d_program.c
/*
 * Copyright © 2014-2017 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "util/u_upload_mgr.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "compiler/v3d_compiler.h"
#include "v3d_context.h"
#include "broadcom/cle/v3d_packet_v33_pack.h"
#include "mesa/state_tracker/st_glsl_types.h"

static gl_varying_slot
v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
{
        nir_foreach_variable(var, &s->outputs) {
                if (var->data.driver_location == driver_location) {
                        return var->data.location;
                }
        }

        return -1;
}

/**
 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
 *
 * A shader can have 16 of these specs, and each one of them can write up to
 * 16 dwords. Since we allow a total of 64 transform feedback output
 * components (not 16 vectors), we have to group the writes of multiple
 * varyings together in a single data spec.
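 *
 * For example (illustrative, not from the hardware spec): streaming out a
 * vec4 at dst_offset 0 and a vec2 at dst_offset 4 of the same buffer yields
 * six consecutive slots that fit in a single 16-dword spec, while any gap
 * between dst_offsets is padded below with dummy VARYING_SLOT_POS slots.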
 */
static void
v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                uint32_t vpm_start_offset = vpm_start + 6;

                while (vpm_size) {
                        uint32_t write_size = MIN2(vpm_size, 1 << 4);

                        struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                                /* We need the offset from the coordinate shader's VPM
                                 * output block, which has the [X, Y, Z, W, Xs, Ys]
                                 * values at the start.
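                                 * (Hence the "+ 6" applied when computing
                                 * vpm_start_offset above.)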
                                 */
                                .first_shaded_vertex_value_to_output = vpm_start_offset,
                                .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
                                .output_buffer_to_write_to = buffer,
                        };

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs[so->num_tf_specs],
                                                                       &unpacked);

                        /* If point size is being written by the shader, then
                         * all the VPM start offsets are shifted up by one.
                         * We won't know that until the variant is compiled,
                         * though.
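                         * (Hence two spec tables get packed here, tf_specs and
                         * tf_specs_psiz; presumably the draw-time code emits
                         * whichever one matches the compiled variant.)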
                         */
                        unpacked.first_shaded_vertex_value_to_output++;

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs_psiz[so->num_tf_specs],
                                                                       &unpacked);
                        so->num_tf_specs++;
                        vpm_start_offset += write_size;
                        vpm_size -= write_size;
                }
                so->base.stream_output.stride[buffer] =
                        stream_output->stride[buffer];
        }

        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}

static int
type_size(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}

static int
uniforms_type_size(const struct glsl_type *type)
{
        return st_glsl_storage_type_size(type, false);
}

static void *
v3d_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = v3d->next_uncompiled_program_id++;

        nir_shader *s;

        if (cso->type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = cso->ir.nir;

                NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
                           uniforms_type_size,
                           (nir_lower_io_options)0);
        } else {
                assert(cso->type == PIPE_SHADER_IR_TGSI);

                if (V3D_DEBUG & V3D_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(cso->tokens, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(cso->tokens, &v3d_nir_options);

                so->was_tgsi = true;
        }

        nir_variable_mode lower_mode = nir_var_all & ~nir_var_uniform;
        if (s->info.stage == MESA_SHADER_VERTEX)
                lower_mode &= ~(nir_var_shader_in | nir_var_shader_out);
        NIR_PASS_V(s, nir_lower_io, lower_mode,
                   type_size,
                   (nir_lower_io_options)0);

        NIR_PASS_V(s, nir_opt_global_to_local);
        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        v3d_set_transform_feedback_outputs(so, &cso->stream_output);

        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(s->info.stage))) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}

static struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d, struct v3d_key *key)
{
        struct v3d_uncompiled_shader *shader_state = key->shader_state;
        nir_shader *s = shader_state->base.ir.nir;

        struct hash_table *ht;
        uint32_t key_size;
        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                ht = v3d->fs_cache;
                key_size = sizeof(struct v3d_fs_key);
        } else {
                ht = v3d->vs_cache;
                key_size = sizeof(struct v3d_vs_key);
        }

        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct v3d_compiled_shader *shader =
                rzalloc(NULL, struct v3d_compiled_shader);

        int program_id = shader_state->program_id;
        int variant_id =
                p_atomic_inc_return(&shader_state->compiled_variant_count);
        uint64_t *qpu_insts;
        uint32_t shader_size;

        switch (s->info.stage) {
        case MESA_SHADER_VERTEX:
                shader->prog_data.vs = rzalloc(shader, struct v3d_vs_prog_data);

                qpu_insts = v3d_compile_vs(v3d->screen->compiler,
                                           (struct v3d_vs_key *)key,
                                           shader->prog_data.vs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        case MESA_SHADER_FRAGMENT:
                shader->prog_data.fs = rzalloc(shader, struct v3d_fs_prog_data);

                qpu_insts = v3d_compile_fs(v3d->screen->compiler,
                                           (struct v3d_fs_key *)key,
                                           shader->prog_data.fs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        default:
                unreachable("bad stage");
        }

        v3d_set_shader_uniform_dirty_flags(shader);

        u_upload_data(v3d->state_uploader, 0, shader_size, 8,
                      qpu_insts, &shader->offset, &shader->resource);

        free(qpu_insts);

        struct v3d_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        if (shader->prog_data.base->spill_size >
            v3d->prog.spill_size_per_thread) {
                /* Max 4 QPUs per slice, 3 slices per core. We only do single
                 * core so far. This overallocates memory on smaller cores.
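                 * (Illustrative arithmetic: a spill_size of 4KB reserves
                 * 4 * 3 * 4KB = 48KB for the spill BO.)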
                 */
                int total_spill_size =
                        4 * 3 * shader->prog_data.base->spill_size;

                v3d_bo_unreference(&v3d->prog.spill_bo);
                v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
                                                  total_spill_size, "spill");
                v3d->prog.spill_size_per_thread =
                        shader->prog_data.base->spill_size;
        }

        return shader;
}

static void
v3d_free_compiled_shader(struct v3d_compiled_shader *shader)
{
        pipe_resource_reference(&shader->resource, NULL);
        ralloc_free(shader);
}

static void
v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
                     struct v3d_texture_stateobj *texstate)
{
        const struct v3d_device_info *devinfo = &v3d->screen->devinfo;

        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct v3d_sampler_view *v3d_sampler = v3d_sampler_view(sampler);
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].return_size =
                        v3d_get_tex_return_size(devinfo,
                                                sampler->format,
                                                sampler_state->compare_mode);

                /* For 16-bit, we set up the sampler to always return 2
                 * channels (meaning no recompiles for most state changes),
                 * while for 32 we actually scale the returns with channels.
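                 * (Illustrative: 16-bit returns pack two half-float values per
                 * 32-bit return word, so two return channels cover a full RGBA
                 * result regardless of the format.)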
                 */
                if (key->tex[i].return_size == 16) {
                        key->tex[i].return_channels = 2;
                } else if (devinfo->ver > 40) {
                        key->tex[i].return_channels = 4;
                } else {
                        key->tex[i].return_channels =
                                v3d_get_tex_return_channels(devinfo,
                                                            sampler->format);
                }

                if (key->tex[i].return_size == 32 && devinfo->ver < 40) {
                        memcpy(key->tex[i].swizzle,
                               v3d_sampler->swizzle,
                               sizeof(v3d_sampler->swizzle));
                } else {
                        /* For 16-bit returns, we let the sampler state handle
                         * the swizzle.
                         */
                        key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
                        key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
                        key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
                        key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
                }

                if (sampler) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].clamp_s =
                                sampler_state->wrap_s == PIPE_TEX_WRAP_CLAMP;
                        key->tex[i].clamp_t =
                                sampler_state->wrap_t == PIPE_TEX_WRAP_CLAMP;
                        key->tex[i].clamp_r =
                                sampler_state->wrap_r == PIPE_TEX_WRAP_CLAMP;
                }
        }

        key->ucp_enables = v3d->rasterizer->base.clip_plane_enable;
}

static void
v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_job *job = v3d->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;

        if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_BLEND |
                            VC5_DIRTY_FRAMEBUFFER |
                            VC5_DIRTY_ZSA |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_SAMPLE_STATE |
                            VC5_DIRTY_FRAGTEX |
                            VC5_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_FRAGMENT]);
        key->base.shader_state = v3d->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->clamp_color = v3d->rasterizer->base.clamp_fragment_color;
        if (v3d->blend->base.logicop_enable) {
                key->logicop_func = v3d->blend->base.logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = v3d->rasterizer->base.multisample;
                key->sample_coverage = (v3d->rasterizer->base.multisample &&
                                        v3d->sample_mask != (1 << VC5_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
                key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
        }

        key->depth_enabled = (v3d->zsa->base.depth.enabled ||
                              v3d->zsa->base.stencil[0].enabled);
        if (v3d->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = v3d->zsa->base.alpha.func;
        }

        /* gl_FragColor's propagation to however many bound color buffers
         * there are means that the buffer count needs to be in the key.
         */
        key->nr_cbufs = v3d->framebuffer.nr_cbufs;
        key->swap_color_rb = v3d->swap_color_rb;

        for (int i = 0; i < key->nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }

                if (v3d->prog.bind_fs->was_tgsi) {
                        if (util_format_is_pure_uint(cbuf->format))
                                key->uint_color_rb |= 1 << i;
                        else if (util_format_is_pure_sint(cbuf->format))
                                key->int_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        v3d->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (v3d->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = v3d->rasterizer->base.light_twoside;
        key->shade_model_flat = v3d->rasterizer->base.flatshade;

        struct v3d_compiled_shader *old_fs = v3d->prog.fs;
        v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base);
        if (v3d->prog.fs == old_fs)
                return;

        v3d->dirty |= VC5_DIRTY_COMPILED_FS;

        if (old_fs) {
                if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
                    old_fs->prog_data.fs->flat_shade_flags) {
                        v3d->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
                    old_fs->prog_data.fs->noperspective_flags) {
                        v3d->dirty |= VC5_DIRTY_NOPERSPECTIVE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->centroid_flags !=
                    old_fs->prog_data.fs->centroid_flags) {
                        v3d->dirty |= VC5_DIRTY_CENTROID_FLAGS;
                }
        }

        if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
                v3d->dirty |= VC5_DIRTY_FS_INPUTS;
        }
}

static void
v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_VERTTEX |
                            VC5_DIRTY_VTXSTATE |
                            VC5_DIRTY_UNCOMPILED_VS |
                            VC5_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_VERTEX]);
        key->base.shader_state = v3d->prog.bind_vs;
        key->num_fs_inputs = v3d->prog.fs->prog_data.fs->base.num_inputs;
        STATIC_ASSERT(sizeof(key->fs_inputs) ==
                      sizeof(v3d->prog.fs->prog_data.fs->input_slots));
        memcpy(key->fs_inputs, v3d->prog.fs->prog_data.fs->input_slots,
               sizeof(key->fs_inputs));
        key->clamp_color = v3d->rasterizer->base.clamp_vertex_color;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_compiled_shader *vs =
                v3d_get_compiled_shader(v3d, &key->base);
        if (vs != v3d->prog.vs) {
                v3d->prog.vs = vs;
                v3d->dirty |= VC5_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders only output varyings used by transform feedback. */
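        /* (The same key is reused with is_coord set, producing the coordinate
         * shader variant that the binning pass runs; it is stored as
         * v3d->prog.cs below.)
         */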
        struct v3d_uncompiled_shader *shader_state = key->base.shader_state;
        memcpy(key->fs_inputs, shader_state->tf_outputs,
               sizeof(*key->fs_inputs) * shader_state->num_tf_outputs);
        if (shader_state->num_tf_outputs < key->num_fs_inputs) {
                memset(&key->fs_inputs[shader_state->num_tf_outputs],
                       0,
                       sizeof(*key->fs_inputs) * (key->num_fs_inputs -
                                                  shader_state->num_tf_outputs));
        }
        key->num_fs_inputs = shader_state->num_tf_outputs;

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, &key->base);
        if (cs != v3d->prog.cs) {
                v3d->prog.cs = cs;
                v3d->dirty |= VC5_DIRTY_COMPILED_CS;
        }
}

void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct v3d_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct v3d_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct v3d_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct v3d_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct v3d_compiled_shader **last_compile,
                             struct hash_entry *entry,
                             struct v3d_uncompiled_shader *so)
{
        const struct v3d_key *key = entry->key;

        if (key->shader_state == so) {
                struct v3d_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);

                if (shader == *last_compile)
                        *last_compile = NULL;

                v3d_free_compiled_shader(shader);
        }
}

static void
v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = hwcso;

        hash_table_foreach(v3d->fs_cache, entry) {
                delete_from_cache_if_matches(v3d->fs_cache, &v3d->prog.fs,
                                             entry, so);
        }
        hash_table_foreach(v3d->vs_cache, entry) {
                delete_from_cache_if_matches(v3d->vs_cache, &v3d->prog.vs,
                                             entry, so);
        }

        ralloc_free(so->base.ir.nir);
        free(so);
}

static void
v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_fs = hwcso;
        v3d->dirty |= VC5_DIRTY_UNCOMPILED_FS;
}

static void
v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        v3d->prog.bind_vs = hwcso;
        v3d->dirty |= VC5_DIRTY_UNCOMPILED_VS;
}

void
v3d_program_init(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        pctx->create_vs_state = v3d_shader_state_create;
        pctx->delete_vs_state = v3d_shader_state_delete;

        pctx->create_fs_state = v3d_shader_state_create;
        pctx->delete_fs_state = v3d_shader_state_delete;

        pctx->bind_fs_state = v3d_fp_state_bind;
        pctx->bind_vs_state = v3d_vp_state_bind;

        v3d->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        v3d->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
v3d_program_fini(struct pipe_context *pctx)
{
        struct v3d_context *v3d = v3d_context(pctx);

        hash_table_foreach(v3d->fs_cache, entry) {
                struct v3d_compiled_shader *shader = entry->data;
                v3d_free_compiled_shader(shader);
                _mesa_hash_table_remove(v3d->fs_cache, entry);
        }

        hash_table_foreach(v3d->vs_cache, entry) {
                struct v3d_compiled_shader *shader = entry->data;
                v3d_free_compiled_shader(shader);
                _mesa_hash_table_remove(v3d->vs_cache, entry);
        }

        v3d_bo_unreference(&v3d->prog.spill_bo);
}