v3d: Implement noperspective varyings on V3D 4.x.
[mesa.git] / src / gallium / drivers / v3d / v3d_program.c
1 /*
2 * Copyright © 2014-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "tgsi/tgsi_dump.h"
31 #include "tgsi/tgsi_parse.h"
32 #include "compiler/nir/nir.h"
33 #include "compiler/nir/nir_builder.h"
34 #include "nir/tgsi_to_nir.h"
35 #include "compiler/v3d_compiler.h"
36 #include "v3d_context.h"
37 #include "broadcom/cle/v3d_packet_v33_pack.h"
38 #include "mesa/state_tracker/st_glsl_types.h"
39
40 static gl_varying_slot
41 v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
42 {
43 nir_foreach_variable(var, &s->outputs) {
44 if (var->data.driver_location == driver_location) {
45 return var->data.location;
46 }
47 }
48
49 return -1;
50 }
51
/**
 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
 *
 * A shader can have 16 of these specs, and each one of them can write up to
 * 16 dwords. Since we allow a total of 64 transform feedback output
 * components (not 16 vectors), we have to group the writes of multiple
 * varyings together in a single data spec.
 */
static void
v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        /* Flattened per-component list of varying slots, built up in
         * buffer order below.
         */
        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                /* +6 skips the [X, Y, Z, W, Xs, Ys] values at the start of
                 * the coordinate shader's VPM output block (see the comment
                 * in the spec initializer below).
                 */
                uint32_t vpm_start_offset = vpm_start + 6;

                while (vpm_size) {
                        /* Each spec can output at most 16 consecutive
                         * dwords, so large runs are split across specs.
                         */
                        uint32_t write_size = MIN2(vpm_size, 1 << 4);

                        struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                                /* We need the offset from the coordinate shader's VPM
                                 * output block, which has the [X, Y, Z, W, Xs, Ys]
                                 * values at the start.
                                 */
                                .first_shaded_vertex_value_to_output = vpm_start_offset,
                                .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
                                .output_buffer_to_write_to = buffer,
                        };

                        /* GFXH-1559: the first spec must not start at VPM
                         * offset 8 (that is what this assert enforces).
                         */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs[so->num_tf_specs],
                                                                       &unpacked);

                        /* If point size is being written by the shader, then
                         * all the VPM start offsets are shifted up by one.
                         * We won't know that until the variant is compiled,
                         * though, so we keep a second, shifted copy of the
                         * specs in tf_specs_psiz.
                         */
                        unpacked.first_shaded_vertex_value_to_output++;

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs_psiz[so->num_tf_specs],
                                                                       &unpacked);
                        so->num_tf_specs++;
                        vpm_start_offset += write_size;
                        vpm_size -= write_size;
                }
                so->base.stream_output.stride[buffer] =
                        stream_output->stride[buffer];
        }

        /* Stash the flattened slot list; it is ralloc'd against the NIR
         * shader so it is freed along with it.
         */
        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}
164
/* nir_lower_io size callback for non-uniform I/O: number of attribute
 * slots occupied by the type.
 */
static int
type_size(const struct glsl_type *type)
{
        return glsl_count_attribute_slots(type, false);
}
170
/* nir_lower_io size callback for uniforms, which use the gallium state
 * tracker's storage sizing instead of attribute-slot counting.
 */
static int
uniforms_type_size(const struct glsl_type *type)
{
        return st_glsl_storage_type_size(type, false);
}
176
/**
 * Pipe callback for both create_vs_state and create_fs_state.
 *
 * Takes ownership of (or translates TGSI to) a NIR shader, runs the common
 * lowering/optimization passes, and precomputes the transform feedback
 * specs.  Returns the uncompiled shader CSO, or NULL on allocation failure.
 */
static void *
v3d_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = v3d->next_uncompiled_program_id++;

        nir_shader *s;

        if (cso->type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = cso->ir.nir;

                /* Uniforms are lowered with a separate size callback from
                 * the rest of the I/O.
                 */
                NIR_PASS_V(s, nir_lower_io, nir_var_all & ~nir_var_uniform,
                           type_size,
                           (nir_lower_io_options)0);
                NIR_PASS_V(s, nir_lower_io, nir_var_uniform,
                           uniforms_type_size,
                           (nir_lower_io_options)0);
        } else {
                assert(cso->type == PIPE_SHADER_IR_TGSI);

                if (V3D_DEBUG & V3D_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(cso->tokens, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(cso->tokens, &v3d_nir_options);

                /* Remembered so FS variants can apply TGSI-specific
                 * color-buffer handling (see v3d_update_compiled_fs).
                 */
                so->was_tgsi = true;
        }

        NIR_PASS_V(s, nir_opt_global_to_local);
        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        /* Precompute TF specs now; they only depend on the uncompiled
         * shader's outputs, not on the per-variant key.
         */
        v3d_set_transform_feedback_outputs(so, &cso->stream_output);

        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(s->info.stage))) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}
245
/**
 * Looks up (compiling on a cache miss) the shader variant for the given key.
 *
 * Variants are cached per stage in v3d->fs_cache / v3d->vs_cache, with the
 * full key struct contents as the hash key.
 */
static struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d, struct v3d_key *key)
{
        struct v3d_uncompiled_shader *shader_state = key->shader_state;
        nir_shader *s = shader_state->base.ir.nir;

        struct hash_table *ht;
        uint32_t key_size;
        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                ht = v3d->fs_cache;
                key_size = sizeof(struct v3d_fs_key);
        } else {
                ht = v3d->vs_cache;
                key_size = sizeof(struct v3d_vs_key);
        }

        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct v3d_compiled_shader *shader =
                rzalloc(NULL, struct v3d_compiled_shader);

        int program_id = shader_state->program_id;
        int variant_id =
                p_atomic_inc_return(&shader_state->compiled_variant_count);
        uint64_t *qpu_insts;
        uint32_t shader_size;

        switch (s->info.stage) {
        case MESA_SHADER_VERTEX:
                shader->prog_data.vs = rzalloc(shader, struct v3d_vs_prog_data);

                qpu_insts = v3d_compile_vs(v3d->screen->compiler,
                                           (struct v3d_vs_key *)key,
                                           shader->prog_data.vs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        case MESA_SHADER_FRAGMENT:
                shader->prog_data.fs = rzalloc(shader, struct v3d_fs_prog_data);

                qpu_insts = v3d_compile_fs(v3d->screen->compiler,
                                           (struct v3d_fs_key *)key,
                                           shader->prog_data.fs, s,
                                           program_id, variant_id,
                                           &shader_size);
                break;
        default:
                unreachable("bad stage");
        }

        v3d_set_shader_uniform_dirty_flags(shader);

        /* Upload the QPU instructions into a BO, then drop the CPU-side
         * copy returned by the compiler.
         */
        shader->bo = v3d_bo_alloc(v3d->screen, shader_size, "shader");
        v3d_bo_map(shader->bo);
        memcpy(shader->bo->map, qpu_insts, shader_size);

        free(qpu_insts);

        /* The cache keeps its own copy of the key, ralloc'd against the
         * compiled shader so both are freed together.
         */
        struct v3d_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        /* Grow the shared scratch BO if this variant spills more than any
         * previous one.
         */
        if (shader->prog_data.base->spill_size >
            v3d->prog.spill_size_per_thread) {
                /* Max 4 QPUs per slice, 3 slices per core. We only do single
                 * core so far. This overallocates memory on smaller cores.
                 */
                int total_spill_size =
                        4 * 3 * shader->prog_data.base->spill_size;

                v3d_bo_unreference(&v3d->prog.spill_bo);
                v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
                                                  total_spill_size, "spill");
                v3d->prog.spill_size_per_thread =
                        shader->prog_data.base->spill_size;
        }

        return shader;
}
328
329 static void
330 v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
331 struct v3d_texture_stateobj *texstate)
332 {
333 const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
334
335 for (int i = 0; i < texstate->num_textures; i++) {
336 struct pipe_sampler_view *sampler = texstate->textures[i];
337 struct v3d_sampler_view *v3d_sampler = v3d_sampler_view(sampler);
338 struct pipe_sampler_state *sampler_state =
339 texstate->samplers[i];
340
341 if (!sampler)
342 continue;
343
344 key->tex[i].return_size =
345 v3d_get_tex_return_size(devinfo,
346 sampler->format,
347 sampler_state->compare_mode);
348
349 /* For 16-bit, we set up the sampler to always return 2
350 * channels (meaning no recompiles for most statechanges),
351 * while for 32 we actually scale the returns with channels.
352 */
353 if (key->tex[i].return_size == 16) {
354 key->tex[i].return_channels = 2;
355 } else if (devinfo->ver > 40) {
356 key->tex[i].return_channels = 4;
357 } else {
358 key->tex[i].return_channels =
359 v3d_get_tex_return_channels(devinfo,
360 sampler->format);
361 }
362
363 if (key->tex[i].return_size == 32 && devinfo->ver < 40) {
364 memcpy(key->tex[i].swizzle,
365 v3d_sampler->swizzle,
366 sizeof(v3d_sampler->swizzle));
367 } else {
368 /* For 16-bit returns, we let the sampler state handle
369 * the swizzle.
370 */
371 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
372 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
373 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
374 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
375 }
376
377 if (sampler) {
378 key->tex[i].compare_mode = sampler_state->compare_mode;
379 key->tex[i].compare_func = sampler_state->compare_func;
380 key->tex[i].clamp_s =
381 sampler_state->wrap_s == PIPE_TEX_WRAP_CLAMP;
382 key->tex[i].clamp_t =
383 sampler_state->wrap_t == PIPE_TEX_WRAP_CLAMP;
384 key->tex[i].clamp_r =
385 sampler_state->wrap_r == PIPE_TEX_WRAP_CLAMP;
386 }
387 }
388
389 key->ucp_enables = v3d->rasterizer->base.clip_plane_enable;
390 }
391
/**
 * Builds the FS key from current state and fetches/compiles the matching
 * fragment shader variant, flagging the dirty bits that depend on it.
 */
static void
v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_job *job = v3d->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;

        /* Skip the work if none of the state contributing to the FS key
         * has changed.
         */
        if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_BLEND |
                            VC5_DIRTY_FRAMEBUFFER |
                            VC5_DIRTY_ZSA |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_SAMPLE_STATE |
                            VC5_DIRTY_FRAGTEX |
                            VC5_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        /* The key is memcmp'd/hashed by the variant cache, so zero it
         * fully (including padding) before filling it in.
         */
        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->fragtex);
        key->base.shader_state = v3d->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->clamp_color = v3d->rasterizer->base.clamp_fragment_color;
        if (v3d->blend->base.logicop_enable) {
                key->logicop_func = v3d->blend->base.logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = v3d->rasterizer->base.multisample;
                key->sample_coverage = (v3d->rasterizer->base.multisample &&
                                        v3d->sample_mask != (1 << VC5_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
                key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
        }

        key->depth_enabled = (v3d->zsa->base.depth.enabled ||
                              v3d->zsa->base.stencil[0].enabled);
        if (v3d->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = v3d->zsa->base.alpha.func;
        }

        /* gl_FragColor's propagation to however many bound color buffers
         * there are means that the buffer count needs to be in the key.
         */
        key->nr_cbufs = v3d->framebuffer.nr_cbufs;
        key->swap_color_rb = v3d->swap_color_rb;

        for (int i = 0; i < key->nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }

                /* Pure integer RBs only need key bits for TGSI-sourced
                 * shaders.
                 */
                if (v3d->prog.bind_fs->was_tgsi) {
                        if (util_format_is_pure_uint(cbuf->format))
                                key->uint_color_rb |= 1 << i;
                        else if (util_format_is_pure_sint(cbuf->format))
                                key->int_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        v3d->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (v3d->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = v3d->rasterizer->base.light_twoside;
        key->shade_model_flat = v3d->rasterizer->base.flatshade;

        struct v3d_compiled_shader *old_fs = v3d->prog.fs;
        v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base);
        if (v3d->prog.fs == old_fs)
                return;

        v3d->dirty |= VC5_DIRTY_COMPILED_FS;

        /* Only flag the per-varying interpolation state dirty when it
         * actually changed from the previous variant.
         */
        if (old_fs) {
                if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
                    old_fs->prog_data.fs->flat_shade_flags) {
                        v3d->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
                    old_fs->prog_data.fs->noperspective_flags) {
                        v3d->dirty |= VC5_DIRTY_NOPERSPECTIVE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->centroid_flags !=
                    old_fs->prog_data.fs->centroid_flags) {
                        v3d->dirty |= VC5_DIRTY_CENTROID_FLAGS;
                }
        }

        /* A change in the FS input slots invalidates the VS key, which
         * embeds them (see v3d_update_compiled_vs).
         */
        if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
                v3d->dirty |= VC5_DIRTY_FS_INPUTS;
        }
}
505
/**
 * Builds the VS key from current state and fetches/compiles both the
 * vertex shader variant and the coordinate (binning) shader variant.
 */
static void
v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_VERTTEX |
                            VC5_DIRTY_VTXSTATE |
                            VC5_DIRTY_UNCOMPILED_VS |
                            VC5_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->verttex);
        key->base.shader_state = v3d->prog.bind_vs;
        /* The VS key embeds the FS's input slots; the FS variant must be
         * up to date before this runs (see v3d_update_compiled_shaders).
         */
        key->num_fs_inputs = v3d->prog.fs->prog_data.fs->base.num_inputs;
        STATIC_ASSERT(sizeof(key->fs_inputs) ==
                      sizeof(v3d->prog.fs->prog_data.fs->input_slots));
        memcpy(key->fs_inputs, v3d->prog.fs->prog_data.fs->input_slots,
               sizeof(key->fs_inputs));
        key->clamp_color = v3d->rasterizer->base.clamp_vertex_color;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_compiled_shader *vs =
                v3d_get_compiled_shader(v3d, &key->base);
        if (vs != v3d->prog.vs) {
                v3d->prog.vs = vs;
                v3d->dirty |= VC5_DIRTY_COMPILED_VS;
        }

        /* Reuse the same key for the coordinate shader variant, swapping
         * the FS inputs out for the transform feedback outputs.
         */
        key->is_coord = true;
        /* Coord shaders only output varyings used by transform feedback. */
        struct v3d_uncompiled_shader *shader_state = key->base.shader_state;
        memcpy(key->fs_inputs, shader_state->tf_outputs,
               sizeof(*key->fs_inputs) * shader_state->num_tf_outputs);
        if (shader_state->num_tf_outputs < key->num_fs_inputs) {
                /* Zero the stale tail so the key memcmp/hash in the variant
                 * cache stays deterministic.
                 */
                memset(&key->fs_inputs[shader_state->num_tf_outputs],
                       0,
                       sizeof(*key->fs_inputs) * (key->num_fs_inputs -
                                                  shader_state->num_tf_outputs));
        }
        key->num_fs_inputs = shader_state->num_tf_outputs;

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, &key->base);
        if (cs != v3d->prog.cs) {
                v3d->prog.cs = cs;
                v3d->dirty |= VC5_DIRTY_COMPILED_CS;
        }
}
562
/**
 * Updates the compiled shader variants for the current state.
 *
 * The FS must be updated first: the VS key embeds the FS's input slots
 * (see v3d_update_compiled_vs).
 */
void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}
569
570 static uint32_t
571 fs_cache_hash(const void *key)
572 {
573 return _mesa_hash_data(key, sizeof(struct v3d_fs_key));
574 }
575
576 static uint32_t
577 vs_cache_hash(const void *key)
578 {
579 return _mesa_hash_data(key, sizeof(struct v3d_vs_key));
580 }
581
582 static bool
583 fs_cache_compare(const void *key1, const void *key2)
584 {
585 return memcmp(key1, key2, sizeof(struct v3d_fs_key)) == 0;
586 }
587
588 static bool
589 vs_cache_compare(const void *key1, const void *key2)
590 {
591 return memcmp(key1, key2, sizeof(struct v3d_vs_key)) == 0;
592 }
593
594 static void
595 delete_from_cache_if_matches(struct hash_table *ht,
596 struct v3d_compiled_shader **last_compile,
597 struct hash_entry *entry,
598 struct v3d_uncompiled_shader *so)
599 {
600 const struct v3d_key *key = entry->key;
601
602 if (key->shader_state == so) {
603 struct v3d_compiled_shader *shader = entry->data;
604 _mesa_hash_table_remove(ht, entry);
605 v3d_bo_unreference(&shader->bo);
606
607 if (shader == *last_compile)
608 *last_compile = NULL;
609
610 ralloc_free(shader);
611 }
612 }
613
614 static void
615 v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
616 {
617 struct v3d_context *v3d = v3d_context(pctx);
618 struct v3d_uncompiled_shader *so = hwcso;
619
620 struct hash_entry *entry;
621 hash_table_foreach(v3d->fs_cache, entry) {
622 delete_from_cache_if_matches(v3d->fs_cache, &v3d->prog.fs,
623 entry, so);
624 }
625 hash_table_foreach(v3d->vs_cache, entry) {
626 delete_from_cache_if_matches(v3d->vs_cache, &v3d->prog.vs,
627 entry, so);
628 }
629
630 ralloc_free(so->base.ir.nir);
631 free(so);
632 }
633
634 static void
635 v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
636 {
637 struct v3d_context *v3d = v3d_context(pctx);
638 v3d->prog.bind_fs = hwcso;
639 v3d->dirty |= VC5_DIRTY_UNCOMPILED_FS;
640 }
641
642 static void
643 v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
644 {
645 struct v3d_context *v3d = v3d_context(pctx);
646 v3d->prog.bind_vs = hwcso;
647 v3d->dirty |= VC5_DIRTY_UNCOMPILED_VS;
648 }
649
650 void
651 v3d_program_init(struct pipe_context *pctx)
652 {
653 struct v3d_context *v3d = v3d_context(pctx);
654
655 pctx->create_vs_state = v3d_shader_state_create;
656 pctx->delete_vs_state = v3d_shader_state_delete;
657
658 pctx->create_fs_state = v3d_shader_state_create;
659 pctx->delete_fs_state = v3d_shader_state_delete;
660
661 pctx->bind_fs_state = v3d_fp_state_bind;
662 pctx->bind_vs_state = v3d_vp_state_bind;
663
664 v3d->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
665 fs_cache_compare);
666 v3d->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
667 vs_cache_compare);
668 }
669
670 void
671 v3d_program_fini(struct pipe_context *pctx)
672 {
673 struct v3d_context *v3d = v3d_context(pctx);
674
675 struct hash_entry *entry;
676 hash_table_foreach(v3d->fs_cache, entry) {
677 struct v3d_compiled_shader *shader = entry->data;
678 v3d_bo_unreference(&shader->bo);
679 ralloc_free(shader);
680 _mesa_hash_table_remove(v3d->fs_cache, entry);
681 }
682
683 hash_table_foreach(v3d->vs_cache, entry) {
684 struct v3d_compiled_shader *shader = entry->data;
685 v3d_bo_unreference(&shader->bo);
686 ralloc_free(shader);
687 _mesa_hash_table_remove(v3d->vs_cache, entry);
688 }
689
690 v3d_bo_unreference(&v3d->prog.spill_bo);
691 }