ttn: Add new allow_disk_cache parameter
[mesa.git] / src / gallium / drivers / v3d / v3d_program.c
1 /*
2 * Copyright © 2014-2017 Broadcom
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <inttypes.h>
25 #include "util/format/u_format.h"
26 #include "util/u_math.h"
27 #include "util/u_memory.h"
28 #include "util/ralloc.h"
29 #include "util/hash_table.h"
30 #include "util/u_upload_mgr.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33 #include "compiler/nir/nir.h"
34 #include "compiler/nir/nir_builder.h"
35 #include "nir/tgsi_to_nir.h"
36 #include "compiler/v3d_compiler.h"
37 #include "v3d_context.h"
38 #include "broadcom/cle/v3d_packet_v33_pack.h"
39
/* Forward declarations: v3d_shader_precompile() below needs variant
 * compilation and the shared precompile-key setup before their definitions.
 */
static struct v3d_compiled_shader *
v3d_get_compiled_shader(struct v3d_context *v3d,
                        struct v3d_key *key, size_t key_size);
static void
v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
                                struct v3d_key *key);
46
47 static gl_varying_slot
48 v3d_get_slot_for_driver_location(nir_shader *s, uint32_t driver_location)
49 {
50 nir_foreach_variable(var, &s->outputs) {
51 if (var->data.driver_location == driver_location) {
52 return var->data.location;
53 }
54 }
55
56 return -1;
57 }
58
/**
 * Precomputes the TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC array for the shader.
 *
 * A shader can have 16 of these specs, and each one of them can write up to
 * 16 dwords. Since we allow a total of 64 transform feedback output
 * components (not 16 vectors), we have to group the writes of multiple
 * varyings together in a single data spec.
 */
static void
v3d_set_transform_feedback_outputs(struct v3d_uncompiled_shader *so,
                                   const struct pipe_stream_output_info *stream_output)
{
        if (!stream_output->num_outputs)
                return;

        /* Flat list of (slot, component) pairs the coord shader must emit,
         * in buffer order; copied into so->tf_outputs at the end.
         */
        struct v3d_varying_slot slots[PIPE_MAX_SO_OUTPUTS * 4];
        int slot_count = 0;

        for (int buffer = 0; buffer < PIPE_MAX_SO_BUFFERS; buffer++) {
                uint32_t buffer_offset = 0;
                uint32_t vpm_start = slot_count;

                for (int i = 0; i < stream_output->num_outputs; i++) {
                        const struct pipe_stream_output *output =
                                &stream_output->output[i];

                        if (output->output_buffer != buffer)
                                continue;

                        /* We assume that the SO outputs appear in increasing
                         * order in the buffer.
                         */
                        assert(output->dst_offset >= buffer_offset);

                        /* Pad any undefined slots in the output (filled with
                         * VARYING_SLOT_POS.x as a harmless dummy value).
                         */
                        for (int j = buffer_offset; j < output->dst_offset; j++) {
                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(VARYING_SLOT_POS, 0);
                                slot_count++;
                                buffer_offset++;
                        }

                        /* Set the coordinate shader up to output the
                         * components of this varying.
                         */
                        for (int j = 0; j < output->num_components; j++) {
                                gl_varying_slot slot =
                                        v3d_get_slot_for_driver_location(so->base.ir.nir, output->register_index);

                                slots[slot_count] =
                                        v3d_slot_from_slot_and_component(slot,
                                                                         output->start_component + j);
                                slot_count++;
                                buffer_offset++;
                        }
                }

                /* Number of consecutive VPM values this buffer consumes;
                 * nothing to emit if no output targets this buffer.
                 */
                uint32_t vpm_size = slot_count - vpm_start;
                if (!vpm_size)
                        continue;

                uint32_t vpm_start_offset = vpm_start + 6;

                /* Each spec can write at most 16 consecutive values, so
                 * split larger runs across multiple specs.
                 */
                while (vpm_size) {
                        uint32_t write_size = MIN2(vpm_size, 1 << 4);

                        struct V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC unpacked = {
                                /* We need the offset from the coordinate shader's VPM
                                 * output block, which has the [X, Y, Z, W, Xs, Ys]
                                 * values at the start.
                                 */
                                .first_shaded_vertex_value_to_output = vpm_start_offset,
                                .number_of_consecutive_vertex_values_to_output_as_32_bit_values = write_size,
                                .output_buffer_to_write_to = buffer,
                        };

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        assert(so->num_tf_specs != ARRAY_SIZE(so->tf_specs));
                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs[so->num_tf_specs],
                                                                       &unpacked);

                        /* If point size is being written by the shader, then
                         * all the VPM start offsets are shifted up by one.
                         * We won't know that until the variant is compiled,
                         * though.
                         */
                        unpacked.first_shaded_vertex_value_to_output++;

                        /* GFXH-1559 */
                        assert(unpacked.first_shaded_vertex_value_to_output != 8 ||
                               so->num_tf_specs != 0);

                        /* Second pre-packed copy with the psiz-shifted
                         * offsets, chosen at draw time.
                         */
                        V3D33_TRANSFORM_FEEDBACK_OUTPUT_DATA_SPEC_pack(NULL,
                                                                       (void *)&so->tf_specs_psiz[so->num_tf_specs],
                                                                       &unpacked);
                        so->num_tf_specs++;
                        vpm_start_offset += write_size;
                        vpm_size -= write_size;
                }
                so->base.stream_output.stride[buffer] =
                        stream_output->stride[buffer];
        }

        so->num_tf_outputs = slot_count;
        so->tf_outputs = ralloc_array(so->base.ir.nir, struct v3d_varying_slot,
                                      slot_count);
        memcpy(so->tf_outputs, slots, sizeof(*slots) * slot_count);
}
171
/**
 * glsl type-size callback for nir_lower_io: one slot per attribute
 * location (the vs_input doubling for dual-slot doubles is not used).
 */
static int
type_size(const struct glsl_type *type, bool bindless)
{
        const bool vs_input_slots = false;
        return glsl_count_attribute_slots(type, vs_input_slots);
}
177
178 static void
179 precompile_all_outputs(nir_shader *s,
180 struct v3d_varying_slot *outputs,
181 uint8_t *num_outputs)
182 {
183 nir_foreach_variable(var, &s->outputs) {
184 const int array_len = MAX2(glsl_get_length(var->type), 1);
185 for (int j = 0; j < array_len; j++) {
186 const int slot = var->data.location + j;
187 const int num_components =
188 glsl_get_components(var->type);
189 for (int i = 0; i < num_components; i++) {
190 const int swiz = var->data.location_frac + i;
191 outputs[(*num_outputs)++] =
192 v3d_slot_from_slot_and_component(slot,
193 swiz);
194 }
195 }
196 }
197 }
198
/**
 * Precompiles a shader variant at shader state creation time if
 * V3D_DEBUG=precompile is set. Used for shader-db
 * (https://gitlab.freedesktop.org/mesa/shader-db)
 */
static void
v3d_shader_precompile(struct v3d_context *v3d,
                      struct v3d_uncompiled_shader *so)
{
        nir_shader *s = so->base.ir.nir;

        if (s->info.stage == MESA_SHADER_FRAGMENT) {
                struct v3d_fs_key key = {
                        .base.shader_state = so,
                };

                /* Enable a color buffer for every render target the shader
                 * writes, so the compile sees all its outputs as used.
                 */
                nir_foreach_variable(var, &s->outputs) {
                        if (var->data.location == FRAG_RESULT_COLOR) {
                                key.cbufs |= 1 << 0;
                        } else if (var->data.location >= FRAG_RESULT_DATA0) {
                                key.cbufs |= 1 << (var->data.location -
                                                   FRAG_RESULT_DATA0);
                        }
                }

                key.logicop_func = PIPE_LOGICOP_COPY;

                v3d_setup_shared_precompile_key(so, &key.base);
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key));
        } else if (s->info.stage == MESA_SHADER_GEOMETRY) {
                struct v3d_gs_key key = {
                        .base.shader_state = so,
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                /* Render-mode variant: assume every output is consumed. */
                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key));

                /* Compile GS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key));
        } else {
                assert(s->info.stage == MESA_SHADER_VERTEX);
                struct v3d_vs_key key = {
                        .base.shader_state = so,
                        /* Emit fixed function outputs */
                        .base.is_last_geometry_stage = true,
                };

                v3d_setup_shared_precompile_key(so, &key.base);

                /* Render-mode variant: assume every output is consumed. */
                precompile_all_outputs(s,
                                       key.used_outputs,
                                       &key.num_used_outputs);

                v3d_get_compiled_shader(v3d, &key.base, sizeof(key));

                /* Compile VS bin shader: only position (XXX: include TF) */
                key.is_coord = true;
                key.num_used_outputs = 0;
                for (int i = 0; i < 4; i++) {
                        key.used_outputs[key.num_used_outputs++] =
                                v3d_slot_from_slot_and_component(VARYING_SLOT_POS,
                                                                 i);
                }
                v3d_get_compiled_shader(v3d, &key.base, sizeof(key));
        }
}
278
/**
 * Creates the uncompiled shader state object: takes ownership of (or
 * translates TGSI into) the NIR shader, runs the stage-independent lowering
 * and optimization passes, and optionally precompiles variants.
 *
 * Returns NULL on allocation failure.
 */
static void *
v3d_uncompiled_shader_create(struct pipe_context *pctx,
                             enum pipe_shader_ir type, void *ir)
{
        struct v3d_context *v3d = v3d_context(pctx);
        struct v3d_uncompiled_shader *so = CALLOC_STRUCT(v3d_uncompiled_shader);
        if (!so)
                return NULL;

        so->program_id = v3d->next_uncompiled_program_id++;

        nir_shader *s;

        if (type == PIPE_SHADER_IR_NIR) {
                /* The backend takes ownership of the NIR shader on state
                 * creation.
                 */
                s = ir;
        } else {
                assert(type == PIPE_SHADER_IR_TGSI);

                if (V3D_DEBUG & V3D_DEBUG_TGSI) {
                        fprintf(stderr, "prog %d TGSI:\n",
                                so->program_id);
                        tgsi_dump(ir, 0);
                        fprintf(stderr, "\n");
                }
                s = tgsi_to_nir(ir, pctx->screen, false);
        }

        /* VS/GS inputs and outputs are left vector-shaped for the VPM
         * layout; everything else gets lowered to scalar I/O.
         */
        nir_variable_mode lower_mode = nir_var_all & ~nir_var_uniform;
        if (s->info.stage == MESA_SHADER_VERTEX ||
            s->info.stage == MESA_SHADER_GEOMETRY) {
                lower_mode &= ~(nir_var_shader_in | nir_var_shader_out);
        }
        NIR_PASS_V(s, nir_lower_io, lower_mode,
                   type_size,
                   (nir_lower_io_options)0);

        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        v3d_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_function_temp);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        if (V3D_DEBUG & (V3D_DEBUG_NIR |
                         v3d_debug_flag_for_shader_stage(s->info.stage))) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->info.stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        if (V3D_DEBUG & V3D_DEBUG_PRECOMPILE)
                v3d_shader_precompile(v3d, so);

        return so;
}
347
348 static void
349 v3d_shader_debug_output(const char *message, void *data)
350 {
351 struct v3d_context *v3d = data;
352
353 pipe_debug_message(&v3d->debug, SHADER_INFO, "%s", message);
354 }
355
356 static void *
357 v3d_shader_state_create(struct pipe_context *pctx,
358 const struct pipe_shader_state *cso)
359 {
360 struct v3d_uncompiled_shader *so =
361 v3d_uncompiled_shader_create(pctx,
362 cso->type,
363 (cso->type == PIPE_SHADER_IR_TGSI ?
364 (void *)cso->tokens :
365 cso->ir.nir));
366
367 v3d_set_transform_feedback_outputs(so, &cso->stream_output);
368
369 return so;
370 }
371
372 struct v3d_compiled_shader *
373 v3d_get_compiled_shader(struct v3d_context *v3d,
374 struct v3d_key *key,
375 size_t key_size)
376 {
377 struct v3d_uncompiled_shader *shader_state = key->shader_state;
378 nir_shader *s = shader_state->base.ir.nir;
379
380 struct hash_table *ht = v3d->prog.cache[s->info.stage];
381 struct hash_entry *entry = _mesa_hash_table_search(ht, key);
382 if (entry)
383 return entry->data;
384
385 struct v3d_compiled_shader *shader =
386 rzalloc(NULL, struct v3d_compiled_shader);
387
388 int program_id = shader_state->program_id;
389 int variant_id =
390 p_atomic_inc_return(&shader_state->compiled_variant_count);
391 uint64_t *qpu_insts;
392 uint32_t shader_size;
393
394 qpu_insts = v3d_compile(v3d->screen->compiler, key,
395 &shader->prog_data.base, s,
396 v3d_shader_debug_output,
397 v3d,
398 program_id, variant_id, &shader_size);
399 ralloc_steal(shader, shader->prog_data.base);
400
401 v3d_set_shader_uniform_dirty_flags(shader);
402
403 if (shader_size) {
404 u_upload_data(v3d->state_uploader, 0, shader_size, 8,
405 qpu_insts, &shader->offset, &shader->resource);
406 }
407
408 free(qpu_insts);
409
410 if (ht) {
411 struct v3d_key *dup_key;
412 dup_key = ralloc_size(shader, key_size);
413 memcpy(dup_key, key, key_size);
414 _mesa_hash_table_insert(ht, dup_key, shader);
415 }
416
417 if (shader->prog_data.base->spill_size >
418 v3d->prog.spill_size_per_thread) {
419 /* The TIDX register we use for choosing the area to access
420 * for scratch space is: (core << 6) | (qpu << 2) | thread.
421 * Even at minimum threadcount in a particular shader, that
422 * means we still multiply by qpus by 4.
423 */
424 int total_spill_size = (v3d->screen->devinfo.qpu_count * 4 *
425 shader->prog_data.base->spill_size);
426
427 v3d_bo_unreference(&v3d->prog.spill_bo);
428 v3d->prog.spill_bo = v3d_bo_alloc(v3d->screen,
429 total_spill_size, "spill");
430 v3d->prog.spill_size_per_thread =
431 shader->prog_data.base->spill_size;
432 }
433
434 return shader;
435 }
436
/**
 * Frees a compiled shader variant: drops the reference on the uploaded QPU
 * code resource, then frees the CPU-side allocation (prog_data is ralloc'ed
 * off @shader, so it goes with it).
 */
static void
v3d_free_compiled_shader(struct v3d_compiled_shader *shader)
{
        pipe_resource_reference(&shader->resource, NULL);
        ralloc_free(shader);
}
443
444 static void
445 v3d_setup_shared_key(struct v3d_context *v3d, struct v3d_key *key,
446 struct v3d_texture_stateobj *texstate)
447 {
448 const struct v3d_device_info *devinfo = &v3d->screen->devinfo;
449
450 for (int i = 0; i < texstate->num_textures; i++) {
451 struct pipe_sampler_view *sampler = texstate->textures[i];
452 struct v3d_sampler_view *v3d_sampler = v3d_sampler_view(sampler);
453 struct pipe_sampler_state *sampler_state =
454 texstate->samplers[i];
455
456 if (!sampler)
457 continue;
458
459 key->tex[i].return_size =
460 v3d_get_tex_return_size(devinfo,
461 sampler->format,
462 sampler_state->compare_mode);
463
464 /* For 16-bit, we set up the sampler to always return 2
465 * channels (meaning no recompiles for most statechanges),
466 * while for 32 we actually scale the returns with channels.
467 */
468 if (key->tex[i].return_size == 16) {
469 key->tex[i].return_channels = 2;
470 } else if (devinfo->ver > 40) {
471 key->tex[i].return_channels = 4;
472 } else {
473 key->tex[i].return_channels =
474 v3d_get_tex_return_channels(devinfo,
475 sampler->format);
476 }
477
478 if (key->tex[i].return_size == 32 && devinfo->ver < 40) {
479 memcpy(key->tex[i].swizzle,
480 v3d_sampler->swizzle,
481 sizeof(v3d_sampler->swizzle));
482 } else {
483 /* For 16-bit returns, we let the sampler state handle
484 * the swizzle.
485 */
486 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
487 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
488 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
489 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
490 }
491
492 if (sampler) {
493 key->tex[i].clamp_s =
494 sampler_state->wrap_s == PIPE_TEX_WRAP_CLAMP;
495 key->tex[i].clamp_t =
496 sampler_state->wrap_t == PIPE_TEX_WRAP_CLAMP;
497 key->tex[i].clamp_r =
498 sampler_state->wrap_r == PIPE_TEX_WRAP_CLAMP;
499 }
500 }
501 }
502
503 static void
504 v3d_setup_shared_precompile_key(struct v3d_uncompiled_shader *uncompiled,
505 struct v3d_key *key)
506 {
507 nir_shader *s = uncompiled->base.ir.nir;
508
509 for (int i = 0; i < s->info.num_textures; i++) {
510 key->tex[i].return_size = 16;
511 key->tex[i].return_channels = 2;
512
513 key->tex[i].swizzle[0] = PIPE_SWIZZLE_X;
514 key->tex[i].swizzle[1] = PIPE_SWIZZLE_Y;
515 key->tex[i].swizzle[2] = PIPE_SWIZZLE_Z;
516 key->tex[i].swizzle[3] = PIPE_SWIZZLE_W;
517 }
518 }
519
/**
 * Compiles (or fetches from cache) the fragment shader variant matching the
 * current draw state, stores it in v3d->prog.fs, and raises the dirty flags
 * for any derived state that changed with the new variant.
 */
static void
v3d_update_compiled_fs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_job *job = v3d->job;
        struct v3d_fs_key local_key;
        struct v3d_fs_key *key = &local_key;
        nir_shader *s = v3d->prog.bind_fs->base.ir.nir;

        /* Early out unless some state feeding the FS key has changed. */
        if (!(v3d->dirty & (VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_BLEND |
                            VC5_DIRTY_FRAMEBUFFER |
                            VC5_DIRTY_ZSA |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_SAMPLE_STATE |
                            VC5_DIRTY_FRAGTEX |
                            VC5_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_FRAGMENT]);
        key->base.shader_state = v3d->prog.bind_fs;
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->clamp_color = v3d->rasterizer->base.clamp_fragment_color;
        if (v3d->blend->base.logicop_enable) {
                key->logicop_func = v3d->blend->base.logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = v3d->rasterizer->base.multisample;
                /* sample_coverage only matters when not all samples are
                 * enabled in the mask.
                 */
                key->sample_coverage = (v3d->rasterizer->base.multisample &&
                                        v3d->sample_mask != (1 << V3D_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = v3d->blend->base.alpha_to_coverage;
                key->sample_alpha_to_one = v3d->blend->base.alpha_to_one;
        }

        key->depth_enabled = (v3d->zsa->base.depth.enabled ||
                              v3d->zsa->base.stencil[0].enabled);
        if (v3d->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = v3d->zsa->base.alpha.func;
        }

        key->swap_color_rb = v3d->swap_color_rb;

        for (int i = 0; i < v3d->framebuffer.nr_cbufs; i++) {
                struct pipe_surface *cbuf = v3d->framebuffer.cbufs[i];
                if (!cbuf)
                        continue;

                /* gl_FragColor's propagation to however many bound color
                 * buffers there are means that the shader compile needs to
                 * know what buffers are present.
                 */
                key->cbufs |= 1 << i;

                /* If logic operations are enabled then we might emit color
                 * reads and we need to know the color buffer format and
                 * swizzle for that.
                 */
                if (key->logicop_func != PIPE_LOGICOP_COPY) {
                        key->color_fmt[i].format = cbuf->format;
                        key->color_fmt[i].swizzle =
                                v3d_get_format_swizzle(&v3d->screen->devinfo,
                                                       cbuf->format);
                }

                const struct util_format_description *desc =
                        util_format_description(cbuf->format);

                /* Track 32-bit-float RBs: they need different output
                 * handling in the compiled FS.
                 */
                if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT &&
                    desc->channel[0].size == 32) {
                        key->f32_color_rb |= 1 << i;
                }

                if (s->info.fs.untyped_color_outputs) {
                        if (util_format_is_pure_uint(cbuf->format))
                                key->uint_color_rb |= 1 << i;
                        else if (util_format_is_pure_sint(cbuf->format))
                                key->int_color_rb |= 1 << i;
                }
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        v3d->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (v3d->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = v3d->rasterizer->base.light_twoside;
        key->shade_model_flat = v3d->rasterizer->base.flatshade;

        struct v3d_compiled_shader *old_fs = v3d->prog.fs;
        v3d->prog.fs = v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
        if (v3d->prog.fs == old_fs)
                return;

        v3d->dirty |= VC5_DIRTY_COMPILED_FS;

        if (old_fs) {
                /* Only raise the interpolation-related dirty flags when the
                 * new variant's flags actually differ from the old one's.
                 */
                if (v3d->prog.fs->prog_data.fs->flat_shade_flags !=
                    old_fs->prog_data.fs->flat_shade_flags) {
                        v3d->dirty |= VC5_DIRTY_FLAT_SHADE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->noperspective_flags !=
                    old_fs->prog_data.fs->noperspective_flags) {
                        v3d->dirty |= VC5_DIRTY_NOPERSPECTIVE_FLAGS;
                }

                if (v3d->prog.fs->prog_data.fs->centroid_flags !=
                    old_fs->prog_data.fs->centroid_flags) {
                        v3d->dirty |= VC5_DIRTY_CENTROID_FLAGS;
                }
        }

        /* A change in the FS input slots requires relinking the previous
         * geometry stage's outputs (see VC5_DIRTY_FS_INPUTS users below).
         */
        if (old_fs && memcmp(v3d->prog.fs->prog_data.fs->input_slots,
                             old_fs->prog_data.fs->input_slots,
                             sizeof(v3d->prog.fs->prog_data.fs->input_slots))) {
                v3d->dirty |= VC5_DIRTY_FS_INPUTS;
        }
}
648
/**
 * Compiles (or fetches from cache) the render-mode and bin-mode geometry
 * shader variants for the current state, storing them in v3d->prog.gs and
 * v3d->prog.gs_bin.
 */
static void
v3d_update_compiled_gs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_gs_key local_key;
        struct v3d_gs_key *key = &local_key;

        /* Early out unless some state feeding the GS key has changed. */
        if (!(v3d->dirty & (VC5_DIRTY_GEOMTEX |
                            VC5_DIRTY_RASTERIZER |
                            VC5_DIRTY_UNCOMPILED_GS |
                            VC5_DIRTY_PRIM_MODE |
                            VC5_DIRTY_FS_INPUTS))) {
                return;
        }

        /* No GS bound: clear both compiled GS slots. */
        if (!v3d->prog.bind_gs) {
                v3d->prog.gs = NULL;
                v3d->prog.gs_bin = NULL;
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_GEOMETRY]);
        key->base.shader_state = v3d->prog.bind_gs;
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = true;
        /* The render-mode GS only needs to emit the outputs the FS actually
         * consumes.
         */
        key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
        STATIC_ASSERT(sizeof(key->used_outputs) ==
                      sizeof(v3d->prog.fs->prog_data.fs->input_slots));
        memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
               sizeof(key->used_outputs));

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_compiled_shader *gs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
        if (gs != v3d->prog.gs) {
                v3d->prog.gs = gs;
                v3d->dirty |= VC5_DIRTY_COMPILED_GS;
        }

        key->is_coord = true;

        /* The last bin-mode shader in the geometry pipeline only outputs
         * varyings used by transform feedback.
         */
        struct v3d_uncompiled_shader *shader_state = key->base.shader_state;
        memcpy(key->used_outputs, shader_state->tf_outputs,
               sizeof(*key->used_outputs) * shader_state->num_tf_outputs);
        if (shader_state->num_tf_outputs < key->num_used_outputs) {
                /* Zero the tail left over from the render-mode key so the
                 * cache hash/compare is deterministic.
                 */
                uint32_t size = sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 shader_state->num_tf_outputs);
                memset(&key->used_outputs[shader_state->num_tf_outputs],
                       0, size);
        }
        key->num_used_outputs = shader_state->num_tf_outputs;

        struct v3d_compiled_shader *old_gs = v3d->prog.gs;
        struct v3d_compiled_shader *gs_bin =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
        /* NOTE(review): this compares the new bin-mode shader against the
         * current *render*-mode GS (old_gs, just assigned above), so the
         * condition is effectively always true and VC5_DIRTY_COMPILED_GS_BIN
         * is raised on every call; comparing against v3d->prog.gs_bin looks
         * intended — confirm before changing.
         */
        if (gs_bin != old_gs) {
                v3d->prog.gs_bin = gs_bin;
                v3d->dirty |= VC5_DIRTY_COMPILED_GS_BIN;
        }

        /* A change in the GS input slots requires relinking the VS
         * outputs (VC5_DIRTY_GS_INPUTS is checked in the VS update).
         */
        if (old_gs && memcmp(v3d->prog.gs->prog_data.gs->input_slots,
                             old_gs->prog_data.gs->input_slots,
                             sizeof(v3d->prog.gs->prog_data.gs->input_slots))) {
                v3d->dirty |= VC5_DIRTY_GS_INPUTS;
        }
}
722
/**
 * Compiles (or fetches from cache) the render-mode VS and the bin-mode
 * coordinate shader for the current state, storing them in v3d->prog.vs and
 * v3d->prog.cs respectively.
 */
static void
v3d_update_compiled_vs(struct v3d_context *v3d, uint8_t prim_mode)
{
        struct v3d_vs_key local_key;
        struct v3d_vs_key *key = &local_key;

        /* Early out unless some state feeding the VS key has changed.
         * Which downstream stage matters depends on whether a GS is bound.
         */
        if (!(v3d->dirty & (VC5_DIRTY_VERTTEX |
                            VC5_DIRTY_VTXSTATE |
                            VC5_DIRTY_UNCOMPILED_VS |
                            (v3d->prog.bind_gs ? 0 : VC5_DIRTY_RASTERIZER) |
                            (v3d->prog.bind_gs ? 0 : VC5_DIRTY_PRIM_MODE) |
                            (v3d->prog.bind_gs ? VC5_DIRTY_GS_INPUTS :
                                                 VC5_DIRTY_FS_INPUTS)))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        v3d_setup_shared_key(v3d, &key->base, &v3d->tex[PIPE_SHADER_VERTEX]);
        key->base.shader_state = v3d->prog.bind_vs;
        key->base.ucp_enables = v3d->rasterizer->base.clip_plane_enable;
        key->base.is_last_geometry_stage = !v3d->prog.bind_gs;

        /* The VS only needs to emit the outputs the next stage (GS if
         * bound, else FS) actually consumes.
         */
        if (!v3d->prog.bind_gs) {
                key->num_used_outputs = v3d->prog.fs->prog_data.fs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.fs->prog_data.fs->input_slots));
                memcpy(key->used_outputs, v3d->prog.fs->prog_data.fs->input_slots,
                       sizeof(key->used_outputs));
        } else {
                key->num_used_outputs = v3d->prog.gs->prog_data.gs->num_inputs;
                STATIC_ASSERT(sizeof(key->used_outputs) ==
                              sizeof(v3d->prog.gs->prog_data.gs->input_slots));
                memcpy(key->used_outputs, v3d->prog.gs->prog_data.gs->input_slots,
                       sizeof(key->used_outputs));
        }

        key->clamp_color = v3d->rasterizer->base.clamp_vertex_color;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 v3d->rasterizer->base.point_size_per_vertex);

        struct v3d_compiled_shader *vs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
        if (vs != v3d->prog.vs) {
                v3d->prog.vs = vs;
                v3d->dirty |= VC5_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;

        /* Coord shaders only output varyings used by transform feedback,
         * unless they are linked to other shaders in the geometry side
         * of the pipeline, since in that case any of the output varyings
         * could be required in later geometry stages to compute
         * gl_Position or TF outputs.
         */
        if (!v3d->prog.bind_gs) {
                struct v3d_uncompiled_shader *shader_state =
                        key->base.shader_state;
                memcpy(key->used_outputs, shader_state->tf_outputs,
                       sizeof(*key->used_outputs) *
                       shader_state->num_tf_outputs);
                if (shader_state->num_tf_outputs < key->num_used_outputs) {
                        /* Zero the tail left over from the render-mode key
                         * so the cache hash/compare is deterministic.
                         */
                        uint32_t tail_bytes =
                                sizeof(*key->used_outputs) *
                                (key->num_used_outputs -
                                 shader_state->num_tf_outputs);
                        memset(&key->used_outputs[shader_state->num_tf_outputs],
                               0, tail_bytes);
                }
                key->num_used_outputs = shader_state->num_tf_outputs;
        }

        struct v3d_compiled_shader *cs =
                v3d_get_compiled_shader(v3d, &key->base, sizeof(*key));
        if (cs != v3d->prog.cs) {
                v3d->prog.cs = cs;
                v3d->dirty |= VC5_DIRTY_COMPILED_CS;
        }
}
804
/**
 * Updates all graphics-stage compiled shaders for the current draw state.
 *
 * The order matters: the GS key reads v3d->prog.fs->prog_data (FS inputs),
 * and the VS key reads v3d->prog.gs->prog_data when a GS is bound, so FS
 * must be updated before GS, and GS before VS.
 */
void
v3d_update_compiled_shaders(struct v3d_context *v3d, uint8_t prim_mode)
{
        v3d_update_compiled_fs(v3d, prim_mode);
        v3d_update_compiled_gs(v3d, prim_mode);
        v3d_update_compiled_vs(v3d, prim_mode);
}
812
813 void
814 v3d_update_compiled_cs(struct v3d_context *v3d)
815 {
816 struct v3d_key local_key;
817 struct v3d_key *key = &local_key;
818
819 if (!(v3d->dirty & (VC5_DIRTY_UNCOMPILED_CS |
820 VC5_DIRTY_COMPTEX))) {
821 return;
822 }
823
824 memset(key, 0, sizeof(*key));
825 v3d_setup_shared_key(v3d, key, &v3d->tex[PIPE_SHADER_COMPUTE]);
826 key->shader_state = v3d->prog.bind_compute;
827
828 struct v3d_compiled_shader *cs =
829 v3d_get_compiled_shader(v3d, key, sizeof(*key));
830 if (cs != v3d->prog.compute) {
831 v3d->prog.compute = cs;
832 v3d->dirty |= VC5_DIRTY_COMPILED_CS; /* XXX */
833 }
834 }
835
836 static uint32_t
837 fs_cache_hash(const void *key)
838 {
839 return _mesa_hash_data(key, sizeof(struct v3d_fs_key));
840 }
841
842 static uint32_t
843 gs_cache_hash(const void *key)
844 {
845 return _mesa_hash_data(key, sizeof(struct v3d_gs_key));
846 }
847
848 static uint32_t
849 vs_cache_hash(const void *key)
850 {
851 return _mesa_hash_data(key, sizeof(struct v3d_vs_key));
852 }
853
854 static uint32_t
855 cs_cache_hash(const void *key)
856 {
857 return _mesa_hash_data(key, sizeof(struct v3d_key));
858 }
859
860 static bool
861 fs_cache_compare(const void *key1, const void *key2)
862 {
863 return memcmp(key1, key2, sizeof(struct v3d_fs_key)) == 0;
864 }
865
866 static bool
867 gs_cache_compare(const void *key1, const void *key2)
868 {
869 return memcmp(key1, key2, sizeof(struct v3d_gs_key)) == 0;
870 }
871
872 static bool
873 vs_cache_compare(const void *key1, const void *key2)
874 {
875 return memcmp(key1, key2, sizeof(struct v3d_vs_key)) == 0;
876 }
877
878 static bool
879 cs_cache_compare(const void *key1, const void *key2)
880 {
881 return memcmp(key1, key2, sizeof(struct v3d_key)) == 0;
882 }
883
884 static void
885 v3d_shader_state_delete(struct pipe_context *pctx, void *hwcso)
886 {
887 struct v3d_context *v3d = v3d_context(pctx);
888 struct v3d_uncompiled_shader *so = hwcso;
889 nir_shader *s = so->base.ir.nir;
890
891 hash_table_foreach(v3d->prog.cache[s->info.stage], entry) {
892 const struct v3d_key *key = entry->key;
893 struct v3d_compiled_shader *shader = entry->data;
894
895 if (key->shader_state != so)
896 continue;
897
898 if (v3d->prog.fs == shader)
899 v3d->prog.fs = NULL;
900 if (v3d->prog.vs == shader)
901 v3d->prog.vs = NULL;
902 if (v3d->prog.cs == shader)
903 v3d->prog.cs = NULL;
904 if (v3d->prog.compute == shader)
905 v3d->prog.compute = NULL;
906
907 _mesa_hash_table_remove(v3d->prog.cache[s->info.stage], entry);
908 v3d_free_compiled_shader(shader);
909 }
910
911 ralloc_free(so->base.ir.nir);
912 free(so);
913 }
914
915 static void
916 v3d_fp_state_bind(struct pipe_context *pctx, void *hwcso)
917 {
918 struct v3d_context *v3d = v3d_context(pctx);
919 v3d->prog.bind_fs = hwcso;
920 v3d->dirty |= VC5_DIRTY_UNCOMPILED_FS;
921 }
922
923 static void
924 v3d_gp_state_bind(struct pipe_context *pctx, void *hwcso)
925 {
926 struct v3d_context *v3d = v3d_context(pctx);
927 v3d->prog.bind_gs = hwcso;
928 v3d->dirty |= VC5_DIRTY_UNCOMPILED_GS;
929 }
930
931 static void
932 v3d_vp_state_bind(struct pipe_context *pctx, void *hwcso)
933 {
934 struct v3d_context *v3d = v3d_context(pctx);
935 v3d->prog.bind_vs = hwcso;
936 v3d->dirty |= VC5_DIRTY_UNCOMPILED_VS;
937 }
938
939 static void
940 v3d_compute_state_bind(struct pipe_context *pctx, void *state)
941 {
942 struct v3d_context *v3d = v3d_context(pctx);
943
944 v3d->prog.bind_compute = state;
945 v3d->dirty |= VC5_DIRTY_UNCOMPILED_CS;
946 }
947
948 static void *
949 v3d_create_compute_state(struct pipe_context *pctx,
950 const struct pipe_compute_state *cso)
951 {
952 return v3d_uncompiled_shader_create(pctx, cso->ir_type,
953 (void *)cso->prog);
954 }
955
956 void
957 v3d_program_init(struct pipe_context *pctx)
958 {
959 struct v3d_context *v3d = v3d_context(pctx);
960
961 pctx->create_vs_state = v3d_shader_state_create;
962 pctx->delete_vs_state = v3d_shader_state_delete;
963
964 pctx->create_gs_state = v3d_shader_state_create;
965 pctx->delete_gs_state = v3d_shader_state_delete;
966
967 pctx->create_fs_state = v3d_shader_state_create;
968 pctx->delete_fs_state = v3d_shader_state_delete;
969
970 pctx->bind_fs_state = v3d_fp_state_bind;
971 pctx->bind_gs_state = v3d_gp_state_bind;
972 pctx->bind_vs_state = v3d_vp_state_bind;
973
974 if (v3d->screen->has_csd) {
975 pctx->create_compute_state = v3d_create_compute_state;
976 pctx->delete_compute_state = v3d_shader_state_delete;
977 pctx->bind_compute_state = v3d_compute_state_bind;
978 }
979
980 v3d->prog.cache[MESA_SHADER_VERTEX] =
981 _mesa_hash_table_create(pctx, vs_cache_hash, vs_cache_compare);
982 v3d->prog.cache[MESA_SHADER_GEOMETRY] =
983 _mesa_hash_table_create(pctx, gs_cache_hash, gs_cache_compare);
984 v3d->prog.cache[MESA_SHADER_FRAGMENT] =
985 _mesa_hash_table_create(pctx, fs_cache_hash, fs_cache_compare);
986 v3d->prog.cache[MESA_SHADER_COMPUTE] =
987 _mesa_hash_table_create(pctx, cs_cache_hash, cs_cache_compare);
988 }
989
990 void
991 v3d_program_fini(struct pipe_context *pctx)
992 {
993 struct v3d_context *v3d = v3d_context(pctx);
994
995 for (int i = 0; i < MESA_SHADER_STAGES; i++) {
996 struct hash_table *cache = v3d->prog.cache[i];
997 if (!cache)
998 continue;
999
1000 hash_table_foreach(cache, entry) {
1001 struct v3d_compiled_shader *shader = entry->data;
1002 v3d_free_compiled_shader(shader);
1003 _mesa_hash_table_remove(cache, entry);
1004 }
1005 }
1006
1007 v3d_bo_unreference(&v3d->prog.spill_bo);
1008 }