/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_program.c
 *
 * This file contains the driver interface for compiling shaders.
 *
 * See iris_program_cache.c for the in-memory program cache where the
 * compiled shaders are stored.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_nir.h"
#include "iris_context.h"
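
/**
 * Assign a new, unique program id; used as program_string_id in shader keys.
 */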
static unsigned
get_new_program_id(struct iris_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}

/**
 * An uncompiled, API-facing shader.  This is the Gallium CSO for shaders.
 * It primarily contains the NIR for the shader.
 *
 * Each API-facing shader can be compiled into multiple shader variants,
 * based on non-orthogonal state dependencies, recorded in the shader key.
 *
 * See iris_compiled_shader, which represents a compiled shader variant.
 */
struct iris_uncompiled_shader {
   nir_shader *nir;

   struct pipe_stream_output_info stream_output;

   unsigned program_id;

   /** Bitfield of (1 << IRIS_NOS_*) flags. */
   unsigned nos;
};

static nir_ssa_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      assert(deref->arr.index.ssa);
      offset = nir_iadd(b, offset,
                        nir_imul(b, index, nir_imm_int(b, array_size)));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }

   /* Accessing an invalid surface index with the dataport can result in a
    * hang.  According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}

static void
iris_lower_storage_image_derefs(nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_min:
         case nir_intrinsic_image_deref_atomic_max:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *index =
               nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
                        get_aoa_deref_offset(&b, deref, 1));
            brw_nir_rewrite_image_intrinsic(intrin, index);
            break;
         }

         default:
            break;
         }
      }
   }
}

// XXX: need unify_interfaces() at link time...

/**
 * The pipe->create_[stage]_state() driver hooks.
 *
 * Performs basic NIR preprocessing, records any state dependencies, and
 * returns an iris_uncompiled_shader as the Gallium CSO.
 *
 * Actual shader compilation to assembly happens later, at first use.
 */
static void *
iris_create_uncompiled_shader(struct pipe_context *ctx,
                              nir_shader *nir,
                              const struct pipe_stream_output_info *so_info)
{
   //struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   struct iris_uncompiled_shader *ish =
      calloc(1, sizeof(struct iris_uncompiled_shader));
   if (!ish)
      return NULL;

   nir = brw_preprocess_nir(screen->compiler, nir);

   NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
   NIR_PASS_V(nir, iris_lower_storage_image_derefs);

   ish->program_id = get_new_program_id(screen);
   ish->nir = nir;
   if (so_info)
      memcpy(&ish->stream_output, so_info, sizeof(*so_info));

   switch (nir->info.stage) {
   case MESA_SHADER_VERTEX:
      /* User clip planes */
      if (nir->info.clip_distance_array_size == 0)
         ish->nos |= IRIS_NOS_RASTERIZER;
      break;
   case MESA_SHADER_TESS_CTRL:
      break;
   case MESA_SHADER_TESS_EVAL:
      break;
   case MESA_SHADER_GEOMETRY:
      break;
   case MESA_SHADER_FRAGMENT:
      ish->nos |= IRIS_NOS_FRAMEBUFFER |
                  IRIS_NOS_DEPTH_STENCIL_ALPHA |
                  IRIS_NOS_RASTERIZER |
                  IRIS_NOS_BLEND;

      /* The program key needs the VUE map if there are > 16 inputs */
      if (util_bitcount64(ish->nir->info.inputs_read &
                          BRW_FS_VARYING_INPUT_MASK) > 16) {
         ish->nos |= IRIS_NOS_LAST_VUE_MAP;
      }
      break;
   case MESA_SHADER_COMPUTE:
      break;
   default:
      break;
   }

   // XXX: disallow more than 64KB of shared variables

   return ish;
}

static void *
iris_create_shader_state(struct pipe_context *ctx,
                         const struct pipe_shader_state *state)
{
   assert(state->type == PIPE_SHADER_IR_NIR);

   return iris_create_uncompiled_shader(ctx, state->ir.nir,
                                        &state->stream_output);
}

static void *
iris_create_compute_state(struct pipe_context *ctx,
                          const struct pipe_compute_state *state)
{
   assert(state->ir_type == PIPE_SHADER_IR_NIR);

   return iris_create_uncompiled_shader(ctx, (void *) state->prog, NULL);
}

/**
 * The pipe->delete_[stage]_state() driver hooks.
 *
 * Frees the iris_uncompiled_shader.
 */
static void
iris_delete_shader_state(struct pipe_context *ctx, void *state)
{
   struct iris_uncompiled_shader *ish = state;

   ralloc_free(ish->nir);
   free(ish);
}

/**
 * The pipe->bind_[stage]_state() driver hook.
 *
 * Binds an uncompiled shader as the current one for a particular stage.
 * Updates dirty tracking to account for the shader's NOS.
 */
static void
bind_state(struct iris_context *ice,
           struct iris_uncompiled_shader *ish,
           gl_shader_stage stage)
{
   uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
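   /* Note: this relies on the IRIS_DIRTY_UNCOMPILED_* bits being allocated
    * consecutively, in gl_shader_stage order, so that shifting the VS bit
    * by the stage index selects the bit for this stage.
    */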
   const uint64_t nos = ish ? ish->nos : 0;

   ice->shaders.uncompiled[stage] = ish;
   ice->state.dirty |= dirty_bit;

   /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
    * (or that they no longer need to do so).
    */
   for (int i = 0; i < IRIS_NOS_COUNT; i++) {
      if (nos & (1 << i))
         ice->state.dirty_for_nos[i] |= dirty_bit;
      else
         ice->state.dirty_for_nos[i] &= ~dirty_bit;
   }
}

static void
iris_bind_vs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_VERTEX);
}

static void
iris_bind_tcs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
iris_bind_tes_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
iris_bind_gs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
}

static void
iris_bind_fs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
}

static void
iris_bind_cs_state(struct pipe_context *ctx, void *state)
{
   bind_state((void *) ctx, state, MESA_SHADER_COMPUTE);
}

/**
 * Sets up the starting offsets for the groups of binding table entries
 * common to all pipeline stages.
 *
 * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
 * unused but also make sure that addition of small offsets to them will
 * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
 */
static uint32_t
assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
                                    const struct nir_shader *nir,
                                    struct brw_stage_prog_data *prog_data,
                                    uint32_t next_binding_table_offset,
                                    unsigned num_system_values)
{
   const struct shader_info *info = &nir->info;

   if (info->num_textures) {
      prog_data->binding_table.texture_start = next_binding_table_offset;
      prog_data->binding_table.gather_texture_start = next_binding_table_offset;
      next_binding_table_offset += info->num_textures;
   } else {
      prog_data->binding_table.texture_start = 0xd0d0d0d0;
      prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
   }

   if (info->num_images) {
      prog_data->binding_table.image_start = next_binding_table_offset;
      next_binding_table_offset += info->num_images;
   } else {
      prog_data->binding_table.image_start = 0xd0d0d0d0;
   }

   int num_ubos = info->num_ubos +
                  ((nir->num_uniforms || num_system_values) ? 1 : 0);
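
   /* Constant buffer 0 is managed by the driver: it holds the pushed system
    * values and any ordinary uniforms (see iris_setup_uniforms), so reserve
    * an extra UBO binding slot for it whenever either of those is present.
    */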
   if (num_ubos) {
      //assert(info->num_ubos <= BRW_MAX_UBO);
      prog_data->binding_table.ubo_start = next_binding_table_offset;
      next_binding_table_offset += num_ubos;
   } else {
      prog_data->binding_table.ubo_start = 0xd0d0d0d0;
   }

   if (info->num_ssbos || info->num_abos) {
      prog_data->binding_table.ssbo_start = next_binding_table_offset;
      // XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
      next_binding_table_offset += IRIS_MAX_ABOS + info->num_ssbos;
   } else {
      prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
   }

   prog_data->binding_table.shader_time_start = 0xd0d0d0d0;

   /* This may or may not be used depending on how the compile goes. */
   prog_data->binding_table.pull_constants_start = next_binding_table_offset;
   next_binding_table_offset++;

   /* Plane 0 is just the regular texture section */
   prog_data->binding_table.plane_start[0] = prog_data->binding_table.texture_start;

   prog_data->binding_table.plane_start[1] = next_binding_table_offset;
   next_binding_table_offset += info->num_textures;

   prog_data->binding_table.plane_start[2] = next_binding_table_offset;
   next_binding_table_offset += info->num_textures;

   /* Set the binding table size */
   prog_data->binding_table.size_bytes = next_binding_table_offset * 4;

   return next_binding_table_offset;
}

/**
 * Associate NIR uniform variables with the prog_data->param[] mechanism
 * used by the backend.  Also, decide which UBOs we'd like to push in an
 * ideal situation (though the backend can reduce this).
 */
static void
iris_setup_uniforms(const struct brw_compiler *compiler,
                    void *mem_ctx,
                    nir_shader *nir,
                    struct brw_stage_prog_data *prog_data,
                    enum brw_param_builtin **out_system_values,
                    unsigned *out_num_system_values)
{
   /* We don't use params[], but fs_visitor::nir_setup_uniforms() asserts
    * about it for compute shaders, so go ahead and make some fake ones
    * which the backend will dead code eliminate.
    */
   prog_data->nr_params = nir->num_uniforms;
   prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);

   /* The intel compiler assumes that num_uniforms is in bytes. For
    * scalar that means 4 bytes per uniform slot.
    *
    * Ref: brw_nir_lower_uniforms, type_size_scalar_bytes.
    */
   nir->num_uniforms *= 4;

   const unsigned IRIS_MAX_SYSTEM_VALUES = 32;
   enum brw_param_builtin *system_values =
      rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
   unsigned num_system_values = 0;

   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   b.cursor = nir_before_block(nir_start_block(impl));
   nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
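
   /* temp_ubo_name is a placeholder SSA value used as the UBO index for the
    * load_ubo intrinsics created below; once the number of system values is
    * known, it is rewritten to constant buffer 0 (and any existing cbuf0
    * loads are offset past the newly inserted params).
    */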

   /* Turn system value intrinsics into uniforms */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

         unsigned idx = num_system_values;

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_user_clip_plane: {
            unsigned ucp = nir_intrinsic_ucp_id(intrin);
            for (int i = 0; i < 4; i++) {
               system_values[num_system_values++] =
                  BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
            }
            break;
         }
         default:
            continue;
         }

         b.cursor = nir_before_instr(instr);

         unsigned comps = nir_intrinsic_dest_components(intrin);
         nir_ssa_def *offset = nir_imm_int(&b, idx * sizeof(uint32_t));

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
         load->num_components = comps;
         load->src[0] = nir_src_for_ssa(temp_ubo_name);
         load->src[1] = nir_src_for_ssa(offset);
         nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
         nir_builder_instr_insert(&b, &load->instr);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&load->dest.ssa));
         nir_instr_remove(instr);
      }
   }

   nir_validate_shader(nir, "before remapping");

   /* Place the new params at the front of constant buffer 0. */
   if (num_system_values > 0) {
      nir->num_uniforms += num_system_values * sizeof(uint32_t);
      system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
                               num_system_values);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

            if (load->intrinsic != nir_intrinsic_load_ubo)
               continue;

            b.cursor = nir_before_instr(instr);

            assert(load->src[0].is_ssa);

            if (load->src[0].ssa == temp_ubo_name) {
               nir_instr_rewrite_src(instr, &load->src[0],
                                     nir_src_for_ssa(nir_imm_int(&b, 0)));
            } else if (nir_src_as_uint(load->src[0]) == 0) {
               nir_ssa_def *offset =
                  nir_iadd(&b, load->src[1].ssa,
                           nir_imm_int(&b, 4 * num_system_values));
               nir_instr_rewrite_src(instr, &load->src[1],
                                     nir_src_for_ssa(offset));
            }
         }
      }

      /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
      nir_opt_constant_folding(nir);
   } else {
      ralloc_free(system_values);
      system_values = NULL;
   }

   nir_validate_shader(nir, "after remap");

   // XXX: vs clip planes?
   if (nir->info.stage != MESA_SHADER_COMPUTE)
      brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   *out_system_values = system_values;
   *out_num_system_values = num_system_values;
}

/**
 * Compile a vertex shader, and upload the assembly.
 */
static bool
iris_compile_vs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_vs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_vs_prog_data *vs_prog_data =
      rzalloc(mem_ctx, struct brw_vs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader);

   /* Don't tell the backend about our clip plane constants, we've already
    * lowered them in NIR and we don't want it doing it again.
    */
   struct brw_vs_prog_key key_no_ucp = *key;
   key_no_ucp.nr_userclip_plane_consts = 0;

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_vs(compiler, &ice->dbg, mem_ctx, &key_no_ucp, vs_prog_data,
                     nir, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile vertex shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);

   iris_upload_and_bind_shader(ice, IRIS_CACHE_VS, key, program, prog_data,
                               so_decls, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}

/**
 * Update the current vertex shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_vs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_VERTEX];

   struct brw_vs_prog_key key = { .program_string_id = ish->program_id };
   ice->vtbl.populate_vs_key(ice, &ish->nir->info, &key);

   if (iris_bind_cached_shader(ice, IRIS_CACHE_VS, &key))
      return;

   UNUSED bool success = iris_compile_vs(ice, ish, &key);
}

/**
 * Get the shader_info for a given stage, or NULL if the stage is disabled.
 */
const struct shader_info *
iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
{
   const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];

   if (!ish)
      return NULL;

   const nir_shader *nir = ish->nir;
   return &nir->info;
}

// XXX: this function is gross
unsigned
iris_get_shader_num_ubos(const struct iris_context *ice, gl_shader_stage stage)
{
   const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];
   const struct iris_compiled_shader *shader = ice->shaders.prog[stage];

   if (ish && shader) {
      const nir_shader *nir = ish->nir;
      /* see assign_common_binding_table_offsets */
      return nir->info.num_ubos +
             ((nir->num_uniforms || shader->num_system_values) ? 1 : 0);
   }

   return 0;
}

/**
 * Get the union of TCS output and TES input slots.
 *
 * TCS and TES need to agree on a common URB entry layout.  In particular,
 * the data for all patch vertices is stored in a single URB entry (unlike
 * GS which has one entry per input vertex).  This means that per-vertex
 * array indexing needs a stride.
 *
 * SSO requires locations to match, but doesn't require the number of
 * outputs/inputs to match (in fact, the TCS often has extra outputs).
 * So, we need to take the extra step of unifying these on the fly.
 */
static void
get_unified_tess_slots(const struct iris_context *ice,
                       uint64_t *per_vertex_slots,
                       uint32_t *per_patch_slots)
{
   const struct shader_info *tcs =
      iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
   const struct shader_info *tes =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);

   *per_vertex_slots = tes->inputs_read;
   *per_patch_slots = tes->patch_inputs_read;

   if (tcs) {
      *per_vertex_slots |= tcs->outputs_written;
      *per_patch_slots |= tcs->patch_outputs_written;
   }
}

/**
 * Compile a tessellation control shader, and upload the assembly.
 */
static bool
iris_compile_tcs(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct brw_tcs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct nir_shader_compiler_options *options =
      compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tcs_prog_data *tcs_prog_data =
      rzalloc(mem_ctx, struct brw_tcs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values = NULL;
   unsigned num_system_values = 0;

   nir_shader *nir;

   if (ish) {
      nir = nir_shader_clone(mem_ctx, ish->nir);

      iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                          &num_system_values);
      assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                          num_system_values);
   } else {
      nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);

      /* Reserve space for passing the default tess levels as constants. */
      prog_data->param = rzalloc_array(mem_ctx, uint32_t, 8);
      prog_data->nr_params = 8;
      prog_data->ubo_ranges[0].length = 1;
   }

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tcs(compiler, &ice->dbg, mem_ctx, key, tcs_prog_data, nir,
                      -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile control shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   iris_upload_and_bind_shader(ice, IRIS_CACHE_TCS, key, program, prog_data,
                               NULL, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}

/**
 * Update the current tessellation control shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tcs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *tcs =
      ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];

   const struct shader_info *tes_info =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
   struct brw_tcs_prog_key key = {
      .program_string_id = tcs ? tcs->program_id : 0,
      .tes_primitive_mode = tes_info->tess.primitive_mode,
      .input_vertices = ice->state.vertices_per_patch,
   };
   get_unified_tess_slots(ice, &key.outputs_written,
                          &key.patch_outputs_written);
   ice->vtbl.populate_tcs_key(ice, &key);

   if (iris_bind_cached_shader(ice, IRIS_CACHE_TCS, &key))
      return;

   UNUSED bool success = iris_compile_tcs(ice, tcs, &key);
}

/**
 * Compile a tessellation evaluation shader, and upload the assembly.
 */
static bool
iris_compile_tes(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct brw_tes_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tes_prog_data *tes_prog_data =
      rzalloc(mem_ctx, struct brw_tes_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values);

   struct brw_vue_map input_vue_map;
   brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
                            key->patch_inputs_read);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tes(compiler, &ice->dbg, mem_ctx, key, &input_vue_map,
                      tes_prog_data, nir, NULL, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);

   iris_upload_and_bind_shader(ice, IRIS_CACHE_TES, key, program, prog_data,
                               so_decls, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}

/**
 * Update the current tessellation evaluation shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tes(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];

   struct brw_tes_prog_key key = { .program_string_id = ish->program_id };
   get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
   ice->vtbl.populate_tes_key(ice, &key);

   if (iris_bind_cached_shader(ice, IRIS_CACHE_TES, &key))
      return;

   UNUSED bool success = iris_compile_tes(ice, ish, &key);
}

/**
 * Compile a geometry shader, and upload the assembly.
 */
static bool
iris_compile_gs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_gs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_gs_prog_data *gs_prog_data =
      rzalloc(mem_ctx, struct brw_gs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
                                       num_system_values);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_gs(compiler, &ice->dbg, mem_ctx, key, gs_prog_data, nir,
                     NULL, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile geometry shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   uint32_t *so_decls =
      ice->vtbl.create_so_decl_list(&ish->stream_output,
                                    &vue_prog_data->vue_map);

   iris_upload_and_bind_shader(ice, IRIS_CACHE_GS, key, program, prog_data,
                               so_decls, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}

/**
 * Update the current geometry shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_gs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];

   if (!ish) {
      iris_unbind_shader(ice, IRIS_CACHE_GS);
      return;
   }

   struct brw_gs_prog_key key = { .program_string_id = ish->program_id };
   ice->vtbl.populate_gs_key(ice, &key);

   if (iris_bind_cached_shader(ice, IRIS_CACHE_GS, &key))
      return;

   UNUSED bool success = iris_compile_gs(ice, ish, &key);
}

/**
 * Compile a fragment (pixel) shader, and upload the assembly.
 */
static bool
iris_compile_fs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_wm_prog_key *key,
                struct brw_vue_map *vue_map)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data *fs_prog_data =
      rzalloc(mem_ctx, struct brw_wm_prog_data);
   struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values);

   assign_common_binding_table_offsets(devinfo, nir, prog_data,
                                       MAX2(key->nr_color_regions, 1),
                                       num_system_values);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_fs(compiler, &ice->dbg, mem_ctx, key, fs_prog_data,
                     nir, NULL, -1, -1, -1, true, false, vue_map, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile fragment shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   //brw_alloc_stage_scratch(brw, &brw->wm.base, prog_data.base.total_scratch);

   iris_upload_and_bind_shader(ice, IRIS_CACHE_FS, key, program, prog_data,
                               NULL, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}

/**
 * Update the current fragment shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_fs(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct brw_wm_prog_key key = { .program_string_id = ish->program_id };
   ice->vtbl.populate_fs_key(ice, &key);

   if (ish->nos & IRIS_NOS_LAST_VUE_MAP)
      key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;

   if (iris_bind_cached_shader(ice, IRIS_CACHE_FS, &key))
      return;

   UNUSED bool success =
      iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
}

/**
 * Get the compiled shader for the last enabled geometry stage.
 *
 * This stage is the one which will feed stream output and the rasterizer.
 */
static struct iris_compiled_shader *
last_vue_shader(struct iris_context *ice)
{
   if (ice->shaders.prog[MESA_SHADER_GEOMETRY])
      return ice->shaders.prog[MESA_SHADER_GEOMETRY];

   if (ice->shaders.prog[MESA_SHADER_TESS_EVAL])
      return ice->shaders.prog[MESA_SHADER_TESS_EVAL];

   return ice->shaders.prog[MESA_SHADER_VERTEX];
}

/**
 * Update the last enabled stage's VUE map.
 *
 * When the shader feeding the rasterizer's output interface changes, we
 * need to re-emit various packets.
 */
static void
update_last_vue_map(struct iris_context *ice,
                    struct brw_stage_prog_data *prog_data)
{
   struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
   struct brw_vue_map *vue_map = &vue_prog_data->vue_map;
   struct brw_vue_map *old_map = ice->shaders.last_vue_map;
   const uint64_t changed_slots =
      (old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;

   if (changed_slots & VARYING_BIT_VIEWPORT) {
      // XXX: could use ctx->Const.MaxViewports for old API efficiency
      ice->state.num_viewports =
         (vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
      ice->state.dirty |= IRIS_DIRTY_CLIP |
                          IRIS_DIRTY_SF_CL_VIEWPORT |
                          IRIS_DIRTY_CC_VIEWPORT |
                          IRIS_DIRTY_SCISSOR_RECT |
                          IRIS_DIRTY_UNCOMPILED_FS |
                          ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
      // XXX: CC_VIEWPORT?
   }

   if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
      ice->state.dirty |= IRIS_DIRTY_SBE;
   }

   ice->shaders.last_vue_map = &vue_prog_data->vue_map;
}

/**
 * Get the prog_data for a given stage, or NULL if the stage is disabled.
 */
static struct brw_vue_prog_data *
get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
{
   if (!ice->shaders.prog[stage])
      return NULL;

   return (void *) ice->shaders.prog[stage]->prog_data;
}

/**
 * Update the current shader variants for the given state.
 *
 * This should be called on every draw call to ensure that the correct
 * shaders are bound.  It will also flag any dirty state triggered by
 * swapping out those shaders.
 */
void
iris_update_compiled_shaders(struct iris_context *ice)
{
   const uint64_t dirty = ice->state.dirty;
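
   /* Snapshot the old VUE prog_data for the geometry stages so we can see
    * whether the URB entry sizes change after updating the shaders below
    * (skipped when the URB is already flagged dirty).
    */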
   struct brw_vue_prog_data *old_prog_datas[4];
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
         old_prog_datas[i] = get_vue_prog_data(ice, i);
   }

   if (dirty & (IRIS_DIRTY_UNCOMPILED_TCS | IRIS_DIRTY_UNCOMPILED_TES)) {
      struct iris_uncompiled_shader *tes =
         ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
      if (tes) {
         iris_update_compiled_tcs(ice);
         iris_update_compiled_tes(ice);
      } else {
         iris_unbind_shader(ice, IRIS_CACHE_TCS);
         iris_unbind_shader(ice, IRIS_CACHE_TES);
      }
   }

   if (dirty & IRIS_DIRTY_UNCOMPILED_VS)
      iris_update_compiled_vs(ice);
   if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
      iris_update_compiled_gs(ice);

   struct iris_compiled_shader *shader = last_vue_shader(ice);
   update_last_vue_map(ice, shader->prog_data);
   if (ice->state.streamout != shader->streamout) {
      ice->state.streamout = shader->streamout;
      ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
   }

   if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
      iris_update_compiled_fs(ice);

   /* Changing shader interfaces may require a URB configuration. */
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         struct brw_vue_prog_data *old = old_prog_datas[i];
         struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
         if (!!old != !!new ||
             (new && new->urb_entry_size != old->urb_entry_size)) {
            ice->state.dirty |= IRIS_DIRTY_URB;
            break;
         }
      }
   }
}
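
/**
 * Compile a compute shader, and upload the assembly.
 */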
static bool
iris_compile_cs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct brw_cs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_cs_prog_data *cs_prog_data =
      rzalloc(mem_ctx, struct brw_cs_prog_data);
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   cs_prog_data->binding_table.work_groups_start = 0;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
                       &num_system_values);

   assign_common_binding_table_offsets(devinfo, nir, prog_data, 1,
                                       num_system_values);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_cs(compiler, &ice->dbg, mem_ctx, key, cs_prog_data,
                     nir, -1, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile compute shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return false;
   }

   iris_upload_and_bind_shader(ice, IRIS_CACHE_CS, key, program, prog_data,
                               NULL, system_values, num_system_values);

   ralloc_free(mem_ctx);
   return true;
}
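
/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */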
void
iris_update_compiled_compute_shader(struct iris_context *ice)
{
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_COMPUTE];

   struct brw_cs_prog_key key = { .program_string_id = ish->program_id };
   ice->vtbl.populate_cs_key(ice, &key);

   if (iris_bind_cached_shader(ice, IRIS_CACHE_CS, &key))
      return;

   UNUSED bool success = iris_compile_cs(ice, ish, &key);
}

void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
                               uint32_t *dst)
{
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   assert(cs_prog_data->push.total.size > 0);
   assert(cs_prog_data->push.cross_thread.size == 0);
   assert(cs_prog_data->push.per_thread.dwords == 1);
   assert(prog_data->param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
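
   /* Each thread's per-thread push constant data occupies a full register
    * (8 dwords); only the first dword, the subgroup ID, is actually used.
    */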
   for (unsigned t = 0; t < cs_prog_data->threads; t++)
      dst[8 * t] = t;
}

/**
 * Allocate scratch BOs as needed for the given per-thread size and stage.
 *
 * Returns the 32-bit "Scratch Space Base Pointer" value.
 */
*ice
,
1199 unsigned per_thread_scratch
,
1200 gl_shader_stage stage
)
1202 struct iris_screen
*screen
= (struct iris_screen
*)ice
->ctx
.screen
;
1203 struct iris_bufmgr
*bufmgr
= screen
->bufmgr
;
1204 const struct gen_device_info
*devinfo
= &screen
->devinfo
;
1206 unsigned encoded_size
= ffs(per_thread_scratch
) - 11;
1207 assert(encoded_size
< (1 << 16));
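
   /* per_thread_scratch is a power of two, at least 1KB; ffs() - 11 maps
    * 1KB -> 0, 2KB -> 1, 4KB -> 2, and so on, matching the hardware's
    * per-thread scratch space encoding.
    */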

   struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];

   if (!*bop) {
      /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
       *
       *    "Scratch Space per slice is computed based on 4 sub-slices.  SW must
       *     allocate scratch space enough so that each slice has 4 slices
       *     allocated."
       *
       * According to the other driver team, this applies to compute shaders
       * as well.  This is not currently documented at all.
       */
      unsigned subslice_total = 4 * devinfo->num_slices;
      assert(subslice_total >= screen->subslice_total);

      unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;

      uint32_t max_threads[] = {
         [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
         [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
         [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
         [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
         [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
         [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslice_total,
      };

      uint32_t size = per_thread_scratch * max_threads[stage];

      *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
   }

   return (*bop)->gtt_offset;
}

void
iris_init_program_functions(struct pipe_context *ctx)
{
   ctx->create_vs_state  = iris_create_shader_state;
   ctx->create_tcs_state = iris_create_shader_state;
   ctx->create_tes_state = iris_create_shader_state;
   ctx->create_gs_state  = iris_create_shader_state;
   ctx->create_fs_state  = iris_create_shader_state;
   ctx->create_compute_state = iris_create_compute_state;

   ctx->delete_vs_state  = iris_delete_shader_state;
   ctx->delete_tcs_state = iris_delete_shader_state;
   ctx->delete_tes_state = iris_delete_shader_state;
   ctx->delete_gs_state  = iris_delete_shader_state;
   ctx->delete_fs_state  = iris_delete_shader_state;
   ctx->delete_compute_state = iris_delete_shader_state;

   ctx->bind_vs_state  = iris_bind_vs_state;
   ctx->bind_tcs_state = iris_bind_tcs_state;
   ctx->bind_tes_state = iris_bind_tes_state;
   ctx->bind_gs_state  = iris_bind_gs_state;
   ctx->bind_fs_state  = iris_bind_fs_state;
   ctx->bind_compute_state = iris_bind_cs_state;
}