/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_program.c
 *
 * This file contains the driver interface for compiling shaders.
 *
 * See iris_program_cache.c for the in-memory program cache where the
 * compiled shaders are stored.
 */
#include "pipe/p_defines.h"
#include "pipe/p_state.h"
#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
#include "util/u_upload_mgr.h"
#include "util/debug.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_nir.h"
#include "iris_context.h"
#include "nir/tgsi_to_nir.h"
#define KEY_ID(prefix) .prefix.program_string_id = ish->program_id

#define BRW_KEY_INIT(gen, prog_id)                       \
   .base.program_string_id = prog_id,                    \
   .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
   .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688,   \
   .base.tex.compressed_multisample_layout_mask = ~0,    \
   .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
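
/*
 * Illustrative note: each iris_update_compiled_* function below seeds its
 * cache key with KEY_ID(), e.g.
 *
 *    struct iris_vs_prog_key key = { KEY_ID(vue.base) };
 *
 * and the iris_to_brw_*_key() helpers then use BRW_KEY_INIT to carry the
 * same program_string_id into the backend key along with default sampler
 * swizzle and MSAA layout state.
 */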
static unsigned
get_new_program_id(struct iris_screen *screen)
{
   return p_atomic_inc_return(&screen->program_id);
}
static struct brw_vs_prog_key
iris_to_brw_vs_key(const struct gen_device_info *devinfo,
                   const struct iris_vs_prog_key *key)
{
   return (struct brw_vs_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),

      /* Don't tell the backend about our clip plane constants, we've
       * already lowered them in NIR and don't want it doing it again.
       */
      .nr_userclip_plane_consts = 0,
   };
}
static struct brw_tcs_prog_key
iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
                    const struct iris_tcs_prog_key *key)
{
   return (struct brw_tcs_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
      .tes_primitive_mode = key->tes_primitive_mode,
      .input_vertices = key->input_vertices,
      .patch_outputs_written = key->patch_outputs_written,
      .outputs_written = key->outputs_written,
      .quads_workaround = key->quads_workaround,
   };
}
static struct brw_tes_prog_key
iris_to_brw_tes_key(const struct gen_device_info *devinfo,
                    const struct iris_tes_prog_key *key)
{
   return (struct brw_tes_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
      .patch_inputs_read = key->patch_inputs_read,
      .inputs_read = key->inputs_read,
   };
}
static struct brw_gs_prog_key
iris_to_brw_gs_key(const struct gen_device_info *devinfo,
                   const struct iris_gs_prog_key *key)
{
   return (struct brw_gs_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
   };
}
static struct brw_wm_prog_key
iris_to_brw_fs_key(const struct gen_device_info *devinfo,
                   const struct iris_fs_prog_key *key)
{
   return (struct brw_wm_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
      .nr_color_regions = key->nr_color_regions,
      .flat_shade = key->flat_shade,
      .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
      .alpha_to_coverage = key->alpha_to_coverage,
      .clamp_fragment_color = key->clamp_fragment_color,
      .persample_interp = key->persample_interp,
      .multisample_fbo = key->multisample_fbo,
      .force_dual_color_blend = key->force_dual_color_blend,
      .coherent_fb_fetch = key->coherent_fb_fetch,
      .color_outputs_valid = key->color_outputs_valid,
      .input_slots_valid = key->input_slots_valid,
      .ignore_sample_mask_out = !key->multisample_fbo,
   };
}
static struct brw_cs_prog_key
iris_to_brw_cs_key(const struct gen_device_info *devinfo,
                   const struct iris_cs_prog_key *key)
{
   return (struct brw_cs_prog_key) {
      BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
   };
}
static void *
upload_state(struct u_upload_mgr *uploader,
             struct iris_state_ref *ref,
             unsigned size,
             unsigned alignment)
{
   void *p = NULL;
   u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
   return p;
}
void
iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
                                struct pipe_shader_buffer *buf,
                                struct iris_state_ref *surf_state,
                                bool ssbo)
{
   struct pipe_context *ctx = &ice->ctx;
   struct iris_screen *screen = (struct iris_screen *) ctx->screen;

   void *map =
      upload_state(ice->state.surface_uploader, surf_state,
                   screen->isl_dev.ss.size, 64);
   if (!unlikely(map)) {
      surf_state->res = NULL;
      return;
   }

   struct iris_resource *res = (void *) buf->buffer;
   struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
   surf_state->offset += iris_bo_offset_from_base_address(surf_bo);

   isl_buffer_fill_state(&screen->isl_dev, map,
                         .address = res->bo->gtt_offset + res->offset +
                                    buf->buffer_offset,
                         .size_B = buf->buffer_size - res->offset,
                         .format = ssbo ? ISL_FORMAT_RAW
                                        : ISL_FORMAT_R32G32B32A32_FLOAT,
                         .swizzle = ISL_SWIZZLE_IDENTITY,
                         .mocs = iris_mocs(res->bo, &screen->isl_dev));
}
static nir_ssa_def *
get_aoa_deref_offset(nir_builder *b,
                     nir_deref_instr *deref,
                     unsigned elem_size)
{
   unsigned array_size = elem_size;
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (deref->deref_type != nir_deref_type_var) {
      assert(deref->deref_type == nir_deref_type_array);

      /* This level's element size is the previous level's array size */
      nir_ssa_def *index = nir_ssa_for_src(b, deref->arr.index, 1);
      assert(deref->arr.index.ssa);
      offset = nir_iadd(b, offset,
                        nir_imul(b, index, nir_imm_int(b, array_size)));

      deref = nir_deref_instr_parent(deref);
      assert(glsl_type_is_array(deref->type));
      array_size *= glsl_get_length(deref->type);
   }

   /* Accessing an invalid surface index with the dataport can result in a
    * hang.  According to the spec "if the index used to select an individual
    * element is negative or greater than or equal to the size of the array,
    * the results of the operation are undefined but may not lead to
    * termination" -- which is one of the possible outcomes of the hang.
    * Clamp the index to prevent access outside of the array bounds.
    */
   return nir_umin(b, offset, nir_imm_int(b, array_size - elem_size));
}
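
/*
 * Worked example (illustrative): for a variable declared as img[3][4] with
 * elem_size == 1, accessing img[2][3] walks the deref chain from the leaf:
 * offset = 3 * 1 + 2 * 4 = 11 and array_size = 12, so the result is
 * umin(11, 12 - 1) = 11.  An out-of-range computed index such as img[2][7]
 * would produce 15 and be clamped down to 11 instead of indexing past the
 * last surface.
 */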
static void
iris_lower_storage_image_derefs(nir_shader *nir)
{
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_deref_load:
         case nir_intrinsic_image_deref_store:
         case nir_intrinsic_image_deref_atomic_add:
         case nir_intrinsic_image_deref_atomic_imin:
         case nir_intrinsic_image_deref_atomic_umin:
         case nir_intrinsic_image_deref_atomic_imax:
         case nir_intrinsic_image_deref_atomic_umax:
         case nir_intrinsic_image_deref_atomic_and:
         case nir_intrinsic_image_deref_atomic_or:
         case nir_intrinsic_image_deref_atomic_xor:
         case nir_intrinsic_image_deref_atomic_exchange:
         case nir_intrinsic_image_deref_atomic_comp_swap:
         case nir_intrinsic_image_deref_size:
         case nir_intrinsic_image_deref_samples:
         case nir_intrinsic_image_deref_load_raw_intel:
         case nir_intrinsic_image_deref_store_raw_intel: {
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            b.cursor = nir_before_instr(&intrin->instr);
            nir_ssa_def *index =
               nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
                        get_aoa_deref_offset(&b, deref, 1));
            nir_rewrite_image_intrinsic(intrin, index, false);
            break;
         }

         default:
            break;
         }
      }
   }
}
/**
 * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
 */
static bool
iris_fix_edge_flags(nir_shader *nir)
{
   if (nir->info.stage != MESA_SHADER_VERTEX) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   nir_variable *var = nir_find_variable_with_location(nir, nir_var_shader_out,
                                                       VARYING_SLOT_EDGE);
   if (!var) {
      nir_shader_preserve_all_metadata(nir);
      return false;
   }

   var->data.mode = nir_var_shader_temp;
   nir->info.outputs_written &= ~VARYING_BIT_EDGE;
   nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
   nir_fixup_deref_modes(nir);

   nir_foreach_function(f, nir) {
      if (f->impl) {
         nir_metadata_preserve(f->impl, nir_metadata_block_index |
                                        nir_metadata_dominance |
                                        nir_metadata_live_ssa_defs |
                                        nir_metadata_loop_analysis);
      } else {
         nir_metadata_preserve(f->impl, nir_metadata_all);
      }
   }

   return true;
}
/**
 * Fix an uncompiled shader's stream output info.
 *
 * Core Gallium stores output->register_index as a "slot" number, where
 * slots are assigned consecutively to all outputs in info->outputs_written.
 * This naive packing of outputs doesn't work for us - we too have slots,
 * but the layout is defined by the VUE map, which we won't have until we
 * compile a specific shader variant.  So, we remap these and simply store
 * VARYING_SLOT_* in our copy's output->register_index fields.
 *
 * We also fix up VARYING_SLOT_{LAYER,VIEWPORT,PSIZ} to select the Y/Z/W
 * components of our VUE header.  See brw_vue_map.c for the layout.
 */
static void
update_so_info(struct pipe_stream_output_info *so_info,
               uint64_t outputs_written)
{
   uint8_t reverse_map[64] = {};
   unsigned slot = 0;
   while (outputs_written) {
      reverse_map[slot++] = u_bit_scan64(&outputs_written);
   }

   for (unsigned i = 0; i < so_info->num_outputs; i++) {
      struct pipe_stream_output *output = &so_info->output[i];

      /* Map Gallium's condensed "slots" back to real VARYING_SLOT_* enums */
      output->register_index = reverse_map[output->register_index];

      /* The VUE header contains three scalar fields packed together:
       * - gl_PointSize is stored in VARYING_SLOT_PSIZ.w
       * - gl_Layer is stored in VARYING_SLOT_PSIZ.y
       * - gl_ViewportIndex is stored in VARYING_SLOT_PSIZ.z
       */
      switch (output->register_index) {
      case VARYING_SLOT_LAYER:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 1;
         break;
      case VARYING_SLOT_VIEWPORT:
         assert(output->num_components == 1);
         output->register_index = VARYING_SLOT_PSIZ;
         output->start_component = 2;
         break;
      case VARYING_SLOT_PSIZ:
         assert(output->num_components == 1);
         output->start_component = 3;
         break;
      }

      //info->outputs_written |= 1ull << output->register_index;
   }
}
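
/*
 * Illustrative example: if outputs_written is
 * VARYING_BIT_POS | VARYING_BIT_PSIZ | VARYING_BIT_VAR0, reverse_map becomes
 * { VARYING_SLOT_POS, VARYING_SLOT_PSIZ, VARYING_SLOT_VAR0, ... }.  A stream
 * output with register_index 1 is therefore remapped to VARYING_SLOT_PSIZ,
 * and since gl_PointSize lives in the W component of that VUE slot, its
 * start_component is set to 3.
 */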
static void
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
                        unsigned offset, unsigned n)
{
   assert(offset % sizeof(uint32_t) == 0);

   for (unsigned i = 0; i < n; ++i)
      sysvals[i] = BRW_PARAM_IMAGE(idx, offset / sizeof(uint32_t) + i);

   for (unsigned i = n; i < 4; ++i)
      sysvals[i] = BRW_PARAM_BUILTIN_ZERO;
}
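
/*
 * For example, setup_vec4_image_sysval(sv, idx, o_bytes, 2) fills sv[0..3]
 * with { BRW_PARAM_IMAGE(idx, o), BRW_PARAM_IMAGE(idx, o + 1),
 * BRW_PARAM_BUILTIN_ZERO, BRW_PARAM_BUILTIN_ZERO }, where o is the dword
 * offset of the field within struct brw_image_param.
 */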
/**
 * Associate NIR uniform variables with the prog_data->param[] mechanism
 * used by the backend.  Also, decide which UBOs we'd like to push in an
 * ideal situation (though the backend can reduce this).
 */
static void
iris_setup_uniforms(const struct brw_compiler *compiler,
                    void *mem_ctx,
                    nir_shader *nir,
                    struct brw_stage_prog_data *prog_data,
                    unsigned kernel_input_size,
                    enum brw_param_builtin **out_system_values,
                    unsigned *out_num_system_values,
                    unsigned *out_num_cbufs)
{
   UNUSED const struct gen_device_info *devinfo = compiler->devinfo;

   unsigned system_values_start = ALIGN(kernel_input_size, sizeof(uint32_t));

   const unsigned IRIS_MAX_SYSTEM_VALUES =
      PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
   enum brw_param_builtin *system_values =
      rzalloc_array(mem_ctx, enum brw_param_builtin, IRIS_MAX_SYSTEM_VALUES);
   unsigned num_system_values = 0;

   unsigned patch_vert_idx = -1;
   unsigned ucp_idx[IRIS_MAX_CLIP_PLANES];
   unsigned img_idx[PIPE_MAX_SHADER_IMAGES];
   unsigned variable_group_size_idx = -1;
   memset(ucp_idx, -1, sizeof(ucp_idx));
   memset(img_idx, -1, sizeof(img_idx));

   nir_function_impl *impl = nir_shader_get_entrypoint(nir);

   nir_builder b;
   nir_builder_init(&b, impl);

   b.cursor = nir_before_block(nir_start_block(impl));
   nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
   nir_ssa_def *temp_const_ubo_name = NULL;

   /* Turn system value intrinsics into uniforms */
   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         nir_ssa_def *offset;

         switch (intrin->intrinsic) {
         case nir_intrinsic_load_constant: {
            /* This one is special because it reads from the shader constant
             * data and not cbuf0 which gallium uploads for us.
             */
            b.cursor = nir_before_instr(instr);
            nir_ssa_def *offset =
               nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
                            nir_intrinsic_base(intrin));

            if (temp_const_ubo_name == NULL)
               temp_const_ubo_name = nir_imm_int(&b, 0);

            nir_intrinsic_instr *load_ubo =
               nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
            load_ubo->num_components = intrin->num_components;
            load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
            load_ubo->src[1] = nir_src_for_ssa(offset);
            nir_intrinsic_set_align(load_ubo,
                                    nir_intrinsic_align_mul(intrin),
                                    nir_intrinsic_align_offset(intrin));
            nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
                              intrin->dest.ssa.num_components,
                              intrin->dest.ssa.bit_size,
                              intrin->dest.ssa.name);
            nir_builder_instr_insert(&b, &load_ubo->instr);

            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load_ubo->dest.ssa));
            nir_instr_remove(&intrin->instr);
            continue;
         }
         case nir_intrinsic_load_user_clip_plane: {
            unsigned ucp = nir_intrinsic_ucp_id(intrin);

            if (ucp_idx[ucp] == -1) {
               ucp_idx[ucp] = num_system_values;
               num_system_values += 4;
            }

            for (int i = 0; i < 4; i++) {
               system_values[ucp_idx[ucp] + i] =
                  BRW_PARAM_BUILTIN_CLIP_PLANE(ucp, i);
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, system_values_start +
                                     ucp_idx[ucp] * sizeof(uint32_t));
            break;
         }
         case nir_intrinsic_load_patch_vertices_in:
            if (patch_vert_idx == -1)
               patch_vert_idx = num_system_values++;

            system_values[patch_vert_idx] =
               BRW_PARAM_BUILTIN_PATCH_VERTICES_IN;

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, system_values_start +
                                     patch_vert_idx * sizeof(uint32_t));
            break;
         case nir_intrinsic_image_deref_load_param_intel: {
            assert(devinfo->gen < 9);
            nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
            nir_variable *var = nir_deref_instr_get_variable(deref);

            if (img_idx[var->data.binding] == -1) {
               /* GL only allows arrays of arrays of images. */
               assert(glsl_type_is_image(glsl_without_array(var->type)));
               unsigned num_images = MAX2(1, glsl_get_aoa_size(var->type));

               for (int i = 0; i < num_images; i++) {
                  const unsigned img = var->data.binding + i;

                  img_idx[img] = num_system_values;
                  num_system_values += BRW_IMAGE_PARAM_SIZE;

                  uint32_t *img_sv = &system_values[img_idx[img]];

                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_OFFSET_OFFSET, img,
                     offsetof(struct brw_image_param, offset), 2);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_SIZE_OFFSET, img,
                     offsetof(struct brw_image_param, size), 3);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_STRIDE_OFFSET, img,
                     offsetof(struct brw_image_param, stride), 4);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_TILING_OFFSET, img,
                     offsetof(struct brw_image_param, tiling), 3);
                  setup_vec4_image_sysval(
                     img_sv + BRW_IMAGE_PARAM_SWIZZLING_OFFSET, img,
                     offsetof(struct brw_image_param, swizzling), 2);
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_iadd(&b,
               get_aoa_deref_offset(&b, deref, BRW_IMAGE_PARAM_SIZE * 4),
               nir_imm_int(&b, system_values_start +
                                img_idx[var->data.binding] * 4 +
                                nir_intrinsic_base(intrin) * 16));
            break;
         }
         case nir_intrinsic_load_local_group_size: {
            assert(nir->info.cs.local_size_variable);
            if (variable_group_size_idx == -1) {
               variable_group_size_idx = num_system_values;
               num_system_values += 3;
               for (int i = 0; i < 3; i++) {
                  system_values[variable_group_size_idx + i] =
                     BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X + i;
               }
            }

            b.cursor = nir_before_instr(instr);
            offset = nir_imm_int(&b, system_values_start +
                                     variable_group_size_idx * sizeof(uint32_t));
            break;
         }
         case nir_intrinsic_load_kernel_input: {
            assert(nir_intrinsic_base(intrin) +
                   nir_intrinsic_range(intrin) <= kernel_input_size);
            b.cursor = nir_before_instr(instr);
            offset = nir_iadd_imm(&b, intrin->src[0].ssa,
                                      nir_intrinsic_base(intrin));
            break;
         }
         default:
            continue;
         }

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(nir, nir_intrinsic_load_ubo);
         load->num_components = intrin->dest.ssa.num_components;
         load->src[0] = nir_src_for_ssa(temp_ubo_name);
         load->src[1] = nir_src_for_ssa(offset);
         nir_intrinsic_set_align(load, 4, 0);
         nir_ssa_dest_init(&load->instr, &load->dest,
                           intrin->dest.ssa.num_components,
                           intrin->dest.ssa.bit_size, NULL);
         nir_builder_instr_insert(&b, &load->instr);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&load->dest.ssa));
         nir_instr_remove(instr);
      }
   }

   nir_validate_shader(nir, "before remapping");

   /* Uniforms are stored in constant buffer 0, and the user-facing UBOs are
    * indexed starting at one.  So if any constant buffer is needed, constant
    * buffer 0 will be needed, so account for it.
    */
   unsigned num_cbufs = nir->info.num_ubos;
   if (num_cbufs || nir->num_uniforms)
      num_cbufs++;

   /* Place the new params in a new cbuf. */
   if (num_system_values > 0 || kernel_input_size > 0) {
      unsigned sysval_cbuf_index = num_cbufs;
      num_cbufs++;

      system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
                               num_system_values);

      nir_foreach_block(block, impl) {
         nir_foreach_instr_safe(instr, block) {
            if (instr->type != nir_instr_type_intrinsic)
               continue;

            nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

            if (load->intrinsic != nir_intrinsic_load_ubo)
               continue;

            b.cursor = nir_before_instr(instr);

            assert(load->src[0].is_ssa);

            if (load->src[0].ssa == temp_ubo_name) {
               nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
               nir_instr_rewrite_src(instr, &load->src[0],
                                     nir_src_for_ssa(imm));
            }
         }
      }

      /* We need to fold the new iadds for brw_nir_analyze_ubo_ranges */
      nir_opt_constant_folding(nir);
   } else {
      ralloc_free(system_values);
      system_values = NULL;
   }

   assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
   nir_validate_shader(nir, "after remap");

   /* We don't use params[] but gallium leaves num_uniforms set.  We use this
    * to detect when cbuf0 exists but we don't need it anymore when we get
    * here.  Instead, zero it out so that the back-end doesn't get confused
    * when nr_params * 4 != num_uniforms != nr_params * 4.
    */
   nir->num_uniforms = 0;

   /* Constant loads (if any) need to go at the end of the constant buffers so
    * we need to know num_cbufs before we can lower to them.
    */
   if (temp_const_ubo_name != NULL) {
      nir_load_const_instr *const_ubo_index =
         nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
      assert(const_ubo_index->def.bit_size == 32);
      const_ubo_index->value[0].u32 = num_cbufs;
   }

   *out_system_values = system_values;
   *out_num_system_values = num_system_values;
   *out_num_cbufs = num_cbufs;
}
static const char *surface_group_names[] = {
   [IRIS_SURFACE_GROUP_RENDER_TARGET]      = "render target",
   [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
   [IRIS_SURFACE_GROUP_CS_WORK_GROUPS]     = "CS work groups",
   [IRIS_SURFACE_GROUP_TEXTURE]            = "texture",
   [IRIS_SURFACE_GROUP_UBO]                = "ubo",
   [IRIS_SURFACE_GROUP_SSBO]               = "ssbo",
   [IRIS_SURFACE_GROUP_IMAGE]              = "image",
};
static void
iris_print_binding_table(FILE *fp, const char *name,
                         const struct iris_binding_table *bt)
{
   STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);

   uint32_t total = 0;
   uint32_t compacted = 0;

   for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
      uint32_t size = bt->sizes[i];
      total += size;
      if (size)
         compacted += util_bitcount64(bt->used_mask[i]);
   }

   if (total == 0) {
      fprintf(fp, "Binding table for %s is empty\n\n", name);
      return;
   }

   if (total != compacted) {
      fprintf(fp, "Binding table for %s "
              "(compacted to %u entries from %u entries)\n",
              name, compacted, total);
   } else {
      fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
   }

   uint32_t entry = 0;
   for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
      uint64_t mask = bt->used_mask[i];
      while (mask) {
         int index = u_bit_scan64(&mask);
         fprintf(fp, "  [%u] %s #%d\n", entry++, surface_group_names[i], index);
      }
   }
}
enum {
   /* Max elements in a surface group. */
   SURFACE_GROUP_MAX_ELEMENTS = 64,
};
/**
 * Map a <group, index> pair to a binding table index.
 *
 * For example: <UBO, 5> => binding table index 12
 */
uint32_t
iris_group_index_to_bti(const struct iris_binding_table *bt,
                        enum iris_surface_group group, uint32_t index)
{
   assert(index < bt->sizes[group]);
   uint64_t mask = bt->used_mask[group];
   uint64_t bit = 1ull << index;
   if (bit & mask) {
      return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
   } else {
      return IRIS_SURFACE_NOT_USED;
   }
}
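
/*
 * Illustrative example: with offsets[group] == 7 and
 * used_mask[group] == 0b101101, index 3 is used and maps to
 * 7 + util_bitcount64(0b000111 & 0b101101) = 7 + 2 = 9, while index 1 is
 * not in the mask and yields IRIS_SURFACE_NOT_USED.
 */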
/**
 * Map a binding table index back to a <group, index> pair.
 *
 * For example: binding table index 12 => <UBO, 5>
 */
uint32_t
iris_bti_to_group_index(const struct iris_binding_table *bt,
                        enum iris_surface_group group, uint32_t bti)
{
   uint64_t used_mask = bt->used_mask[group];
   assert(bti >= bt->offsets[group]);

   uint32_t c = bti - bt->offsets[group];
   while (used_mask) {
      int i = u_bit_scan64(&used_mask);
      if (c == 0)
         return i;
      c--;
   }

   return IRIS_SURFACE_NOT_USED;
}
static void
rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
                     nir_instr *instr, nir_src *src,
                     enum iris_surface_group group)
{
   assert(bt->sizes[group] > 0);

   b->cursor = nir_before_instr(instr);
   nir_ssa_def *bti;
   if (nir_src_is_const(*src)) {
      uint32_t index = nir_src_as_uint(*src);
      bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
                           src->ssa->bit_size);
   } else {
      /* Indirect usage makes all the surfaces of the group to be available,
       * so we can just add the base.
       */
      assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
      bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
   }
   nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
}
static void
mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
                   enum iris_surface_group group)
{
   assert(bt->sizes[group] > 0);

   if (nir_src_is_const(*src)) {
      uint64_t index = nir_src_as_uint(*src);
      assert(index < bt->sizes[group]);
      bt->used_mask[group] |= 1ull << index;
   } else {
      /* There's an indirect usage, we need all the surfaces. */
      bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
   }
}
static bool
skip_compacting_binding_tables(void)
{
   static int skip = -1;
   if (skip < 0)
      skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
   return skip;
}
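
/*
 * Usage note: setting INTEL_DISABLE_COMPACT_BINDING_TABLE=true in the
 * environment makes iris_setup_binding_table() below mark every slot of
 * every surface group as used, so no compaction happens and binding table
 * indices line up with the uncompacted group layout.
 */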
/**
 * Set up the binding table indices and apply to the shader.
 */
static void
iris_setup_binding_table(const struct gen_device_info *devinfo,
                         struct nir_shader *nir,
                         struct iris_binding_table *bt,
                         unsigned num_render_targets,
                         unsigned num_system_values,
                         unsigned num_cbufs)
{
   const struct shader_info *info = &nir->info;

   memset(bt, 0, sizeof(*bt));

   /* Set the sizes for each surface group.  For some groups, we already know
    * upfront how many will be used, so mark them.
    */
   if (info->stage == MESA_SHADER_FRAGMENT) {
      bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
      /* All render targets used. */
      bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
         BITFIELD64_MASK(num_render_targets);

      /* Setup render target read surface group in order to support
       * non-coherent framebuffer fetch on Gen8.
       */
      if (devinfo->gen == 8 && info->outputs_read) {
         bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
         bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
            BITFIELD64_MASK(num_render_targets);
      }
   } else if (info->stage == MESA_SHADER_COMPUTE) {
      bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
   }

   bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
   bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;

   bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;

   /* Allocate an extra slot in the UBO section for NIR constants.
    * Binding table compaction will remove it if unnecessary.
    *
    * We don't include them in iris_compiled_shader::num_cbufs because
    * they are uploaded separately from shs->constbuf[], but from a shader
    * point of view, they're another UBO (at the end of the section).
    */
   bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;

   bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;

   for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
      assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);

   /* Mark surfaces used for the cases we don't have the information available
    * upfront.
    */
   nir_function_impl *impl = nir_shader_get_entrypoint(nir);
   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_load_num_work_groups:
            bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
            break;

         case nir_intrinsic_load_output:
            if (devinfo->gen == 8) {
               mark_used_with_src(bt, &intrin->src[0],
                                  IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic_add:
         case nir_intrinsic_image_atomic_imin:
         case nir_intrinsic_image_atomic_umin:
         case nir_intrinsic_image_atomic_imax:
         case nir_intrinsic_image_atomic_umax:
         case nir_intrinsic_image_atomic_and:
         case nir_intrinsic_image_atomic_or:
         case nir_intrinsic_image_atomic_xor:
         case nir_intrinsic_image_atomic_exchange:
         case nir_intrinsic_image_atomic_comp_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_get_buffer_size:
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_ssbo_atomic_imin:
         case nir_intrinsic_ssbo_atomic_umin:
         case nir_intrinsic_ssbo_atomic_imax:
         case nir_intrinsic_ssbo_atomic_umax:
         case nir_intrinsic_ssbo_atomic_and:
         case nir_intrinsic_ssbo_atomic_or:
         case nir_intrinsic_ssbo_atomic_xor:
         case nir_intrinsic_ssbo_atomic_exchange:
         case nir_intrinsic_ssbo_atomic_comp_swap:
         case nir_intrinsic_ssbo_atomic_fmin:
         case nir_intrinsic_ssbo_atomic_fmax:
         case nir_intrinsic_ssbo_atomic_fcomp_swap:
         case nir_intrinsic_load_ssbo:
            mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }

   /* When disabled we just mark everything as used. */
   if (unlikely(skip_compacting_binding_tables())) {
      for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
         bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
   }

   /* Calculate the offsets and the binding table size based on the used
    * surfaces.  After this point, the functions to go between "group indices"
    * and binding table indices can be used.
    */
   uint32_t next = 0;
   for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
      if (bt->used_mask[i] != 0) {
         bt->offsets[i] = next;
         next += util_bitcount64(bt->used_mask[i]);
      }
   }
   bt->size_bytes = next * 4;

   if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
      iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
   }

   /* Apply the binding table indices.  The backend compiler is not expected
    * to change those, as we haven't set any of the *_start entries in brw
    * binding tables.
    */
   nir_builder b;
   nir_builder_init(&b, impl);

   nir_foreach_block (block, impl) {
      nir_foreach_instr (instr, block) {
         if (instr->type == nir_instr_type_tex) {
            nir_tex_instr *tex = nir_instr_as_tex(instr);
            tex->texture_index =
               iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
                                       tex->texture_index);
            continue;
         }

         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
         switch (intrin->intrinsic) {
         case nir_intrinsic_image_size:
         case nir_intrinsic_image_load:
         case nir_intrinsic_image_store:
         case nir_intrinsic_image_atomic_add:
         case nir_intrinsic_image_atomic_imin:
         case nir_intrinsic_image_atomic_umin:
         case nir_intrinsic_image_atomic_imax:
         case nir_intrinsic_image_atomic_umax:
         case nir_intrinsic_image_atomic_and:
         case nir_intrinsic_image_atomic_or:
         case nir_intrinsic_image_atomic_xor:
         case nir_intrinsic_image_atomic_exchange:
         case nir_intrinsic_image_atomic_comp_swap:
         case nir_intrinsic_image_load_raw_intel:
         case nir_intrinsic_image_store_raw_intel:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 IRIS_SURFACE_GROUP_IMAGE);
            break;

         case nir_intrinsic_load_ubo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 IRIS_SURFACE_GROUP_UBO);
            break;

         case nir_intrinsic_store_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
                                 IRIS_SURFACE_GROUP_SSBO);
            break;

         case nir_intrinsic_load_output:
            if (devinfo->gen == 8) {
               rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                    IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
            }
            break;

         case nir_intrinsic_get_buffer_size:
         case nir_intrinsic_ssbo_atomic_add:
         case nir_intrinsic_ssbo_atomic_imin:
         case nir_intrinsic_ssbo_atomic_umin:
         case nir_intrinsic_ssbo_atomic_imax:
         case nir_intrinsic_ssbo_atomic_umax:
         case nir_intrinsic_ssbo_atomic_and:
         case nir_intrinsic_ssbo_atomic_or:
         case nir_intrinsic_ssbo_atomic_xor:
         case nir_intrinsic_ssbo_atomic_exchange:
         case nir_intrinsic_ssbo_atomic_comp_swap:
         case nir_intrinsic_ssbo_atomic_fmin:
         case nir_intrinsic_ssbo_atomic_fmax:
         case nir_intrinsic_ssbo_atomic_fcomp_swap:
         case nir_intrinsic_load_ssbo:
            rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
                                 IRIS_SURFACE_GROUP_SSBO);
            break;

         default:
            break;
         }
      }
   }
}
static void
iris_debug_recompile(struct iris_context *ice,
                     struct shader_info *info,
                     const struct brw_base_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   const struct brw_compiler *c = screen->compiler;

   c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
                      _mesa_shader_stage_to_string(info->stage),
                      info->name ? info->name : "(no identifier)",
                      info->label ? info->label : "");

   const void *old_iris_key =
      iris_find_previous_compile(ice, info->stage, key->program_string_id);

   union brw_any_prog_key old_key;

   switch (info->stage) {
   case MESA_SHADER_VERTEX:
      old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
      break;
   case MESA_SHADER_TESS_CTRL:
      old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
      break;
   case MESA_SHADER_TESS_EVAL:
      old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
      break;
   case MESA_SHADER_GEOMETRY:
      old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
      break;
   case MESA_SHADER_FRAGMENT:
      old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
      break;
   case MESA_SHADER_COMPUTE:
      old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
      break;
   default:
      unreachable("invalid shader stage");
   }

   brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
}
/**
 * Get the shader for the last enabled geometry stage.
 *
 * This stage is the one which will feed stream output and the rasterizer.
 */
static gl_shader_stage
last_vue_stage(struct iris_context *ice)
{
   if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      return MESA_SHADER_GEOMETRY;

   if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      return MESA_SHADER_TESS_EVAL;

   return MESA_SHADER_VERTEX;
}
/**
 * Compile a vertex shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_vs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct iris_vs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_vs_prog_data *vs_prog_data =
      rzalloc(mem_ctx, struct brw_vs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &vs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->vue.nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
                        true, false, NULL);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   prog_data->use_alt_mode = ish->use_alt_mode;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
                       &num_system_values, &num_cbufs);

   struct iris_binding_table bt;
   iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                            num_system_values, num_cbufs);

   brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader, /* pos_slots */ 1);

   struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
                     nir, -1, NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile vertex shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, &brw_key.base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      screen->vtbl.create_so_decl_list(&ish->stream_output,
                                       &vue_prog_data->vue_map);

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}
/**
 * Update the current vertex shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_vs(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_VERTEX];

   struct iris_vs_prog_key key = { KEY_ID(vue.base) };
   screen->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);

   if (!shader)
      shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = iris_compile_vs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_VS] = shader;
      ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_VS |
                                IRIS_STAGE_DIRTY_BINDINGS_VS |
                                IRIS_STAGE_DIRTY_CONSTANTS_VS;
      shs->sysvals_need_upload = true;

      const struct brw_vs_prog_data *vs_prog_data =
         (void *) shader->prog_data;
      const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
                                    vs_prog_data->uses_baseinstance;
      const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
                                            vs_prog_data->uses_is_indexed_draw;
      const bool needs_sgvs_element = uses_draw_params ||
                                      vs_prog_data->uses_instanceid ||
                                      vs_prog_data->uses_vertexid;

      if (ice->state.vs_uses_draw_params != uses_draw_params ||
          ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
          ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
         ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
                             IRIS_DIRTY_VERTEX_ELEMENTS;
      }

      ice->state.vs_uses_draw_params = uses_draw_params;
      ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
      ice->state.vs_needs_sgvs_element = needs_sgvs_element;
      ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
   }
}
/**
 * Get the shader_info for a given stage, or NULL if the stage is disabled.
 */
const struct shader_info *
iris_get_shader_info(const struct iris_context *ice, gl_shader_stage stage)
{
   const struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[stage];

   if (!ish)
      return NULL;

   const nir_shader *nir = ish->nir;
   return &nir->info;
}
/**
 * Get the union of TCS output and TES input slots.
 *
 * TCS and TES need to agree on a common URB entry layout.  In particular,
 * the data for all patch vertices is stored in a single URB entry (unlike
 * GS which has one entry per input vertex).  This means that per-vertex
 * array indexing needs a stride.
 *
 * SSO requires locations to match, but doesn't require the number of
 * outputs/inputs to match (in fact, the TCS often has extra outputs).
 * So, we need to take the extra step of unifying these on the fly.
 */
static void
get_unified_tess_slots(const struct iris_context *ice,
                       uint64_t *per_vertex_slots,
                       uint32_t *per_patch_slots)
{
   const struct shader_info *tcs =
      iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
   const struct shader_info *tes =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);

   *per_vertex_slots = tes->inputs_read;
   *per_patch_slots = tes->patch_inputs_read;

   if (tcs) {
      *per_vertex_slots |= tcs->outputs_written;
      *per_patch_slots |= tcs->patch_outputs_written;
   }
}
/**
 * Compile a tessellation control shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_tcs(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct iris_tcs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct nir_shader_compiler_options *options =
      compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tcs_prog_data *tcs_prog_data =
      rzalloc(mem_ctx, struct brw_tcs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   const struct gen_device_info *devinfo = &screen->devinfo;
   enum brw_param_builtin *system_values = NULL;
   unsigned num_system_values = 0;
   unsigned num_cbufs = 0;

   nir_shader *nir;

   struct iris_binding_table bt;

   struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);

   if (ish) {
      nir = nir_shader_clone(mem_ctx, ish->nir);

      iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
                          &num_system_values, &num_cbufs);
      iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                               num_system_values, num_cbufs);
      brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
   } else {
      nir =
         brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);

      /* Reserve space for passing the default tess levels as constants. */
      num_cbufs = 1;
      num_system_values = 8;
      system_values =
         rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
      prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
      prog_data->nr_params = num_system_values;

      if (key->tes_primitive_mode == GL_QUADS) {
         for (int i = 0; i < 4; i++)
            system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;

         system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
         system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
      } else if (key->tes_primitive_mode == GL_TRIANGLES) {
         for (int i = 0; i < 3; i++)
            system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;

         system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
      } else {
         assert(key->tes_primitive_mode == GL_ISOLINES);
         system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
         system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
      }

      /* Manually setup the TCS binding table. */
      memset(&bt, 0, sizeof(bt));
      bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
      bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;

      prog_data->ubo_ranges[0].length = 1;
   }

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
                      nir, -1, NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile control shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish) {
      if (ish->compiled_once) {
         iris_debug_recompile(ice, &nir->info, &brw_key.base);
      } else {
         ish->compiled_once = true;
      }
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}
/**
 * Update the current tessellation control shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tcs(struct iris_context *ice)
{
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
   struct iris_uncompiled_shader *tcs =
      ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;

   const struct shader_info *tes_info =
      iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
   struct iris_tcs_prog_key key = {
      .vue.base.program_string_id = tcs ? tcs->program_id : 0,
      .tes_primitive_mode = tes_info->tess.primitive_mode,
      .input_vertices =
         !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
      .quads_workaround = devinfo->gen < 9 &&
                          tes_info->tess.primitive_mode == GL_QUADS &&
                          tes_info->tess.spacing == TESS_SPACING_EQUAL,
   };
   get_unified_tess_slots(ice, &key.outputs_written,
                          &key.patch_outputs_written);
   screen->vtbl.populate_tcs_key(ice, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TCS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);

   if (tcs && !shader)
      shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));

   if (!shader)
      shader = iris_compile_tcs(ice, tcs, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_TCS] = shader;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TCS |
                                IRIS_STAGE_DIRTY_BINDINGS_TCS |
                                IRIS_STAGE_DIRTY_CONSTANTS_TCS;
      shs->sysvals_need_upload = true;
   }
}
/**
 * Compile a tessellation evaluation shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_tes(struct iris_context *ice,
                 struct iris_uncompiled_shader *ish,
                 const struct iris_tes_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_tes_prog_data *tes_prog_data =
      rzalloc(mem_ctx, struct brw_tes_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   const struct gen_device_info *devinfo = &screen->devinfo;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->vue.nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
                        true, false, NULL);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
                       &num_system_values, &num_cbufs);

   struct iris_binding_table bt;
   iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                            num_system_values, num_cbufs);

   brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   struct brw_vue_map input_vue_map;
   brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
                            key->patch_inputs_read);

   struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
                      tes_prog_data, nir, -1, NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, &brw_key.base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      screen->vtbl.create_so_decl_list(&ish->stream_output,
                                       &vue_prog_data->vue_map);

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}
/**
 * Update the current tessellation evaluation shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_tes(struct iris_context *ice)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];

   struct iris_tes_prog_key key = { KEY_ID(vue.base) };
   get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
   screen->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);

   if (!shader)
      shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = iris_compile_tes(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_TES] = shader;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_TES |
                                IRIS_STAGE_DIRTY_BINDINGS_TES |
                                IRIS_STAGE_DIRTY_CONSTANTS_TES;
      shs->sysvals_need_upload = true;
   }

   /* TODO: Could compare and avoid flagging this. */
   const struct shader_info *tes_info = &ish->nir->info;
   if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TES;
      ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
   }
}
/**
 * Compile a geometry shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_gs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct iris_gs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   const struct gen_device_info *devinfo = &screen->devinfo;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_gs_prog_data *gs_prog_data =
      rzalloc(mem_ctx, struct brw_gs_prog_data);
   struct brw_vue_prog_data *vue_prog_data = &gs_prog_data->base;
   struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
   enum brw_param_builtin *system_values;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   if (key->vue.nr_userclip_plane_consts) {
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
                        false, NULL);
      nir_lower_io_to_temporaries(nir, impl, true, false);
      nir_lower_global_vars_to_local(nir);
      nir_lower_vars_to_ssa(nir);
      nir_shader_gather_info(nir, impl);
   }

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
                       &num_system_values, &num_cbufs);

   struct iris_binding_table bt;
   iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                            num_system_values, num_cbufs);

   brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   brw_compute_vue_map(devinfo,
                       &vue_prog_data->vue_map, nir->info.outputs_written,
                       nir->info.separate_shader, /* pos_slots */ 1);

   struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
                     nir, NULL, -1, NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile geometry shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, &brw_key.base);
   } else {
      ish->compiled_once = true;
   }

   uint32_t *so_decls =
      screen->vtbl.create_so_decl_list(&ish->stream_output,
                                       &vue_prog_data->vue_map);

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
                         prog_data, so_decls, system_values, num_system_values,
                         num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}
/**
 * Update the current geometry shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_gs(struct iris_context *ice)
{
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
   struct iris_compiled_shader *shader = NULL;
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;

   if (ish) {
      struct iris_gs_prog_key key = { KEY_ID(vue.base) };
      screen->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);

      shader =
         iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);

      if (!shader)
         shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));

      if (!shader)
         shader = iris_compile_gs(ice, ish, &key);
   }

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_GS] = shader;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_GS |
                                IRIS_STAGE_DIRTY_BINDINGS_GS |
                                IRIS_STAGE_DIRTY_CONSTANTS_GS;
      shs->sysvals_need_upload = true;
   }
}
/**
 * Compile a fragment (pixel) shader, and upload the assembly.
 */
static struct iris_compiled_shader *
iris_compile_fs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct iris_fs_prog_key *key,
                struct brw_vue_map *vue_map)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_wm_prog_data *fs_prog_data =
      rzalloc(mem_ctx, struct brw_wm_prog_data);
   struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
   enum brw_param_builtin *system_values;
   const struct gen_device_info *devinfo = &screen->devinfo;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   prog_data->use_alt_mode = ish->use_alt_mode;

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, 0, &system_values,
                       &num_system_values, &num_cbufs);

   /* Lower output variables to load_output intrinsics before setting up
    * binding tables, so iris_setup_binding_table can map any load_output
    * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
    * non-coherent framebuffer fetches.
    */
   brw_nir_lower_fs_outputs(nir);

   /* On Gen11+, shader RT write messages have a "Null Render Target" bit
    * and do not need a binding table entry with a null surface.  Earlier
    * generations need an entry for a null surface.
    */
   int null_rts = devinfo->gen < 11 ? 1 : 0;

   struct iris_binding_table bt;
   iris_setup_binding_table(devinfo, nir, &bt,
                            MAX2(key->nr_color_regions, null_rts),
                            num_system_values, num_cbufs);

   brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);

   struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
                     nir, -1, -1, -1, true, false, vue_map,
                     NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile fragment shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, &brw_key.base);
   } else {
      ish->compiled_once = true;
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);
   return shader;
}
/**
 * Update the current fragment shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */
static void
iris_update_compiled_fs(struct iris_context *ice)
{
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct iris_fs_prog_key key = { KEY_ID(base) };
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   screen->vtbl.populate_fs_key(ice, &ish->nir->info, &key);

   if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
      key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_FS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);

   if (!shader)
      shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);

   if (old != shader) {
      // XXX: only need to flag CLIP if barycentric has NONPERSPECTIVE
      // toggles.  might be able to avoid flagging SBE too.
      ice->shaders.prog[IRIS_CACHE_FS] = shader;
      ice->state.dirty |= IRIS_DIRTY_WM |
                          IRIS_DIRTY_CLIP |
                          IRIS_DIRTY_SBE;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS |
                                IRIS_STAGE_DIRTY_BINDINGS_FS |
                                IRIS_STAGE_DIRTY_CONSTANTS_FS;
      shs->sysvals_need_upload = true;
   }
}
1779 * Update the last enabled stage's VUE map.
1781 * When the shader feeding the rasterizer's output interface changes, we
1782 * need to re-emit various packets.
1785 update_last_vue_map(struct iris_context
*ice
,
1786 struct brw_stage_prog_data
*prog_data
)
1788 struct brw_vue_prog_data
*vue_prog_data
= (void *) prog_data
;
1789 struct brw_vue_map
*vue_map
= &vue_prog_data
->vue_map
;
1790 struct brw_vue_map
*old_map
= ice
->shaders
.last_vue_map
;
1791 const uint64_t changed_slots
=
1792 (old_map
? old_map
->slots_valid
: 0ull) ^ vue_map
->slots_valid
;
1794 if (changed_slots
& VARYING_BIT_VIEWPORT
) {
1795 ice
->state
.num_viewports
=
1796 (vue_map
->slots_valid
& VARYING_BIT_VIEWPORT
) ? IRIS_MAX_VIEWPORTS
: 1;
1797 ice
->state
.dirty
|= IRIS_DIRTY_CLIP
|
1798 IRIS_DIRTY_SF_CL_VIEWPORT
|
1799 IRIS_DIRTY_CC_VIEWPORT
|
1800 IRIS_DIRTY_SCISSOR_RECT
;
1801 ice
->state
.stage_dirty
|= IRIS_STAGE_DIRTY_UNCOMPILED_FS
|
1802 ice
->state
.stage_dirty_for_nos
[IRIS_NOS_LAST_VUE_MAP
];
1805 if (changed_slots
|| (old_map
&& old_map
->separate
!= vue_map
->separate
)) {
1806 ice
->state
.dirty
|= IRIS_DIRTY_SBE
;
1809 ice
->shaders
.last_vue_map
= &vue_prog_data
->vue_map
;
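/**
 * Upload any missing constant buffer surface states for UBO pull loads.
 *
 * If the bound shader performs pull-constant loads from UBOs, every bound
 * constant buffer needs a surface state; flag the stage's bindings dirty
 * whenever a new descriptor is created.
 */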
static void
iris_update_pull_constant_descriptors(struct iris_context *ice,
                                      gl_shader_stage stage)
{
   struct iris_compiled_shader *shader = ice->shaders.prog[stage];

   if (!shader || !shader->prog_data->has_ubo_pull)
      return;

   struct iris_shader_state *shs = &ice->state.shaders[stage];
   bool any_new_descriptors =
      shader->num_system_values > 0 && shs->sysvals_need_upload;

   unsigned bound_cbufs = shs->bound_cbufs;

   while (bound_cbufs) {
      const int i = u_bit_scan(&bound_cbufs);
      struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
      struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
      if (!surf_state->res && cbuf->buffer) {
         iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
         any_new_descriptors = true;
      }
   }

   if (any_new_descriptors)
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
}

/**
 * Get the prog_data for a given stage, or NULL if the stage is disabled.
 */
static struct brw_vue_prog_data *
get_vue_prog_data(struct iris_context *ice, gl_shader_stage stage)
{
   if (!ice->shaders.prog[stage])
      return NULL;

   return (void *) ice->shaders.prog[stage]->prog_data;
}

/**
 * Update the current shader variants for the given state.
 *
 * This should be called on every draw call to ensure that the correct
 * shaders are bound.  It will also flag any dirty state triggered by
 * swapping out those shaders.
 */
static void
iris_update_compiled_shaders(struct iris_context *ice)
{
   const uint64_t dirty = ice->state.dirty;
   const uint64_t stage_dirty = ice->state.stage_dirty;

   struct brw_vue_prog_data *old_prog_datas[4];
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++)
         old_prog_datas[i] = get_vue_prog_data(ice, i);
   }

   if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_TCS |
                      IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
      struct iris_uncompiled_shader *tes =
         ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
      if (tes) {
         iris_update_compiled_tcs(ice);
         iris_update_compiled_tes(ice);
      } else {
         ice->shaders.prog[IRIS_CACHE_TCS] = NULL;
         ice->shaders.prog[IRIS_CACHE_TES] = NULL;
         ice->state.stage_dirty |=
            IRIS_STAGE_DIRTY_TCS | IRIS_STAGE_DIRTY_TES |
            IRIS_STAGE_DIRTY_BINDINGS_TCS | IRIS_STAGE_DIRTY_BINDINGS_TES |
            IRIS_STAGE_DIRTY_CONSTANTS_TCS | IRIS_STAGE_DIRTY_CONSTANTS_TES;
      }
   }

   if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_VS)
      iris_update_compiled_vs(ice);
   if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_GS)
      iris_update_compiled_gs(ice);

   if (stage_dirty & (IRIS_STAGE_DIRTY_UNCOMPILED_GS |
                      IRIS_STAGE_DIRTY_UNCOMPILED_TES)) {
      const struct iris_compiled_shader *gs =
         ice->shaders.prog[MESA_SHADER_GEOMETRY];
      const struct iris_compiled_shader *tes =
         ice->shaders.prog[MESA_SHADER_TESS_EVAL];

      bool points_or_lines = false;

      if (gs) {
         const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
         points_or_lines =
            gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
            gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
      } else if (tes) {
         const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
         points_or_lines =
            tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
            tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
      }

      if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
         /* Outbound to XY Clip enables */
         ice->shaders.output_topology_is_points_or_lines = points_or_lines;
         ice->state.dirty |= IRIS_DIRTY_CLIP;
      }
   }

   gl_shader_stage last_stage = last_vue_stage(ice);
   struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
   struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
   update_last_vue_map(ice, shader->prog_data);
   if (ice->state.streamout != shader->streamout) {
      ice->state.streamout = shader->streamout;
      ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST | IRIS_DIRTY_STREAMOUT;
   }

   if (ice->state.streamout_active) {
      for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
         struct iris_stream_output_target *so =
            (void *) ice->state.so_target[i];
         if (so)
            so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
      }
   }

   if (stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_FS)
      iris_update_compiled_fs(ice);

   /* Changing shader interfaces may require a URB reconfiguration. */
   if (!(dirty & IRIS_DIRTY_URB)) {
      for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
         struct brw_vue_prog_data *old = old_prog_datas[i];
         struct brw_vue_prog_data *new = get_vue_prog_data(ice, i);
         if (!!old != !!new ||
             (new && new->urb_entry_size != old->urb_entry_size)) {
            ice->state.dirty |= IRIS_DIRTY_URB;
            break;
         }
      }
   }

   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
      if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << i))
         iris_update_pull_constant_descriptors(ice, i);
   }
}

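/**
 * Compile a compute shader, and upload the assembly.
 */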
static struct iris_compiled_shader *
iris_compile_cs(struct iris_context *ice,
                struct iris_uncompiled_shader *ish,
                const struct iris_cs_prog_key *key)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   const struct brw_compiler *compiler = screen->compiler;
   void *mem_ctx = ralloc_context(NULL);
   struct brw_cs_prog_data *cs_prog_data =
      rzalloc(mem_ctx, struct brw_cs_prog_data);
   struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
   enum brw_param_builtin *system_values;
   const struct gen_device_info *devinfo = &screen->devinfo;
   unsigned num_system_values;
   unsigned num_cbufs;

   nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);

   NIR_PASS_V(nir, brw_nir_lower_cs_intrinsics);

   iris_setup_uniforms(compiler, mem_ctx, nir, prog_data,
                       ish->kernel_input_size,
                       &system_values, &num_system_values, &num_cbufs);

   struct iris_binding_table bt;
   iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
                            num_system_values, num_cbufs);

   struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);

   char *error_str = NULL;
   const unsigned *program =
      brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
                     nir, -1, NULL, &error_str);
   if (program == NULL) {
      dbg_printf("Failed to compile compute shader: %s\n", error_str);
      ralloc_free(mem_ctx);
      return NULL;
   }

   if (ish->compiled_once) {
      iris_debug_recompile(ice, &nir->info, &brw_key.base);
   } else {
      ish->compiled_once = true;
   }

   struct iris_compiled_shader *shader =
      iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
                         prog_data, NULL, system_values, num_system_values,
                         ish->kernel_input_size, num_cbufs, &bt);

   iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));

   ralloc_free(mem_ctx);

   return shader;
}

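/**
 * Update the current compute shader variant.
 *
 * Fill out the key, look in the cache, compile and bind if needed.
 */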
static void
iris_update_compiled_cs(struct iris_context *ice)
{
   struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_COMPUTE];

   struct iris_cs_prog_key key = { KEY_ID(base) };
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   screen->vtbl.populate_cs_key(ice, &key);

   struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
   struct iris_compiled_shader *shader =
      iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);

   if (!shader)
      shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));

   if (!shader)
      shader = iris_compile_cs(ice, ish, &key);

   if (old != shader) {
      ice->shaders.prog[IRIS_CACHE_CS] = shader;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CS |
                                IRIS_STAGE_DIRTY_BINDINGS_CS |
                                IRIS_STAGE_DIRTY_CONSTANTS_CS;
      shs->sysvals_need_upload = true;
   }
}

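/**
 * Update the current compute shader variant and its pull constant
 * descriptors, if the relevant dirty bits are set.
 */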
void
iris_update_compiled_compute_shader(struct iris_context *ice)
{
   if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_UNCOMPILED_CS)
      iris_update_compiled_cs(ice);

   if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS)
      iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
}

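/**
 * Fill the compute shader's per-thread push constant buffer.
 *
 * The only per-thread push constant is the subgroup ID, so each thread's
 * slot simply receives its own thread index.
 */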
void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
                               unsigned threads,
                               uint32_t *dst)
{
   assert(brw_cs_push_const_total_size(cs_prog_data, threads) > 0);
   assert(cs_prog_data->push.cross_thread.size == 0);
   assert(cs_prog_data->push.per_thread.dwords == 1);
   assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
   for (unsigned t = 0; t < threads; t++)
      dst[8 * t] = t;
}

/**
 * Allocate scratch BOs as needed for the given per-thread size and stage.
 */
struct iris_bo *
iris_get_scratch_space(struct iris_context *ice,
                       unsigned per_thread_scratch,
                       gl_shader_stage stage)
{
   struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;
   const struct gen_device_info *devinfo = &screen->devinfo;

   unsigned encoded_size = ffs(per_thread_scratch) - 11;
   assert(encoded_size < (1 << 16));

   struct iris_bo **bop = &ice->shaders.scratch_bos[encoded_size][stage];

   /* The documentation for 3DSTATE_PS "Scratch Space Base Pointer" says:
    *
    *    "Scratch Space per slice is computed based on 4 sub-slices.  SW
    *     must allocate scratch space enough so that each slice has 4
    *     slices allowed."
    *
    * According to the other driver team, this applies to compute shaders
    * as well.  This is not currently documented at all.
    *
    * This hack is no longer necessary on Gen11+.
    *
    * For Gen11+, scratch space allocation is based on the number of threads
    * in the base configuration.
    */
   unsigned subslice_total = screen->subslice_total;
   if (devinfo->gen >= 12)
      subslice_total = devinfo->num_subslices[0];
   else if (devinfo->gen == 11)
      subslice_total = 8;
   else if (devinfo->gen < 11)
      subslice_total = 4 * devinfo->num_slices;
   assert(subslice_total >= screen->subslice_total);

   if (!*bop) {
      unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;

      if (devinfo->gen >= 12) {
         /* Same as ICL below, but with 16 EUs. */
         scratch_ids_per_subslice = 16 * 8;
      } else if (devinfo->gen == 11) {
         /* The MEDIA_VFE_STATE docs say:
          *
          *    "Starting with this configuration, the Maximum Number of
          *     Threads must be set to (#EU * 8) for GPGPU dispatches.
          *
          *     Although there are only 7 threads per EU in the configuration,
          *     the FFTID is calculated as if there are 8 threads per EU,
          *     which in turn requires a larger amount of Scratch Space to be
          *     allocated by the driver."
          */
         scratch_ids_per_subslice = 8 * 8;
      }

      uint32_t max_threads[] = {
         [MESA_SHADER_VERTEX]    = devinfo->max_vs_threads,
         [MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
         [MESA_SHADER_TESS_EVAL] = devinfo->max_tes_threads,
         [MESA_SHADER_GEOMETRY]  = devinfo->max_gs_threads,
         [MESA_SHADER_FRAGMENT]  = devinfo->max_wm_threads,
         [MESA_SHADER_COMPUTE]   = scratch_ids_per_subslice * subslice_total,
      };

      uint32_t size = per_thread_scratch * max_threads[stage];

      *bop = iris_bo_alloc(bufmgr, "scratch", size, IRIS_MEMZONE_SHADER);
   }

   return *bop;
}

/* ------------------------------------------------------------------- */

/**
 * The pipe->create_[stage]_state() driver hooks.
 *
 * Performs basic NIR preprocessing, records any state dependencies, and
 * returns an iris_uncompiled_shader as the Gallium CSO.
 *
 * Actual shader compilation to assembly happens later, at first use.
 */
static void *
iris_create_uncompiled_shader(struct pipe_context *ctx,
                              nir_shader *nir,
                              const struct pipe_stream_output_info *so_info)
{
   struct iris_context *ice = (void *)ctx;
   struct iris_screen *screen = (struct iris_screen *)ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;

   struct iris_uncompiled_shader *ish =
      calloc(1, sizeof(struct iris_uncompiled_shader));

   NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);

   brw_preprocess_nir(screen->compiler, nir, NULL);

   NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
              &ish->uses_atomic_load_store);
   NIR_PASS_V(nir, iris_lower_storage_image_derefs);

   if (nir->constant_data_size > 0) {
      unsigned data_offset;
      u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
                    32, nir->constant_data, &data_offset, &ish->const_data);

      struct pipe_shader_buffer psb = {
         .buffer = ish->const_data,
         .buffer_offset = data_offset,
         .buffer_size = nir->constant_data_size,
      };
      iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
   }

   ish->program_id = get_new_program_id(screen);
   ish->nir = nir;
   if (so_info) {
      memcpy(&ish->stream_output, so_info, sizeof(*so_info));
      update_so_info(&ish->stream_output, nir->info.outputs_written);
   }

   /* Save this now before potentially dropping nir->info.name */
   if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
      ish->use_alt_mode = true;

   if (screen->disk_cache) {
      /* Serialize the NIR to a binary blob that we can hash for the disk
       * cache.  Drop unnecessary information (like variable names)
       * so the serialized NIR is smaller, and also to let us detect more
       * isomorphic shaders when hashing, increasing cache hits.
       */
      struct blob blob;
      blob_init(&blob);
      nir_serialize(&blob, nir, true);
      _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
      blob_finish(&blob);
   }

   return ish;
}

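/**
 * Translate a pipe_shader_state (TGSI tokens or NIR) to NIR and wrap it in
 * an iris_uncompiled_shader.
 */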
static struct iris_uncompiled_shader *
iris_create_shader_state(struct pipe_context *ctx,
                         const struct pipe_shader_state *state)
{
   struct nir_shader *nir;

   if (state->type == PIPE_SHADER_IR_TGSI)
      nir = tgsi_to_nir(state->tokens, ctx->screen, false);
   else
      nir = state->ir.nir;

   return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
}

static void *
iris_create_vs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);

   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << IRIS_NOS_RASTERIZER);

   if (screen->precompile) {
      struct iris_vs_prog_key key = { KEY_ID(vue.base) };

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_vs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_tcs_state(struct pipe_context *ctx,
                      const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   const struct brw_compiler *compiler = screen->compiler;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   if (screen->precompile) {
      const unsigned _GL_TRIANGLES = 0x0004;
      struct iris_tcs_prog_key key = {
         KEY_ID(vue.base),
         // XXX: make sure the linker fills this out from the TES...
         .tes_primitive_mode =
            info->tess.primitive_mode ? info->tess.primitive_mode
                                      : _GL_TRIANGLES,
         .outputs_written = info->outputs_written,
         .patch_outputs_written = info->patch_outputs_written,
      };

      /* 8_PATCH mode needs the key to contain the input patch dimensionality.
       * We don't have that information, so we randomly guess that the input
       * and output patches are the same size.  This is a bad guess, but we
       * can't do much better.
       */
      if (compiler->use_tcs_8_patch)
         key.input_vertices = info->tess.tcs_vertices_out;

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_tcs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_tes_state(struct pipe_context *ctx,
                      const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << IRIS_NOS_RASTERIZER);

   if (screen->precompile) {
      struct iris_tes_prog_key key = {
         KEY_ID(vue.base),
         // XXX: not ideal, need TCS output/TES input unification
         .inputs_read = info->inputs_read,
         .patch_inputs_read = info->patch_inputs_read,
      };

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_tes(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_gs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);

   /* User clip planes */
   if (ish->nir->info.clip_distance_array_size == 0)
      ish->nos |= (1ull << IRIS_NOS_RASTERIZER);

   if (screen->precompile) {
      struct iris_gs_prog_key key = { KEY_ID(vue.base) };

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_gs(ice, ish, &key);
   }

   return ish;
}

static void *
iris_create_fs_state(struct pipe_context *ctx,
                     const struct pipe_shader_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
   struct shader_info *info = &ish->nir->info;

   ish->nos |= (1ull << IRIS_NOS_FRAMEBUFFER) |
               (1ull << IRIS_NOS_DEPTH_STENCIL_ALPHA) |
               (1ull << IRIS_NOS_RASTERIZER) |
               (1ull << IRIS_NOS_BLEND);

   /* The program key needs the VUE map if there are > 16 inputs */
   if (util_bitcount64(ish->nir->info.inputs_read &
                       BRW_FS_VARYING_INPUT_MASK) > 16) {
      ish->nos |= (1ull << IRIS_NOS_LAST_VUE_MAP);
   }

   if (screen->precompile) {
      const uint64_t color_outputs = info->outputs_written &
         ~(BITFIELD64_BIT(FRAG_RESULT_DEPTH) |
           BITFIELD64_BIT(FRAG_RESULT_STENCIL) |
           BITFIELD64_BIT(FRAG_RESULT_SAMPLE_MASK));

      bool can_rearrange_varyings =
         util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;

      const struct gen_device_info *devinfo = &screen->devinfo;
      struct iris_fs_prog_key key = {
         KEY_ID(base),
         .nr_color_regions = util_bitcount(color_outputs),
         .coherent_fb_fetch = devinfo->gen >= 9,
         .input_slots_valid =
            can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
      };

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_fs(ice, ish, &key, NULL);
   }

   return ish;
}

static void *
iris_create_compute_state(struct pipe_context *ctx,
                          const struct pipe_compute_state *state)
{
   struct iris_context *ice = (void *) ctx;
   struct iris_screen *screen = (void *) ctx->screen;
   const nir_shader_compiler_options *options =
      screen->compiler->glsl_compiler_options[MESA_SHADER_COMPUTE].NirOptions;

   nir_shader *nir;
   switch (state->ir_type) {
   case PIPE_SHADER_IR_NIR:
      nir = (void *)state->prog;
      break;

   case PIPE_SHADER_IR_NIR_SERIALIZED: {
      struct blob_reader reader;
      const struct pipe_binary_program_header *hdr = state->prog;
      blob_reader_init(&reader, hdr->blob, hdr->num_bytes);
      nir = nir_deserialize(NULL, options, &reader);
      break;
   }

   default:
      unreachable("Unsupported IR");
   }

   struct iris_uncompiled_shader *ish =
      iris_create_uncompiled_shader(ctx, nir, NULL);
   ish->kernel_input_size = state->req_input_mem;

   // XXX: disallow more than 64KB of shared variables

   if (screen->precompile) {
      struct iris_cs_prog_key key = { KEY_ID(base) };

      if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
         iris_compile_cs(ice, ish, &key);
   }

   return ish;
}

/**
 * The pipe->delete_[stage]_state() driver hooks.
 *
 * Frees the iris_uncompiled_shader.
 */
static void
iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
{
   struct iris_uncompiled_shader *ish = state;
   struct iris_context *ice = (void *) ctx;

   if (ice->shaders.uncompiled[stage] == ish) {
      ice->shaders.uncompiled[stage] = NULL;
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
   }

   if (ish->const_data) {
      pipe_resource_reference(&ish->const_data, NULL);
      pipe_resource_reference(&ish->const_data_state.res, NULL);
   }

   iris_delete_shader_variants(ice, ish);

   ralloc_free(ish->nir);
   free(ish);
}

static void
iris_delete_vs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
}

static void
iris_delete_tcs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
iris_delete_tes_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
iris_delete_gs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
}

static void
iris_delete_fs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
}

static void
iris_delete_cs_state(struct pipe_context *ctx, void *state)
{
   iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
}

/**
 * The pipe->bind_[stage]_state() driver hook.
 *
 * Binds an uncompiled shader as the current one for a particular stage.
 * Updates dirty tracking to account for the shader's NOS.
 */
static void
bind_shader_state(struct iris_context *ice,
                  struct iris_uncompiled_shader *ish,
                  gl_shader_stage stage)
{
   uint64_t stage_dirty_bit = IRIS_STAGE_DIRTY_UNCOMPILED_VS << stage;
   const uint64_t nos = ish ? ish->nos : 0;

   const struct shader_info *old_info = iris_get_shader_info(ice, stage);
   const struct shader_info *new_info = ish ? &ish->nir->info : NULL;

   if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
       (new_info ? util_last_bit(new_info->textures_used) : 0)) {
      ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
   }

   ice->shaders.uncompiled[stage] = ish;
   ice->state.stage_dirty |= stage_dirty_bit;

   /* Record that CSOs need to mark IRIS_DIRTY_UNCOMPILED_XS when they change
    * (or that they no longer need to do so).
    */
   for (int i = 0; i < IRIS_NOS_COUNT; i++) {
      if (nos & (1 << i))
         ice->state.stage_dirty_for_nos[i] |= stage_dirty_bit;
      else
         ice->state.stage_dirty_for_nos[i] &= ~stage_dirty_bit;
   }
}

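/**
 * Bind a vertex shader, re-flagging clip and viewport state if the new
 * shader's window space position setting differs from the current one.
 */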
static void
iris_bind_vs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;
   struct iris_uncompiled_shader *new_ish = state;

   if (new_ish &&
       ice->state.window_space_position !=
       new_ish->nir->info.vs.window_space_position) {
      ice->state.window_space_position =
         new_ish->nir->info.vs.window_space_position;

      ice->state.dirty |= IRIS_DIRTY_CLIP |
                          IRIS_DIRTY_RASTER |
                          IRIS_DIRTY_CC_VIEWPORT;
   }

   bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
}

static void
iris_bind_tcs_state(struct pipe_context *ctx, void *state)
{
   bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
}

static void
iris_bind_tes_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
}

static void
iris_bind_gs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *)ctx;

   /* Enabling/disabling optional stages requires a URB reconfiguration. */
   if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
      ice->state.dirty |= IRIS_DIRTY_URB;

   bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
}

static void
iris_bind_fs_state(struct pipe_context *ctx, void *state)
{
   struct iris_context *ice = (struct iris_context *) ctx;
   struct iris_screen *screen = (struct iris_screen *) ctx->screen;
   const struct gen_device_info *devinfo = &screen->devinfo;
   struct iris_uncompiled_shader *old_ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   struct iris_uncompiled_shader *new_ish = state;

   const unsigned color_bits =
      BITFIELD64_BIT(FRAG_RESULT_COLOR) |
      BITFIELD64_RANGE(FRAG_RESULT_DATA0, BRW_MAX_DRAW_BUFFERS);

   /* Fragment shader outputs influence HasWriteableRT */
   if (!old_ish || !new_ish ||
       (old_ish->nir->info.outputs_written & color_bits) !=
       (new_ish->nir->info.outputs_written & color_bits))
      ice->state.dirty |= IRIS_DIRTY_PS_BLEND;

   if (devinfo->gen == 8)
      ice->state.dirty |= IRIS_DIRTY_PMA_FIX;

   bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
}

static void
iris_bind_cs_state(struct pipe_context *ctx, void *state)
{
   bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
}

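/**
 * Wire up the pipe_context function pointers for creating, deleting, and
 * binding shader CSOs.
 */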
void
iris_init_program_functions(struct pipe_context *ctx)
{
   ctx->create_vs_state  = iris_create_vs_state;
   ctx->create_tcs_state = iris_create_tcs_state;
   ctx->create_tes_state = iris_create_tes_state;
   ctx->create_gs_state  = iris_create_gs_state;
   ctx->create_fs_state  = iris_create_fs_state;
   ctx->create_compute_state = iris_create_compute_state;

   ctx->delete_vs_state  = iris_delete_vs_state;
   ctx->delete_tcs_state = iris_delete_tcs_state;
   ctx->delete_tes_state = iris_delete_tes_state;
   ctx->delete_gs_state  = iris_delete_gs_state;
   ctx->delete_fs_state  = iris_delete_fs_state;
   ctx->delete_compute_state = iris_delete_cs_state;

   ctx->bind_vs_state  = iris_bind_vs_state;
   ctx->bind_tcs_state = iris_bind_tcs_state;
   ctx->bind_tes_state = iris_bind_tes_state;
   ctx->bind_gs_state  = iris_bind_gs_state;
   ctx->bind_fs_state  = iris_bind_fs_state;
   ctx->bind_compute_state = iris_bind_cs_state;
}