#include "pipe/p_context.h"
#include "pipe/p_screen.h"
#include "util/u_atomic.h"
+#include "util/u_upload_mgr.h"
+#include "util/debug.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
+#include "compiler/nir/nir_serialize.h"
#include "intel/compiler/brw_compiler.h"
#include "intel/compiler/brw_nir.h"
#include "iris_context.h"
+#include "nir/tgsi_to_nir.h"
-#define KEY_INIT_NO_ID(gen) \
- .tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
- .tex.compressed_multisample_layout_mask = ~0, \
- .tex.msaa_16 = (gen >= 9 ? ~0 : 0)
-#define KEY_INIT(gen) .program_string_id = ish->program_id, KEY_INIT_NO_ID(gen)
+#define KEY_ID(prefix) .prefix.program_string_id = ish->program_id
+#define BRW_KEY_INIT(gen, prog_id) \
+ .base.program_string_id = prog_id, \
+ .base.subgroup_size_type = BRW_SUBGROUP_SIZE_UNIFORM, \
+ .base.tex.swizzles[0 ... MAX_SAMPLERS - 1] = 0x688, \
+ .base.tex.compressed_multisample_layout_mask = ~0, \
+ .base.tex.msaa_16 = (gen >= 9 ? ~0 : 0)
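+
+/* Illustrative usage: iris_to_brw_vs_key() below seeds the common fields of
+ * a brw_vs_prog_key with
+ *
+ *    BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id)
+ *
+ * which fills in the program string ID, the uniform subgroup size type, and
+ * the default texture swizzle/MSAA compression fields.
+ */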
static unsigned
get_new_program_id(struct iris_screen *screen)
return p_atomic_inc_return(&screen->program_id);
}
-/**
- * An uncompiled, API-facing shader. This is the Gallium CSO for shaders.
- * It primarily contains the NIR for the shader.
- *
- * Each API-facing shader can be compiled into multiple shader variants,
- * based on non-orthogonal state dependencies, recorded in the shader key.
- *
- * See iris_compiled_shader, which represents a compiled shader variant.
- */
-struct iris_uncompiled_shader {
- nir_shader *nir;
+static struct brw_vs_prog_key
+iris_to_brw_vs_key(const struct gen_device_info *devinfo,
+ const struct iris_vs_prog_key *key)
+{
+ return (struct brw_vs_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
- struct pipe_stream_output_info stream_output;
+ /* Don't tell the backend about our clip plane constants; we've
+ * already lowered them in NIR and don't want it doing it again.
+ */
+ .nr_userclip_plane_consts = 0,
+ };
+}
- unsigned program_id;
+static struct brw_tcs_prog_key
+iris_to_brw_tcs_key(const struct gen_device_info *devinfo,
+ const struct iris_tcs_prog_key *key)
+{
+ return (struct brw_tcs_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
+ .tes_primitive_mode = key->tes_primitive_mode,
+ .input_vertices = key->input_vertices,
+ .patch_outputs_written = key->patch_outputs_written,
+ .outputs_written = key->outputs_written,
+ .quads_workaround = key->quads_workaround,
+ };
+}
- /** Bitfield of (1 << IRIS_NOS_*) flags. */
- unsigned nos;
+static struct brw_tes_prog_key
+iris_to_brw_tes_key(const struct gen_device_info *devinfo,
+ const struct iris_tes_prog_key *key)
+{
+ return (struct brw_tes_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
+ .patch_inputs_read = key->patch_inputs_read,
+ .inputs_read = key->inputs_read,
+ };
+}
- /** Have any shader variants been compiled yet? */
- bool compiled_once;
-};
+static struct brw_gs_prog_key
+iris_to_brw_gs_key(const struct gen_device_info *devinfo,
+ const struct iris_gs_prog_key *key)
+{
+ return (struct brw_gs_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->vue.base.program_string_id),
+ };
+}
+
+static struct brw_wm_prog_key
+iris_to_brw_fs_key(const struct gen_device_info *devinfo,
+ const struct iris_fs_prog_key *key)
+{
+ return (struct brw_wm_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
+ .nr_color_regions = key->nr_color_regions,
+ .flat_shade = key->flat_shade,
+ .alpha_test_replicate_alpha = key->alpha_test_replicate_alpha,
+ .alpha_to_coverage = key->alpha_to_coverage,
+ .clamp_fragment_color = key->clamp_fragment_color,
+ .persample_interp = key->persample_interp,
+ .multisample_fbo = key->multisample_fbo,
+ .force_dual_color_blend = key->force_dual_color_blend,
+ .coherent_fb_fetch = key->coherent_fb_fetch,
+ .color_outputs_valid = key->color_outputs_valid,
+ .input_slots_valid = key->input_slots_valid,
+ };
+}
+
+static struct brw_cs_prog_key
+iris_to_brw_cs_key(const struct gen_device_info *devinfo,
+ const struct iris_cs_prog_key *key)
+{
+ return (struct brw_cs_prog_key) {
+ BRW_KEY_INIT(devinfo->gen, key->base.program_string_id),
+ };
+}
+
+static void *
+upload_state(struct u_upload_mgr *uploader,
+ struct iris_state_ref *ref,
+ unsigned size,
+ unsigned alignment)
+{
+ void *p = NULL;
+ u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
+ return p;
+}
+
+void
+iris_upload_ubo_ssbo_surf_state(struct iris_context *ice,
+ struct pipe_shader_buffer *buf,
+ struct iris_state_ref *surf_state,
+ bool ssbo)
+{
+ struct pipe_context *ctx = &ice->ctx;
+ struct iris_screen *screen = (struct iris_screen *) ctx->screen;
+
+ void *map =
+ upload_state(ice->state.surface_uploader, surf_state,
+ screen->isl_dev.ss.size, 64);
+ if (unlikely(!map)) {
+ surf_state->res = NULL;
+ return;
+ }
+
+ struct iris_resource *res = (void *) buf->buffer;
+ struct iris_bo *surf_bo = iris_resource_bo(surf_state->res);
+ surf_state->offset += iris_bo_offset_from_base_address(surf_bo);
+
+ isl_buffer_fill_state(&screen->isl_dev, map,
+ .address = res->bo->gtt_offset + res->offset +
+ buf->buffer_offset,
+ .size_B = buf->buffer_size - res->offset,
+ .format = ssbo ? ISL_FORMAT_RAW
+ : ISL_FORMAT_R32G32B32A32_FLOAT,
+ .swizzle = ISL_SWIZZLE_IDENTITY,
+ .stride_B = 1,
+ .mocs = iris_mocs(res->bo, &screen->isl_dev));
+}
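+
+/* A caller sketch (illustrative): when a bound constant buffer still lacks a
+ * SURFACE_STATE, iris_update_pull_constant_descriptors() below calls
+ *
+ *    iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+ *
+ * and passes ssbo = true instead when an SSBO needs a RAW buffer surface.
+ */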
static nir_ssa_def *
get_aoa_deref_offset(nir_builder *b,
case nir_intrinsic_image_deref_load:
case nir_intrinsic_image_deref_store:
case nir_intrinsic_image_deref_atomic_add:
- case nir_intrinsic_image_deref_atomic_min:
- case nir_intrinsic_image_deref_atomic_max:
+ case nir_intrinsic_image_deref_atomic_imin:
+ case nir_intrinsic_image_deref_atomic_umin:
+ case nir_intrinsic_image_deref_atomic_imax:
+ case nir_intrinsic_image_deref_atomic_umax:
case nir_intrinsic_image_deref_atomic_and:
case nir_intrinsic_image_deref_atomic_or:
case nir_intrinsic_image_deref_atomic_xor:
nir_ssa_def *index =
nir_iadd(&b, nir_imm_int(&b, var->data.driver_location),
get_aoa_deref_offset(&b, deref, 1));
- brw_nir_rewrite_image_intrinsic(intrin, index);
+ nir_rewrite_image_intrinsic(intrin, index, false);
break;
}
}
}
-// XXX: need unify_interfaces() at link time...
+/**
+ * Undo nir_lower_passthrough_edgeflags but keep the inputs_read flag.
+ */
+static bool
+iris_fix_edge_flags(nir_shader *nir)
+{
+ if (nir->info.stage != MESA_SHADER_VERTEX)
+ return false;
+
+ nir_variable *var = NULL;
+ nir_foreach_variable(v, &nir->outputs) {
+ if (v->data.location == VARYING_SLOT_EDGE) {
+ var = v;
+ break;
+ }
+ }
+
+ if (!var)
+ return false;
+
+ exec_node_remove(&var->node);
+ var->data.mode = nir_var_shader_temp;
+ exec_list_push_tail(&nir->globals, &var->node);
+ nir->info.outputs_written &= ~VARYING_BIT_EDGE;
+ nir->info.inputs_read &= ~VERT_BIT_EDGEFLAG;
+ nir_fixup_deref_modes(nir);
+
+ nir_foreach_function(f, nir) {
+ if (f->impl) {
+ nir_metadata_preserve(f->impl, nir_metadata_block_index |
+ nir_metadata_dominance |
+ nir_metadata_live_ssa_defs |
+ nir_metadata_loop_analysis);
+ }
+ }
+
+ return true;
+}
/**
* Fix an uncompiled shader's stream output info.
}
}
-/**
- * Sets up the starting offsets for the groups of binding table entries
- * common to all pipeline stages.
- *
- * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
- * unused but also make sure that addition of small offsets to them will
- * trigger some of our asserts that surface indices are < BRW_MAX_SURFACES.
- */
-static uint32_t
-assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
- const struct nir_shader *nir,
- struct brw_stage_prog_data *prog_data,
- uint32_t next_binding_table_offset,
- unsigned num_system_values,
- unsigned num_cbufs)
-{
- const struct shader_info *info = &nir->info;
-
- unsigned num_textures = util_last_bit(info->textures_used);
-
- if (num_textures) {
- prog_data->binding_table.texture_start = next_binding_table_offset;
- prog_data->binding_table.gather_texture_start = next_binding_table_offset;
- next_binding_table_offset += num_textures;
- } else {
- prog_data->binding_table.texture_start = 0xd0d0d0d0;
- prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
- }
-
- if (info->num_images) {
- prog_data->binding_table.image_start = next_binding_table_offset;
- next_binding_table_offset += info->num_images;
- } else {
- prog_data->binding_table.image_start = 0xd0d0d0d0;
- }
-
- if (num_cbufs) {
- //assert(info->num_ubos <= BRW_MAX_UBO);
- prog_data->binding_table.ubo_start = next_binding_table_offset;
- next_binding_table_offset += num_cbufs;
- } else {
- prog_data->binding_table.ubo_start = 0xd0d0d0d0;
- }
-
- if (info->num_ssbos || info->num_abos) {
- prog_data->binding_table.ssbo_start = next_binding_table_offset;
- // XXX: see iris_state "wasting 16 binding table slots for ABOs" comment
- next_binding_table_offset += IRIS_MAX_ABOS + info->num_ssbos;
- } else {
- prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
- }
-
- prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
-
- /* Plane 0 is just the regular texture section */
- prog_data->binding_table.plane_start[0] = prog_data->binding_table.texture_start;
-
- prog_data->binding_table.plane_start[1] = next_binding_table_offset;
- next_binding_table_offset += num_textures;
-
- prog_data->binding_table.plane_start[2] = next_binding_table_offset;
- next_binding_table_offset += num_textures;
-
- /* Set the binding table size */
- prog_data->binding_table.size_bytes = next_binding_table_offset * 4;
-
- return next_binding_table_offset;
-}
-
static void
setup_vec4_image_sysval(uint32_t *sysvals, uint32_t idx,
unsigned offset, unsigned n)
unsigned *out_num_system_values,
unsigned *out_num_cbufs)
{
- const struct gen_device_info *devinfo = compiler->devinfo;
-
- /* The intel compiler assumes that num_uniforms is in bytes. For
- * scalar that means 4 bytes per uniform slot.
- *
- * Ref: brw_nir_lower_uniforms, type_size_scalar_bytes.
- */
- nir->num_uniforms *= 4;
+ UNUSED const struct gen_device_info *devinfo = compiler->devinfo;
const unsigned IRIS_MAX_SYSTEM_VALUES =
PIPE_MAX_SHADER_IMAGES * BRW_IMAGE_PARAM_SIZE;
b.cursor = nir_before_block(nir_start_block(impl));
nir_ssa_def *temp_ubo_name = nir_ssa_undef(&b, 1, 32);
+ nir_ssa_def *temp_const_ubo_name = NULL;
/* Turn system value intrinsics into uniforms */
nir_foreach_block(block, impl) {
nir_ssa_def *offset;
switch (intrin->intrinsic) {
+ case nir_intrinsic_load_constant: {
+ /* This one is special because it reads from the shader constant
+ * data rather than cbuf0, which gallium uploads for us.
+ */
+ b.cursor = nir_before_instr(instr);
+ nir_ssa_def *offset =
+ nir_iadd_imm(&b, nir_ssa_for_src(&b, intrin->src[0], 1),
+ nir_intrinsic_base(intrin));
+
+ if (temp_const_ubo_name == NULL)
+ temp_const_ubo_name = nir_imm_int(&b, 0);
+
+ nir_intrinsic_instr *load_ubo =
+ nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
+ load_ubo->num_components = intrin->num_components;
+ load_ubo->src[0] = nir_src_for_ssa(temp_const_ubo_name);
+ load_ubo->src[1] = nir_src_for_ssa(offset);
+ nir_intrinsic_set_align(load_ubo,
+ nir_intrinsic_align_mul(intrin),
+ nir_intrinsic_align_offset(intrin));
+ nir_ssa_dest_init(&load_ubo->instr, &load_ubo->dest,
+ intrin->dest.ssa.num_components,
+ intrin->dest.ssa.bit_size,
+ intrin->dest.ssa.name);
+ nir_builder_instr_insert(&b, &load_ubo->instr);
+
+ nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
+ nir_src_for_ssa(&load_ubo->dest.ssa));
+ nir_instr_remove(&intrin->instr);
+ continue;
+ }
case nir_intrinsic_load_user_clip_plane: {
unsigned ucp = nir_intrinsic_ucp_id(intrin);
nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
nir_variable *var = nir_deref_instr_get_variable(deref);
- /* XXX: var->data.binding is not set properly. We need to run
- * some form of gl_nir_lower_samplers_as_deref() to get it.
- * This breaks tests which use more than one image.
- */
if (img_idx[var->data.binding] == -1) {
/* GL only allows arrays of arrays of images. */
assert(glsl_type_is_image(glsl_without_array(var->type)));
load->num_components = comps;
load->src[0] = nir_src_for_ssa(temp_ubo_name);
load->src[1] = nir_src_for_ssa(offset);
+ nir_intrinsic_set_align(load, 4, 0);
nir_ssa_dest_init(&load->instr, &load->dest, comps, 32, NULL);
nir_builder_instr_insert(&b, &load->instr);
nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
nir_validate_shader(nir, "before remapping");
- /* Place the new params at the front of constant buffer 0. */
+ /* Uniforms are stored in constant buffer 0, and the user-facing UBOs
+ * are shifted up by one index. So if any constant buffer is needed,
+ * constant buffer 0 will be needed as well, so account for it.
+ */
+ unsigned num_cbufs = nir->info.num_ubos;
+ if (num_cbufs || nir->num_uniforms)
+ num_cbufs++;
+
+ /* Place the new params in a new cbuf. */
if (num_system_values > 0) {
- nir->num_uniforms += num_system_values * sizeof(uint32_t);
+ unsigned sysval_cbuf_index = num_cbufs;
+ num_cbufs++;
system_values = reralloc(mem_ctx, system_values, enum brw_param_builtin,
num_system_values);
assert(load->src[0].is_ssa);
if (load->src[0].ssa == temp_ubo_name) {
+ nir_ssa_def *imm = nir_imm_int(&b, sysval_cbuf_index);
nir_instr_rewrite_src(instr, &load->src[0],
- nir_src_for_ssa(nir_imm_int(&b, 0)));
- } else if (nir_src_as_uint(load->src[0]) == 0) {
- nir_ssa_def *offset =
- nir_iadd(&b, load->src[1].ssa,
- nir_imm_int(&b, 4 * num_system_values));
- nir_instr_rewrite_src(instr, &load->src[1],
- nir_src_for_ssa(offset));
+ nir_src_for_ssa(imm));
}
}
}
system_values = NULL;
}
+ assert(num_cbufs < PIPE_MAX_CONSTANT_BUFFERS);
nir_validate_shader(nir, "after remap");
- if (nir->info.stage != MESA_SHADER_COMPUTE)
- brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
-
- /* We don't use params[], but fs_visitor::nir_setup_uniforms() asserts
- * about it for compute shaders, so go ahead and make some fake ones
- * which the backend will dead code eliminate.
+ /* We don't use params[], but gallium leaves num_uniforms set. We use it
+ * earlier to detect whether cbuf0 exists, but it has served its purpose
+ * by this point. Zero it out so that the back-end doesn't get confused
+ * when nr_params * 4 != num_uniforms.
*/
- prog_data->nr_params = nir->num_uniforms / 4;
- prog_data->param = rzalloc_array(mem_ctx, uint32_t, prog_data->nr_params);
+ nir->num_uniforms = 0;
- /* System values and uniforms are stored in constant buffer 0, the
- * user-facing UBOs are indexed by one. So if any constant buffer is
- * needed, the constant buffer 0 will be needed, so account for it.
+ /* Constant loads (if any) need to go at the end of the constant buffers,
+ * so we need to know num_cbufs before we can patch in their final index.
*/
- unsigned num_cbufs = nir->info.num_ubos;
- if (num_cbufs || num_system_values || nir->num_uniforms)
- num_cbufs++;
+ if (temp_const_ubo_name != NULL) {
+ nir_load_const_instr *const_ubo_index =
+ nir_instr_as_load_const(temp_const_ubo_name->parent_instr);
+ assert(const_ubo_index->def.bit_size == 32);
+ const_ubo_index->value[0].u32 = num_cbufs;
+ }
*out_system_values = system_values;
*out_num_system_values = num_system_values;
*out_num_cbufs = num_cbufs;
}
+static const char *surface_group_names[] = {
+ [IRIS_SURFACE_GROUP_RENDER_TARGET] = "render target",
+ [IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = "non-coherent render target read",
+ [IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = "CS work groups",
+ [IRIS_SURFACE_GROUP_TEXTURE] = "texture",
+ [IRIS_SURFACE_GROUP_UBO] = "ubo",
+ [IRIS_SURFACE_GROUP_SSBO] = "ssbo",
+ [IRIS_SURFACE_GROUP_IMAGE] = "image",
+};
+
+static void
+iris_print_binding_table(FILE *fp, const char *name,
+ const struct iris_binding_table *bt)
+{
+ STATIC_ASSERT(ARRAY_SIZE(surface_group_names) == IRIS_SURFACE_GROUP_COUNT);
+
+ uint32_t total = 0;
+ uint32_t compacted = 0;
+
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
+ uint32_t size = bt->sizes[i];
+ total += size;
+ if (size)
+ compacted += util_bitcount64(bt->used_mask[i]);
+ }
+
+ if (total == 0) {
+ fprintf(fp, "Binding table for %s is empty\n\n", name);
+ return;
+ }
+
+ if (total != compacted) {
+ fprintf(fp, "Binding table for %s "
+ "(compacted to %u entries from %u entries)\n",
+ name, compacted, total);
+ } else {
+ fprintf(fp, "Binding table for %s (%u entries)\n", name, total);
+ }
+
+ uint32_t entry = 0;
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
+ uint64_t mask = bt->used_mask[i];
+ while (mask) {
+ int index = u_bit_scan64(&mask);
+ fprintf(fp, " [%u] %s #%d\n", entry++, surface_group_names[i], index);
+ }
+ }
+ fprintf(fp, "\n");
+}
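+
+/* With INTEL_DEBUG=bt, the output looks roughly like this (illustrative):
+ *
+ *    Binding table for MESA_SHADER_FRAGMENT (compacted to 3 entries from 6 entries)
+ *     [0] render target #0
+ *     [1] texture #0
+ *     [2] ubo #0
+ */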
+
+enum {
+ /* Max elements in a surface group. */
+ SURFACE_GROUP_MAX_ELEMENTS = 64,
+};
+
+/**
+ * Map a <group, index> pair to a binding table index.
+ *
+ * For example: <UBO, 5> => binding table index 12
+ */
+uint32_t
+iris_group_index_to_bti(const struct iris_binding_table *bt,
+ enum iris_surface_group group, uint32_t index)
+{
+ assert(index < bt->sizes[group]);
+ uint64_t mask = bt->used_mask[group];
+ uint64_t bit = 1ull << index;
+ if (bit & mask) {
+ return bt->offsets[group] + util_bitcount64((bit - 1) & mask);
+ } else {
+ return IRIS_SURFACE_NOT_USED;
+ }
+}
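+
+/* Worked example (illustrative): if the UBO group starts at offset 7 with
+ * used_mask = 0b101101, then index 3 maps to BTI
+ * 7 + util_bitcount64(0b000111 & 0b101101) = 7 + 2 = 9, while index 1,
+ * whose bit is clear in the mask, yields IRIS_SURFACE_NOT_USED.
+ */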
+
+/**
+ * Map a binding table index back to a <group, index> pair.
+ *
+ * For example: binding table index 12 => <UBO, 5>
+ */
+uint32_t
+iris_bti_to_group_index(const struct iris_binding_table *bt,
+ enum iris_surface_group group, uint32_t bti)
+{
+ uint64_t used_mask = bt->used_mask[group];
+ assert(bti >= bt->offsets[group]);
+
+ uint32_t c = bti - bt->offsets[group];
+ while (used_mask) {
+ int i = u_bit_scan64(&used_mask);
+ if (c == 0)
+ return i;
+ c--;
+ }
+
+ return IRIS_SURFACE_NOT_USED;
+}
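+
+/* Worked example (illustrative), inverting the case above: with offset 7 and
+ * used_mask = 0b101101, BTI 9 gives c = 2, and scanning the set bits 0, 2, 3
+ * counts c down to zero at bit 3, so the group index is 3.
+ */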
+
+static void
+rewrite_src_with_bti(nir_builder *b, struct iris_binding_table *bt,
+ nir_instr *instr, nir_src *src,
+ enum iris_surface_group group)
+{
+ assert(bt->sizes[group] > 0);
+
+ b->cursor = nir_before_instr(instr);
+ nir_ssa_def *bti;
+ if (nir_src_is_const(*src)) {
+ uint32_t index = nir_src_as_uint(*src);
+ bti = nir_imm_intN_t(b, iris_group_index_to_bti(bt, group, index),
+ src->ssa->bit_size);
+ } else {
+ /* Indirect usage makes all the surfaces of the group available,
+ * so we can just add the base.
+ */
+ assert(bt->used_mask[group] == BITFIELD64_MASK(bt->sizes[group]));
+ bti = nir_iadd_imm(b, src->ssa, bt->offsets[group]);
+ }
+ nir_instr_rewrite_src(instr, src, nir_src_for_ssa(bti));
+}
+
+static void
+mark_used_with_src(struct iris_binding_table *bt, nir_src *src,
+ enum iris_surface_group group)
+{
+ assert(bt->sizes[group] > 0);
+
+ if (nir_src_is_const(*src)) {
+ uint64_t index = nir_src_as_uint(*src);
+ assert(index < bt->sizes[group]);
+ bt->used_mask[group] |= 1ull << index;
+ } else {
+ /* There's an indirect usage, we need all the surfaces. */
+ bt->used_mask[group] = BITFIELD64_MASK(bt->sizes[group]);
+ }
+}
+
+static bool
+skip_compacting_binding_tables(void)
+{
+ static int skip = -1;
+ if (skip < 0)
+ skip = env_var_as_boolean("INTEL_DISABLE_COMPACT_BINDING_TABLE", false);
+ return skip;
+}
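+
+/* Setting INTEL_DISABLE_COMPACT_BINDING_TABLE=1 in the environment keeps the
+ * one-to-one layout, which makes every slot of every group count as used
+ * (see iris_setup_binding_table below).
+ */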
+
+/**
+ * Set up the binding table indices and apply to the shader.
+ */
+static void
+iris_setup_binding_table(const struct gen_device_info *devinfo,
+ struct nir_shader *nir,
+ struct iris_binding_table *bt,
+ unsigned num_render_targets,
+ unsigned num_system_values,
+ unsigned num_cbufs)
+{
+ const struct shader_info *info = &nir->info;
+
+ memset(bt, 0, sizeof(*bt));
+
+ /* Set the sizes for each surface group. For some groups, we already know
+ * upfront how many will be used, so mark them.
+ */
+ if (info->stage == MESA_SHADER_FRAGMENT) {
+ bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET] = num_render_targets;
+ /* All render targets used. */
+ bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET] =
+ BITFIELD64_MASK(num_render_targets);
+
+ /* Set up the render target read surface group in order to support
+ * non-coherent framebuffer fetch on Gen8.
+ */
+ if (devinfo->gen == 8 && info->outputs_read) {
+ bt->sizes[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] = num_render_targets;
+ bt->used_mask[IRIS_SURFACE_GROUP_RENDER_TARGET_READ] =
+ BITFIELD64_MASK(num_render_targets);
+ }
+ } else if (info->stage == MESA_SHADER_COMPUTE) {
+ bt->sizes[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
+ }
+
+ bt->sizes[IRIS_SURFACE_GROUP_TEXTURE] = util_last_bit(info->textures_used);
+ bt->used_mask[IRIS_SURFACE_GROUP_TEXTURE] = info->textures_used;
+
+ bt->sizes[IRIS_SURFACE_GROUP_IMAGE] = info->num_images;
+
+ /* Allocate an extra slot in the UBO section for NIR constants.
+ * Binding table compaction will remove it if unnecessary.
+ *
+ * We don't include them in iris_compiled_shader::num_cbufs because
+ * they are uploaded separately from shs->constbuf[], but from a shader
+ * point of view, they're another UBO (at the end of the section).
+ */
+ bt->sizes[IRIS_SURFACE_GROUP_UBO] = num_cbufs + 1;
+
+ bt->sizes[IRIS_SURFACE_GROUP_SSBO] = info->num_ssbos;
+
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
+ assert(bt->sizes[i] <= SURFACE_GROUP_MAX_ELEMENTS);
+
+ /* Mark surfaces used in the cases where we don't have the information
+ * available upfront.
+ */
+ nir_function_impl *impl = nir_shader_get_entrypoint(nir);
+ nir_foreach_block (block, impl) {
+ nir_foreach_instr (instr, block) {
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_load_num_work_groups:
+ bt->used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS] = 1;
+ break;
+
+ case nir_intrinsic_load_output:
+ if (devinfo->gen == 8) {
+ mark_used_with_src(bt, &intrin->src[0],
+ IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
+ }
+ break;
+
+ case nir_intrinsic_image_size:
+ case nir_intrinsic_image_load:
+ case nir_intrinsic_image_store:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_load_raw_intel:
+ case nir_intrinsic_image_store_raw_intel:
+ mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_IMAGE);
+ break;
+
+ case nir_intrinsic_load_ubo:
+ mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_UBO);
+ break;
+
+ case nir_intrinsic_store_ssbo:
+ mark_used_with_src(bt, &intrin->src[1], IRIS_SURFACE_GROUP_SSBO);
+ break;
+
+ case nir_intrinsic_get_buffer_size:
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_fmin:
+ case nir_intrinsic_ssbo_atomic_fmax:
+ case nir_intrinsic_ssbo_atomic_fcomp_swap:
+ case nir_intrinsic_load_ssbo:
+ mark_used_with_src(bt, &intrin->src[0], IRIS_SURFACE_GROUP_SSBO);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ /* When compaction is disabled, we just mark everything as used. */
+ if (unlikely(skip_compacting_binding_tables())) {
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++)
+ bt->used_mask[i] = BITFIELD64_MASK(bt->sizes[i]);
+ }
+
+ /* Calculate the offsets and the binding table size based on the used
+ * surfaces. After this point, the functions to go between "group indices"
+ * and binding table indices can be used.
+ */
+ uint32_t next = 0;
+ for (int i = 0; i < IRIS_SURFACE_GROUP_COUNT; i++) {
+ if (bt->used_mask[i] != 0) {
+ bt->offsets[i] = next;
+ next += util_bitcount64(bt->used_mask[i]);
+ }
+ }
+ bt->size_bytes = next * 4;
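+
+ /* Worked example (illustrative): a fragment shader with two render targets,
+ * one texture, and UBO used_mask = 0b101 out of three slots would get
+ * offsets RENDER_TARGET = 0, TEXTURE = 2, UBO = 3, five entries in total,
+ * so bt->size_bytes = 5 * 4 = 20.
+ */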
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BT)) {
+ iris_print_binding_table(stderr, gl_shader_stage_name(info->stage), bt);
+ }
+
+ /* Apply the binding table indices. The backend compiler is not expected
+ * to change those, as we haven't set any of the *_start entries in brw
+ * binding_table.
+ */
+ nir_builder b;
+ nir_builder_init(&b, impl);
+
+ nir_foreach_block (block, impl) {
+ nir_foreach_instr (instr, block) {
+ if (instr->type == nir_instr_type_tex) {
+ nir_tex_instr *tex = nir_instr_as_tex(instr);
+ tex->texture_index =
+ iris_group_index_to_bti(bt, IRIS_SURFACE_GROUP_TEXTURE,
+ tex->texture_index);
+ continue;
+ }
+
+ if (instr->type != nir_instr_type_intrinsic)
+ continue;
+
+ nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
+ switch (intrin->intrinsic) {
+ case nir_intrinsic_image_size:
+ case nir_intrinsic_image_load:
+ case nir_intrinsic_image_store:
+ case nir_intrinsic_image_atomic_add:
+ case nir_intrinsic_image_atomic_imin:
+ case nir_intrinsic_image_atomic_umin:
+ case nir_intrinsic_image_atomic_imax:
+ case nir_intrinsic_image_atomic_umax:
+ case nir_intrinsic_image_atomic_and:
+ case nir_intrinsic_image_atomic_or:
+ case nir_intrinsic_image_atomic_xor:
+ case nir_intrinsic_image_atomic_exchange:
+ case nir_intrinsic_image_atomic_comp_swap:
+ case nir_intrinsic_image_load_raw_intel:
+ case nir_intrinsic_image_store_raw_intel:
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_IMAGE);
+ break;
+
+ case nir_intrinsic_load_ubo:
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_UBO);
+ break;
+
+ case nir_intrinsic_store_ssbo:
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[1],
+ IRIS_SURFACE_GROUP_SSBO);
+ break;
+
+ case nir_intrinsic_load_output:
+ if (devinfo->gen == 8) {
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_RENDER_TARGET_READ);
+ }
+ break;
+
+ case nir_intrinsic_get_buffer_size:
+ case nir_intrinsic_ssbo_atomic_add:
+ case nir_intrinsic_ssbo_atomic_imin:
+ case nir_intrinsic_ssbo_atomic_umin:
+ case nir_intrinsic_ssbo_atomic_imax:
+ case nir_intrinsic_ssbo_atomic_umax:
+ case nir_intrinsic_ssbo_atomic_and:
+ case nir_intrinsic_ssbo_atomic_or:
+ case nir_intrinsic_ssbo_atomic_xor:
+ case nir_intrinsic_ssbo_atomic_exchange:
+ case nir_intrinsic_ssbo_atomic_comp_swap:
+ case nir_intrinsic_ssbo_atomic_fmin:
+ case nir_intrinsic_ssbo_atomic_fmax:
+ case nir_intrinsic_ssbo_atomic_fcomp_swap:
+ case nir_intrinsic_load_ssbo:
+ rewrite_src_with_bti(&b, bt, instr, &intrin->src[0],
+ IRIS_SURFACE_GROUP_SSBO);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static void
+iris_debug_recompile(struct iris_context *ice,
+ struct shader_info *info,
+ const struct brw_base_prog_key *key)
+{
+ struct iris_screen *screen = (struct iris_screen *) ice->ctx.screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
+ const struct brw_compiler *c = screen->compiler;
+
+ if (!info)
+ return;
+
+ c->shader_perf_log(&ice->dbg, "Recompiling %s shader for program %s: %s\n",
+ _mesa_shader_stage_to_string(info->stage),
+ info->name ? info->name : "(no identifier)",
+ info->label ? info->label : "");
+
+ const void *old_iris_key =
+ iris_find_previous_compile(ice, info->stage, key->program_string_id);
+
+ union brw_any_prog_key old_key;
+
+ switch (info->stage) {
+ case MESA_SHADER_VERTEX:
+ old_key.vs = iris_to_brw_vs_key(devinfo, old_iris_key);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ old_key.tcs = iris_to_brw_tcs_key(devinfo, old_iris_key);
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ old_key.tes = iris_to_brw_tes_key(devinfo, old_iris_key);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ old_key.gs = iris_to_brw_gs_key(devinfo, old_iris_key);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ old_key.wm = iris_to_brw_fs_key(devinfo, old_iris_key);
+ break;
+ case MESA_SHADER_COMPUTE:
+ old_key.cs = iris_to_brw_cs_key(devinfo, old_iris_key);
+ break;
+ default:
+ unreachable("invalid shader stage");
+ }
+
+ brw_debug_key_recompile(c, &ice->dbg, info->stage, &old_key.base, key);
+}
+
+/**
+ * Get the last enabled geometry pipeline stage.
+ *
+ * This stage is the one which will feed stream output and the rasterizer.
+ */
+static gl_shader_stage
+last_vue_stage(struct iris_context *ice)
+{
+ if (ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
+ return MESA_SHADER_GEOMETRY;
+
+ if (ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
+ return MESA_SHADER_TESS_EVAL;
+
+ return MESA_SHADER_VERTEX;
+}
+
/**
* Compile a vertex shader, and upload the assembly.
*/
static struct iris_compiled_shader *
iris_compile_vs(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_vs_prog_key *key)
+ const struct iris_vs_prog_key *key)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
- if (key->nr_userclip_plane_consts) {
+ if (key->vue.nr_userclip_plane_consts) {
nir_function_impl *impl = nir_shader_get_entrypoint(nir);
- nir_lower_clip_vs(nir, (1 << key->nr_userclip_plane_consts) - 1, true);
+ nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
+ true, false, NULL);
nir_lower_io_to_temporaries(nir, impl, true, false);
nir_lower_global_vars_to_local(nir);
nir_lower_vars_to_ssa(nir);
nir_shader_gather_info(nir, impl);
}
- if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
- prog_data->use_alt_mode = true;
+ prog_data->use_alt_mode = ish->use_alt_mode;
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
- num_system_values, num_cbufs);
+ struct iris_binding_table bt;
+ iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
+ num_system_values, num_cbufs);
+
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
brw_compute_vue_map(devinfo,
&vue_prog_data->vue_map, nir->info.outputs_written,
- nir->info.separate_shader);
+ nir->info.separate_shader, /* pos_slots */ 1);
- /* Don't tell the backend about our clip plane constants, we've already
- * lowered them in NIR and we don't want it doing it again.
- */
- struct brw_vs_prog_key key_no_ucp = *key;
- key_no_ucp.nr_userclip_plane_consts = 0;
+ struct brw_vs_prog_key brw_key = iris_to_brw_vs_key(devinfo, key);
char *error_str = NULL;
const unsigned *program =
- brw_compile_vs(compiler, &ice->dbg, mem_ctx, &key_no_ucp, vs_prog_data,
- nir, -1, &error_str);
+ brw_compile_vs(compiler, &ice->dbg, mem_ctx, &brw_key, vs_prog_data,
+ nir, -1, NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile vertex shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
+ if (ish->compiled_once) {
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
+ } else {
+ ish->compiled_once = true;
+ }
+
uint32_t *so_decls =
ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
iris_upload_shader(ice, IRIS_CACHE_VS, sizeof(*key), key, program,
prog_data, so_decls, system_values, num_system_values,
- num_cbufs);
+ num_cbufs, &bt);
- if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling vertex shader\n");
- } else {
- ish->compiled_once = true;
- }
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
ralloc_free(mem_ctx);
return shader;
static void
iris_update_compiled_vs(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_VERTEX];
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
- ice->vtbl.populate_vs_key(ice, &ish->nir->info, &key);
+ struct iris_vs_prog_key key = { KEY_ID(vue.base) };
+ ice->vtbl.populate_vs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_VS];
struct iris_compiled_shader *shader =
iris_find_cached_shader(ice, IRIS_CACHE_VS, sizeof(key), &key);
+ if (!shader)
+ shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_vs(ice, ish, &key);
IRIS_DIRTY_BINDINGS_VS |
IRIS_DIRTY_CONSTANTS_VS |
IRIS_DIRTY_VF_SGVS;
+ shs->sysvals_need_upload = true;
+
+ const struct brw_vs_prog_data *vs_prog_data =
+ (void *) shader->prog_data;
+ const bool uses_draw_params = vs_prog_data->uses_firstvertex ||
+ vs_prog_data->uses_baseinstance;
+ const bool uses_derived_draw_params = vs_prog_data->uses_drawid ||
+ vs_prog_data->uses_is_indexed_draw;
+ const bool needs_sgvs_element = uses_draw_params ||
+ vs_prog_data->uses_instanceid ||
+ vs_prog_data->uses_vertexid;
+
+ if (ice->state.vs_uses_draw_params != uses_draw_params ||
+ ice->state.vs_uses_derived_draw_params != uses_derived_draw_params ||
+ ice->state.vs_needs_edge_flag != ish->needs_edge_flag) {
+ ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS |
+ IRIS_DIRTY_VERTEX_ELEMENTS;
+ }
+ ice->state.vs_uses_draw_params = uses_draw_params;
+ ice->state.vs_uses_derived_draw_params = uses_derived_draw_params;
+ ice->state.vs_needs_sgvs_element = needs_sgvs_element;
+ ice->state.vs_needs_edge_flag = ish->needs_edge_flag;
}
}
static struct iris_compiled_shader *
iris_compile_tcs(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_tcs_prog_key *key)
+ const struct iris_tcs_prog_key *key)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
const struct nir_shader_compiler_options *options =
compiler->glsl_compiler_options[MESA_SHADER_TESS_CTRL].NirOptions;
- const struct gen_device_info *devinfo = &screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_tcs_prog_data *tcs_prog_data =
rzalloc(mem_ctx, struct brw_tcs_prog_data);
struct brw_vue_prog_data *vue_prog_data = &tcs_prog_data->base;
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
+ const struct gen_device_info *devinfo = &screen->devinfo;
enum brw_param_builtin *system_values = NULL;
unsigned num_system_values = 0;
- unsigned num_cbufs;
+ unsigned num_cbufs = 0;
nir_shader *nir;
+ struct iris_binding_table bt;
+
+ struct brw_tcs_prog_key brw_key = iris_to_brw_tcs_key(devinfo, key);
+
if (ish) {
nir = nir_shader_clone(mem_ctx, ish->nir);
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
- num_system_values, num_cbufs);
+ iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
+ num_system_values, num_cbufs);
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
} else {
- nir = brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, key);
+ nir =
+ brw_nir_create_passthrough_tcs(mem_ctx, compiler, options, &brw_key);
/* Reserve space for passing the default tess levels as constants. */
- prog_data->param = rzalloc_array(mem_ctx, uint32_t, 8);
- prog_data->nr_params = 8;
+ num_cbufs = 1;
+ num_system_values = 8;
+ system_values =
+ rzalloc_array(mem_ctx, enum brw_param_builtin, num_system_values);
+ prog_data->param = rzalloc_array(mem_ctx, uint32_t, num_system_values);
+ prog_data->nr_params = num_system_values;
+
+ if (key->tes_primitive_mode == GL_QUADS) {
+ for (int i = 0; i < 4; i++)
+ system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
+
+ system_values[3] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
+ system_values[2] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y;
+ } else if (key->tes_primitive_mode == GL_TRIANGLES) {
+ for (int i = 0; i < 3; i++)
+ system_values[7 - i] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X + i;
+
+ system_values[4] = BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X;
+ } else {
+ assert(key->tes_primitive_mode == GL_ISOLINES);
+ system_values[7] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_Y;
+ system_values[6] = BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
+ }
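+
+ /* Resulting layout for GL_QUADS (illustrative): system_values[7], [6],
+ * [5], [4] hold TESS_LEVEL_OUTER_{X,Y,Z,W} and [3], [2] hold
+ * TESS_LEVEL_INNER_{X,Y}; slots [1] and [0] stay zero-initialized
+ * padding.
+ */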
+
+ /* Manually set up the TCS binding table. */
+ memset(&bt, 0, sizeof(bt));
+ bt.sizes[IRIS_SURFACE_GROUP_UBO] = 1;
+ bt.used_mask[IRIS_SURFACE_GROUP_UBO] = 1;
+ bt.size_bytes = 4;
+
prog_data->ubo_ranges[0].length = 1;
}
char *error_str = NULL;
const unsigned *program =
- brw_compile_tcs(compiler, &ice->dbg, mem_ctx, key, tcs_prog_data, nir,
- -1, &error_str);
+ brw_compile_tcs(compiler, &ice->dbg, mem_ctx, &brw_key, tcs_prog_data,
+ nir, -1, NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile control shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
- struct iris_compiled_shader *shader =
- iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
- prog_data, NULL, system_values, num_system_values,
- num_cbufs);
-
if (ish) {
if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling tessellation control shader\n");
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
} else {
ish->compiled_once = true;
}
}
+ struct iris_compiled_shader *shader =
+ iris_upload_shader(ice, IRIS_CACHE_TCS, sizeof(*key), key, program,
+ prog_data, NULL, system_values, num_system_values,
+ num_cbufs, &bt);
+
+ if (ish)
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
+
ralloc_free(mem_ctx);
return shader;
}
static void
iris_update_compiled_tcs(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
struct iris_uncompiled_shader *tcs =
ice->shaders.uncompiled[MESA_SHADER_TESS_CTRL];
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
+ const struct brw_compiler *compiler = screen->compiler;
const struct gen_device_info *devinfo = &screen->devinfo;
const struct shader_info *tes_info =
iris_get_shader_info(ice, MESA_SHADER_TESS_EVAL);
- struct brw_tcs_prog_key key = {
- KEY_INIT_NO_ID(devinfo->gen),
- .program_string_id = tcs ? tcs->program_id : 0,
+ struct iris_tcs_prog_key key = {
+ .vue.base.program_string_id = tcs ? tcs->program_id : 0,
.tes_primitive_mode = tes_info->tess.primitive_mode,
- .input_vertices = ice->state.vertices_per_patch,
+ .input_vertices =
+ !tcs || compiler->use_tcs_8_patch ? ice->state.vertices_per_patch : 0,
+ .quads_workaround = devinfo->gen < 9 &&
+ tes_info->tess.primitive_mode == GL_QUADS &&
+ tes_info->tess.spacing == TESS_SPACING_EQUAL,
};
get_unified_tess_slots(ice, &key.outputs_written,
&key.patch_outputs_written);
struct iris_compiled_shader *shader =
iris_find_cached_shader(ice, IRIS_CACHE_TCS, sizeof(key), &key);
+ if (tcs && !shader)
+ shader = iris_disk_cache_retrieve(ice, tcs, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_tcs(ice, tcs, &key);
ice->state.dirty |= IRIS_DIRTY_TCS |
IRIS_DIRTY_BINDINGS_TCS |
IRIS_DIRTY_CONSTANTS_TCS;
+ shs->sysvals_need_upload = true;
}
}
static struct iris_compiled_shader *
iris_compile_tes(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_tes_prog_key *key)
+ const struct iris_tes_prog_key *key)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
- const struct gen_device_info *devinfo = &screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_tes_prog_data *tes_prog_data =
rzalloc(mem_ctx, struct brw_tes_prog_data);
struct brw_vue_prog_data *vue_prog_data = &tes_prog_data->base;
struct brw_stage_prog_data *prog_data = &vue_prog_data->base;
enum brw_param_builtin *system_values;
+ const struct gen_device_info *devinfo = &screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
+ if (key->vue.nr_userclip_plane_consts) {
+ nir_function_impl *impl = nir_shader_get_entrypoint(nir);
+ nir_lower_clip_vs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
+ true, false, NULL);
+ nir_lower_io_to_temporaries(nir, impl, true, false);
+ nir_lower_global_vars_to_local(nir);
+ nir_lower_vars_to_ssa(nir);
+ nir_shader_gather_info(nir, impl);
+ }
+
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
- num_system_values, num_cbufs);
+ struct iris_binding_table bt;
+ iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
+ num_system_values, num_cbufs);
+
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
struct brw_vue_map input_vue_map;
brw_compute_tess_vue_map(&input_vue_map, key->inputs_read,
key->patch_inputs_read);
+ struct brw_tes_prog_key brw_key = iris_to_brw_tes_key(devinfo, key);
+
char *error_str = NULL;
const unsigned *program =
- brw_compile_tes(compiler, &ice->dbg, mem_ctx, key, &input_vue_map,
- tes_prog_data, nir, NULL, -1, &error_str);
+ brw_compile_tes(compiler, &ice->dbg, mem_ctx, &brw_key, &input_vue_map,
+ tes_prog_data, nir, -1, NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile evaluation shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
+ if (ish->compiled_once) {
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
+ } else {
+ ish->compiled_once = true;
+ }
+
uint32_t *so_decls =
ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
iris_upload_shader(ice, IRIS_CACHE_TES, sizeof(*key), key, program,
prog_data, so_decls, system_values, num_system_values,
- num_cbufs);
+ num_cbufs, &bt);
- if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling tessellation evaluation shader\n");
- } else {
- ish->compiled_once = true;
- }
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
ralloc_free(mem_ctx);
return shader;
static void
iris_update_compiled_tes(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL];
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_tes_prog_key key = { KEY_INIT(devinfo->gen) };
+ struct iris_tes_prog_key key = { KEY_ID(vue.base) };
get_unified_tess_slots(ice, &key.inputs_read, &key.patch_inputs_read);
- ice->vtbl.populate_tes_key(ice, &key);
+ ice->vtbl.populate_tes_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_TES];
struct iris_compiled_shader *shader =
iris_find_cached_shader(ice, IRIS_CACHE_TES, sizeof(key), &key);
+ if (!shader)
+ shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_tes(ice, ish, &key);
ice->state.dirty |= IRIS_DIRTY_TES |
IRIS_DIRTY_BINDINGS_TES |
IRIS_DIRTY_CONSTANTS_TES;
+ shs->sysvals_need_upload = true;
+ }
+
+ /* TODO: Could compare and avoid flagging this. */
+ const struct shader_info *tes_info = &ish->nir->info;
+ if (tes_info->system_values_read & (1ull << SYSTEM_VALUE_VERTICES_IN)) {
+ ice->state.dirty |= IRIS_DIRTY_CONSTANTS_TES;
+ ice->state.shaders[MESA_SHADER_TESS_EVAL].sysvals_need_upload = true;
}
}
static struct iris_compiled_shader *
iris_compile_gs(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_gs_prog_key *key)
+ const struct iris_gs_prog_key *key)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
+ if (key->vue.nr_userclip_plane_consts) {
+ nir_function_impl *impl = nir_shader_get_entrypoint(nir);
+ nir_lower_clip_gs(nir, (1 << key->vue.nr_userclip_plane_consts) - 1,
+ false, NULL);
+ nir_lower_io_to_temporaries(nir, impl, true, false);
+ nir_lower_global_vars_to_local(nir);
+ nir_lower_vars_to_ssa(nir);
+ nir_shader_gather_info(nir, impl);
+ }
+
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data, 0,
- num_system_values, num_cbufs);
+ struct iris_binding_table bt;
+ iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
+ num_system_values, num_cbufs);
+
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
brw_compute_vue_map(devinfo,
&vue_prog_data->vue_map, nir->info.outputs_written,
- nir->info.separate_shader);
+ nir->info.separate_shader, /* pos_slots */ 1);
+
+ struct brw_gs_prog_key brw_key = iris_to_brw_gs_key(devinfo, key);
char *error_str = NULL;
const unsigned *program =
- brw_compile_gs(compiler, &ice->dbg, mem_ctx, key, gs_prog_data, nir,
- NULL, -1, &error_str);
+ brw_compile_gs(compiler, &ice->dbg, mem_ctx, &brw_key, gs_prog_data,
+ nir, NULL, -1, NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile geometry shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
+ if (ish->compiled_once) {
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
+ } else {
+ ish->compiled_once = true;
+ }
+
uint32_t *so_decls =
ice->vtbl.create_so_decl_list(&ish->stream_output,
&vue_prog_data->vue_map);
struct iris_compiled_shader *shader =
iris_upload_shader(ice, IRIS_CACHE_GS, sizeof(*key), key, program,
prog_data, so_decls, system_values, num_system_values,
- num_cbufs);
+ num_cbufs, &bt);
- if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling geometry shader\n");
- } else {
- ish->compiled_once = true;
- }
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
ralloc_free(mem_ctx);
return shader;
static void
iris_update_compiled_gs(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_GEOMETRY];
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_GS];
struct iris_compiled_shader *shader = NULL;
if (ish) {
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };
- ice->vtbl.populate_gs_key(ice, &key);
+ struct iris_gs_prog_key key = { KEY_ID(vue.base) };
+ ice->vtbl.populate_gs_key(ice, &ish->nir->info, last_vue_stage(ice), &key);
shader =
iris_find_cached_shader(ice, IRIS_CACHE_GS, sizeof(key), &key);
+ if (!shader)
+ shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_gs(ice, ish, &key);
}
ice->state.dirty |= IRIS_DIRTY_GS |
IRIS_DIRTY_BINDINGS_GS |
IRIS_DIRTY_CONSTANTS_GS;
+ shs->sysvals_need_upload = true;
}
}
static struct iris_compiled_shader *
iris_compile_fs(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_wm_prog_key *key,
+ const struct iris_fs_prog_key *key,
struct brw_vue_map *vue_map)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
- const struct gen_device_info *devinfo = &screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_wm_prog_data *fs_prog_data =
rzalloc(mem_ctx, struct brw_wm_prog_data);
struct brw_stage_prog_data *prog_data = &fs_prog_data->base;
enum brw_param_builtin *system_values;
+ const struct gen_device_info *devinfo = &screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
- if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
- prog_data->use_alt_mode = true;
+ prog_data->use_alt_mode = ish->use_alt_mode;
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data,
- MAX2(key->nr_color_regions, 1),
- num_system_values, num_cbufs);
+ /* Lower output variables to load_output intrinsics before setting up
+ * binding tables, so iris_setup_binding_table can map any load_output
+ * intrinsics to IRIS_SURFACE_GROUP_RENDER_TARGET_READ on Gen8 for
+ * non-coherent framebuffer fetches.
+ */
+ brw_nir_lower_fs_outputs(nir);
+
+ /* On Gen11+, shader RT write messages have a "Null Render Target" bit
+ * and do not need a binding table entry with a null surface. Earlier
+ * generations need an entry for a null surface.
+ */
+ int null_rts = devinfo->gen < 11 ? 1 : 0;
+
+ struct iris_binding_table bt;
+ iris_setup_binding_table(devinfo, nir, &bt,
+ MAX2(key->nr_color_regions, null_rts),
+ num_system_values, num_cbufs);
+
+ brw_nir_analyze_ubo_ranges(compiler, nir, NULL, prog_data->ubo_ranges);
+
+ struct brw_wm_prog_key brw_key = iris_to_brw_fs_key(devinfo, key);
+
char *error_str = NULL;
const unsigned *program =
- brw_compile_fs(compiler, &ice->dbg, mem_ctx, key, fs_prog_data,
- nir, NULL, -1, -1, -1, true, false, vue_map, &error_str);
+ brw_compile_fs(compiler, &ice->dbg, mem_ctx, &brw_key, fs_prog_data,
+ nir, -1, -1, -1, true, false, vue_map,
+ NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile fragment shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
- struct iris_compiled_shader *shader =
- iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
- prog_data, NULL, system_values, num_system_values,
- num_cbufs);
-
if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling fragment shader\n");
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
} else {
ish->compiled_once = true;
}
+ struct iris_compiled_shader *shader =
+ iris_upload_shader(ice, IRIS_CACHE_FS, sizeof(*key), key, program,
+ prog_data, NULL, system_values, num_system_values,
+ num_cbufs, &bt);
+
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
+
ralloc_free(mem_ctx);
return shader;
}
static void
iris_update_compiled_fs(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_FRAGMENT];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_wm_prog_key key = { KEY_INIT(devinfo->gen) };
- ice->vtbl.populate_fs_key(ice, &key);
+ struct iris_fs_prog_key key = { KEY_ID(base) };
+ ice->vtbl.populate_fs_key(ice, &ish->nir->info, &key);
if (ish->nos & (1ull << IRIS_NOS_LAST_VUE_MAP))
key.input_slots_valid = ice->shaders.last_vue_map->slots_valid;
struct iris_compiled_shader *shader =
iris_find_cached_shader(ice, IRIS_CACHE_FS, sizeof(key), &key);
+ if (!shader)
+ shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_fs(ice, ish, &key, ice->shaders.last_vue_map);
IRIS_DIRTY_WM |
IRIS_DIRTY_CLIP |
IRIS_DIRTY_SBE;
+ shs->sysvals_need_upload = true;
}
}
-/**
- * Get the compiled shader for the last enabled geometry stage.
- *
- * This stage is the one which will feed stream output and the rasterizer.
- */
-static gl_shader_stage
-last_vue_stage(struct iris_context *ice)
-{
- if (ice->shaders.prog[MESA_SHADER_GEOMETRY])
- return MESA_SHADER_GEOMETRY;
-
- if (ice->shaders.prog[MESA_SHADER_TESS_EVAL])
- return MESA_SHADER_TESS_EVAL;
-
- return MESA_SHADER_VERTEX;
-}
-
/**
* Update the last enabled stage's VUE map.
*
(old_map ? old_map->slots_valid : 0ull) ^ vue_map->slots_valid;
if (changed_slots & VARYING_BIT_VIEWPORT) {
- // XXX: could use ctx->Const.MaxViewports for old API efficiency
ice->state.num_viewports =
(vue_map->slots_valid & VARYING_BIT_VIEWPORT) ? IRIS_MAX_VIEWPORTS : 1;
ice->state.dirty |= IRIS_DIRTY_CLIP |
IRIS_DIRTY_SCISSOR_RECT |
IRIS_DIRTY_UNCOMPILED_FS |
ice->state.dirty_for_nos[IRIS_NOS_LAST_VUE_MAP];
- // XXX: CC_VIEWPORT?
}
if (changed_slots || (old_map && old_map->separate != vue_map->separate)) {
ice->shaders.last_vue_map = &vue_prog_data->vue_map;
}
+static void
+iris_update_pull_constant_descriptors(struct iris_context *ice,
+ gl_shader_stage stage)
+{
+ struct iris_compiled_shader *shader = ice->shaders.prog[stage];
+
+ if (!shader || !shader->prog_data->has_ubo_pull)
+ return;
+
+ struct iris_shader_state *shs = &ice->state.shaders[stage];
+ bool any_new_descriptors =
+ shader->num_system_values > 0 && shs->sysvals_need_upload;
+
+ unsigned bound_cbufs = shs->bound_cbufs;
+
+ while (bound_cbufs) {
+ const int i = u_bit_scan(&bound_cbufs);
+ struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
+ struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
+ if (!surf_state->res && cbuf->buffer) {
+ iris_upload_ubo_ssbo_surf_state(ice, cbuf, surf_state, false);
+ any_new_descriptors = true;
+ }
+ }
+
+ if (any_new_descriptors)
+ ice->state.dirty |= IRIS_DIRTY_BINDINGS_VS << stage;
+}
+
/**
* Get the prog_data for a given stage, or NULL if the stage is disabled.
*/
if (dirty & IRIS_DIRTY_UNCOMPILED_GS)
iris_update_compiled_gs(ice);
+ if (dirty & (IRIS_DIRTY_UNCOMPILED_GS | IRIS_DIRTY_UNCOMPILED_TES)) {
+ const struct iris_compiled_shader *gs =
+ ice->shaders.prog[MESA_SHADER_GEOMETRY];
+ const struct iris_compiled_shader *tes =
+ ice->shaders.prog[MESA_SHADER_TESS_EVAL];
+
+ bool points_or_lines = false;
+
+ if (gs) {
+ const struct brw_gs_prog_data *gs_prog_data = (void *) gs->prog_data;
+ points_or_lines =
+ gs_prog_data->output_topology == _3DPRIM_POINTLIST ||
+ gs_prog_data->output_topology == _3DPRIM_LINESTRIP;
+ } else if (tes) {
+ const struct brw_tes_prog_data *tes_data = (void *) tes->prog_data;
+ points_or_lines =
+ tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_LINE ||
+ tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
+ }
+
+ if (ice->shaders.output_topology_is_points_or_lines != points_or_lines) {
+ /* Outbound to XY Clip enables */
+ ice->shaders.output_topology_is_points_or_lines = points_or_lines;
+ ice->state.dirty |= IRIS_DIRTY_CLIP;
+ }
+ }
+
gl_shader_stage last_stage = last_vue_stage(ice);
struct iris_compiled_shader *shader = ice->shaders.prog[last_stage];
struct iris_uncompiled_shader *ish = ice->shaders.uncompiled[last_stage];
struct iris_stream_output_target *so =
(void *) ice->state.so_target[i];
if (so)
- so->stride = ish->stream_output.stride[i];
+ so->stride = ish->stream_output.stride[i] * sizeof(uint32_t);
}
}
if (dirty & IRIS_DIRTY_UNCOMPILED_FS)
iris_update_compiled_fs(ice);
- // ...
/* Changing shader interfaces may require a URB configuration. */
if (!(dirty & IRIS_DIRTY_URB)) {
}
}
}
+
+ for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+ if (ice->state.dirty & (IRIS_DIRTY_CONSTANTS_VS << i))
+ iris_update_pull_constant_descriptors(ice, i);
+ }
}
static struct iris_compiled_shader *
iris_compile_cs(struct iris_context *ice,
struct iris_uncompiled_shader *ish,
- const struct brw_cs_prog_key *key)
+ const struct iris_cs_prog_key *key)
{
struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
const struct brw_compiler *compiler = screen->compiler;
- const struct gen_device_info *devinfo = &screen->devinfo;
void *mem_ctx = ralloc_context(NULL);
struct brw_cs_prog_data *cs_prog_data =
rzalloc(mem_ctx, struct brw_cs_prog_data);
struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
enum brw_param_builtin *system_values;
+ const struct gen_device_info *devinfo = &screen->devinfo;
unsigned num_system_values;
unsigned num_cbufs;
nir_shader *nir = nir_shader_clone(mem_ctx, ish->nir);
- cs_prog_data->binding_table.work_groups_start = 0;
-
- prog_data->total_shared = nir->info.cs.shared_size;
-
iris_setup_uniforms(compiler, mem_ctx, nir, prog_data, &system_values,
&num_system_values, &num_cbufs);
- assign_common_binding_table_offsets(devinfo, nir, prog_data, 1,
- num_system_values, num_cbufs);
+ struct iris_binding_table bt;
+ iris_setup_binding_table(devinfo, nir, &bt, /* num_render_targets */ 0,
+ num_system_values, num_cbufs);
+
+ struct brw_cs_prog_key brw_key = iris_to_brw_cs_key(devinfo, key);
char *error_str = NULL;
const unsigned *program =
- brw_compile_cs(compiler, &ice->dbg, mem_ctx, key, cs_prog_data,
- nir, -1, &error_str);
+ brw_compile_cs(compiler, &ice->dbg, mem_ctx, &brw_key, cs_prog_data,
+ nir, -1, NULL, &error_str);
if (program == NULL) {
dbg_printf("Failed to compile compute shader: %s\n", error_str);
ralloc_free(mem_ctx);
return false;
}
- struct iris_compiled_shader *shader =
- iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
- prog_data, NULL, system_values, num_system_values,
- num_cbufs);
-
if (ish->compiled_once) {
- perf_debug(&ice->dbg, "Recompiling compute shader\n");
+ iris_debug_recompile(ice, &nir->info, &brw_key.base);
} else {
ish->compiled_once = true;
}
+ struct iris_compiled_shader *shader =
+ iris_upload_shader(ice, IRIS_CACHE_CS, sizeof(*key), key, program,
+ prog_data, NULL, system_values, num_system_values,
+ num_cbufs, &bt);
+
+ iris_disk_cache_store(screen->disk_cache, ish, shader, key, sizeof(*key));
+
ralloc_free(mem_ctx);
return shader;
}
-void
-iris_update_compiled_compute_shader(struct iris_context *ice)
+static void
+iris_update_compiled_cs(struct iris_context *ice)
{
+ struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
struct iris_uncompiled_shader *ish =
ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
- struct iris_screen *screen = (struct iris_screen *)ice->ctx.screen;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };
+ struct iris_cs_prog_key key = { KEY_ID(base) };
ice->vtbl.populate_cs_key(ice, &key);
struct iris_compiled_shader *old = ice->shaders.prog[IRIS_CACHE_CS];
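+ /* Look for a usable variant: check the in-memory shader cache first,
+  * then the on-disk cache, and only compile from scratch as a last
+  * resort.
+  */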
struct iris_compiled_shader *shader =
iris_find_cached_shader(ice, IRIS_CACHE_CS, sizeof(key), &key);
+ if (!shader)
+ shader = iris_disk_cache_retrieve(ice, ish, &key, sizeof(key));
+
if (!shader)
shader = iris_compile_cs(ice, ish, &key);
if (old != shader) {
   ice->shaders.prog[IRIS_CACHE_CS] = shader;
   ice->state.dirty |= IRIS_DIRTY_CS |
                       IRIS_DIRTY_BINDINGS_CS |
                       IRIS_DIRTY_CONSTANTS_CS;
+ shs->sysvals_need_upload = true;
}
}
+void
+iris_update_compiled_compute_shader(struct iris_context *ice)
+{
+ if (ice->state.dirty & IRIS_DIRTY_UNCOMPILED_CS)
+ iris_update_compiled_cs(ice);
+
+ if (ice->state.dirty & IRIS_DIRTY_CONSTANTS_CS)
+ iris_update_pull_constant_descriptors(ice, MESA_SHADER_COMPUTE);
+}
+
void
iris_fill_cs_push_const_buffer(struct brw_cs_prog_data *cs_prog_data,
uint32_t *dst)
{
- struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
assert(cs_prog_data->push.total.size > 0);
assert(cs_prog_data->push.cross_thread.size == 0);
assert(cs_prog_data->push.per_thread.dwords == 1);
- assert(prog_data->param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
+ assert(cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
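+ /* Each thread's push constants occupy one 32-byte (8 DWord) slot, of
+  * which only the first DWord is used: it holds that thread's subgroup
+  * ID, as the asserts above guarantee.
+  */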
for (unsigned t = 0; t < cs_prog_data->threads; t++)
dst[8 * t] = t;
}
* as well. This is not currently documented at all.
*
* This hack is no longer necessary on Gen11+.
+ *
+ * For Gen11+, scratch space allocation is based on the number of threads
+ * in the base configuration.
*/
unsigned subslice_total = screen->subslice_total;
- if (devinfo->gen < 11)
+ if (devinfo->gen >= 12)
+ subslice_total = devinfo->num_subslices[0];
+ else if (devinfo->gen == 11)
+ subslice_total = 8;
+ else if (devinfo->gen < 11)
subslice_total = 4 * devinfo->num_slices;
assert(subslice_total >= screen->subslice_total);
if (!*bop) {
unsigned scratch_ids_per_subslice = devinfo->max_cs_threads;
+
+ if (devinfo->gen >= 12) {
+ /* Same as ICL below, but with 16 EUs. */
+ scratch_ids_per_subslice = 16 * 8;
+ } else if (devinfo->gen == 11) {
+ /* The MEDIA_VFE_STATE docs say:
+ *
+ * "Starting with this configuration, the Maximum Number of
+ * Threads must be set to (#EU * 8) for GPGPU dispatches.
+ *
+ * Although there are only 7 threads per EU in the configuration,
+ * the FFTID is calculated as if there are 8 threads per EU,
+ * which in turn requires a larger amount of Scratch Space to be
+ * allocated by the driver."
+ */
+ scratch_ids_per_subslice = 8 * 8;
+ }
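+ /* Worked example: on Gen11 this gives 8 EUs * 8 = 64 scratch IDs per
+  * subslice, even though at most 7 * 8 = 56 threads can actually be in
+  * flight, because the FFTID is computed as if each EU ran 8 threads.
+  */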
+
uint32_t max_threads[] = {
[MESA_SHADER_VERTEX] = devinfo->max_vs_threads,
[MESA_SHADER_TESS_CTRL] = devinfo->max_tcs_threads,
nir_shader *nir,
const struct pipe_stream_output_info *so_info)
{
+ struct iris_context *ice = (void *)ctx;
struct iris_screen *screen = (struct iris_screen *)ctx->screen;
const struct gen_device_info *devinfo = &screen->devinfo;
if (!ish)
return NULL;
- nir = brw_preprocess_nir(screen->compiler, nir);
+ NIR_PASS(ish->needs_edge_flag, nir, iris_fix_edge_flags);
- NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo);
+ brw_preprocess_nir(screen->compiler, nir, NULL);
+
+ NIR_PASS_V(nir, brw_nir_lower_image_load_store, devinfo,
+ &ish->uses_atomic_load_store);
NIR_PASS_V(nir, iris_lower_storage_image_derefs);
+ nir_sweep(nir);
+
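+ /* If the NIR has a constant data section (e.g. large constant arrays
+  * pulled out of the shader by nir_opt_large_constants), upload it to a
+  * buffer and build a surface state so it can be bound like a UBO.
+  */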
+ if (nir->constant_data_size > 0) {
+ unsigned data_offset;
+ u_upload_data(ice->shaders.uploader, 0, nir->constant_data_size,
+ 32, nir->constant_data, &data_offset, &ish->const_data);
+
+ struct pipe_shader_buffer psb = {
+ .buffer = ish->const_data,
+ .buffer_offset = data_offset,
+ .buffer_size = nir->constant_data_size,
+ };
+ iris_upload_ubo_ssbo_surf_state(ice, &psb, &ish->const_data_state, false);
+ }
+
ish->program_id = get_new_program_id(screen);
ish->nir = nir;
if (so_info) {
update_so_info(&ish->stream_output, nir->info.outputs_written);
}
+ /* Save this now before potentially dropping nir->info.name */
+ if (nir->info.name && strncmp(nir->info.name, "ARB", 3) == 0)
+ ish->use_alt_mode = true;
+
+ if (screen->disk_cache) {
+ /* Serialize the NIR to a binary blob that we can hash for the disk
+ * cache. Drop unnecessary information (like variable names)
+ * so the serialized NIR is smaller, and also to let us detect more
+ * isomorphic shaders when hashing, increasing cache hits.
+ */
+ struct blob blob;
+ blob_init(&blob);
+ nir_serialize(&blob, nir, true);
+ _mesa_sha1_compute(blob.data, blob.size, ish->nir_sha1);
+ blob_finish(&blob);
+ }
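+ /* The nir_sha1 computed above is later combined with each variant's
+  * prog key to form the lookup key for iris_disk_cache_store() and
+  * iris_disk_cache_retrieve().
+  */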
+
return ish;
}
iris_create_shader_state(struct pipe_context *ctx,
const struct pipe_shader_state *state)
{
- assert(state->type == PIPE_SHADER_IR_NIR);
+ struct nir_shader *nir;
- return iris_create_uncompiled_shader(ctx, state->ir.nir,
- &state->stream_output);
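+ /* Some frontends may still hand us TGSI; translate it up front so the
+  * rest of the driver only ever deals in NIR.
+  */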
+ if (state->type == PIPE_SHADER_IR_TGSI)
+ nir = tgsi_to_nir(state->tokens, ctx->screen);
+ else
+ nir = state->ir.nir;
+
+ return iris_create_uncompiled_shader(ctx, nir, &state->stream_output);
}
static void *
ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
if (screen->precompile) {
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_vs_prog_key key = { KEY_INIT(devinfo->gen) };
+ struct iris_vs_prog_key key = { KEY_ID(vue.base) };
- iris_compile_vs(ice, ish, &key);
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_vs(ice, ish, &key);
}
return ish;
{
struct iris_context *ice = (void *) ctx;
struct iris_screen *screen = (void *) ctx->screen;
+ const struct brw_compiler *compiler = screen->compiler;
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
struct shader_info *info = &ish->nir->info;
- // XXX: NOS?
-
if (screen->precompile) {
const unsigned _GL_TRIANGLES = 0x0004;
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_tcs_prog_key key = {
- KEY_INIT(devinfo->gen),
+ struct iris_tcs_prog_key key = {
+ KEY_ID(vue.base),
// XXX: make sure the linker fills this out from the TES...
.tes_primitive_mode =
info->tess.primitive_mode ? info->tess.primitive_mode
                          : _GL_TRIANGLES,
.patch_outputs_written = info->patch_outputs_written,
};
- iris_compile_tcs(ice, ish, &key);
+ /* 8_PATCH mode needs the key to contain the input patch dimensionality.
+ * We don't have that information, so we randomly guess that the input
+ * and output patches are the same size. This is a bad guess, but we
+ * can't do much better.
+ */
+ if (compiler->use_tcs_8_patch)
+ key.input_vertices = info->tess.tcs_vertices_out;
+
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_tcs(ice, ish, &key);
}
return ish;
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
struct shader_info *info = &ish->nir->info;
- // XXX: NOS?
+ /* User clip planes */
+ if (ish->nir->info.clip_distance_array_size == 0)
+ ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
if (screen->precompile) {
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_tes_prog_key key = {
- KEY_INIT(devinfo->gen),
+ struct iris_tes_prog_key key = {
+ KEY_ID(vue.base),
// XXX: not ideal, need TCS output/TES input unification
.inputs_read = info->inputs_read,
.patch_inputs_read = info->patch_inputs_read,
};
- iris_compile_tes(ice, ish, &key);
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_tes(ice, ish, &key);
}
return ish;
struct iris_screen *screen = (void *) ctx->screen;
struct iris_uncompiled_shader *ish = iris_create_shader_state(ctx, state);
- // XXX: NOS?
+ /* User clip planes */
+ if (ish->nir->info.clip_distance_array_size == 0)
+ ish->nos |= (1ull << IRIS_NOS_RASTERIZER);
if (screen->precompile) {
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_gs_prog_key key = { KEY_INIT(devinfo->gen) };
+ struct iris_gs_prog_key key = { KEY_ID(vue.base) };
- iris_compile_gs(ice, ish, &key);
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_gs(ice, ish, &key);
}
return ish;
util_bitcount64(info->inputs_read & BRW_FS_VARYING_INPUT_MASK) <= 16;
const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_wm_prog_key key = {
- KEY_INIT(devinfo->gen),
+ struct iris_fs_prog_key key = {
+ KEY_ID(base),
.nr_color_regions = util_bitcount(color_outputs),
- .coherent_fb_fetch = true,
+ .coherent_fb_fetch = devinfo->gen >= 9,
.input_slots_valid =
can_rearrange_varyings ? 0 : info->inputs_read | VARYING_BIT_POS,
};
- iris_compile_fs(ice, ish, &key, NULL);
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_fs(ice, ish, &key, NULL);
}
return ish;
// XXX: disallow more than 64KB of shared variables
if (screen->precompile) {
- const struct gen_device_info *devinfo = &screen->devinfo;
- struct brw_cs_prog_key key = { KEY_INIT(devinfo->gen) };
+ struct iris_cs_prog_key key = { KEY_ID(base) };
- iris_compile_cs(ice, ish, &key);
+ if (!iris_disk_cache_retrieve(ice, ish, &key, sizeof(key)))
+ iris_compile_cs(ice, ish, &key);
}
return ish;
* Frees the iris_uncompiled_shader.
*/
static void
-iris_delete_shader_state(struct pipe_context *ctx, void *state)
+iris_delete_shader_state(struct pipe_context *ctx, void *state, gl_shader_stage stage)
{
struct iris_uncompiled_shader *ish = state;
+ struct iris_context *ice = (void *) ctx;
+
+ if (ice->shaders.uncompiled[stage] == ish) {
+ ice->shaders.uncompiled[stage] = NULL;
+ ice->state.dirty |= IRIS_DIRTY_UNCOMPILED_VS << stage;
+ }
+
+ if (ish->const_data) {
+ pipe_resource_reference(&ish->const_data, NULL);
+ pipe_resource_reference(&ish->const_data_state.res, NULL);
+ }
ralloc_free(ish->nir);
free(ish);
}
+static void
+iris_delete_vs_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_VERTEX);
+}
+
+static void
+iris_delete_tcs_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_CTRL);
+}
+
+static void
+iris_delete_tes_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_TESS_EVAL);
+}
+
+static void
+iris_delete_gs_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_GEOMETRY);
+}
+
+static void
+iris_delete_fs_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_FRAGMENT);
+}
+
+static void
+iris_delete_cs_state(struct pipe_context *ctx, void *state)
+{
+ iris_delete_shader_state(ctx, state, MESA_SHADER_COMPUTE);
+}
+
/**
* The pipe->bind_[stage]_state() driver hook.
*
* Updates dirty tracking to account for the shader's NOS.
*/
static void
-bind_state(struct iris_context *ice,
- struct iris_uncompiled_shader *ish,
- gl_shader_stage stage)
+bind_shader_state(struct iris_context *ice,
+ struct iris_uncompiled_shader *ish,
+ gl_shader_stage stage)
{
uint64_t dirty_bit = IRIS_DIRTY_UNCOMPILED_VS << stage;
const uint64_t nos = ish ? ish->nos : 0;
+ const struct shader_info *old_info = iris_get_shader_info(ice, stage);
+ const struct shader_info *new_info = ish ? &ish->nir->info : NULL;
+
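+ /* The number of SAMPLER_STATE entries that need uploading depends on
+  * the highest texture unit used, so flag the sampler states as dirty
+  * if that count changed.
+  */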
+ if ((old_info ? util_last_bit(old_info->textures_used) : 0) !=
+ (new_info ? util_last_bit(new_info->textures_used) : 0)) {
+ ice->state.dirty |= IRIS_DIRTY_SAMPLER_STATES_VS << stage;
+ }
+
ice->shaders.uncompiled[stage] = ish;
ice->state.dirty |= dirty_bit;
static void
iris_bind_vs_state(struct pipe_context *ctx, void *state)
{
- bind_state((void *) ctx, state, MESA_SHADER_VERTEX);
+ struct iris_context *ice = (struct iris_context *)ctx;
+ struct iris_uncompiled_shader *new_ish = state;
+
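+ /* Shaders that produce window-space positions bypass the viewport
+  * transform, so clip, raster, and viewport state all depend on this
+  * VS property.
+  */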
+ if (new_ish &&
+ ice->state.window_space_position !=
+ new_ish->nir->info.vs.window_space_position) {
+ ice->state.window_space_position =
+ new_ish->nir->info.vs.window_space_position;
+
+ ice->state.dirty |= IRIS_DIRTY_CLIP |
+ IRIS_DIRTY_RASTER |
+ IRIS_DIRTY_CC_VIEWPORT;
+ }
+
+ bind_shader_state((void *) ctx, state, MESA_SHADER_VERTEX);
}
static void
iris_bind_tcs_state(struct pipe_context *ctx, void *state)
{
- bind_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
+ bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_CTRL);
}
static void
if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_TESS_EVAL])
ice->state.dirty |= IRIS_DIRTY_URB;
- bind_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
+ bind_shader_state((void *) ctx, state, MESA_SHADER_TESS_EVAL);
}
static void
if (!!state != !!ice->shaders.uncompiled[MESA_SHADER_GEOMETRY])
ice->state.dirty |= IRIS_DIRTY_URB;
- bind_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
+ bind_shader_state((void *) ctx, state, MESA_SHADER_GEOMETRY);
}
static void
iris_bind_fs_state(struct pipe_context *ctx, void *state)
{
struct iris_context *ice = (struct iris_context *) ctx;
+ struct iris_screen *screen = (struct iris_screen *) ctx->screen;
+ const struct gen_device_info *devinfo = &screen->devinfo;
struct iris_uncompiled_shader *old_ish =
ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
struct iris_uncompiled_shader *new_ish = state;
(new_ish->nir->info.outputs_written & color_bits))
ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
- bind_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
+ if (devinfo->gen == 8)
+ ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
+
+ bind_shader_state((void *) ctx, state, MESA_SHADER_FRAGMENT);
}
static void
iris_bind_cs_state(struct pipe_context *ctx, void *state)
{
- bind_state((void *) ctx, state, MESA_SHADER_COMPUTE);
+ bind_shader_state((void *) ctx, state, MESA_SHADER_COMPUTE);
}
void
ctx->create_fs_state = iris_create_fs_state;
ctx->create_compute_state = iris_create_compute_state;
- ctx->delete_vs_state = iris_delete_shader_state;
- ctx->delete_tcs_state = iris_delete_shader_state;
- ctx->delete_tes_state = iris_delete_shader_state;
- ctx->delete_gs_state = iris_delete_shader_state;
- ctx->delete_fs_state = iris_delete_shader_state;
- ctx->delete_compute_state = iris_delete_shader_state;
+ ctx->delete_vs_state = iris_delete_vs_state;
+ ctx->delete_tcs_state = iris_delete_tcs_state;
+ ctx->delete_tes_state = iris_delete_tes_state;
+ ctx->delete_gs_state = iris_delete_gs_state;
+ ctx->delete_fs_state = iris_delete_fs_state;
+ ctx->delete_compute_state = iris_delete_cs_state;
ctx->bind_vs_state = iris_bind_vs_state;
ctx->bind_tcs_state = iris_bind_tcs_state;