+
+/**
+ * Sets up the starting offsets for the groups of binding table entries
+ * common to all pipeline stages.
+ *
+ * Unused groups are initialized to 0xd0d0d0d0 to make it obvious that they're
+ * unused, and also so that adding small offsets to them will still trip our
+ * asserts that surface indices are < BRW_MAX_SURFACES.
+ */
+uint32_t
+brw_assign_common_binding_table_offsets(const struct gen_device_info *devinfo,
+ const struct gl_program *prog,
+ struct brw_stage_prog_data *stage_prog_data,
+ uint32_t next_binding_table_offset)
+{
+ int num_textures = util_last_bit(prog->SamplersUsed);
+
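+   /* Textures always get a section; if no samplers are used, num_textures is
+    * zero and this reserves nothing.
+    */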
+ stage_prog_data->binding_table.texture_start = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+
+ if (prog->info.num_ubos) {
+ assert(prog->info.num_ubos <= BRW_MAX_UBO);
+ stage_prog_data->binding_table.ubo_start = next_binding_table_offset;
+ next_binding_table_offset += prog->info.num_ubos;
+ } else {
+ stage_prog_data->binding_table.ubo_start = 0xd0d0d0d0;
+ }
+
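+   /* Atomic counter buffers and SSBOs share a single combined section. */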
+ if (prog->info.num_ssbos || prog->info.num_abos) {
+ assert(prog->info.num_abos <= BRW_MAX_ABO);
+ assert(prog->info.num_ssbos <= BRW_MAX_SSBO);
+ stage_prog_data->binding_table.ssbo_start = next_binding_table_offset;
+ next_binding_table_offset += prog->info.num_abos + prog->info.num_ssbos;
+ } else {
+ stage_prog_data->binding_table.ssbo_start = 0xd0d0d0d0;
+ }
+
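+   /* One extra surface for the shader-time buffer, only when the
+    * INTEL_DEBUG=shader_time instrumentation is enabled.
+    */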
+ if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
+ stage_prog_data->binding_table.shader_time_start = next_binding_table_offset;
+ next_binding_table_offset++;
+ } else {
+ stage_prog_data->binding_table.shader_time_start = 0xd0d0d0d0;
+ }
+
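+   /* gather4: gen8+ can gather straight from the regular texture surfaces,
+    * so that section is re-used; older hardware gets a separate copy of the
+    * texture section.
+    */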
+ if (prog->info.uses_texture_gather) {
+ if (devinfo->gen >= 8) {
+ stage_prog_data->binding_table.gather_texture_start =
+ stage_prog_data->binding_table.texture_start;
+ } else {
+ stage_prog_data->binding_table.gather_texture_start = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+ }
+ } else {
+ stage_prog_data->binding_table.gather_texture_start = 0xd0d0d0d0;
+ }
+
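+   /* Shader image uniforms (image load/store) get their own section. */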
+ if (prog->info.num_images) {
+ stage_prog_data->binding_table.image_start = next_binding_table_offset;
+ next_binding_table_offset += prog->info.num_images;
+ } else {
+ stage_prog_data->binding_table.image_start = 0xd0d0d0d0;
+ }
+
+   /* Pull constant buffer: reserved unconditionally, but only actually used
+    * if the compile ends up pulling uniforms from memory.
+    */
+ stage_prog_data->binding_table.pull_constants_start = next_binding_table_offset;
+ next_binding_table_offset++;
+
+ /* Plane 0 is just the regular texture section */
+ stage_prog_data->binding_table.plane_start[0] = stage_prog_data->binding_table.texture_start;
+
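+   /* Planes 1 and 2 provide extra per-texture surfaces for sampling the
+    * additional planes of planar (e.g. YUV) formats.
+    */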
+ stage_prog_data->binding_table.plane_start[1] = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+
+ stage_prog_data->binding_table.plane_start[2] = next_binding_table_offset;
+ next_binding_table_offset += num_textures;
+
+ /* prog_data->base.binding_table.size will be set by brw_mark_surface_used. */
+
+ assert(next_binding_table_offset <= BRW_MAX_SURFACES);
+ return next_binding_table_offset;
+}
+
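+/**
+ * Serialize the program's NIR into prog->driver_cache_blob so that it can be
+ * stored (e.g. in the shader cache) and later rebuilt with
+ * brw_program_deserialize_nir().
+ */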
+void
+brw_program_serialize_nir(struct gl_context *ctx, struct gl_program *prog)
+{
+ struct blob writer;
+ blob_init(&writer);
+ nir_serialize(&writer, prog->nir);
+ prog->driver_cache_blob = ralloc_size(NULL, writer.size);
+ memcpy(prog->driver_cache_blob, writer.data, writer.size);
+ prog->driver_cache_blob_size = writer.size;
+ blob_finish(&writer);
+}
+
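+/**
+ * Rebuild prog->nir from driver_cache_blob if it isn't already present, then
+ * free the blob since it is no longer needed.
+ */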
+void
+brw_program_deserialize_nir(struct gl_context *ctx, struct gl_program *prog,
+ gl_shader_stage stage)
+{
+ if (!prog->nir) {
+ assert(prog->driver_cache_blob && prog->driver_cache_blob_size > 0);
+ const struct nir_shader_compiler_options *options =
+ ctx->Const.ShaderCompilerOptions[stage].NirOptions;
+ struct blob_reader reader;
+ blob_reader_init(&reader, prog->driver_cache_blob,
+ prog->driver_cache_blob_size);
+ prog->nir = nir_deserialize(NULL, options, &reader);
+ }
+
+ if (prog->driver_cache_blob) {
+ ralloc_free(prog->driver_cache_blob);
+ prog->driver_cache_blob = NULL;
+ prog->driver_cache_blob_size = 0;
+ }
+}