if (pipeline->layout)
anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);
- /* All binding table offsets provided by apply_pipeline_layout() are
- * relative to the start of the bindint table (plus MAX_RTS for VS).
- */
- unsigned bias;
- switch (stage) {
- case MESA_SHADER_FRAGMENT:
- bias = MAX_RTS;
- break;
- case MESA_SHADER_COMPUTE:
- bias = 1;
- break;
- default:
- bias = 0;
- break;
- }
- prog_data->binding_table.size_bytes = 0;
- prog_data->binding_table.texture_start = bias;
- prog_data->binding_table.ubo_start = bias;
- prog_data->binding_table.ssbo_start = bias;
- prog_data->binding_table.image_start = bias;
-
/* Finish the optimization and compilation process */
if (nir->stage == MESA_SHADER_COMPUTE)
brw_nir_lower_shared(nir);
return nir;
}
+/* Record the base binding-table offsets in @prog_data.
+ *
+ * All binding-table offsets produced by apply_pipeline_layout() are
+ * relative to the start of the table, so every surface class
+ * (textures, UBOs, SSBOs, images) begins at the same @bias.  The bias
+ * reserves slots that precede the descriptor-set entries — callers in
+ * this patch pass MAX_RTS for the fragment stage, 1 for compute, and
+ * 0 for the other stages.  size_bytes is reset; presumably it is
+ * recomputed later by the backend compiler — TODO(review): confirm.
+ */
+static void
+anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
+{
+   prog_data->binding_table.size_bytes = 0;
+   prog_data->binding_table.texture_start = bias;
+   prog_data->binding_table.ubo_start = bias;
+   prog_data->binding_table.ssbo_start = bias;
+   prog_data->binding_table.image_start = bias;
+}
+
static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
gl_shader_stage stage,
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ anv_fill_binding_table(&prog_data.base.base, 0);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ anv_fill_binding_table(&prog_data.base.base, 0);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
}
}
+ anv_fill_binding_table(&prog_data.base, MAX_RTS);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ anv_fill_binding_table(&prog_data.base, 1);
+
prog_data.base.total_shared = nir->num_shared;
void *mem_ctx = ralloc_context(NULL);