ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
- for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
- free(pipeline->bindings[s].surface_to_descriptor);
- free(pipeline->bindings[s].sampler_to_descriptor);
- }
-
anv_reloc_list_finish(&pipeline->batch_relocs,
pAllocator ? pAllocator : &device->alloc);
if (pipeline->blend_state.map)
const char *entrypoint,
gl_shader_stage stage,
const VkSpecializationInfo *spec_info,
- struct brw_stage_prog_data *prog_data)
+ struct brw_stage_prog_data *prog_data,
+ struct anv_pipeline_bind_map *map)
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
/* Set up dynamic offsets */
anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
- char surface_usage_mask[256], sampler_usage_mask[256];
- zero(surface_usage_mask);
- zero(sampler_usage_mask);
-
/* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
if (pipeline->layout)
- anv_nir_apply_pipeline_layout(pipeline, nir, prog_data);
-
- /* All binding table offsets provided by apply_pipeline_layout() are
- * relative to the start of the bindint table (plus MAX_RTS for VS).
- */
- unsigned bias;
- switch (stage) {
- case MESA_SHADER_FRAGMENT:
- bias = MAX_RTS;
- break;
- case MESA_SHADER_COMPUTE:
- bias = 1;
- break;
- default:
- bias = 0;
- break;
- }
- prog_data->binding_table.size_bytes = 0;
- prog_data->binding_table.texture_start = bias;
- prog_data->binding_table.ubo_start = bias;
- prog_data->binding_table.ssbo_start = bias;
- prog_data->binding_table.image_start = bias;
-
- /* Finish the optimization and compilation process */
- if (nir->stage != MESA_SHADER_VERTEX &&
- nir->stage != MESA_SHADER_TESS_CTRL &&
- nir->stage != MESA_SHADER_TESS_EVAL &&
- nir->stage != MESA_SHADER_FRAGMENT) {
- nir = brw_nir_lower_io(nir, &pipeline->device->info,
- compiler->scalar_stage[stage], false, NULL);
- }
+ anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);
/* nir_lower_io will only handle the push constants; we need to set this
* to the full number of possible uniforms.
*/
return nir;
}
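+/* Every stage lays its binding table out the same way: surfaces start at a
+ * stage-specific bias (MAX_RTS render targets for fragment, one work-group
+ * surface for compute, zero otherwise), so share the setup in one helper.
+ */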
+static void
+anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
+{
+ prog_data->binding_table.size_bytes = 0;
+ prog_data->binding_table.texture_start = bias;
+ prog_data->binding_table.ubo_start = bias;
+ prog_data->binding_table.ssbo_start = bias;
+ prog_data->binding_table.image_start = bias;
+}
+
static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
gl_shader_stage stage,
- struct brw_stage_prog_data *prog_data)
+ const struct brw_stage_prog_data *prog_data,
+ struct anv_pipeline_bind_map *map)
{
struct brw_device_info *devinfo = &pipeline->device->info;
uint32_t max_threads[] = {
pipeline->total_scratch =
align_u32(pipeline->total_scratch, 1024) +
prog_data->total_scratch * max_threads[stage];
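+
+ /* A shallow copy is enough here: the map's pointers are expected to
+ * reference cache-owned storage by now (the cache copies the map on
+ * upload and returns its own copy on a hit), not the compiler's
+ * stack-allocated scratch arrays.
+ */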
+ pipeline->bindings[stage] = *map;
}
static VkResult
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_vs_prog_data *prog_data = &pipeline->vs_prog_data;
+ const struct brw_stage_prog_data *stage_prog_data;
+ struct anv_pipeline_bind_map map;
struct brw_vs_prog_key key;
- uint32_t kernel;
- unsigned char sha1[20], *hash;
+ uint32_t kernel = NO_KERNEL;
+ unsigned char sha1[20];
populate_vs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) {
- hash = sha1;
- anv_hash_shader(hash, &key, sizeof(key), module, entrypoint, spec_info);
- kernel = anv_pipeline_cache_search(cache, hash, prog_data);
- } else {
- hash = NULL;
+ anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+ kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
}
- if (module->size == 0 || kernel == NO_KERNEL) {
- memset(prog_data, 0, sizeof(*prog_data));
+ if (kernel == NO_KERNEL) {
+ struct brw_vs_prog_data prog_data = { 0, };
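+ /* Stack-allocated scratch for the compiler to write the bind map into;
+ * 256 entries is the upper bound on surfaces/samplers one stage can use.
+ */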
+ struct anv_pipeline_binding surface_to_descriptor[256];
+ struct anv_pipeline_binding sampler_to_descriptor[256];
+
+ map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = surface_to_descriptor,
+ .sampler_to_descriptor = sampler_to_descriptor
+ };
nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
MESA_SHADER_VERTEX, spec_info,
- &prog_data->base.base);
+ &prog_data.base.base, &map);
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ anv_fill_binding_table(&prog_data.base.base, 0);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
- prog_data->inputs_read = nir->info.inputs_read;
- if (nir->info.outputs_written & (1ull << VARYING_SLOT_PSIZ))
- pipeline->writes_point_size = true;
+ prog_data.inputs_read = nir->info.inputs_read;
brw_compute_vue_map(&pipeline->device->info,
- &prog_data->base.vue_map,
+ &prog_data.base.vue_map,
nir->info.outputs_written,
nir->info.separate_shader);
unsigned code_size;
const unsigned *shader_code =
- brw_compile_vs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
NULL, false, -1, &code_size, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- kernel = anv_pipeline_cache_upload_kernel(cache, hash,
+ stage_prog_data = &prog_data.base.base;
+ kernel = anv_pipeline_cache_upload_kernel(cache,
+ module->size > 0 ? sha1 : NULL,
shader_code, code_size,
- prog_data, sizeof(*prog_data));
+ &stage_prog_data, sizeof(prog_data),
+ &map);
ralloc_free(mem_ctx);
}
- if (prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
+ const struct brw_vs_prog_data *vs_prog_data =
+ (const struct brw_vs_prog_data *) stage_prog_data;
+
+ if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
pipeline->vs_simd8 = kernel;
pipeline->vs_vec4 = NO_KERNEL;
} else {
}
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
- &prog_data->base.base);
+ stage_prog_data, &map);
return VK_SUCCESS;
}
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_gs_prog_data *prog_data = &pipeline->gs_prog_data;
+ const struct brw_stage_prog_data *stage_prog_data;
+ struct anv_pipeline_bind_map map;
struct brw_gs_prog_key key;
- uint32_t kernel;
- unsigned char sha1[20], *hash;
+ uint32_t kernel = NO_KERNEL;
+ unsigned char sha1[20];
populate_gs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) {
- hash = sha1;
- anv_hash_shader(hash, &key, sizeof(key), module, entrypoint, spec_info);
- kernel = anv_pipeline_cache_search(cache, hash, prog_data);
- } else {
- hash = NULL;
+ anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+ kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
}
- if (module->size == 0 || kernel == NO_KERNEL) {
- memset(prog_data, 0, sizeof(*prog_data));
+ if (kernel == NO_KERNEL) {
+ struct brw_gs_prog_data prog_data = { 0, };
+ struct anv_pipeline_binding surface_to_descriptor[256];
+ struct anv_pipeline_binding sampler_to_descriptor[256];
+
+ map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = surface_to_descriptor,
+ .sampler_to_descriptor = sampler_to_descriptor
+ };
nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
MESA_SHADER_GEOMETRY, spec_info,
- &prog_data->base.base);
+ &prog_data.base.base, &map);
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ anv_fill_binding_table(&prog_data.base.base, 0);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
- if (nir->info.outputs_written & (1ull << VARYING_SLOT_PSIZ))
- pipeline->writes_point_size = true;
-
brw_compute_vue_map(&pipeline->device->info,
- &prog_data->base.vue_map,
+ &prog_data.base.vue_map,
nir->info.outputs_written,
nir->info.separate_shader);
unsigned code_size;
const unsigned *shader_code =
- brw_compile_gs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
NULL, -1, &code_size, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
/* TODO: SIMD8 GS */
- kernel = anv_pipeline_cache_upload_kernel(cache, hash,
+ stage_prog_data = &prog_data.base.base;
+ kernel = anv_pipeline_cache_upload_kernel(cache,
+ module->size > 0 ? sha1 : NULL,
shader_code, code_size,
- prog_data, sizeof(*prog_data));
+ &stage_prog_data, sizeof(prog_data),
+ &map);
ralloc_free(mem_ctx);
}
pipeline->gs_kernel = kernel;
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
- &prog_data->base.base);
+ stage_prog_data, &map);
return VK_SUCCESS;
}
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_wm_prog_data *prog_data = &pipeline->wm_prog_data;
+ const struct brw_stage_prog_data *stage_prog_data;
+ struct anv_pipeline_bind_map map;
struct brw_wm_prog_key key;
- uint32_t kernel;
- unsigned char sha1[20], *hash;
+ uint32_t kernel = NO_KERNEL;
+ unsigned char sha1[20];
populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
- if (pipeline->use_repclear)
- key.nr_color_regions = 1;
-
if (module->size > 0) {
- hash = sha1;
- anv_hash_shader(hash, &key, sizeof(key), module, entrypoint, spec_info);
- kernel = anv_pipeline_cache_search(cache, hash, prog_data);
- } else {
- hash = NULL;
+ anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+ kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
}
- if (module->size == 0 || kernel == NO_KERNEL) {
- memset(prog_data, 0, sizeof(*prog_data));
+ if (kernel == NO_KERNEL) {
+ struct brw_wm_prog_data prog_data = { 0, };
+ struct anv_pipeline_binding surface_to_descriptor[256];
+ struct anv_pipeline_binding sampler_to_descriptor[256];
- prog_data->binding_table.render_target_start = 0;
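+ /* Start the surface map 8 entries in, leaving room in front for up to 8
+ * render-target bindings; they are copied into place once num_rts is
+ * known (see map.surface_to_descriptor -= num_rts below).
+ */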
+ map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = surface_to_descriptor + 8,
+ .sampler_to_descriptor = sampler_to_descriptor
+ };
nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
MESA_SHADER_FRAGMENT, spec_info,
- &prog_data->base);
+ &prog_data.base, &map);
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ unsigned num_rts = 0;
+ struct anv_pipeline_binding rt_bindings[8];
nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
nir_foreach_variable_safe(var, &nir->outputs) {
if (var->data.location < FRAG_RESULT_DATA0)
continue;
unsigned rt = var->data.location - FRAG_RESULT_DATA0;
if (rt >= key.nr_color_regions) {
+ /* Out-of-bounds, throw it away */
var->data.mode = nir_var_local;
exec_node_remove(&var->node);
exec_list_push_tail(&impl->locals, &var->node);
+ continue;
}
+
+ /* Give it a new, compacted, location */
+ var->data.location = FRAG_RESULT_DATA0 + num_rts;
+
+ unsigned array_len =
+ glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
+ assert(num_rts + array_len <= 8);
+
+ for (unsigned i = 0; i < array_len; i++) {
+ rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .offset = rt + i,
+ };
+ }
+
+ num_rts += array_len;
}
+ if (pipeline->use_repclear) {
+ assert(num_rts == 1);
+ key.nr_color_regions = 1;
+ }
+
+ if (num_rts == 0) {
+ /* If we have no render targets, we need a null render target */
+ rt_bindings[0] = (struct anv_pipeline_binding) {
+ .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
+ .offset = UINT16_MAX,
+ };
+ num_rts = 1;
+ }
+
+ assert(num_rts <= 8);
+ map.surface_to_descriptor -= num_rts;
+ map.surface_count += num_rts;
+ assert(map.surface_count <= 256);
+ memcpy(map.surface_to_descriptor, rt_bindings,
+ num_rts * sizeof(*rt_bindings));
+
+ anv_fill_binding_table(&prog_data.base, num_rts);
+
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
unsigned code_size;
const unsigned *shader_code =
- brw_compile_fs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
NULL, -1, -1, pipeline->use_repclear, &code_size, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- kernel = anv_pipeline_cache_upload_kernel(cache, hash,
+ stage_prog_data = &prog_data.base;
+ kernel = anv_pipeline_cache_upload_kernel(cache,
+ module->size > 0 ? sha1 : NULL,
shader_code, code_size,
- prog_data, sizeof(*prog_data));
+ &stage_prog_data, sizeof(prog_data),
+ &map);
ralloc_free(mem_ctx);
}
- if (prog_data->no_8)
+ const struct brw_wm_prog_data *wm_prog_data =
+ (const struct brw_wm_prog_data *) stage_prog_data;
+
+ if (wm_prog_data->no_8)
pipeline->ps_simd8 = NO_KERNEL;
else
pipeline->ps_simd8 = kernel;
- if (prog_data->no_8 || prog_data->prog_offset_16) {
- pipeline->ps_simd16 = kernel + prog_data->prog_offset_16;
+ if (wm_prog_data->no_8 || wm_prog_data->prog_offset_16) {
+ pipeline->ps_simd16 = kernel + wm_prog_data->prog_offset_16;
} else {
pipeline->ps_simd16 = NO_KERNEL;
}
pipeline->ps_grf_start2 = 0;
if (pipeline->ps_simd8 != NO_KERNEL) {
pipeline->ps_ksp0 = pipeline->ps_simd8;
- pipeline->ps_grf_start0 = prog_data->base.dispatch_grf_start_reg;
+ pipeline->ps_grf_start0 = wm_prog_data->base.dispatch_grf_start_reg;
if (pipeline->ps_simd16 != NO_KERNEL) {
pipeline->ps_ksp2 = pipeline->ps_simd16;
- pipeline->ps_grf_start2 = prog_data->dispatch_grf_start_reg_16;
+ pipeline->ps_grf_start2 = wm_prog_data->dispatch_grf_start_reg_16;
}
} else if (pipeline->ps_simd16 != NO_KERNEL) {
pipeline->ps_ksp0 = pipeline->ps_simd16;
- pipeline->ps_grf_start0 = prog_data->dispatch_grf_start_reg_16;
+ pipeline->ps_grf_start0 = wm_prog_data->dispatch_grf_start_reg_16;
}
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
- &prog_data->base);
+ stage_prog_data, &map);
return VK_SUCCESS;
}
{
const struct brw_compiler *compiler =
pipeline->device->instance->physicalDevice.compiler;
- struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
+ const struct brw_stage_prog_data *stage_prog_data;
+ struct anv_pipeline_bind_map map;
struct brw_cs_prog_key key;
- uint32_t kernel;
- unsigned char sha1[20], *hash;
+ uint32_t kernel = NO_KERNEL;
+ unsigned char sha1[20];
populate_cs_prog_key(&pipeline->device->info, &key);
if (module->size > 0) {
- hash = sha1;
- anv_hash_shader(hash, &key, sizeof(key), module, entrypoint, spec_info);
- kernel = anv_pipeline_cache_search(cache, hash, prog_data);
- } else {
- hash = NULL;
+ anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
+ kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
}
- if (module->size == 0 || kernel == NO_KERNEL) {
+ if (kernel == NO_KERNEL) {
- memset(prog_data, 0, sizeof(*prog_data));
+ struct brw_cs_prog_data prog_data = { 0, };
+ struct anv_pipeline_binding surface_to_descriptor[256];
+ struct anv_pipeline_binding sampler_to_descriptor[256];
- prog_data->binding_table.work_groups_start = 0;
+ map = (struct anv_pipeline_bind_map) {
+ .surface_to_descriptor = surface_to_descriptor,
+ .sampler_to_descriptor = sampler_to_descriptor
+ };
nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
MESA_SHADER_COMPUTE, spec_info,
- &prog_data->base);
+ &prog_data.base, &map);
if (nir == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- prog_data->base.total_shared = nir->num_shared;
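+ /* Bias of 1: slot 0 of the binding table is reserved for the work-group
+ * surface that work_groups_start used to point at explicitly.
+ */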
+ anv_fill_binding_table(&prog_data.base, 1);
void *mem_ctx = ralloc_context(NULL);
if (module->nir == NULL)
ralloc_steal(mem_ctx, nir);
unsigned code_size;
const unsigned *shader_code =
- brw_compile_cs(compiler, NULL, mem_ctx, &key, prog_data, nir,
+ brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
-1, &code_size, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
- kernel = anv_pipeline_cache_upload_kernel(cache, hash,
+ stage_prog_data = &prog_data.base;
+ kernel = anv_pipeline_cache_upload_kernel(cache,
+ module->size > 0 ? sha1 : NULL,
shader_code, code_size,
- prog_data, sizeof(*prog_data));
+ &stage_prog_data, sizeof(prog_data),
+ &map);
+
ralloc_free(mem_ctx);
}
pipeline->cs_simd = kernel;
anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
- &prog_data->base);
+ stage_prog_data, &map);
return VK_SUCCESS;
}
{
const struct brw_device_info *devinfo = &pipeline->device->info;
bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
- unsigned vs_size = vs_present ? pipeline->vs_prog_data.base.urb_entry_size : 1;
+ unsigned vs_size = vs_present ?
+ get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned vs_entry_size_bytes = vs_size * 64;
bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
- unsigned gs_size = gs_present ? pipeline->gs_prog_data.base.urb_entry_size : 1;
+ unsigned gs_size = gs_present ?
+ get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
unsigned gs_entry_size_bytes = gs_size * 64;
/* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
const unsigned stages =
_mesa_bitcount(pipeline->active_stages & VK_SHADER_STAGE_ALL_GRAPHICS);
- const unsigned size_per_stage = stages ? (push_constant_kb / stages) : 0;
+ unsigned size_per_stage = stages ? (push_constant_kb / stages) : 0;
unsigned used_kb = 0;
+ /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
+ * units of 2KB. Incidentally, these are the same platforms that have
+ * 32KB worth of push constant space.
+ */
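+ /* size_per_stage is in KB, so clearing bit 0 rounds it down to a
+ * multiple of 2KB. */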
+ if (push_constant_kb == 32)
+ size_per_stage &= ~1u;
+
for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
pipeline->urb.push_size[i] =
(pipeline->active_stages & (1 << i)) ? size_per_stage : 0;
anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);
- if (pCreateInfo->pTessellationState)
- anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO");
-
pipeline->use_repclear = extra && extra->use_repclear;
- pipeline->writes_point_size = false;
/* When we free the pipeline, we detect stages based on the NULL status
* of various prog_data pointers. Make them NULL by default.
*/
pipeline->active_stages = 0;
pipeline->total_scratch = 0;
+ const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
+ struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
- ANV_FROM_HANDLE(anv_shader_module, module,
- pCreateInfo->pStages[i].module);
-
- switch (pCreateInfo->pStages[i].stage) {
- case VK_SHADER_STAGE_VERTEX_BIT:
- anv_pipeline_compile_vs(pipeline, cache, pCreateInfo, module,
- pCreateInfo->pStages[i].pName,
- pCreateInfo->pStages[i].pSpecializationInfo);
- break;
- case VK_SHADER_STAGE_GEOMETRY_BIT:
- anv_pipeline_compile_gs(pipeline, cache, pCreateInfo, module,
- pCreateInfo->pStages[i].pName,
- pCreateInfo->pStages[i].pSpecializationInfo);
- break;
- case VK_SHADER_STAGE_FRAGMENT_BIT:
- anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra, module,
- pCreateInfo->pStages[i].pName,
- pCreateInfo->pStages[i].pSpecializationInfo);
- break;
- default:
- anv_finishme("Unsupported shader stage");
- }
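+ /* Each pStages entry sets exactly one bit of VkShaderStageFlagBits, and
+ * those bits are defined in gl_shader_stage order, so ffs() recovers the
+ * stage index directly.
+ */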
+ gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
+ pStages[stage] = &pCreateInfo->pStages[i];
+ modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
+ }
+
+ if (modules[MESA_SHADER_VERTEX]) {
+ anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
+ modules[MESA_SHADER_VERTEX],
+ pStages[MESA_SHADER_VERTEX]->pName,
+ pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
+ }
+
+ if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
+ anv_finishme("no tessellation support");
+
+ if (modules[MESA_SHADER_GEOMETRY]) {
+ anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
+ modules[MESA_SHADER_GEOMETRY],
+ pStages[MESA_SHADER_GEOMETRY]->pName,
+ pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
+ }
+
+ if (modules[MESA_SHADER_FRAGMENT]) {
+ anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
+ modules[MESA_SHADER_FRAGMENT],
+ pStages[MESA_SHADER_FRAGMENT]->pName,
+ pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
}
if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
/* Vertex is only optional if disable_vs is set */
assert(extra->disable_vs);
- memset(&pipeline->vs_prog_data, 0, sizeof(pipeline->vs_prog_data));
}
gen7_compute_urb_partition(pipeline);
*/
inputs_read = ~0ull;
} else {
- inputs_read = pipeline->vs_prog_data.inputs_read;
+ inputs_read = get_vs_prog_data(pipeline)->inputs_read;
}
pipeline->vb_used = 0;