anv/pipeline: Pass through specialization constants
author: Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 13 Jan 2016 00:30:43 +0000 (16:30 -0800)
committer: Jason Ekstrand <jason.ekstrand@intel.com>
Wed, 13 Jan 2016 23:18:36 +0000 (15:18 -0800)
src/vulkan/anv_pipeline.c
src/vulkan/anv_private.h
src/vulkan/gen8_pipeline.c

index 3d632dec0383edac27df8db04f90c2634c19d4e4..8de889306ccdfbfd29da0b7cc951528a5ee7a568 100644 (file)
@@ -84,7 +84,8 @@ static nir_shader *
 anv_shader_compile_to_nir(struct anv_device *device,
                           struct anv_shader_module *module,
                           const char *entrypoint_name,
-                          gl_shader_stage stage)
+                          gl_shader_stage stage,
+                          const VkSpecializationInfo *spec_info)
 {
    if (strcmp(entrypoint_name, "main") != 0) {
       anv_finishme("Multiple shaders per module not really supported");
@@ -113,12 +114,31 @@ anv_shader_compile_to_nir(struct anv_device *device,
       assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
       assert(module->size % 4 == 0);
 
-      entry_point = spirv_to_nir(spirv, module->size / 4, NULL, 0, stage,
-                                 entrypoint_name, nir_options);
+      uint32_t num_spec_entries = 0;
+      struct nir_spirv_specialization *spec_entries = NULL;
+      if (spec_info && spec_info->mapEntryCount > 0) {
+         num_spec_entries = spec_info->mapEntryCount;
+         spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
+         for (uint32_t i = 0; i < num_spec_entries; i++) {
+            const uint32_t *data =
+               spec_info->pData + spec_info->pMapEntries[i].offset;
+            assert((const void *)(data + 1) <=
+                   spec_info->pData + spec_info->dataSize);
+
+            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
+            spec_entries[i].data = *data;
+         }
+      }
+
+      entry_point = spirv_to_nir(spirv, module->size / 4,
+                                 spec_entries, num_spec_entries,
+                                 stage, entrypoint_name, nir_options);
       nir = entry_point->shader;
       assert(nir->stage == stage);
       nir_validate_shader(nir);
 
+      free(spec_entries);
+
       nir_lower_returns(nir);
       nir_validate_shader(nir);
 
@@ -374,13 +394,15 @@ anv_pipeline_compile(struct anv_pipeline *pipeline,
                      struct anv_shader_module *module,
                      const char *entrypoint,
                      gl_shader_stage stage,
+                     const VkSpecializationInfo *spec_info,
                      struct brw_stage_prog_data *prog_data)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
 
    nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
-                                               module, entrypoint, stage);
+                                               module, entrypoint, stage,
+                                               spec_info);
    if (nir == NULL)
       return NULL;
 
@@ -490,7 +512,8 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkGraphicsPipelineCreateInfo *info,
                         struct anv_shader_module *module,
-                        const char *entrypoint)
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -504,7 +527,7 @@ anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
    memset(prog_data, 0, sizeof(*prog_data));
 
    nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
-                                          MESA_SHADER_VERTEX,
+                                          MESA_SHADER_VERTEX, spec_info,
                                           &prog_data->base.base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -554,7 +577,8 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkGraphicsPipelineCreateInfo *info,
                         struct anv_shader_module *module,
-                        const char *entrypoint)
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -568,7 +592,7 @@ anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
    memset(prog_data, 0, sizeof(*prog_data));
 
    nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
-                                          MESA_SHADER_GEOMETRY,
+                                          MESA_SHADER_GEOMETRY, spec_info,
                                           &prog_data->base.base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -610,7 +634,8 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkGraphicsPipelineCreateInfo *info,
                         struct anv_shader_module *module,
-                        const char *entrypoint)
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -629,7 +654,7 @@ anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
    prog_data->binding_table.render_target_start = 0;
 
    nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
-                                          MESA_SHADER_FRAGMENT,
+                                          MESA_SHADER_FRAGMENT, spec_info,
                                           &prog_data->base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -688,7 +713,8 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkComputePipelineCreateInfo *info,
                         struct anv_shader_module *module,
-                        const char *entrypoint)
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info)
 {
    const struct brw_compiler *compiler =
       pipeline->device->instance->physicalDevice.compiler;
@@ -704,7 +730,7 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
    prog_data->binding_table.work_groups_start = 0;
 
    nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
-                                          MESA_SHADER_COMPUTE,
+                                          MESA_SHADER_COMPUTE, spec_info,
                                           &prog_data->base);
    if (nir == NULL)
       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
@@ -1059,17 +1085,22 @@ anv_pipeline_init(struct anv_pipeline *pipeline,
    for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
       ANV_FROM_HANDLE(anv_shader_module, module,
                       pCreateInfo->pStages[i].module);
-      const char *entrypoint = pCreateInfo->pStages[i].pName;
 
       switch (pCreateInfo->pStages[i].stage) {
       case VK_SHADER_STAGE_VERTEX_BIT:
-         anv_pipeline_compile_vs(pipeline, cache, pCreateInfo, module, entrypoint);
+         anv_pipeline_compile_vs(pipeline, cache, pCreateInfo, module,
+                                 pCreateInfo->pStages[i].pName,
+                                 pCreateInfo->pStages[i].pSpecializationInfo);
          break;
       case VK_SHADER_STAGE_GEOMETRY_BIT:
-         anv_pipeline_compile_gs(pipeline, cache, pCreateInfo, module, entrypoint);
+         anv_pipeline_compile_gs(pipeline, cache, pCreateInfo, module,
+                                 pCreateInfo->pStages[i].pName,
+                                 pCreateInfo->pStages[i].pSpecializationInfo);
          break;
       case VK_SHADER_STAGE_FRAGMENT_BIT:
-         anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, module, entrypoint);
+         anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, module,
+                                 pCreateInfo->pStages[i].pName,
+                                 pCreateInfo->pStages[i].pSpecializationInfo);
          break;
       default:
          anv_finishme("Unsupported shader stage");
index 138a40751f299ed8b31df63a938c4575c69967a6..a0ac340cc62714b0a2ba1545fad5bf474aabdf3a 100644 (file)
@@ -1369,7 +1369,8 @@ anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                         struct anv_pipeline_cache *cache,
                         const VkComputePipelineCreateInfo *info,
                         struct anv_shader_module *module,
-                        const char *entrypoint_name);
+                        const char *entrypoint,
+                        const VkSpecializationInfo *spec_info);
 
 VkResult
 anv_graphics_pipeline_create(VkDevice device,
index dee3c4049c2bebc76e0fd76f6057155db095ebe4..2be71a05af8ed7153ab956aa2c02013b557def19 100644 (file)
@@ -666,7 +666,8 @@ VkResult genX(compute_pipeline_create)(
    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
    ANV_FROM_HANDLE(anv_shader_module, module,  pCreateInfo->stage.module);
    anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
-                           pCreateInfo->stage.pName);
+                           pCreateInfo->stage.pName,
+                           pCreateInfo->stage.pSpecializationInfo);
 
    pipeline->use_repclear = false;