genX(graphics_pipeline_create)(VkDevice _device,
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
static void
populate_wm_prog_key(const struct gen_device_info *devinfo,
const VkGraphicsPipelineCreateInfo *info,
- const struct anv_graphics_pipeline_create_info *extra,
struct brw_wm_prog_key *key)
{
ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
/* XXX Vulkan doesn't appear to specify */
key->clamp_fragment_color = false;
- if (extra && extra->color_attachment_count >= 0) {
- key->nr_color_regions = extra->color_attachment_count;
- } else {
- key->nr_color_regions =
- render_pass->subpasses[info->subpass].color_count;
- }
+ key->nr_color_regions =
+ render_pass->subpasses[info->subpass].color_count;
key->replicate_alpha = key->nr_color_regions > 1 &&
info->pMultisampleState &&
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *info,
- const struct anv_graphics_pipeline_create_info *extra,
struct anv_shader_module *module,
const char *entrypoint,
const VkSpecializationInfo *spec_info)
struct anv_shader_bin *bin = NULL;
unsigned char sha1[20];
- populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
+ populate_wm_prog_key(&pipeline->device->info, info, &key);
if (cache) {
anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
num_rts += array_len;
}
- if (pipeline->use_repclear) {
- assert(num_rts == 1);
- key.nr_color_regions = 1;
- }
-
if (num_rts == 0) {
/* If we have no render targets, we need a null render target */
rt_bindings[0] = (struct anv_pipeline_binding) {
unsigned code_size;
const unsigned *shader_code =
brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
- NULL, -1, -1, true, pipeline->use_repclear,
- &code_size, NULL);
+ NULL, -1, -1, true, false, &code_size, NULL);
if (shader_code == NULL) {
ralloc_free(mem_ctx);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_device *device,
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *alloc)
{
VkResult result;
pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
pCreateInfo->pRasterizationState->depthClampEnable;
- pipeline->use_repclear = extra && extra->use_repclear;
-
pipeline->needs_data_cache = false;
/* When we free the pipeline, we detect stages based on the NULL status
}
if (modules[MESA_SHADER_FRAGMENT]) {
- result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
+ result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
modules[MESA_SHADER_FRAGMENT],
pStages[MESA_SHADER_FRAGMENT]->pName,
pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
goto compile_fail;
}
- if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
- /* Vertex is only optional if disable_vs is set */
- assert(extra->disable_vs);
- }
+ assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);
anv_pipeline_setup_l3_config(pipeline, false);
const VkPipelineVertexInputStateCreateInfo *vi_info =
pCreateInfo->pVertexInputState;
- uint64_t inputs_read;
- if (extra && extra->disable_vs) {
- /* If the VS is disabled, just assume the user knows what they're
- * doing and apply the layout blindly. This can only come from
- * meta, so this *should* be safe.
- */
- inputs_read = ~0ull;
- } else {
- inputs_read = get_vs_prog_data(pipeline)->inputs_read;
- }
+ const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;
pipeline->vb_used = 0;
for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
pipeline->primitive_restart = ia_info->primitiveRestartEnable;
pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
- if (extra && extra->use_rectlist)
- pipeline->topology = _3DPRIM_RECTLIST;
-
return VK_SUCCESS;
compile_fail:
VkDevice _device,
VkPipelineCache _cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *pAllocator,
VkPipeline *pPipeline)
{
switch (device->info.gen) {
case 7:
if (device->info.is_haswell)
- return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+ return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
else
- return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+ return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
case 8:
- return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+ return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
case 9:
- return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
+ return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
default:
unreachable("unsupported gen\n");
}
result = anv_graphics_pipeline_create(_device,
pipelineCache,
&pCreateInfos[i],
- NULL, pAllocator, &pPipelines[i]);
+ pAllocator, &pPipelines[i]);
if (result != VK_SUCCESS) {
for (unsigned j = 0; j < i; j++) {
anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
struct anv_pipeline_layout * layout;
- bool use_repclear;
bool needs_data_cache;
struct anv_shader_bin * shaders[MESA_SHADER_STAGES];
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
-struct anv_graphics_pipeline_create_info {
- /**
- * If non-negative, overrides the color attachment count of the pipeline's
- * subpass.
- */
- int8_t color_attachment_count;
-
- bool use_repclear;
- bool disable_vs;
- bool use_rectlist;
-};
-
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
struct anv_pipeline_cache *cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *alloc);
VkResult
anv_graphics_pipeline_create(VkDevice device,
VkPipelineCache cache,
const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
VkDevice _device,
struct anv_pipeline_cache * cache,
const VkGraphicsPipelineCreateInfo* pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipeline)
{
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(pipeline, device, cache,
- pCreateInfo, extra, pAllocator);
+ pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
anv_free2(&device->alloc, pAllocator, pipeline);
return result;
}
assert(pCreateInfo->pVertexInputState);
- emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+ emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
assert(pCreateInfo->pRasterizationState);
emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
- pCreateInfo->pMultisampleState, pass, subpass, extra);
+ pCreateInfo->pMultisampleState, pass, subpass);
emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
emit_urb_setup(pipeline);
emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
- pCreateInfo->pRasterizationState, extra);
+ pCreateInfo->pRasterizationState);
emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
gen7_emit_vs_workaround_flush(brw);
#endif
- if (pipeline->vs_vec4 == NO_KERNEL || (extra && extra->disable_vs))
+ if (pipeline->vs_vec4 == NO_KERNEL)
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs);
else
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
- if (pipeline->gs_kernel == NO_KERNEL || (extra && extra->disable_vs)) {
+ if (pipeline->gs_kernel == NO_KERNEL) {
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
} else {
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
static void
emit_ia_state(struct anv_pipeline *pipeline,
- const VkPipelineInputAssemblyStateCreateInfo *info,
- const struct anv_graphics_pipeline_create_info *extra)
+ const VkPipelineInputAssemblyStateCreateInfo *info)
{
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
vft.PrimitiveTopologyType = pipeline->topology;
VkDevice _device,
struct anv_pipeline_cache * cache,
const VkGraphicsPipelineCreateInfo* pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipeline)
{
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
result = anv_pipeline_init(pipeline, device, cache,
- pCreateInfo, extra, pAllocator);
+ pCreateInfo, pAllocator);
if (result != VK_SUCCESS) {
anv_free2(&device->alloc, pAllocator, pipeline);
return result;
}
assert(pCreateInfo->pVertexInputState);
- emit_vertex_input(pipeline, pCreateInfo->pVertexInputState, extra);
+ emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
assert(pCreateInfo->pInputAssemblyState);
- emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState, extra);
+ emit_ia_state(pipeline, pCreateInfo->pInputAssemblyState);
assert(pCreateInfo->pRasterizationState);
emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
- pCreateInfo->pMultisampleState, pass, subpass, extra);
+ pCreateInfo->pMultisampleState, pass, subpass);
emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
emit_urb_setup(pipeline);
emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
- pCreateInfo->pRasterizationState, extra);
+ pCreateInfo->pRasterizationState);
emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
uint32_t vs_start = pipeline->vs_simd8 != NO_KERNEL ? pipeline->vs_simd8 :
pipeline->vs_vec4;
- if (vs_start == NO_KERNEL || (extra && extra->disable_vs)) {
+ if (vs_start == NO_KERNEL) {
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
vs.FunctionEnable = false;
/* Even if VS is disabled, SBE still gets the amount of
return result;
}
- pipeline->use_repclear = false;
-
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
static void
emit_vertex_input(struct anv_pipeline *pipeline,
- const VkPipelineVertexInputStateCreateInfo *info,
- const struct anv_graphics_pipeline_create_info *extra)
+ const VkPipelineVertexInputStateCreateInfo *info)
{
const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
- uint32_t elements;
- if (extra && extra->disable_vs) {
- /* If the VS is disabled, just assume the user knows what they're
- * doing and apply the layout blindly. This can only come from
- * meta, so this *should* be safe.
- */
- elements = 0;
- for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++)
- elements |= (1 << info->pVertexAttributeDescriptions[i].location);
- } else {
- /* Pull inputs_read out of the VS prog data */
- uint64_t inputs_read = vs_prog_data->inputs_read;
- assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
- elements = inputs_read >> VERT_ATTRIB_GENERIC0;
- }
+ /* Pull inputs_read out of the VS prog data */
+ const uint64_t inputs_read = vs_prog_data->inputs_read;
+ assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
+ const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
#if GEN_GEN >= 8
/* On BDW+, we only need to allocate space for base ids. Setting up
const VkPipelineRasterizationStateCreateInfo *rs_info,
const VkPipelineMultisampleStateCreateInfo *ms_info,
const struct anv_render_pass *pass,
- const struct anv_subpass *subpass,
- const struct anv_graphics_pipeline_create_info *extra)
+ const struct anv_subpass *subpass)
{
struct GENX(3DSTATE_SF) sf = {
GENX(3DSTATE_SF_header),
};
- sf.ViewportTransformEnable = !(extra && extra->use_rectlist);
+ sf.ViewportTransformEnable = true;
sf.StatisticsEnable = true;
sf.TriangleStripListProvokingVertexSelect = 0;
sf.LineStripListProvokingVertexSelect = 0;
raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
- raster.ScissorRectangleEnable = !(extra && extra->use_rectlist);
+ raster.ScissorRectangleEnable = true;
#if GEN_GEN >= 9
/* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
static void
emit_3dstate_clip(struct anv_pipeline *pipeline,
const VkPipelineViewportStateCreateInfo *vp_info,
- const VkPipelineRasterizationStateCreateInfo *rs_info,
- const struct anv_graphics_pipeline_create_info *extra)
+ const VkPipelineRasterizationStateCreateInfo *rs_info)
{
const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
(void) wm_prog_data;
anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
- clip.ClipEnable = !(extra && extra->use_rectlist);
+ clip.ClipEnable = true;
clip.EarlyCullEnable = true;
clip.APIMode = APIMODE_D3D;
clip.ViewportXYClipTestEnable = true;