/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
34 VkResult
anv_CreateShader(
36 const VkShaderCreateInfo
* pCreateInfo
,
39 struct anv_device
*device
= (struct anv_device
*) _device
;
40 struct anv_shader
*shader
;
42 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_SHADER_CREATE_INFO
);
44 shader
= anv_device_alloc(device
, sizeof(*shader
) + pCreateInfo
->codeSize
, 8,
45 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
47 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
49 shader
->size
= pCreateInfo
->codeSize
;
50 memcpy(shader
->data
, pCreateInfo
->pCode
, shader
->size
);
52 *pShader
= (VkShader
) shader
;
60 emit_vertex_input(struct anv_pipeline
*pipeline
, VkPipelineVertexInputStateCreateInfo
*info
)
62 const uint32_t num_dwords
= 1 + info
->attributeCount
* 2;
64 bool instancing_enable
[32];
66 pipeline
->vb_used
= 0;
67 for (uint32_t i
= 0; i
< info
->bindingCount
; i
++) {
68 const VkVertexInputBindingDescription
*desc
=
69 &info
->pVertexBindingDescriptions
[i
];
71 pipeline
->vb_used
|= 1 << desc
->binding
;
72 pipeline
->binding_stride
[desc
->binding
] = desc
->strideInBytes
;
74 /* Step rate is programmed per vertex element (attribute), not
75 * binding. Set up a map of which bindings step per instance, for
76 * reference by vertex element setup. */
77 switch (desc
->stepRate
) {
79 case VK_VERTEX_INPUT_STEP_RATE_VERTEX
:
80 instancing_enable
[desc
->binding
] = false;
82 case VK_VERTEX_INPUT_STEP_RATE_INSTANCE
:
83 instancing_enable
[desc
->binding
] = true;
88 p
= anv_batch_emitn(&pipeline
->batch
, num_dwords
,
89 GEN8_3DSTATE_VERTEX_ELEMENTS
);
91 for (uint32_t i
= 0; i
< info
->attributeCount
; i
++) {
92 const VkVertexInputAttributeDescription
*desc
=
93 &info
->pVertexAttributeDescriptions
[i
];
94 const struct anv_format
*format
= anv_format_for_vk_format(desc
->format
);
96 struct GEN8_VERTEX_ELEMENT_STATE element
= {
97 .VertexBufferIndex
= desc
->binding
,
99 .SourceElementFormat
= format
->surface_format
,
100 .EdgeFlagEnable
= false,
101 .SourceElementOffset
= desc
->offsetInBytes
,
102 .Component0Control
= VFCOMP_STORE_SRC
,
103 .Component1Control
= format
->num_channels
>= 2 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
104 .Component2Control
= format
->num_channels
>= 3 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_0
,
105 .Component3Control
= format
->num_channels
>= 4 ? VFCOMP_STORE_SRC
: VFCOMP_STORE_1_FP
107 GEN8_VERTEX_ELEMENT_STATE_pack(NULL
, &p
[1 + i
* 2], &element
);
109 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_INSTANCING
,
110 .InstancingEnable
= instancing_enable
[desc
->binding
],
111 .VertexElementIndex
= i
,
112 /* Vulkan so far doesn't have an instance divisor, so
113 * this is always 1 (ignored if not instancing). */
114 .InstanceDataStepRate
= 1);
117 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_SGVS
,
118 .VertexIDEnable
= pipeline
->vs_prog_data
.uses_vertexid
,
119 .VertexIDComponentNumber
= 2,
120 .VertexIDElementOffset
= info
->bindingCount
,
121 .InstanceIDEnable
= pipeline
->vs_prog_data
.uses_instanceid
,
122 .InstanceIDComponentNumber
= 3,
123 .InstanceIDElementOffset
= info
->bindingCount
);
127 emit_ia_state(struct anv_pipeline
*pipeline
,
128 VkPipelineIaStateCreateInfo
*info
,
129 const struct anv_pipeline_create_info
*extra
)
131 static const uint32_t vk_to_gen_primitive_type
[] = {
132 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST
] = _3DPRIM_POINTLIST
,
133 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST
] = _3DPRIM_LINELIST
,
134 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP
] = _3DPRIM_LINESTRIP
,
135 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST
] = _3DPRIM_TRILIST
,
136 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP
] = _3DPRIM_TRISTRIP
,
137 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN
] = _3DPRIM_TRIFAN
,
138 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_ADJ
] = _3DPRIM_LINELIST_ADJ
,
139 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_ADJ
] = _3DPRIM_LINESTRIP_ADJ
,
140 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_ADJ
] = _3DPRIM_TRILIST_ADJ
,
141 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_ADJ
] = _3DPRIM_TRISTRIP_ADJ
,
142 [VK_PRIMITIVE_TOPOLOGY_PATCH
] = _3DPRIM_PATCHLIST_1
144 uint32_t topology
= vk_to_gen_primitive_type
[info
->topology
];
146 if (extra
&& extra
->use_rectlist
)
147 topology
= _3DPRIM_RECTLIST
;
149 struct GEN8_3DSTATE_VF vf
= {
150 GEN8_3DSTATE_VF_header
,
151 .IndexedDrawCutIndexEnable
= info
->primitiveRestartEnable
,
153 GEN8_3DSTATE_VF_pack(NULL
, pipeline
->state_vf
, &vf
);
155 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_TOPOLOGY
,
156 .PrimitiveTopologyType
= topology
);
160 emit_rs_state(struct anv_pipeline
*pipeline
, VkPipelineRsStateCreateInfo
*info
,
161 const struct anv_pipeline_create_info
*extra
)
163 static const uint32_t vk_to_gen_cullmode
[] = {
164 [VK_CULL_MODE_NONE
] = CULLMODE_NONE
,
165 [VK_CULL_MODE_FRONT
] = CULLMODE_FRONT
,
166 [VK_CULL_MODE_BACK
] = CULLMODE_BACK
,
167 [VK_CULL_MODE_FRONT_AND_BACK
] = CULLMODE_BOTH
170 static const uint32_t vk_to_gen_fillmode
[] = {
171 [VK_FILL_MODE_POINTS
] = RASTER_POINT
,
172 [VK_FILL_MODE_WIREFRAME
] = RASTER_WIREFRAME
,
173 [VK_FILL_MODE_SOLID
] = RASTER_SOLID
176 static const uint32_t vk_to_gen_front_face
[] = {
177 [VK_FRONT_FACE_CCW
] = CounterClockwise
,
178 [VK_FRONT_FACE_CW
] = Clockwise
181 struct GEN8_3DSTATE_SF sf
= {
182 GEN8_3DSTATE_SF_header
,
183 .ViewportTransformEnable
= !(extra
&& extra
->disable_viewport
),
184 .TriangleStripListProvokingVertexSelect
= 0,
185 .LineStripListProvokingVertexSelect
= 0,
186 .TriangleFanProvokingVertexSelect
= 0,
187 .PointWidthSource
= pipeline
->writes_point_size
? Vertex
: State
,
191 /* FINISHME: bool32_t rasterizerDiscardEnable; */
193 GEN8_3DSTATE_SF_pack(NULL
, pipeline
->state_sf
, &sf
);
195 struct GEN8_3DSTATE_RASTER raster
= {
196 GEN8_3DSTATE_RASTER_header
,
197 .FrontWinding
= vk_to_gen_front_face
[info
->frontFace
],
198 .CullMode
= vk_to_gen_cullmode
[info
->cullMode
],
199 .FrontFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
200 .BackFaceFillMode
= vk_to_gen_fillmode
[info
->fillMode
],
201 .ScissorRectangleEnable
= !(extra
&& extra
->disable_scissor
),
202 .ViewportZClipTestEnable
= info
->depthClipEnable
205 GEN8_3DSTATE_RASTER_pack(NULL
, pipeline
->state_raster
, &raster
);
207 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_SBE
,
208 .ForceVertexURBEntryReadLength
= false,
209 .ForceVertexURBEntryReadOffset
= false,
210 .PointSpriteTextureCoordinateOrigin
= UPPERLEFT
,
211 .NumberofSFOutputAttributes
=
212 pipeline
->wm_prog_data
.num_varying_inputs
);
217 emit_cb_state(struct anv_pipeline
*pipeline
, VkPipelineCbStateCreateInfo
*info
)
219 struct anv_device
*device
= pipeline
->device
;
221 static const uint32_t vk_to_gen_logic_op
[] = {
222 [VK_LOGIC_OP_COPY
] = LOGICOP_COPY
,
223 [VK_LOGIC_OP_CLEAR
] = LOGICOP_CLEAR
,
224 [VK_LOGIC_OP_AND
] = LOGICOP_AND
,
225 [VK_LOGIC_OP_AND_REVERSE
] = LOGICOP_AND_REVERSE
,
226 [VK_LOGIC_OP_AND_INVERTED
] = LOGICOP_AND_INVERTED
,
227 [VK_LOGIC_OP_NOOP
] = LOGICOP_NOOP
,
228 [VK_LOGIC_OP_XOR
] = LOGICOP_XOR
,
229 [VK_LOGIC_OP_OR
] = LOGICOP_OR
,
230 [VK_LOGIC_OP_NOR
] = LOGICOP_NOR
,
231 [VK_LOGIC_OP_EQUIV
] = LOGICOP_EQUIV
,
232 [VK_LOGIC_OP_INVERT
] = LOGICOP_INVERT
,
233 [VK_LOGIC_OP_OR_REVERSE
] = LOGICOP_OR_REVERSE
,
234 [VK_LOGIC_OP_COPY_INVERTED
] = LOGICOP_COPY_INVERTED
,
235 [VK_LOGIC_OP_OR_INVERTED
] = LOGICOP_OR_INVERTED
,
236 [VK_LOGIC_OP_NAND
] = LOGICOP_NAND
,
237 [VK_LOGIC_OP_SET
] = LOGICOP_SET
,
240 static const uint32_t vk_to_gen_blend
[] = {
241 [VK_BLEND_ZERO
] = BLENDFACTOR_ZERO
,
242 [VK_BLEND_ONE
] = BLENDFACTOR_ONE
,
243 [VK_BLEND_SRC_COLOR
] = BLENDFACTOR_SRC_COLOR
,
244 [VK_BLEND_ONE_MINUS_SRC_COLOR
] = BLENDFACTOR_INV_SRC_COLOR
,
245 [VK_BLEND_DEST_COLOR
] = BLENDFACTOR_DST_COLOR
,
246 [VK_BLEND_ONE_MINUS_DEST_COLOR
] = BLENDFACTOR_INV_DST_COLOR
,
247 [VK_BLEND_SRC_ALPHA
] = BLENDFACTOR_SRC_ALPHA
,
248 [VK_BLEND_ONE_MINUS_SRC_ALPHA
] = BLENDFACTOR_INV_SRC_ALPHA
,
249 [VK_BLEND_DEST_ALPHA
] = BLENDFACTOR_DST_ALPHA
,
250 [VK_BLEND_ONE_MINUS_DEST_ALPHA
] = BLENDFACTOR_INV_DST_ALPHA
,
251 [VK_BLEND_CONSTANT_COLOR
] = BLENDFACTOR_CONST_COLOR
,
252 [VK_BLEND_ONE_MINUS_CONSTANT_COLOR
] = BLENDFACTOR_INV_CONST_COLOR
,
253 [VK_BLEND_CONSTANT_ALPHA
] = BLENDFACTOR_CONST_ALPHA
,
254 [VK_BLEND_ONE_MINUS_CONSTANT_ALPHA
] = BLENDFACTOR_INV_CONST_ALPHA
,
255 [VK_BLEND_SRC_ALPHA_SATURATE
] = BLENDFACTOR_SRC_ALPHA_SATURATE
,
256 [VK_BLEND_SRC1_COLOR
] = BLENDFACTOR_SRC1_COLOR
,
257 [VK_BLEND_ONE_MINUS_SRC1_COLOR
] = BLENDFACTOR_INV_SRC1_COLOR
,
258 [VK_BLEND_SRC1_ALPHA
] = BLENDFACTOR_SRC1_ALPHA
,
259 [VK_BLEND_ONE_MINUS_SRC1_ALPHA
] = BLENDFACTOR_INV_SRC1_ALPHA
,
262 static const uint32_t vk_to_gen_blend_op
[] = {
263 [VK_BLEND_OP_ADD
] = BLENDFUNCTION_ADD
,
264 [VK_BLEND_OP_SUBTRACT
] = BLENDFUNCTION_SUBTRACT
,
265 [VK_BLEND_OP_REVERSE_SUBTRACT
] = BLENDFUNCTION_REVERSE_SUBTRACT
,
266 [VK_BLEND_OP_MIN
] = BLENDFUNCTION_MIN
,
267 [VK_BLEND_OP_MAX
] = BLENDFUNCTION_MAX
,
270 uint32_t num_dwords
= 1 + info
->attachmentCount
* 2;
271 pipeline
->blend_state
=
272 anv_state_pool_alloc(&device
->dynamic_state_pool
, num_dwords
* 4, 64);
274 struct GEN8_BLEND_STATE blend_state
= {
275 .AlphaToCoverageEnable
= info
->alphaToCoverageEnable
,
278 uint32_t *state
= pipeline
->blend_state
.map
;
279 GEN8_BLEND_STATE_pack(NULL
, state
, &blend_state
);
281 for (uint32_t i
= 0; i
< info
->attachmentCount
; i
++) {
282 const VkPipelineCbAttachmentState
*a
= &info
->pAttachments
[i
];
284 struct GEN8_BLEND_STATE_ENTRY entry
= {
285 .LogicOpEnable
= info
->logicOpEnable
,
286 .LogicOpFunction
= vk_to_gen_logic_op
[info
->logicOp
],
287 .ColorBufferBlendEnable
= a
->blendEnable
,
288 .PreBlendSourceOnlyClampEnable
= false,
289 .PreBlendColorClampEnable
= false,
290 .PostBlendColorClampEnable
= false,
291 .SourceBlendFactor
= vk_to_gen_blend
[a
->srcBlendColor
],
292 .DestinationBlendFactor
= vk_to_gen_blend
[a
->destBlendColor
],
293 .ColorBlendFunction
= vk_to_gen_blend_op
[a
->blendOpColor
],
294 .SourceAlphaBlendFactor
= vk_to_gen_blend
[a
->srcBlendAlpha
],
295 .DestinationAlphaBlendFactor
= vk_to_gen_blend
[a
->destBlendAlpha
],
296 .AlphaBlendFunction
= vk_to_gen_blend_op
[a
->blendOpAlpha
],
297 .WriteDisableAlpha
= !(a
->channelWriteMask
& VK_CHANNEL_A_BIT
),
298 .WriteDisableRed
= !(a
->channelWriteMask
& VK_CHANNEL_R_BIT
),
299 .WriteDisableGreen
= !(a
->channelWriteMask
& VK_CHANNEL_G_BIT
),
300 .WriteDisableBlue
= !(a
->channelWriteMask
& VK_CHANNEL_B_BIT
),
303 GEN8_BLEND_STATE_ENTRY_pack(NULL
, state
+ i
* 2 + 1, &entry
);
306 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_BLEND_STATE_POINTERS
,
307 .BlendStatePointer
= pipeline
->blend_state
.offset
,
308 .BlendStatePointerValid
= true);
311 static const uint32_t vk_to_gen_compare_op
[] = {
312 [VK_COMPARE_OP_NEVER
] = COMPAREFUNCTION_NEVER
,
313 [VK_COMPARE_OP_LESS
] = COMPAREFUNCTION_LESS
,
314 [VK_COMPARE_OP_EQUAL
] = COMPAREFUNCTION_EQUAL
,
315 [VK_COMPARE_OP_LESS_EQUAL
] = COMPAREFUNCTION_LEQUAL
,
316 [VK_COMPARE_OP_GREATER
] = COMPAREFUNCTION_GREATER
,
317 [VK_COMPARE_OP_NOT_EQUAL
] = COMPAREFUNCTION_NOTEQUAL
,
318 [VK_COMPARE_OP_GREATER_EQUAL
] = COMPAREFUNCTION_GEQUAL
,
319 [VK_COMPARE_OP_ALWAYS
] = COMPAREFUNCTION_ALWAYS
,
322 static const uint32_t vk_to_gen_stencil_op
[] = {
323 [VK_STENCIL_OP_KEEP
] = 0,
324 [VK_STENCIL_OP_ZERO
] = 0,
325 [VK_STENCIL_OP_REPLACE
] = 0,
326 [VK_STENCIL_OP_INC_CLAMP
] = 0,
327 [VK_STENCIL_OP_DEC_CLAMP
] = 0,
328 [VK_STENCIL_OP_INVERT
] = 0,
329 [VK_STENCIL_OP_INC_WRAP
] = 0,
330 [VK_STENCIL_OP_DEC_WRAP
] = 0
334 emit_ds_state(struct anv_pipeline
*pipeline
, VkPipelineDsStateCreateInfo
*info
)
337 /* We're going to OR this together with the dynamic state. We need
338 * to make sure it's initialized to something useful.
340 memset(pipeline
->state_wm_depth_stencil
, 0,
341 sizeof(pipeline
->state_wm_depth_stencil
));
345 /* bool32_t depthBoundsEnable; // optional (depth_bounds_test) */
347 struct GEN8_3DSTATE_WM_DEPTH_STENCIL wm_depth_stencil
= {
348 .DepthTestEnable
= info
->depthTestEnable
,
349 .DepthBufferWriteEnable
= info
->depthWriteEnable
,
350 .DepthTestFunction
= vk_to_gen_compare_op
[info
->depthCompareOp
],
351 .DoubleSidedStencilEnable
= true,
353 .StencilTestEnable
= info
->stencilTestEnable
,
354 .StencilFailOp
= vk_to_gen_stencil_op
[info
->front
.stencilFailOp
],
355 .StencilPassDepthPassOp
= vk_to_gen_stencil_op
[info
->front
.stencilPassOp
],
356 .StencilPassDepthFailOp
= vk_to_gen_stencil_op
[info
->front
.stencilDepthFailOp
],
357 .StencilTestFunction
= vk_to_gen_compare_op
[info
->front
.stencilCompareOp
],
358 .BackfaceStencilFailOp
= vk_to_gen_stencil_op
[info
->back
.stencilFailOp
],
359 .BackfaceStencilPassDepthPassOp
= vk_to_gen_stencil_op
[info
->back
.stencilPassOp
],
360 .BackfaceStencilPassDepthFailOp
=vk_to_gen_stencil_op
[info
->back
.stencilDepthFailOp
],
361 .BackfaceStencilTestFunction
= vk_to_gen_compare_op
[info
->back
.stencilCompareOp
],
364 GEN8_3DSTATE_WM_DEPTH_STENCIL_pack(NULL
, pipeline
->state_wm_depth_stencil
, &wm_depth_stencil
);
367 VkResult
anv_CreateGraphicsPipeline(
369 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
370 VkPipeline
* pPipeline
)
372 return anv_pipeline_create(device
, pCreateInfo
, NULL
, pPipeline
);
376 anv_pipeline_destroy(struct anv_device
*device
,
377 struct anv_object
*object
,
378 VkObjectType obj_type
)
380 struct anv_pipeline
*pipeline
= (struct anv_pipeline
*) object
;
382 assert(obj_type
== VK_OBJECT_TYPE_PIPELINE
);
384 anv_compiler_free(pipeline
);
385 anv_reloc_list_finish(&pipeline
->batch
.relocs
, pipeline
->device
);
386 anv_state_stream_finish(&pipeline
->program_stream
);
387 anv_state_pool_free(&device
->dynamic_state_pool
, pipeline
->blend_state
);
388 anv_device_free(pipeline
->device
, pipeline
);
394 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
395 const struct anv_pipeline_create_info
* extra
,
396 VkPipeline
* pPipeline
)
398 struct anv_device
*device
= (struct anv_device
*) _device
;
399 struct anv_pipeline
*pipeline
;
400 const struct anv_common
*common
;
401 VkPipelineShaderStageCreateInfo
*shader_create_info
;
402 VkPipelineIaStateCreateInfo
*ia_info
= NULL
;
403 VkPipelineRsStateCreateInfo
*rs_info
= NULL
;
404 VkPipelineDsStateCreateInfo
*ds_info
= NULL
;
405 VkPipelineCbStateCreateInfo
*cb_info
= NULL
;
406 VkPipelineVertexInputStateCreateInfo
*vi_info
= NULL
;
408 uint32_t offset
, length
;
410 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO
);
412 pipeline
= anv_device_alloc(device
, sizeof(*pipeline
), 8,
413 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
414 if (pipeline
== NULL
)
415 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
417 pipeline
->base
.destructor
= anv_pipeline_destroy
;
418 pipeline
->device
= device
;
419 pipeline
->layout
= (struct anv_pipeline_layout
*) pCreateInfo
->layout
;
420 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
422 result
= anv_reloc_list_init(&pipeline
->batch
.relocs
, device
);
423 if (result
!= VK_SUCCESS
) {
424 anv_device_free(device
, pipeline
);
427 pipeline
->batch
.next
= pipeline
->batch
.start
= pipeline
->batch_data
;
428 pipeline
->batch
.end
= pipeline
->batch
.start
+ sizeof(pipeline
->batch_data
);
430 anv_state_stream_init(&pipeline
->program_stream
,
431 &device
->instruction_block_pool
);
433 for (common
= pCreateInfo
->pNext
; common
; common
= common
->pNext
) {
434 switch (common
->sType
) {
435 case VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO
:
436 vi_info
= (VkPipelineVertexInputStateCreateInfo
*) common
;
438 case VK_STRUCTURE_TYPE_PIPELINE_IA_STATE_CREATE_INFO
:
439 ia_info
= (VkPipelineIaStateCreateInfo
*) common
;
441 case VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO
:
442 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_TESS_STATE_CREATE_INFO");
444 case VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO
:
445 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_VP_STATE_CREATE_INFO");
447 case VK_STRUCTURE_TYPE_PIPELINE_RS_STATE_CREATE_INFO
:
448 rs_info
= (VkPipelineRsStateCreateInfo
*) common
;
450 case VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO
:
451 anv_finishme("VK_STRUCTURE_TYPE_PIPELINE_MS_STATE_CREATE_INFO");
453 case VK_STRUCTURE_TYPE_PIPELINE_CB_STATE_CREATE_INFO
:
454 cb_info
= (VkPipelineCbStateCreateInfo
*) common
;
456 case VK_STRUCTURE_TYPE_PIPELINE_DS_STATE_CREATE_INFO
:
457 ds_info
= (VkPipelineDsStateCreateInfo
*) common
;
459 case VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO
:
460 shader_create_info
= (VkPipelineShaderStageCreateInfo
*) common
;
461 pipeline
->shaders
[shader_create_info
->shader
.stage
] =
462 (struct anv_shader
*) shader_create_info
->shader
.shader
;
469 pipeline
->use_repclear
= extra
&& extra
->use_repclear
;
471 anv_compiler_run(device
->compiler
, pipeline
);
473 /* FIXME: The compiler dead-codes FS inputs when we don't have a VS, so we
474 * hard code this to num_attributes - 2. This is because the attributes
475 * include VUE header and position, which aren't counted as varying
477 if (pipeline
->vs_simd8
== NO_KERNEL
)
478 pipeline
->wm_prog_data
.num_varying_inputs
= vi_info
->attributeCount
- 2;
481 emit_vertex_input(pipeline
, vi_info
);
483 emit_ia_state(pipeline
, ia_info
, extra
);
485 emit_rs_state(pipeline
, rs_info
, extra
);
486 emit_ds_state(pipeline
, ds_info
);
487 emit_cb_state(pipeline
, cb_info
);
489 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VF_STATISTICS
,
490 .StatisticsEnable
= true);
491 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_HS
, .Enable
= false);
492 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_TE
, .TEEnable
= false);
493 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_DS
, .FunctionEnable
= false);
494 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_STREAMOUT
, .SOFunctionEnable
= false);
496 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_VS
,
497 .ConstantBufferOffset
= 0,
498 .ConstantBufferSize
= 4);
499 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_GS
,
500 .ConstantBufferOffset
= 4,
501 .ConstantBufferSize
= 4);
502 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PUSH_CONSTANT_ALLOC_PS
,
503 .ConstantBufferOffset
= 8,
504 .ConstantBufferSize
= 4);
506 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_WM_CHROMAKEY
,
507 .ChromaKeyKillEnable
= false);
508 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_SBE_SWIZ
);
509 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_AA_LINE_PARAMETERS
);
511 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_CLIP
,
513 .ViewportXYClipTestEnable
= !(extra
&& extra
->disable_viewport
),
514 .MinimumPointWidth
= 0.125,
515 .MaximumPointWidth
= 255.875);
517 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_WM
,
518 .StatisticsEnable
= true,
519 .LineEndCapAntialiasingRegionWidth
= _05pixels
,
520 .LineAntialiasingRegionWidth
= _10pixels
,
521 .EarlyDepthStencilControl
= NORMAL
,
522 .ForceThreadDispatchEnable
= NORMAL
,
523 .PointRasterizationRule
= RASTRULE_UPPER_RIGHT
,
524 .BarycentricInterpolationMode
=
525 pipeline
->wm_prog_data
.barycentric_interp_modes
);
527 uint32_t samples
= 1;
528 uint32_t log2_samples
= __builtin_ffs(samples
) - 1;
529 bool enable_sampling
= samples
> 1 ? true : false;
531 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_MULTISAMPLE
,
532 .PixelPositionOffsetEnable
= enable_sampling
,
533 .PixelLocation
= CENTER
,
534 .NumberofMultisamples
= log2_samples
);
536 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_SAMPLE_MASK
,
537 .SampleMask
= 0xffff);
539 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_VS
,
540 .VSURBStartingAddress
= pipeline
->urb
.vs_start
,
541 .VSURBEntryAllocationSize
= pipeline
->urb
.vs_size
- 1,
542 .VSNumberofURBEntries
= pipeline
->urb
.nr_vs_entries
);
544 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_GS
,
545 .GSURBStartingAddress
= pipeline
->urb
.gs_start
,
546 .GSURBEntryAllocationSize
= pipeline
->urb
.gs_size
- 1,
547 .GSNumberofURBEntries
= pipeline
->urb
.nr_gs_entries
);
549 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_HS
,
550 .HSURBStartingAddress
= pipeline
->urb
.vs_start
,
551 .HSURBEntryAllocationSize
= 0,
552 .HSNumberofURBEntries
= 0);
554 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_URB_DS
,
555 .DSURBStartingAddress
= pipeline
->urb
.vs_start
,
556 .DSURBEntryAllocationSize
= 0,
557 .DSNumberofURBEntries
= 0);
559 const struct brw_gs_prog_data
*gs_prog_data
= &pipeline
->gs_prog_data
;
561 length
= (gs_prog_data
->base
.vue_map
.num_slots
+ 1) / 2 - offset
;
563 if (pipeline
->gs_vec4
== NO_KERNEL
)
564 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
, .Enable
= false);
566 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_GS
,
567 .SingleProgramFlow
= false,
568 .KernelStartPointer
= pipeline
->gs_vec4
,
569 .VectorMaskEnable
= Vmask
,
571 .BindingTableEntryCount
= 0,
572 .ExpectedVertexCount
= pipeline
->gs_vertex_count
,
574 .ScratchSpaceBasePointer
= pipeline
->scratch_start
[VK_SHADER_STAGE_GEOMETRY
],
575 .PerThreadScratchSpace
= ffs(gs_prog_data
->base
.base
.total_scratch
/ 2048),
577 .OutputVertexSize
= gs_prog_data
->output_vertex_size_hwords
* 2 - 1,
578 .OutputTopology
= gs_prog_data
->output_topology
,
579 .VertexURBEntryReadLength
= gs_prog_data
->base
.urb_read_length
,
580 .DispatchGRFStartRegisterForURBData
=
581 gs_prog_data
->base
.base
.dispatch_grf_start_reg
,
583 .MaximumNumberofThreads
= device
->info
.max_gs_threads
,
584 .ControlDataHeaderSize
= gs_prog_data
->control_data_header_size_hwords
,
585 //pipeline->gs_prog_data.dispatch_mode |
586 .StatisticsEnable
= true,
587 .IncludePrimitiveID
= gs_prog_data
->include_primitive_id
,
588 .ReorderMode
= TRAILING
,
591 .ControlDataFormat
= gs_prog_data
->control_data_format
,
593 /* FIXME: mesa sets this based on ctx->Transform.ClipPlanesEnabled:
594 * UserClipDistanceClipTestEnableBitmask_3DSTATE_GS(v)
595 * UserClipDistanceCullTestEnableBitmask(v)
598 .VertexURBEntryOutputReadOffset
= offset
,
599 .VertexURBEntryOutputLength
= length
);
601 const struct brw_vue_prog_data
*vue_prog_data
= &pipeline
->vs_prog_data
.base
;
602 /* Skip the VUE header and position slots */
604 length
= (vue_prog_data
->vue_map
.num_slots
+ 1) / 2 - offset
;
606 if (pipeline
->vs_simd8
== NO_KERNEL
|| (extra
&& extra
->disable_vs
))
607 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
608 .FunctionEnable
= false,
609 .VertexURBEntryOutputReadOffset
= 1,
610 /* Even if VS is disabled, SBE still gets the amount of
611 * vertex data to read from this field. We use attribute
612 * count - 1, as we don't count the VUE header here. */
613 .VertexURBEntryOutputLength
=
614 DIV_ROUND_UP(vi_info
->attributeCount
- 1, 2));
616 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_VS
,
617 .KernelStartPointer
= pipeline
->vs_simd8
,
618 .SingleVertexDispatch
= Multiple
,
619 .VectorMaskEnable
= Dmask
,
621 .BindingTableEntryCount
=
622 vue_prog_data
->base
.binding_table
.size_bytes
/ 4,
623 .ThreadDispatchPriority
= Normal
,
624 .FloatingPointMode
= IEEE754
,
625 .IllegalOpcodeExceptionEnable
= false,
626 .AccessesUAV
= false,
627 .SoftwareExceptionEnable
= false,
629 .ScratchSpaceBasePointer
= pipeline
->scratch_start
[VK_SHADER_STAGE_VERTEX
],
630 .PerThreadScratchSpace
= ffs(vue_prog_data
->base
.total_scratch
/ 2048),
632 .DispatchGRFStartRegisterForURBData
=
633 vue_prog_data
->base
.dispatch_grf_start_reg
,
634 .VertexURBEntryReadLength
= vue_prog_data
->urb_read_length
,
635 .VertexURBEntryReadOffset
= 0,
637 .MaximumNumberofThreads
= device
->info
.max_vs_threads
- 1,
638 .StatisticsEnable
= false,
639 .SIMD8DispatchEnable
= true,
640 .VertexCacheDisable
= false,
641 .FunctionEnable
= true,
643 .VertexURBEntryOutputReadOffset
= offset
,
644 .VertexURBEntryOutputLength
= length
,
645 .UserClipDistanceClipTestEnableBitmask
= 0,
646 .UserClipDistanceCullTestEnableBitmask
= 0);
648 const struct brw_wm_prog_data
*wm_prog_data
= &pipeline
->wm_prog_data
;
649 uint32_t ksp0
, ksp2
, grf_start0
, grf_start2
;
653 if (pipeline
->ps_simd8
!= NO_KERNEL
) {
654 ksp0
= pipeline
->ps_simd8
;
655 grf_start0
= wm_prog_data
->base
.dispatch_grf_start_reg
;
656 if (pipeline
->ps_simd16
!= NO_KERNEL
) {
657 ksp2
= pipeline
->ps_simd16
;
658 grf_start2
= wm_prog_data
->dispatch_grf_start_reg_16
;
660 } else if (pipeline
->ps_simd16
!= NO_KERNEL
) {
661 ksp0
= pipeline
->ps_simd16
;
662 grf_start0
= wm_prog_data
->dispatch_grf_start_reg_16
;
664 unreachable("no ps shader");
667 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS
,
668 .KernelStartPointer0
= ksp0
,
670 .SingleProgramFlow
= false,
671 .VectorMaskEnable
= true,
674 .ScratchSpaceBasePointer
= pipeline
->scratch_start
[VK_SHADER_STAGE_FRAGMENT
],
675 .PerThreadScratchSpace
= ffs(wm_prog_data
->base
.total_scratch
/ 2048),
677 .MaximumNumberofThreadsPerPSD
= 64 - 2,
678 .PositionXYOffsetSelect
= wm_prog_data
->uses_pos_offset
?
679 POSOFFSET_SAMPLE
: POSOFFSET_NONE
,
680 .PushConstantEnable
= wm_prog_data
->base
.nr_params
> 0,
681 ._8PixelDispatchEnable
= pipeline
->ps_simd8
!= NO_KERNEL
,
682 ._16PixelDispatchEnable
= pipeline
->ps_simd16
!= NO_KERNEL
,
683 ._32PixelDispatchEnable
= false,
685 .DispatchGRFStartRegisterForConstantSetupData0
= grf_start0
,
686 .DispatchGRFStartRegisterForConstantSetupData1
= 0,
687 .DispatchGRFStartRegisterForConstantSetupData2
= grf_start2
,
689 .KernelStartPointer1
= 0,
690 .KernelStartPointer2
= ksp2
);
692 bool per_sample_ps
= false;
693 anv_batch_emit(&pipeline
->batch
, GEN8_3DSTATE_PS_EXTRA
,
694 .PixelShaderValid
= true,
695 .PixelShaderKillsPixel
= wm_prog_data
->uses_kill
,
696 .PixelShaderComputedDepthMode
= wm_prog_data
->computed_depth_mode
,
697 .AttributeEnable
= wm_prog_data
->num_varying_inputs
> 0,
698 .oMaskPresenttoRenderTarget
= wm_prog_data
->uses_omask
,
699 .PixelShaderIsPerSample
= per_sample_ps
);
701 *pPipeline
= (VkPipeline
) pipeline
;
706 VkResult
anv_CreateGraphicsPipelineDerivative(
708 const VkGraphicsPipelineCreateInfo
* pCreateInfo
,
709 VkPipeline basePipeline
,
710 VkPipeline
* pPipeline
)
712 stub_return(VK_UNSUPPORTED
);
715 VkResult
anv_CreateComputePipeline(
717 const VkComputePipelineCreateInfo
* pCreateInfo
,
718 VkPipeline
* pPipeline
)
720 struct anv_device
*device
= (struct anv_device
*) _device
;
721 struct anv_pipeline
*pipeline
;
724 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO
);
726 pipeline
= anv_device_alloc(device
, sizeof(*pipeline
), 8,
727 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
728 if (pipeline
== NULL
)
729 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
731 pipeline
->base
.destructor
= anv_pipeline_destroy
;
732 pipeline
->device
= device
;
733 pipeline
->layout
= (struct anv_pipeline_layout
*) pCreateInfo
->layout
;
735 result
= anv_reloc_list_init(&pipeline
->batch
.relocs
, device
);
736 if (result
!= VK_SUCCESS
) {
737 anv_device_free(device
, pipeline
);
740 pipeline
->batch
.next
= pipeline
->batch
.start
= pipeline
->batch_data
;
741 pipeline
->batch
.end
= pipeline
->batch
.start
+ sizeof(pipeline
->batch_data
);
743 anv_state_stream_init(&pipeline
->program_stream
,
744 &device
->instruction_block_pool
);
746 memset(pipeline
->shaders
, 0, sizeof(pipeline
->shaders
));
748 pipeline
->shaders
[VK_SHADER_STAGE_COMPUTE
] =
749 (struct anv_shader
*) pCreateInfo
->cs
.shader
;
751 pipeline
->use_repclear
= false;
753 anv_compiler_run(device
->compiler
, pipeline
);
755 const struct brw_cs_prog_data
*cs_prog_data
= &pipeline
->cs_prog_data
;
757 anv_batch_emit(&pipeline
->batch
, GEN8_MEDIA_VFE_STATE
,
758 .ScratchSpaceBasePointer
= pipeline
->scratch_start
[VK_SHADER_STAGE_FRAGMENT
],
759 .PerThreadScratchSpace
= ffs(cs_prog_data
->base
.total_scratch
/ 2048),
760 .ScratchSpaceBasePointerHigh
= 0,
763 .MaximumNumberofThreads
= device
->info
.max_cs_threads
- 1,
764 .NumberofURBEntries
= 2,
765 .ResetGatewayTimer
= true,
766 .BypassGatewayControl
= true,
767 .URBEntryAllocationSize
= 2,
768 .CURBEAllocationSize
= 0);
770 struct brw_cs_prog_data
*prog_data
= &pipeline
->cs_prog_data
;
771 uint32_t group_size
= prog_data
->local_size
[0] *
772 prog_data
->local_size
[1] * prog_data
->local_size
[2];
773 pipeline
->cs_thread_width_max
= DIV_ROUND_UP(group_size
, prog_data
->simd_size
);
774 uint32_t remainder
= group_size
& (prog_data
->simd_size
- 1);
777 pipeline
->cs_right_mask
= ~0u >> (32 - remainder
);
779 pipeline
->cs_right_mask
= ~0u >> (32 - prog_data
->simd_size
);
782 *pPipeline
= (VkPipeline
) pipeline
;
787 VkResult
anv_StorePipeline(
793 stub_return(VK_UNSUPPORTED
);
796 VkResult
anv_LoadPipeline(
800 VkPipeline
* pPipeline
)
802 stub_return(VK_UNSUPPORTED
);
805 VkResult
anv_LoadPipelineDerivative(
809 VkPipeline basePipeline
,
810 VkPipeline
* pPipeline
)
812 stub_return(VK_UNSUPPORTED
);
815 // Pipeline layout functions
817 VkResult
anv_CreatePipelineLayout(
819 const VkPipelineLayoutCreateInfo
* pCreateInfo
,
820 VkPipelineLayout
* pPipelineLayout
)
822 struct anv_device
*device
= (struct anv_device
*) _device
;
823 struct anv_pipeline_layout
*layout
;
825 assert(pCreateInfo
->sType
== VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO
);
827 layout
= anv_device_alloc(device
, sizeof(*layout
), 8,
828 VK_SYSTEM_ALLOC_TYPE_API_OBJECT
);
830 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY
);
832 layout
->num_sets
= pCreateInfo
->descriptorSetCount
;
834 uint32_t surface_start
[VK_SHADER_STAGE_NUM
] = { 0, };
835 uint32_t sampler_start
[VK_SHADER_STAGE_NUM
] = { 0, };
837 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
838 layout
->stage
[s
].surface_count
= 0;
839 layout
->stage
[s
].sampler_count
= 0;
842 for (uint32_t i
= 0; i
< pCreateInfo
->descriptorSetCount
; i
++) {
843 struct anv_descriptor_set_layout
*set_layout
=
844 (struct anv_descriptor_set_layout
*) pCreateInfo
->pSetLayouts
[i
];
846 layout
->set
[i
].layout
= set_layout
;
847 for (uint32_t s
= 0; s
< VK_SHADER_STAGE_NUM
; s
++) {
848 layout
->set
[i
].surface_start
[s
] = surface_start
[s
];
849 surface_start
[s
] += set_layout
->stage
[s
].surface_count
;
850 layout
->set
[i
].sampler_start
[s
] = sampler_start
[s
];
851 sampler_start
[s
] += set_layout
->stage
[s
].sampler_count
;
853 layout
->stage
[s
].surface_count
+= set_layout
->stage
[s
].surface_count
;
854 layout
->stage
[s
].sampler_count
+= set_layout
->stage
[s
].sampler_count
;
858 *pPipelineLayout
= (VkPipelineLayout
) layout
;